mirror of https://github.com/frappe/bench.git synced 2024-09-23 04:29:02 +00:00

[merge] remove os_packages

Rushabh Mehta 2016-03-14 12:26:48 +05:30
commit 2c97b35279
75 changed files with 1776 additions and 104 deletions

.travis.yml (new file)

@ -0,0 +1,20 @@
language: python
dist: trusty
sudo: required
python:
- "2.7"
install:
- sudo apt-get purge -y mysql-common mysql-server mysql-client
- sudo python $TRAVIS_BUILD_DIR/installer/install.py --user travis --skip-bench-setup
# - sudo bash $TRAVIS_BUILD_DIR/install_scripts/setup_frappe.sh --skip-install-bench --mysql-root-password travis
- mkdir -p ~/bench-repo
- cp -r $TRAVIS_BUILD_DIR/* ~/bench-repo/
- cd ~ && sudo python bench-repo/installer/install.py --only-dependencies
script:
- cd ~
- sudo pip install --upgrade pip
- sudo pip install -e bench-repo
- sudo python -m unittest bench.tests.test_setup_production.TestSetupProduction.test_setup_production


@ -42,7 +42,7 @@ def write_appstxt(apps, bench='.'):
def get_app(app, git_url, branch=None, bench='.', build_asset_files=True, verbose=False):
logger.info('getting app {}'.format(app))
shallow_clone = '--depth 1' if check_git_for_shallow_clone() and get_config().get('shallow_clone') else ''
shallow_clone = '--depth 1' if check_git_for_shallow_clone() and get_config(bench).get('shallow_clone') else ''
branch = '--branch {branch}'.format(branch=branch) if branch else ''
exec_cmd("git clone {git_url} {branch} {shallow_clone} --origin upstream {app}".format(
git_url=git_url,
@ -54,7 +54,7 @@ def get_app(app, git_url, branch=None, bench='.', build_asset_files=True, verbos
install_app(app, bench=bench, verbose=verbose)
if build_asset_files:
build_assets(bench=bench)
conf = get_config()
conf = get_config(bench=bench)
if conf.get('restart_supervisor_on_update'):
restart_supervisor_processes(bench=bench)
@ -81,7 +81,7 @@ def install_app(app, bench='.', verbose=False):
add_to_appstxt(app, bench=bench)
def pull_all_apps(bench='.'):
rebase = '--rebase' if get_config().get('rebase_on_pull') else ''
rebase = '--rebase' if get_config(bench).get('rebase_on_pull') else ''
for app in get_apps(bench=bench):
app_dir = get_repo_dir(app, bench=bench)


@ -13,12 +13,13 @@ from .utils import set_default_site as _set_default_site
from .utils import (build_assets, patch_sites, exec_cmd, update_bench, get_env_cmd, get_frappe, setup_logging,
get_config, update_config, restart_supervisor_processes, put_config, default_config, update_requirements,
backup_all_sites, backup_site, get_sites, prime_wheel_cache, is_root, set_mariadb_host, drop_privileges,
fix_file_perms, fix_prod_setup_perms, set_ssl_certificate, set_ssl_certificate_key, get_cmd_output, post_upgrade,
fix_file_perms, fix_prod_setup_perms, set_ssl_certificate, set_ssl_certificate_key,
get_cmd_output, post_upgrade, get_bench_name,
pre_upgrade, validate_upgrade, PatchError, download_translations_p, setup_socketio, before_update)
from .app import get_app as _get_app
from .app import new_app as _new_app
from .app import pull_all_apps, get_apps, get_current_frappe_version, is_version_upgrade, switch_to_v4, switch_to_v5, switch_to_master, switch_to_develop
from .config import generate_nginx_config, generate_supervisor_config, generate_redis_cache_config, generate_redis_async_broker_config
from .config import generate_nginx_config, generate_supervisor_config, generate_redis_cache_config, generate_redis_async_broker_config, generate_redis_celery_broker_config
from .production_setup import setup_production as _setup_production
from .migrate_to_v5 import migrate_to_v5
import os
@ -74,7 +75,7 @@ def check_uid():
def change_uid():
if is_root() and not cmd_requires_root():
frappe_user = get_config().get('frappe_user')
frappe_user = get_config(".").get('frappe_user')
if frappe_user:
drop_privileges(uid_name=frappe_user, gid_name=frappe_user)
os.environ['HOME'] = pwd.getpwnam(frappe_user).pw_dir
@ -206,7 +207,7 @@ def _update(pull=False, patch=False, build=False, bench=False, auto=False, resta
if not (pull or patch or build or bench or requirements):
pull, patch, build, bench, requirements = True, True, True, True, True
conf = get_config()
conf = get_config(".")
version_upgrade = is_version_upgrade()
@ -449,6 +450,11 @@ def setup_redis_async_broker():
"generate config for redis async broker"
generate_redis_async_broker_config()
@click.command('redis-celery-broker')
def setup_redis_celery_broker():
"generate config for redis celery broker"
generate_redis_celery_broker_config()
@click.command('production')
@click.argument('user')
def setup_production(user):
@ -496,6 +502,7 @@ setup.add_command(setup_sudoers)
setup.add_command(setup_supervisor)
setup.add_command(setup_redis_cache)
setup.add_command(setup_redis_async_broker)
setup.add_command(setup_redis_celery_broker)
setup.add_command(setup_auto_update)
setup.add_command(setup_dnsmasq)
setup.add_command(setup_backups)
@ -576,7 +583,8 @@ def patch():
def _fix_prod_perms():
"Fix permissions if supervisor processes were run as root"
if os.path.exists("config/supervisor.conf"):
exec_cmd("supervisorctl stop frappe:")
bench_name = get_bench_name(bench_path=".")
exec_cmd("supervisorctl stop {bench_name}-processes:".format(bench_name=bench_name))
fix_prod_setup_perms()


@ -3,9 +3,10 @@ import getpass
import json
import subprocess
import shutil
import socket
from distutils.spawn import find_executable
from jinja2 import Environment, PackageLoader
from .utils import get_sites, get_config, update_config, get_redis_version
from .utils import get_sites, get_config, update_config, get_redis_version, update_common_site_config, get_bench_name
env = Environment(loader=PackageLoader('bench', 'templates'), trim_blocks=True)
@ -29,7 +30,8 @@ def generate_supervisor_config(bench='.', user=None):
sites = get_sites(bench=bench)
if not user:
user = getpass.getuser()
config = get_config()
config = get_config(bench=bench)
config = template.render(**{
"bench_dir": bench_dir,
@ -40,7 +42,11 @@ def generate_supervisor_config(bench='.', user=None):
"node": find_executable('node') or find_executable('nodejs'),
"redis_cache_config": os.path.join(bench_dir, 'config', 'redis_cache.conf'),
"redis_async_broker_config": os.path.join(bench_dir, 'config', 'redis_async_broker.conf'),
"frappe_version": get_current_frappe_version()
"redis_celery_broker_config": os.path.join(bench_dir, 'config', 'redis_celery_broker.conf'),
"frappe_version": get_current_frappe_version(),
"webserver_port": config.get('webserver_port', 8000),
"gunicorn_workers": config.get('gunicorn_workers', 2),
"bench_name": get_bench_name(bench)
})
write_config_file(bench, 'supervisor.conf', config)
update_config({'restart_supervisor_on_update': True})
@ -49,6 +55,27 @@ def get_site_config(site, bench='.'):
with open(os.path.join(bench, 'sites', site, 'site_config.json')) as f:
return json.load(f)
def generate_common_site_config(bench='.'):
'''Generates the default common_site_config.json while a new bench is created'''
config = get_config(bench=bench)
common_site_config = {}
for bench_config_field, site_config_field in (
("redis_celery_broker_port", "celery_broker"),
("redis_async_broker_port", "async_redis_server"),
("redis_cache_port", "cache_redis_server")
):
port = config.get(bench_config_field)
if config.get(bench_config_field):
redis_url = "redis://localhost:{0}".format(port)
common_site_config[site_config_field] = redis_url
# TODO Optionally we need to add the host or domain name in case dns_multitenant is false
if common_site_config:
update_common_site_config(common_site_config, bench=bench)
def get_sites_with_config(bench='.'):
sites = get_sites(bench=bench)
ret = []
@ -68,8 +95,9 @@ def generate_nginx_config(bench='.'):
sites_dir = os.path.join(bench_dir, "sites")
sites = get_sites_with_config(bench=bench)
user = getpass.getuser()
config = get_config(bench)
if get_config().get('serve_default_site'):
if config.get('serve_default_site'):
try:
with open("sites/currentsite.txt") as f:
default_site = {'name': f.read().strip()}
@ -80,29 +108,56 @@ def generate_nginx_config(bench='.'):
config = template.render(**{
"sites_dir": sites_dir,
"http_timeout": get_config().get("http_timeout", 120),
"http_timeout": config.get("http_timeout", 120),
"default_site": default_site,
"dns_multitenant": get_config().get('dns_multitenant'),
"sites": sites
"dns_multitenant": config.get('dns_multitenant'),
"sites": sites,
"webserver_port": config.get('webserver_port', 8000),
"socketio_port": config.get('socketio_port', 3000),
"bench_name": get_bench_name(bench)
})
write_config_file(bench, 'nginx.conf', config)
def generate_redis_cache_config(bench='.'):
template = env.get_template('redis_cache.conf')
conf = {
"maxmemory": get_config().get('cache_maxmemory', '50'),
"port": get_config().get('redis_cache_port', '11311'),
"redis_version": get_redis_version()
}
config = template.render(**conf)
write_config_file(bench, 'redis_cache.conf', config)
def generate_redis_celery_broker_config(bench='.'):
"""Redis that is used for queueing celery tasks"""
_generate_redis_config(
template_name='redis_celery_broker.conf',
context={
"port": get_config(bench).get('redis_celery_broker_port', '11311'),
"bench_path": os.path.abspath(bench),
},
bench=bench
)
def generate_redis_async_broker_config(bench='.'):
template = env.get_template('redis_async_broker.conf')
conf = {
"port": get_config().get('redis_async_broker_port', '12311'),
"redis_version": get_redis_version()
}
config = template.render(**conf)
write_config_file(bench, 'redis_async_broker.conf', config)
"""Redis that is used to do pub/sub"""
_generate_redis_config(
template_name='redis_async_broker.conf',
context={
"port": get_config(bench).get('redis_async_broker_port', '12311'),
},
bench=bench
)
def generate_redis_cache_config(bench='.'):
"""Redis that is used and optimized for caching"""
config = get_config(bench=bench)
_generate_redis_config(
template_name='redis_cache.conf',
context={
"maxmemory": config.get('cache_maxmemory', '50'),
"port": config.get('redis_cache_port', '13311'),
"redis_version": get_redis_version(),
},
bench=bench
)
def _generate_redis_config(template_name, context, bench):
template = env.get_template(template_name)
if "pid_path" not in context:
context["pid_path"] = os.path.abspath(os.path.join(bench, "config", "pids"))
redis_config = template.render(**context)
write_config_file(bench, template_name, redis_config)


@ -62,15 +62,10 @@ def setup_production(user, bench='.'):
supervisor_conf_filename = 'frappe.conf'
links = (
(os.path.abspath(os.path.join(bench, 'config', 'nginx.conf')), '/etc/nginx/conf.d/frappe.conf'),
(os.path.abspath(os.path.join(bench, 'config', 'supervisor.conf')), os.path.join(get_supervisor_confdir(), supervisor_conf_filename)),
)
for src, dest in links:
if not os.path.exists(dest):
os.symlink(src, dest)
os.symlink(os.path.abspath(os.path.join(bench, 'config', 'supervisor.conf')), os.path.join(get_supervisor_confdir(), supervisor_conf_filename))
os.symlink(os.path.abspath(os.path.join(bench, 'config', 'nginx.conf')), '/etc/nginx/conf.d/frappe.conf')
exec_cmd('supervisorctl reload')
if os.environ.get('NO_SERVICE_RESTART'):
return


@ -173,7 +173,7 @@ def bump(repo, bump_type, develop='develop', master='master', remote='upstream')
print 'Released {tag} for {repo}'.format(tag=tag_name, repo=repo)
def release(repo, bump_type, develop, master):
if not get_config().get('release_bench'):
if not get_config(".").get('release_bench'):
print 'bench not configured to release'
sys.exit(1)
global github_username, github_password


@ -1,12 +1,9 @@
server_names_hash_bucket_size 64;
upstream frappe {
server 127.0.0.1:8000 fail_timeout=0;
upstream {{ bench_name }}-frappe {
server 127.0.0.1:{{ webserver_port }} fail_timeout=0;
}
upstream socketio-server {
server 127.0.0.1:3000 fail_timeout=0;
upstream {{ bench_name}}-socketio-server {
server 127.0.0.1:{{ socketio_port }} fail_timeout=0;
}
{% macro location_block(site, port=80, default=False, server_name=None, sites=None, dns_multitenant=False) -%}
@ -24,7 +21,7 @@ upstream socketio-server {
}
location /socket.io {
proxy_pass http://socketio-server;
proxy_pass http://{{ bench_name }}-socketio-server;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
@ -36,10 +33,10 @@ upstream socketio-server {
}
location / {
try_files /{{ "$host" if dns_multitenant else site.name }}/public/$uri @magic;
try_files /{{ "$host" if dns_multitenant else site.name }}/public/$uri @webserver;
}
location @magic {
location @webserver {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
{% if not dns_multitenant %}
@ -47,9 +44,9 @@ upstream socketio-server {
{% endif %}
proxy_set_header Host $host;
proxy_set_header X-Use-X-Accel-Redirect True;
proxy_read_timeout {{http_timeout}};
proxy_read_timeout {{ http_timeout }};
proxy_redirect off;
proxy_pass http://frappe;
proxy_pass http://{{ bench_name }}-frappe;
}
{%- endmacro %}


@ -33,6 +33,8 @@ http {
#keepalive_timeout 0;
keepalive_timeout 65;
server_names_hash_bucket_size 64;
#gzip on;
index index.html index.htm;
@ -41,4 +43,4 @@ http {
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
}
}


@ -1,3 +1,4 @@
dbfilename redis_async_broker.rdb
pidfile redis_async_broker.pid
port {{port}}
dir {{pid_path}}
pidfile {{pid_path}}/redis_async_broker.pid
port {{port}}


@ -1,7 +1,8 @@
dbfilename redis_cache_dump.rdb
pidfile redis_cache.pid
dir {{pid_path}}
pidfile {{pid_path}}/redis_cache.pid
port {{port}}
maxmemory {{maxmemory}}mb
maxmemory-policy allkeys-lru
save ""
appendonly no
appendonly no


@ -0,0 +1,4 @@
dbfilename redis_celery_broker.rdb
dir {{pid_path}}
pidfile {{pid_path}}/redis_celery_broker.pid
port {{port}}


@ -2,8 +2,8 @@
; priority=1 --> Lower priorities indicate programs that start first and shut down last
; killasgroup=true --> send kill signal to child processes too
[program:frappe-web]
command={{ bench_dir }}/env/bin/gunicorn -b 127.0.0.1:8000 -w 2 -t {{http_timeout}} frappe.app:application
[program:{{ bench_name }}-frappe-web]
command={{ bench_dir }}/env/bin/gunicorn -b 127.0.0.1:{{ webserver_port }} -w {{ gunicorn_workers }} -t {{ http_timeout }} frappe.app:application --preload
priority=4
autostart=true
autorestart=true
@ -12,7 +12,7 @@ stderr_logfile={{ bench_dir }}/logs/web.error.log
user={{ user }}
directory={{ sites_dir }}
[program:frappe-worker]
[program:{{ bench_name }}-frappe-worker]
command={{ bench_dir }}/env/bin/python -m frappe.celery_app worker -n jobs@%%h --soft-time-limit 360 --time-limit 390 --loglevel INFO -Ofair
priority=4
autostart=true
@ -24,7 +24,7 @@ stopwaitsecs=400
directory={{ sites_dir }}
killasgroup=true
[program:frappe-longjob-worker]
[program:{{ bench_name }}-frappe-longjob-worker]
command={{ bench_dir }}/env/bin/python -m frappe.celery_app worker -n longjobs@%%h --soft-time-limit 1500 --time-limit 1530 --loglevel INFO
priority=2
autostart=true
@ -36,7 +36,7 @@ stopwaitsecs=1540
directory={{ sites_dir }}
killasgroup=true
[program:frappe-async-worker]
[program:{{ bench_name }}-frappe-async-worker]
command={{ bench_dir }}/env/bin/python -m frappe.celery_app worker -n async@%%h --soft-time-limit 1500 --time-limit 1530 --loglevel INFO
priority=2
autostart=true
@ -48,7 +48,7 @@ stopwaitsecs=1540
directory={{ sites_dir }}
killasgroup=true
[program:frappe-workerbeat]
[program:{{ bench_name }}-frappe-workerbeat]
command={{ bench_dir }}/env/bin/python -m frappe.celery_app beat -s beat.schedule
priority=3
autostart=true
@ -58,9 +58,7 @@ stderr_logfile={{ bench_dir }}/logs/workerbeat.error.log
user={{ user }}
directory={{ sites_dir }}
{% if frappe_version > 4%}
[program:redis-cache]
[program:{{ bench_name }}-redis-cache]
command={{ redis_server }} {{ redis_cache_config }}
priority=1
autostart=true
@ -69,10 +67,19 @@ stdout_logfile={{ bench_dir }}/logs/redis-cache.log
stderr_logfile={{ bench_dir }}/logs/redis-cache.error.log
user={{ user }}
directory={{ sites_dir }}
{% endif %}
{% if frappe_version > 5%}
[program:redis-async-broker]
[program:{{ bench_name }}-redis-celery-broker]
command={{ redis_server }} {{ redis_celery_broker_config }}
priority=1
autostart=true
autorestart=true
stdout_logfile={{ bench_dir }}/logs/redis-celery-broker.log
stderr_logfile={{ bench_dir }}/logs/redis-celery-broker.error.log
user={{ user }}
directory={{ sites_dir }}
{% if frappe_version > 5 %}
[program:{{ bench_name }}-redis-async-broker]
command={{ redis_server }} {{ redis_async_broker_config }}
priority=1
autostart=true
@ -83,7 +90,7 @@ user={{ user }}
directory={{ sites_dir }}
{% if node %}
[program:node-socketio]
[program:{{ bench_name }}-node-socketio]
command={{ node }} {{ bench_dir }}/apps/frappe/socketio.js
priority=4
autostart=true
@ -91,10 +98,13 @@ autorestart=true
stdout_logfile={{ bench_dir }}/logs/node-socketio.log
stderr_logfile={{ bench_dir }}/logs/node-socketio.error.log
user={{ user }}
directory={{ sites_dir }}
directory={{ bench_dir }}
{% endif %}
{% endif %}
[group:frappe]
programs=frappe-web,frappe-worker,frappe-workerbeat
[group:{{ bench_name }}-processes]
programs={{ bench_name }}-frappe-web,{{ bench_name }}-frappe-worker,{{ bench_name }}-frappe-longjob-worker,{{ bench_name }}-frappe-async-worker,{{ bench_name }}-frappe-workerbeat {%- if node -%} ,{{ bench_name }}-node-socketio {%- endif%}
[group:{{ bench_name }}-redis]
programs={{ bench_name }}-redis-cache,{{ bench_name }}-redis-celery-broker {%- if frappe_version > 5 -%} ,{{ bench_name }}-redis-async-broker {%- endif %}

bench/tests/__init__.py (new, empty file)

bench/tests/test_init.py (new file)

@ -0,0 +1,138 @@
from __future__ import unicode_literals
import unittest
import bench
import bench.utils
import json
import os
import shutil
class TestBenchInit(unittest.TestCase):
def setUp(self):
self.benches_path = "."
self.benches = []
def tearDown(self):
for bench_name in self.benches:
bench_path = os.path.join(self.benches_path, bench_name)
if os.path.exists(bench_path):
shutil.rmtree(bench_path, ignore_errors=True)
def test_init(self, bench_name="test-bench"):
self.init_bench(bench_name)
self.assert_folders(bench_name)
self.assert_virtual_env(bench_name)
self.assert_bench_config(bench_name)
self.assert_config(bench_name)
self.assert_socketio(bench_name)
def test_multiple_benches(self):
# 1st bench
self.test_init("test-bench-1")
self.assert_ports("test-bench-1", {
"webserver_port": 8000,
"socketio_port": 9000,
"redis_celery_broker_port": 11000,
"redis_async_broker_port": 12000,
"redis_cache_port": 13000
})
self.assert_common_site_config("test-bench-1", {
"celery_broker": "redis://localhost:11000",
"async_redis_server": "redis://localhost:12000",
"cache_redis_server": "redis://localhost:13000"
})
# 2nd bench
self.test_init("test-bench-2")
self.assert_ports("test-bench-2", {
"webserver_port": 8001,
"socketio_port": 9001,
"redis_celery_broker_port": 11001,
"redis_async_broker_port": 12001,
"redis_cache_port": 13001
})
self.assert_common_site_config("test-bench-2", {
"celery_broker": "redis://localhost:11001",
"async_redis_server": "redis://localhost:12001",
"cache_redis_server": "redis://localhost:13001"
})
def init_bench(self, bench_name):
self.benches.append(bench_name)
bench.utils.init(bench_name)
def assert_folders(self, bench_name):
for folder in bench.utils.folders_in_bench:
self.assert_exists(bench_name, folder)
self.assert_exists(bench_name, "sites", "assets")
self.assert_exists(bench_name, "apps", "frappe")
self.assert_exists(bench_name, "apps", "frappe", "setup.py")
def assert_virtual_env(self, bench_name):
bench_path = os.path.abspath(bench_name)
python = os.path.join(bench_path, "env", "bin", "python")
python_path = bench.utils.get_cmd_output('{python} -c "import os; print os.path.dirname(os.__file__)"'.format(python=python))
# part of bench's virtualenv
self.assertTrue(python_path.startswith(bench_path))
self.assert_exists(python_path)
self.assert_exists(python_path, "site-packages")
self.assert_exists(python_path, "site-packages", "IPython")
self.assert_exists(python_path, "site-packages", "pip")
site_packages = os.listdir(os.path.join(python_path, "site-packages"))
self.assertTrue(any(package.startswith("MySQL_python-1.2.5") for package in site_packages))
def assert_bench_config(self, bench_name):
config_json = os.path.join(bench_name, "config.json")
self.assertTrue(os.path.exists(config_json))
config = self.load_json(config_json)
for key, value in bench.utils.default_config.items():
self.assertEquals(config.get(key), value)
def assert_config(self, bench_name):
for config, search_key in (
("redis_celery_broker.conf", "redis_celery_broker.rdb"),
("redis_async_broker.conf", "redis_async_broker.rdb"),
("redis_cache.conf", "redis_cache_dump.rdb")):
self.assert_exists(bench_name, "config", config)
with open(os.path.join(bench_name, "config", config), "r") as f:
f = f.read().decode("utf-8")
self.assertTrue(search_key in f)
def assert_socketio(self, bench_name):
self.assert_exists(bench_name, "node_modules")
self.assert_exists(bench_name, "node_modules", "socket.io")
def assert_ports(self, bench_name, ports):
config_path = os.path.join(bench_name, 'config.json')
config = self.load_json(config_path)
for key, port in ports.items():
self.assertEquals(config.get(key), port)
def assert_common_site_config(self, bench_name, expected_config):
common_site_config_path = os.path.join(bench_name, 'sites', 'common_site_config.json')
config = self.load_json(common_site_config_path)
for key, value in expected_config.items():
self.assertEquals(config.get(key), value)
def assert_exists(self, *args):
self.assertTrue(os.path.exists(os.path.join(*args)))
def load_json(self, path):
with open(path, "r") as f:
return json.loads(f.read().decode("utf-8"))


@ -0,0 +1,105 @@
from __future__ import unicode_literals
from bench.tests.test_init import TestBenchInit
from bench.production_setup import setup_production, get_supervisor_confdir
import bench.utils
import os
import getpass
import re
import unittest
import time
class TestSetupProduction(TestBenchInit):
# setUp, tearDown and other tests are defined in TestBenchInit
def test_setup_production(self):
self.test_multiple_benches()
user = getpass.getuser()
for bench_name in ("test-bench-1", "test-bench-2"):
setup_production(user, bench_name)
self.assert_nginx_config(bench_name)
self.assert_supervisor_config(bench_name)
# test after start of both benches
for bench_name in ("test-bench-1", "test-bench-2"):
self.assert_supervisor_process(bench_name)
self.assert_nginx_process()
def assert_nginx_config(self, bench_name):
conf_src = os.path.join(os.path.abspath(bench_name), 'config', 'nginx.conf')
conf_dest = "/etc/nginx/conf.d/{bench_name}.conf".format(bench_name=bench_name)
self.assertTrue(os.path.exists(conf_src))
self.assertTrue(os.path.exists(conf_dest))
# symlink matches
self.assertEquals(os.path.realpath(conf_dest), conf_src)
# file content
with open(conf_src, "r") as f:
f = f.read().decode("utf-8")
for key in (
"upstream {bench_name}-frappe",
"upstream {bench_name}-socketio-server"
):
self.assertTrue(key.format(bench_name=bench_name) in f)
def assert_supervisor_config(self, bench_name):
conf_src = os.path.join(os.path.abspath(bench_name), 'config', 'supervisor.conf')
supervisor_conf_dir = get_supervisor_confdir()
conf_dest = "{supervisor_conf_dir}/{bench_name}.conf".format(supervisor_conf_dir=supervisor_conf_dir, bench_name=bench_name)
self.assertTrue(os.path.exists(conf_src))
self.assertTrue(os.path.exists(conf_dest))
# symlink matches
self.assertEquals(os.path.realpath(conf_dest), conf_src)
# file content
with open(conf_src, "r") as f:
f = f.read().decode("utf-8")
for key in (
"program:{bench_name}-frappe-web",
"program:{bench_name}-frappe-worker",
"program:{bench_name}-frappe-longjob-worker",
"program:{bench_name}-frappe-async-worker",
"program:{bench_name}-frappe-workerbeat",
"program:{bench_name}-redis-cache",
"program:{bench_name}-redis-celery-broker",
"program:{bench_name}-redis-async-broker",
"program:{bench_name}-node-socketio",
"group:{bench_name}-processes",
"group:{bench_name}-redis"
):
self.assertTrue(key.format(bench_name=bench_name) in f)
def assert_supervisor_process(self, bench_name):
out = bench.utils.get_cmd_output("sudo supervisorctl status")
if "STARTING" in out:
time.sleep(10)
out = bench.utils.get_cmd_output("sudo supervisorctl status")
for key in (
"{bench_name}-processes:{bench_name}-frappe-web[\s]+RUNNING",
"{bench_name}-processes:{bench_name}-frappe-worker[\s]+RUNNING",
"{bench_name}-processes:{bench_name}-frappe-longjob-worker[\s]+RUNNING",
"{bench_name}-processes:{bench_name}-frappe-async-worker[\s]+RUNNING",
"{bench_name}-processes:{bench_name}-frappe-workerbeat[\s]+RUNNING",
"{bench_name}-processes:{bench_name}-node-socketio[\s]+RUNNING",
"{bench_name}-redis:{bench_name}-redis-cache[\s]+RUNNING",
"{bench_name}-redis:{bench_name}-redis-celery-broker[\s]+RUNNING",
"{bench_name}-redis:{bench_name}-redis-async-broker[\s]+RUNNING",
):
self.assertTrue(re.search(key.format(bench_name=bench_name), out))
def assert_nginx_process(self):
out = bench.utils.get_cmd_output("sudo nginx -t 2>&1")
self.assertTrue("nginx: configuration file /etc/nginx/nginx.conf test is successful" in out)


@ -30,9 +30,11 @@ default_config = {
'rebase_on_pull': False,
'update_bench_on_update': True,
'frappe_user': getpass.getuser(),
'shallow_clone': True
'shallow_clone': True,
}
folders_in_bench = ('apps', 'sites', 'config', 'logs', 'config/pids')
def get_frappe(bench='.'):
frappe = get_env_cmd('frappe', bench=bench)
if not os.path.exists(frappe):
@ -47,7 +49,7 @@ def init(path, apps_path=None, no_procfile=False, no_backups=False,
no_auto_update=False, frappe_path=None, frappe_branch=None, wheel_cache_dir=None,
verbose=False):
from .app import get_app, install_apps_from_path
from .config import generate_redis_cache_config, generate_redis_async_broker_config
from .config import generate_redis_cache_config, generate_redis_async_broker_config, generate_redis_celery_broker_config, generate_common_site_config
global FRAPPE_VERSION
if os.path.exists(path):
@ -56,34 +58,86 @@ def init(path, apps_path=None, no_procfile=False, no_backups=False,
# sys.exit(1)
os.mkdir(path)
for dirname in ('apps', 'sites', 'config', 'logs'):
for dirname in folders_in_bench:
os.mkdir(os.path.join(path, dirname))
setup_logging()
setup_env(bench=path)
put_config(default_config, bench=path)
# if wheel_cache_dir:
# update_config({"wheel_cache_dir":wheel_cache_dir}, bench=path)
# prime_wheel_cache(bench=path)
bench_config = make_bench_config()
put_config(bench_config, bench=path)
generate_common_site_config(bench=path)
if not frappe_path:
frappe_path = 'https://github.com/frappe/frappe.git'
get_app('frappe', frappe_path, branch=frappe_branch, bench=path, build_asset_files=False, verbose=verbose)
if apps_path:
install_apps_from_path(apps_path, bench=path)
FRAPPE_VERSION = get_current_frappe_version(bench=path)
if FRAPPE_VERSION > 5:
setup_socketio(bench=path)
build_assets(bench=path)
generate_redis_celery_broker_config(bench=path)
generate_redis_cache_config(bench=path)
generate_redis_async_broker_config(bench=path)
if not no_procfile:
setup_procfile(bench=path)
if not no_backups:
setup_backups(bench=path)
if not no_auto_update:
setup_auto_update(bench=path)
if apps_path:
install_apps_from_path(apps_path, bench=path)
FRAPPE_VERSION = get_current_frappe_version(bench=path)
if FRAPPE_VERSION > 5:
setup_socketio(bench=path)
build_assets(bench=path)
generate_redis_cache_config(bench=path)
generate_redis_async_broker_config(bench=path)
def make_bench_config():
bench_config = {}
bench_config.update(default_config)
bench_config.update(make_ports())
bench_config.update(get_gunicorn_workers())
return bench_config
def get_gunicorn_workers():
'''This function will return the maximum workers that can be started depending upon
number of cpu's present on the machine'''
return {
"gunicorn_workers": multiprocessing.cpu_count()
}
def make_ports(benches_path="."):
default_ports = {
"webserver_port": 8000,
"socketio_port": 9000,
"redis_celery_broker_port": 11000,
"redis_async_broker_port": 12000,
"redis_cache_port": 13000
}
# collect all existing ports
existing_ports = {}
for folder in os.listdir(benches_path):
bench = os.path.join(benches_path, folder)
if os.path.isdir(bench):
bench_config = get_config(bench)
for key in default_ports.keys():
value = bench_config.get(key)
if value:
existing_ports.setdefault(key, []).append(value)
# new port value = max of existing port value + 1
ports = {}
for key, value in default_ports.items():
existing_value = existing_ports.get(key, [])
if existing_value:
value = max(existing_value) + 1
ports[key] = value
return ports
def exec_cmd(cmd, cwd='.'):
from .cli import from_command_line
@ -239,7 +293,7 @@ def setup_logging(bench='.'):
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
def get_config(bench='.'):
def get_config(bench):
config_path = os.path.join(bench, 'config.json')
if not os.path.exists(config_path):
return {}
@ -302,7 +356,9 @@ def get_cmd_output(cmd, cwd='.'):
def restart_supervisor_processes(bench='.'):
conf = get_config(bench=bench)
cmd = conf.get('supervisor_restart_cmd', 'sudo supervisorctl restart frappe:')
bench_name = get_bench_name(bench)
cmd = conf.get('supervisor_restart_cmd',
'sudo supervisorctl restart {bench_name}-processes:'.format(bench_name=bench_name))
exec_cmd(cmd, cwd=bench)
def get_site_config(site, bench='.'):
@ -395,11 +451,16 @@ def update_common_site_config(ddict, bench='.'):
update_json_file(os.path.join(bench, 'sites', 'common_site_config.json'), ddict)
def update_json_file(filename, ddict):
with open(filename, 'r') as f:
content = json.load(f)
if os.path.exists(filename):
with open(filename, 'r') as f:
content = json.load(f)
else:
content = {}
content.update(ddict)
with open(filename, 'w') as f:
content = json.dump(content, f, indent=1)
content = json.dump(content, f, indent=1, sort_keys=True)
def drop_privileges(uid_name='nobody', gid_name='nogroup'):
# from http://stackoverflow.com/a/2699996
@ -421,7 +482,7 @@ def drop_privileges(uid_name='nobody', gid_name='nogroup'):
# Ensure a very conservative umask
os.umask(022)
def fix_prod_setup_perms(frappe_user=None):
def fix_prod_setup_perms(bench='.', frappe_user=None):
files = [
"logs/web.error.log",
"logs/web.log",
@ -434,7 +495,7 @@ def fix_prod_setup_perms(frappe_user=None):
]
if not frappe_user:
frappe_user = get_config().get('frappe_user')
frappe_user = get_config(bench).get('frappe_user')
if not frappe_user:
print "frappe user not set"
@ -658,3 +719,6 @@ def validate_pillow_dependencies(bench, requirements):
print "sudo apt-get install -y libtiff5-dev libjpeg8-dev zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev python-tk"
raise
def get_bench_name(bench_path):
return os.path.basename(os.path.abspath(bench_path))


@ -16,7 +16,7 @@ get_passwd() {
}
set_opts () {
OPTS=`getopt -o v --long verbose,mysql-root-password:,frappe-user:,bench-branch:,setup-production,skip-setup-bench,help -n 'parse-options' -- "$@"`
OPTS=`getopt -o v --long verbose,mysql-root-password:,frappe-user:,bench-branch:,setup-production,skip-install-bench,skip-setup-bench,help -n 'parse-options' -- "$@"`
if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
@ -27,6 +27,7 @@ set_opts () {
FRAPPE_USER=false
BENCH_BRANCH="master"
SETUP_PROD=false
INSTALL_BENCH=true
SETUP_BENCH=true
if [ -f ~/frappe_passwords.sh ]; then
@ -50,6 +51,7 @@ set_opts () {
--setup-production ) SETUP_PROD=true; shift;;
--bench-branch ) BENCH_BRANCH="$2"; shift;;
--skip-setup-bench ) SETUP_BENCH=false; shift;;
--skip-install-bench ) INSTALL_BENCH=false; shift;;
-- ) shift; break ;;
* ) break ;;
esac
@ -202,14 +204,12 @@ install_packages() {
elif [ $OS == "debian" ] || [ $OS == "Ubuntu" ]; then
export DEBIAN_FRONTEND=noninteractive
setup_debconf
if [ $OS == "debian" ]; then
run_cmd bash -c "curl -sL https://deb.nodesource.com/setup_0.12 | bash -"
fi
run_cmd bash -c "curl -sL https://deb.nodesource.com/setup_0.12 | sudo bash -"
run_cmd sudo apt-get update
run_cmd sudo apt-get install -y python-dev python-setuptools build-essential python-mysqldb git \
ntp vim screen htop mariadb-server mariadb-common libmariadbclient-dev \
libxslt1.1 libxslt1-dev redis-server libssl-dev libcrypto++-dev postfix nginx \
supervisor python-pip fontconfig libxrender1 libxext6 xfonts-75dpi xfonts-base nodejs npm
supervisor python-pip fontconfig libxrender1 libxext6 xfonts-75dpi xfonts-base nodejs
if [ $OS_VER == "precise" ]; then
run_cmd sudo apt-get install -y libtiff4-dev libjpeg8-dev zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.5-dev tk8.5-dev python-tk
@ -454,9 +454,12 @@ main() {
configure_mariadb
echo "Adding frappe user"
add_user
install_bench
if $SETUP_BENCH; then
setup_bench
if $INSTALL_BENCH; then
install_bench
if $SETUP_BENCH; then
setup_bench
fi
fi
echo

installer/__init__.py (new, empty file)

installer/install.py (new file)

@ -0,0 +1,204 @@
# wget setup_frappe.py | python
import os
import sys
import pwd
import stat
import subprocess
import string
from random import choice
from distutils.spawn import find_executable
from setuptools.command import easy_install as easy_install
def install_bench(args):
# pre-requisites for bench repo cloning
install_pip()
install_ansible()
install_git()
# which user to use for bench repo cloning
user_password = add_user(args.user)
# stop install
if args.skip_bench_setup:
return
# clone bench repo
cloned = clone_bench_repo(args.user)
# install pre-requisites
installed = install_prerequisites(args.user)
# install bench
# if cloned:
# install_bench_cmd(user)
def install_python27():
version = (sys.version_info[0], sys.version_info[1])
if version == (2, 7):
return
print "Installing Python 2.7"
# install python 2.7
success = run_os_command({
"apt-get": "sudo apt-get install -y python2.7",
"yum": "sudo yum install -y python27",
"brew": "brew install python"
})
if not success:
could_not_install("Python 2.7")
# replace current python with python2.7
os.execvp("python2.7", ([] if is_sudo_user() else ["sudo"]) + ["python2.7", __file__] + sys.argv[1:])
def install_git():
if find_executable("git"):
# git already installed
return
print "Installing Git"
success = run_os_command({
"apt-get": "sudo apt-get install -y git",
"yum": "sudo yum install -y git",
"brew": "brew install git"
})
if not success:
could_not_install("Git")
def add_user(user):
if user=="root":
raise Exception("--user cannot be root")
elif not user:
raise Exception("Please pass --user USER. For example: --user frappe")
user_password = None
try:
pwd.getpwnam(user)
except KeyError:
# user does not exist
success = run_os_command({
"adduser": "sudo adduser --create-home {user}".format(user=user)
})
if not success:
raise Exception("Could not create user {0}. Please add the user manually.".format(user))
user_password = get_random_string()
subprocess.check_call(["chpasswd", "{user}:{password}".format(user=user, password=user_password)])
finally:
# give read and execute rights to "Others" for the user's folder
user_folder = get_user_folder(user)
user_folder_stat = os.stat(user_folder)
os.chmod(user_folder, user_folder_stat.st_mode | stat.S_IROTH)
os.chmod(user_folder, user_folder_stat.st_mode | stat.S_IXOTH)
return user_password
def install_pip():
"""Install pip for the user or upgrade to latest version if already present"""
try:
import pip
except ImportError:
easy_install.main(['pip'])
def install_ansible():
try:
import ansible
except ImportError:
import pip
pip.main(["install", "ansible"])
def clone_bench_repo(user):
"""Clones the bench repository in the user folder"""
bench_repo = os.path.join(get_user_folder(user), 'bench-repo')
success = run_os_command(
{"git": "git clone https://github.com/frappe/bench {bench_repo}".format(bench_repo=bench_repo)}
)
return success
def install_dependencies():
"""Installs the pre-requisites like mariadb, nginx, redis etc. for the user"""
playbooks_path = get_playbooks_path()
for playbook in os.listdir(playbooks_path):
if playbook.endswith('.yml'):
success = run_playbook(os.path.join(playbooks_path, playbook))
return success
def run_os_command(command_map):
"""command_map is a dictionary of {"executable": command}. For ex. {"apt-get": "sudo apt-get install -y python2.7"} """
success = False
for executable, command in command_map.items():
if find_executable(executable):
returncode = subprocess.check_call(command.split())
success = ( returncode == 0 )
break
return success
def could_not_install(package):
raise Exception("Could not install {0}. Please install it manually.".format(package))
def is_sudo_user():
return os.geteuid() == 0
def get_user_folder(user):
return os.path.expanduser("~{user}".format(user=user))
def get_random_string(length=16):
"""generate a random string"""
return ''.join([choice(string.letters + string.digits) for i in range(length)])
def get_playbooks_path():
return os.path.abspath(os.path.join(os.getcwd(), 'bench-repo', 'installer', 'playbooks'))
def run_playbook(playbook_name):
success = subprocess.check_call("{sudo} ansible-playbook -c local {playbook_name}"
.format(playbook_name=playbook_name, sudo="sudo" if is_sudo_user() else "")
.split())
return success
def install_bench_cmd(user):
"""Installs bench using pip from the bench-repo"""
pass
def parse_commandline_args():
import argparse
parser = argparse.ArgumentParser(description='Frappe Installer')
parser.add_argument('--user', metavar='USER', dest='user', action='store',
help="System user which will be used to start various processes required by Frappe framework. If this user doesn't exist, it will be created.")
parser.add_argument('--skip-bench-setup', dest='skip_bench_setup', action='store_true', default=False,
help="Skip cloning and installation of bench.")
parser.add_argument('--only-dependencies', dest='only_dependencies', action='store_true', default=False,
help="Only install dependencies via ansible")
args = parser.parse_args()
return args
if __name__ == "__main__":
try:
import argparse
except ImportError:
# install python2.7
install_python27()
args = parse_commandline_args()
if args.only_dependencies:
install_dependencies()
else:
install_bench(args)


@ -0,0 +1,95 @@
---
- name: Install dependencies
hosts: localhost
become: yes
become_user: root
vars:
- mysql_conf_tpl: ../templates/mariadb_config.cnf
- nginx_conf_file: ../templates/nginx.conf
- mysql_secure_installation: True
roles:
- locale
- swap
- mariadb
- { role: epel, when: "ansible_os_family == 'RedHat'" }
- nginx
- logwatch
- fail2ban
- bash_screen_wall
- frappe_selinux
- dns_caching
- wkhtmltopdf
- ntpd
tasks:
- name: Set hostname
hostname: name='{{ hostname }}'
- name: Install the 'Development tools' package group (Redhat)
yum: name="@Development tools" state=present
when: ansible_os_family == 'RedHat'
- name: Install packages
yum: name={{ item }} state=present
with_items:
- bzip2-devel
- cronie
- freetype-devel
- git
- lcms2-devel
- libjpeg-devel
- libtiff-devel
- libwebp-devel
- libXext
- libXrender
- libzip-devel
- nodejs
- npm
- openssl-devel
- postfix
- python-devel
- python-pip
- redis
- screen
- sudo
- supervisor
- tcl-devel
- tk-devel
- vim
- which
- xorg-x11-fonts-75dpi
- xorg-x11-fonts-Type1
- zlib-devel
when: ansible_os_family == 'RedHat'
- name: Install packages
apt: pkg={{ item }} state=present force=yes
with_items:
- build-essential
- fontconfig
- git
- htop
- libcrypto++-dev
- libfreetype6-dev
- libjpeg8-dev
- liblcms2-dev
- libssl-dev
- libtiff5-dev
- libwebp-dev
- libxext6
- libxrender1
- libxslt1-dev
- libxslt1.1
- nodejs
- npm
- ntp
- postfix
- python-dev
- python-pip
- python-tk
- redis-server
- screen
- supervisor
- tcl8.6-dev
- tk8.6-dev
- vim
- xfonts-75dpi
- xfonts-base
- zlib1g-dev
when: ansible_os_family == 'Debian'


@ -0,0 +1,8 @@
if [ $TERM != 'screen' ]
then
PS1='HEY! USE SCREEN '$PS1
fi
sw() {
screen -x $1 || screen -S $1
}


@ -0,0 +1,3 @@
---
- name: Setup bash screen wall
copy: src=screen_wall.sh dest=/etc/profile.d/screen_wall.sh


@ -0,0 +1,3 @@
---
- name: restart network manager
service: name=NetworkManager state=restarted


@ -0,0 +1,7 @@
- name: add dnsmasq to network config
lineinfile: >
dest=/etc/NetworkManager/NetworkManager.conf
regexp="dns="
line="dns=dnsmasq"
state=present
notify: restart network manager


@ -0,0 +1,42 @@
# Ansible Role: EPEL Repository
Installs the EPEL repository (Extra Packages for Enterprise Linux) for RHEL/CentOS.
## Requirements
This role is only needed on (and only runs on) RHEL and its derivatives.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
epel_release:
"4": 10
"5": 4
"6": 8
"7": 5
A mapping from RHEL major version to current EPEL release version.
epel_repo_url: "http://download.fedoraproject.org/pub/epel/{{ ansible_distribution_major_version }}/{{ ansible_userspace_architecture }}{{ '/' if ansible_distribution_major_version < '7' else '/e/' }}epel-release-{{ ansible_distribution_major_version }}-{{ epel_release[ansible_distribution_major_version] }}.noarch.rpm"
epel_repo_gpg_key_url: "/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}"
The EPEL repo URL and GPG key URL. Generally, these should not be changed, but if this role is out of date, or if you need a very specific version, these can both be overridden.
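For example, a playbook that applies this role could pin these values explicitly (a minimal sketch; the overrides shown are illustrative placeholders, not values taken from this commit):

```
# Hypothetical overrides of the role defaults described above;
# everything except the role name is a placeholder.
- hosts: servers
  become: yes
  vars:
    epel_release:
      "7": 5                      # pin the EPEL release used for EL7
    epel_repo_gpg_key_url: "/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7"
  roles:
    - { role: geerlingguy.repo-epel }
```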
## Dependencies
None.
## Example Playbook
- hosts: servers
roles:
- { role: geerlingguy.repo-epel }
## License
MIT / BSD
## Author Information
This role was created in 2014 by [Jeff Geerling](http://jeffgeerling.com/), author of [Ansible for DevOps](http://ansiblefordevops.com/).


@ -0,0 +1,9 @@
---
epel_release:
"4": 10
"5": 4
"6": 8
"7": 5
epel_repo_url: "http://download.fedoraproject.org/pub/epel/{{ ansible_distribution_major_version }}/{{ ansible_userspace_architecture }}{{ '/' if ansible_distribution_major_version < '7' else '/e/' }}epel-release-{{ ansible_distribution_major_version }}-{{ epel_release[ansible_distribution_major_version] }}.noarch.rpm"
epel_repo_gpg_key_url: "/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}"


@ -0,0 +1,18 @@
---
dependencies: []
galaxy_info:
author: geerlingguy
description: EPEL repository for RHEL/CentOS.
company: "Midwestern Mac, LLC"
license: "license (BSD, MIT)"
min_ansible_version: 1.4
platforms:
- name: EL
versions:
- 4
- 5
- 6
- 7
categories:
- packaging


@ -0,0 +1,10 @@
---
- name: Install EPEL repo.
yum:
name: "{{ epel_repo_url }}"
state: present
- name: Import EPEL GPG key.
rpm_key:
key: "{{ epel_repo_gpg_key_url }}"
state: present


@ -0,0 +1,2 @@
---
fail2ban_nginx_access_log: /var/log/nginx/access.log


@ -0,0 +1,3 @@
---
- name: restart fail2ban
service: name=fail2ban state=restarted


@ -0,0 +1,21 @@
---
- name: Install fail2ban
yum: name=fail2ban state=present
- name: Enable fail2ban
service: name=fail2ban enabled=yes
- name: Create jail.d
file: path=/etc/fail2ban/jail.d state=directory
- name: Setup filters
template: src="{{item}}-filter.conf.j2" dest="/etc/fail2ban/filter.d/{{item}}.conf"
with_items:
- nginx-proxy
notify: restart fail2ban
- name: setup jails
template: src="{{item}}-jail.conf.j2" dest="/etc/fail2ban/jail.d/{{item}}.conf"
with_items:
- nginx-proxy
notify: restart fail2ban


@ -0,0 +1,10 @@
# Block IPs trying to use server as proxy.
[Definition]
failregex = <HOST>.*\" 400
<HOST>.*"[A-Z]* /(cms|muieblackcat|db|cpcommerce|cgi-bin|wp-login|joomla|awstatstotals|wp-content|wp-includes|pma|phpmyadmin|myadmin|mysql|mysqladmin|sqladmin|mypma|admin|xampp|mysqldb|pmadb|phpmyadmin1|phpmyadmin2).*" 4[\d][\d]
<HOST>.*".*supports_implicit_sdk_logging.*" 4[\d][\d]
<HOST>.*".*activities?advertiser_tracking_enabled.*" 4[\d][\d]
<HOST>.*".*/picture?type=normal.*" 4[\d][\d]
<HOST>.*".*/announce.php?info_hash=.*" 4[\d][\d]
ignoreregex =


@ -0,0 +1,8 @@
## block hosts trying to abuse our server as a forward proxy
[nginx-proxy]
enabled = true
filter = nginx-proxy
logpath = {{ fail2ban_nginx_access_log }}
action = iptables-multiport[name=NoNginxProxy, port="http,https"]
maxretry = 2
bantime = 86400


@ -0,0 +1,32 @@
module frappe_selinux 1.0;
require {
type user_home_dir_t;
type httpd_t;
type user_home_t;
type soundd_port_t;
class tcp_socket name_connect;
class lnk_file read;
class dir { getattr search };
class file { read open };
}
#============= httpd_t ==============
#!!!! This avc is allowed in the current policy
allow httpd_t soundd_port_t:tcp_socket name_connect;
#!!!! This avc is allowed in the current policy
allow httpd_t user_home_dir_t:dir search;
#!!!! This avc is allowed in the current policy
allow httpd_t user_home_t:dir { getattr search };
#!!!! This avc can be allowed using the boolean 'httpd_read_user_content'
allow httpd_t user_home_t:file open;
#!!!! This avc is allowed in the current policy
allow httpd_t user_home_t:file read;
#!!!! This avc is allowed in the current policy
allow httpd_t user_home_t:lnk_file read;


@ -0,0 +1,21 @@
---
- name: Install deps
yum: name="{{item}}" state=present
with_items:
- policycoreutils-python
- selinux-policy-devel
- name: Check enabled SELinux modules
shell: semanage module -l
register: enabled_modules
- name: Copy frappe_selinux policy
copy: src=frappe_selinux.te dest=/root/frappe_selinux.te
register: dest_frappe_selinux_te
- name: Compile frappe_selinux policy
shell: "make -f /usr/share/selinux/devel/Makefile frappe_selinux.pp && semodule -i frappe_selinux.pp"
args:
chdir: /root/
when: "enabled_modules.stdout.find('frappe_selinux') == -1 or dest_frappe_selinux_te.changed"


@ -0,0 +1,2 @@
locale_keymap: us
locale_lang: en_US.utf8


@ -0,0 +1,12 @@
---
- name: Check current locale
shell: localectl
register: locale_test
- name: Set Locale
command: "localectl set-locale LANG={{ locale_lang }}"
when: locale_test.stdout.find('LANG={{ locale_lang }}') == -1
- name: Set keymap
command: "localectl set-keymap {{ locale_keymap }}"
when: "locale_test.stdout.find('Keymap: {{locale_keymap}}') == -1"


@ -0,0 +1,3 @@
---
logwatch_emails: "{{ admin_emails }}"
logwatch_detail: High


@ -0,0 +1,6 @@
---
- name: Install logwatch
yum: name=logwatch state=present
- name: Copy logwatch config
template: src=logwatch.conf.j2 dest=/etc/logwatch/conf/logwatch.conf backup=yes


@ -0,0 +1,2 @@
MailTo = {{ logwatch_emails }}
Detail = {{ logwatch_detail }}


@ -0,0 +1,64 @@
# Ansible Role: MariaDB
Installs MariaDB
## Supported platforms
```
CentOS 6 & 7
Ubuntu 14.04
```
## Post install
Run `mysql_secure_installation`
## Requirements
None
## Role Variables
MariaDB version:
```
mariadb_version: 10.0
```
Configuration template:
```
mysql_conf_tpl: change_me
```
Configuration filename:
```
mysql_conf_file: settings.cnf
```
### Experimental unattended mysql_secure_installation
```
ansible-playbook release.yml --extra-vars "mysql_secure_installation=true mysql_root_password=your_very_secret_password"
```
## Dependencies
None
## Example Playbook
```
- hosts: servers
roles:
- { role: pcextreme.mariadb }
```
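A fuller variant that sets the variables documented above might look like this (a hedged sketch; the configuration template path and the root password are placeholders, not values from this commit):

```
# Hypothetical usage of the documented role variables;
# mysql_conf_tpl and mysql_root_password are placeholders.
- hosts: servers
  become: yes
  vars:
    mariadb_version: 10.0
    mysql_conf_tpl: templates/settings.cnf.j2
    mysql_conf_file: settings.cnf
    mysql_secure_installation: true
    mysql_root_password: your_very_secret_password
  roles:
    - { role: pcextreme.mariadb }
```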
## License
MIT / BSD
## Author Information
Created by [Attila van der Velde](https://github.com/vdvm)


@ -0,0 +1,8 @@
---
mariadb_version: 10.0
mysql_conf_tpl: change_me
mysql_conf_file: settings.cnf
mysql_secure_installation: false
mysql_root_password: frappe


@ -0,0 +1,3 @@
---
- name: restart mysql
service: name=mysql state=restarted


@ -0,0 +1,19 @@
---
galaxy_info:
author: "Attila van der Velde"
description: "Installs MariaDB"
company: "PCextreme B.V."
license: "license (MIT, BSD)"
min_ansible_version: 1.8
platforms:
- name: EL
versions:
- 6
- 7
- name: Ubuntu
versions:
- trusty
categories:
- database:sql
dependencies: []


@ -8,3 +8,5 @@
- MariaDB-server
- MariaDB-client
- MySQL-python
- MariaDB-devel


@ -0,0 +1,18 @@
---
- include: centos.yml
when: ansible_distribution == 'CentOS' and ansible_distribution_major_version|int >= 6
- include: ubuntu.yml
when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == '14.04'
- name: Add configuration
template: src={{ mysql_conf_tpl }} dest={{ mysql_conf_dir[ansible_distribution] }}/{{ mysql_conf_file }} owner=root group=root mode=0644
when: mysql_conf_tpl != 'change_me'
notify: restart mysql
- name: Start and enable service
service: name=mysql state=started enabled=yes
- include: mysql_secure_installation.yml
- debug: var=mysql_secure_installation
when: mysql_secure_installation and mysql_root_password is defined


@ -0,0 +1,59 @@
---
# Set root password
# UPDATE mysql.user SET Password=PASSWORD('mysecret') WHERE User='root';
# FLUSH PRIVILEGES;
- name: Set root Password
mysql_user: name=root host={{ item }} password={{ mysql_root_password }} state=present
with_items:
- localhost
- name: Add .my.cnf
command: '/usr/bin/whoami'
register: current_user
template: src=my.cnf.j2 dest=/home/{{ current_user }}/.my.cnf owner=root group=root mode=0600
- name: Set root Password
mysql_user: name=root host={{ item }} password={{ mysql_root_password }} state=present
with_items:
- 127.0.0.1
- ::1
- name: Reload privilege tables
command: 'mysql -ne "{{ item }}"'
with_items:
- FLUSH PRIVILEGES
changed_when: False
- name: Reload privilege tables
command: 'mysql -ne "{{ item }}"'
with_items:
- FLUSH PRIVILEGES
changed_when: False
- name: Remove anonymous users
command: 'mysql -ne "{{ item }}"'
with_items:
- DELETE FROM mysql.user WHERE User=''
changed_when: False
- name: Disallow root login remotely
command: 'mysql -ne "{{ item }}"'
with_items:
- DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')
changed_when: False
- name: Remove test database and access to it
command: 'mysql -ne "{{ item }}"'
with_items:
- DROP DATABASE if exists test
- DELETE FROM mysql.db WHERE Db='test' OR Db='test\\_%'
changed_when: False
ignore_errors: True
- name: Reload privilege tables
command: 'mysql -ne "{{ item }}"'
with_items:
- FLUSH PRIVILEGES
changed_when: False


@ -0,0 +1,23 @@
---
- name: Add repo file
template: src=mariadb_ubuntu.list.j2 dest=/etc/apt/sources.list.d/mariadb.list owner=root group=root mode=0644
register: mariadb_list
- name: Add repo key
apt_key: id=1BB943DB url=http://keyserver.ubuntu.com/pks/lookup?op=get&search=0xCBCB082A1BB943DB state=present
register: mariadb_key
- name: Update apt cache
apt: update_cache=yes
when: mariadb_list.changed == True or mariadb_key.changed == True
- name: Unattended package installation
shell: export DEBIAN_FRONTEND=noninteractive
changed_when: false
- name: Install MariaDB
apt: pkg={{ item }} state=present
with_items:
- mariadb-server
- mariadb-client
- python-mysqldb


@ -0,0 +1,7 @@
# MariaDB CentOS {{ ansible_distribution_major_version|int }} repository list
# http://mariadb.org/mariadb/repositories/
[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/{{ mariadb_version }}/centos{{ ansible_distribution_major_version|int }}-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1


@ -0,0 +1,4 @@
# MariaDB Ubuntu {{ ansible_distribution_release | title }} repository list
# http://mariadb.org/mariadb/repositories/
deb http://ams2.mirrors.digitalocean.com/mariadb/repo/{{ mariadb_version }}/ubuntu {{ ansible_distribution_release | lower }} main
deb-src http://ams2.mirrors.digitalocean.com/mariadb/repo/{{ mariadb_version }}/ubuntu {{ ansible_distribution_release | lower }} main


@ -0,0 +1,3 @@
[client]
user=root
password={{ mysql_root_password }}


@ -0,0 +1,4 @@
---
mysql_conf_dir:
"CentOS": /etc/my.cnf.d
"Ubuntu": /etc/mysql/conf.d


@ -0,0 +1,35 @@
---
language: python
python: "2.7"
env:
- SITE=test.yml
before_install:
- sudo apt-get update -qq
- sudo apt-get install -y curl
install:
# Install Ansible.
- pip install ansible
# Add ansible.cfg to pick up roles path.
- "{ echo '[defaults]'; echo 'roles_path = ../'; } >> ansible.cfg"
script:
# Check the role/playbook's syntax.
- "ansible-playbook -i tests/inventory tests/$SITE --syntax-check"
# Run the role/playbook with ansible-playbook.
- "ansible-playbook -i tests/inventory tests/$SITE --connection=local --sudo"
# Run the role/playbook again, checking to make sure it's idempotent.
- >
ansible-playbook -i tests/inventory tests/$SITE --connection=local --sudo
| grep -q 'changed=0.*failed=0'
&& (echo 'Idempotence test: pass' && exit 0)
|| (echo 'Idempotence test: fail' && exit 1)
# TODO - get the test working. Probably need to add a virtual host.
# Request a page via Nginx, to make sure Nginx is running and responds.
# - "curl http://localhost/"


@ -0,0 +1,82 @@
# Ansible Role: Nginx
[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-nginx.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-nginx)
Installs Nginx on RedHat/CentOS or Debian/Ubuntu linux servers.
This role installs and configures the latest version of Nginx from the Nginx yum repository (on RedHat-based systems) or via apt (on Debian-based systems). You will likely need to do extra setup work after this role has installed Nginx, like adding your own [virtualhost].conf file inside `/etc/nginx/conf.d/`, describing the location and options to use for your particular website.
## Requirements
None.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
nginx_vhosts: []
A list of vhost definitions (server blocks) for Nginx virtual hosts. If left empty, you will need to supply your own virtual host configuration. See the commented example in `defaults/main.yml` for available server options. If you have a large number of customizations required for your server definition(s), you're likely better off managing the vhost configuration file yourself, leaving this variable set to `[]`.
nginx_remove_default_vhost: false
Whether to remove the 'default' virtualhost configuration supplied by Nginx. Useful if you want the base `/` URL to be directed at one of your own virtual hosts configured in a separate .conf file.
nginx_upstreams: []
If you are configuring Nginx as a load balancer, you can define one or more upstream sets using this variable. In addition to defining at least one upstream, you would need to configure one of your server blocks to proxy requests through the defined upstream (e.g. `proxy_pass http://myapp1;`). See the commented example in `defaults/main.yml` for more information.
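As an illustration, an upstream plus a vhost that proxies to it could be declared as follows (a sketch modelled on the commented examples in `defaults/main.yml`; the names, hosts, and paths are placeholders):

```
# Placeholder upstream/vhost pair based on the role's commented defaults.
nginx_upstreams:
  - name: myapp1
    strategy: "least_conn"
    servers:
      - "srv1.example.com"
      - "srv2.example.com weight=3"

nginx_vhosts:
  - listen: "80 default_server"
    server_name: "example.com"
    root: "/var/www/example.com"
    extra_config: |
      location / {
        proxy_pass http://myapp1;
      }
```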
nginx_user: "nginx"
The user under which Nginx will run. Defaults to `nginx` for RedHat, and `www-data` for Debian.
nginx_worker_processes: "1"
nginx_worker_connections: "1024"
`nginx_worker_processes` should be set to the number of cores present on your machine (find this number with `grep processor /proc/cpuinfo | wc -l`). `nginx_worker_connections` is the number of connections per process. Set this higher to handle more simultaneous connections (and remember that a connection will be used for as long as the keepalive timeout duration for every client!).
nginx_error_log: "/var/log/nginx/error.log warn"
nginx_access_log: "/var/log/nginx/access.log main buffer=16k"
Configuration of the default error and access logs. Set to `off` to disable a log entirely.
nginx_sendfile: "on"
nginx_tcp_nopush: "on"
nginx_tcp_nodelay: "on"
TCP connection options. See [this blog post](https://t37.net/nginx-optimization-understanding-sendfile-tcp_nodelay-and-tcp_nopush.html) for more information on these directives.
nginx_keepalive_timeout: "65"
nginx_keepalive_requests: "100"
Nginx keepalive settings. Timeout should be set higher (10s+) if you have more polling-style traffic (AJAX-powered sites especially), or lower (<10s) if you have a site where most users visit a few pages and don't send any further requests.
nginx_client_max_body_size: "64m"
This value determines the largest file upload possible, as uploads are passed through Nginx before hitting a backend like `php-fpm`. If you get an error like `client intended to send too large body`, it means this value is set too low.
nginx_proxy_cache_path: ""
Set as the `proxy_cache_path` directive in the `nginx.conf` file. By default, this will not be configured (if left as an empty string), but if you wish to use Nginx as a reverse proxy, you can set this to a valid value (e.g. `"/var/cache/nginx keys_zone=cache:32m"`) to use Nginx's cache (further proxy configuration can be done in individual server configurations).
nginx_default_release: ""
(For Debian/Ubuntu only) Allows you to set a different repository for the installation of Nginx. As an example, if you are running Debian's wheezy release, and want to get a newer version of Nginx, you can install the `wheezy-backports` repository and set that value here, and Ansible will use that as the `-t` option while installing Nginx.
## Dependencies
None.
## Example Playbook
- hosts: server
roles:
- { role: geerlingguy.nginx }
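Role variables can be overridden in the same play; a sketch with illustrative values:
- hosts: server
  vars:
    nginx_remove_default_vhost: true
    nginx_worker_processes: "2"
  roles:
    - { role: geerlingguy.nginx }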
## License
MIT / BSD
## Author Information
This role was created in 2014 by [Jeff Geerling](http://jeffgeerling.com/), author of [Ansible for DevOps](http://ansiblefordevops.com/).

View File

@ -0,0 +1,47 @@
---
# Used only for Debian/Ubuntu installation, as the -t option for apt.
nginx_default_release: ""
nginx_worker_processes: "1"
nginx_worker_connections: "1024"
nginx_error_log: "/var/log/nginx/error.log warn"
nginx_access_log: "/var/log/nginx/access.log main buffer=16k"
nginx_sendfile: "on"
nginx_tcp_nopush: "on"
nginx_tcp_nodelay: "on"
nginx_keepalive_timeout: "65"
nginx_keepalive_requests: "100"
nginx_client_max_body_size: "64m"
nginx_proxy_cache_path: ""
nginx_remove_default_vhost: false
nginx_vhosts: []
# Example vhost below, showing all available options:
# - {
# listen: "80 default_server", # default: "80 default_server"
# server_name: "example.com", # default: N/A
# root: "/var/www/example.com", # default: N/A
# index: "index.html index.htm", # default: "index.html index.htm"
#
# # Properties that are only added if defined:
# error_page: "",
# access_log: "",
#     extra_parameters: "" # Can be used to add extra config blocks (multiline).
# }
nginx_upstreams: []
# - {
# name: myapp1,
# strategy: "ip_hash", # "least_conn", etc.
# servers: {
# "srv1.example.com",
# "srv2.example.com weight=3",
# "srv3.example.com"
# }
# }
nginx_conf_file: nginx.conf.j2

View File

@ -0,0 +1,3 @@
---
- name: restart nginx
service: name=nginx state=restarted

View File

@ -0,0 +1,23 @@
---
dependencies: []
galaxy_info:
author: geerlingguy
description: Nginx installation for Linux/UNIX.
company: "Midwestern Mac, LLC"
license: "license (BSD, MIT)"
min_ansible_version: 1.4
platforms:
- name: EL
versions:
- 6
- 7
- name: Debian
versions:
- all
- name: Ubuntu
versions:
- all
categories:
- development
- web

View File

@ -0,0 +1,31 @@
---
# Variable setup.
- name: Include OS-specific variables.
include_vars: "{{ ansible_os_family }}.yml"
- name: Define nginx_user.
set_fact:
nginx_user: "{{ __nginx_user }}"
when: nginx_user is not defined
# Setup/install tasks.
- include: setup-RedHat.yml
when: ansible_os_family == 'RedHat'
- include: setup-Debian.yml
when: ansible_os_family == 'Debian'
# Nginx setup.
- name: Copy nginx configuration in place.
template:
src: "{{ nginx_conf_file }}"
dest: /etc/nginx/nginx.conf
owner: root
group: root
mode: 0644
notify: restart nginx
- name: Ensure nginx is started and enabled to start at boot.
service: name=nginx state=started enabled=yes
- include: vhosts.yml

View File

@ -0,0 +1,6 @@
---
- name: Ensure nginx is installed.
apt:
pkg: nginx
state: installed
default_release: "{{ nginx_default_release }}"

View File

@ -0,0 +1,11 @@
---
- name: Enable nginx repo.
template:
src: nginx.repo.j2
dest: /etc/yum.repos.d/nginx.repo
owner: root
group: root
mode: 0644
- name: Ensure nginx is installed.
yum: pkg=nginx state=installed enablerepo=nginx

View File

@ -0,0 +1,22 @@
---
- name: Remove default nginx vhost config file (if configured).
file:
path: "{{ nginx_default_vhost_path }}"
state: absent
when: nginx_remove_default_vhost
notify: restart nginx
- name: Add managed vhost config file (if any vhosts are configured).
template:
src: vhosts.j2
dest: "{{ nginx_vhost_path }}/vhosts.conf"
mode: 0644
when: nginx_vhosts
notify: restart nginx
- name: Remove managed vhost config file (if no vhosts are configured).
file:
path: "{{ nginx_vhost_path }}/vhosts.conf"
state: absent
when: not nginx_vhosts
notify: restart nginx

View File

@ -0,0 +1,51 @@
user {{ nginx_user }};
error_log {{ nginx_error_log }};
pid /var/run/nginx.pid;
worker_processes {{ nginx_worker_processes }};
events {
worker_connections {{ nginx_worker_connections }};
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
server_names_hash_bucket_size 64;
client_max_body_size {{ nginx_client_max_body_size }};
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log {{ nginx_access_log }};
sendfile {{ nginx_sendfile }};
tcp_nopush {{ nginx_tcp_nopush }};
tcp_nodelay {{ nginx_tcp_nodelay }};
keepalive_timeout {{ nginx_keepalive_timeout }};
keepalive_requests {{ nginx_keepalive_requests }};
#gzip on;
{% if nginx_proxy_cache_path %}
proxy_cache_path {{ nginx_proxy_cache_path }};
{% endif %}
{% for upstream in nginx_upstreams %}
upstream {{ upstream.name }} {
{% if upstream.strategy is defined %}
{{ upstream.strategy }};
{% endif %}
{% for server in upstream.servers %}
server {{ server }};
{% endfor %}
}
{% endfor %}
include {{ nginx_vhost_path }}/*;
}

View File

@ -0,0 +1,5 @@
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/{{ ansible_distribution_major_version }}/$basearch/
gpgcheck=0
enabled=1

View File

@ -0,0 +1,24 @@
{% for vhost in nginx_vhosts %}
server {
listen {{ vhost.listen | default('80 default_server') }};
server_name {{ vhost.server_name }};
root {{ vhost.root }};
index {{ vhost.index | default('index.html index.htm') }};
{% if vhost.error_page is defined %}
error_page {{ vhost.error_page }};
{% endif %}
{% if vhost.access_log is defined %}
access_log {{ vhost.access_log }};
{% endif %}
{% if vhost.return is defined %}
return {{ vhost.return }};
{% endif %}
{% if vhost.extra_parameters is defined %}
{{ vhost.extra_parameters }};
{% endif %}
}
{% endfor %}

View File

@ -0,0 +1 @@
localhost

View File

@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- ansible-role-nginx

View File

@ -0,0 +1,4 @@
---
nginx_vhost_path: /etc/nginx/sites-enabled
nginx_default_vhost_path: /etc/nginx/sites-enabled/default
__nginx_user: "www-data"

View File

@ -0,0 +1,4 @@
---
nginx_vhost_path: /etc/nginx/conf.d
nginx_default_vhost_path: /etc/nginx/conf.d/default.conf
__nginx_user: "nginx"

View File

@ -0,0 +1,9 @@
---
- name: Install ntpd
yum: name="{{item}}" state=installed
with_items:
- ntp
- ntpdate
- name: Enable and start ntpd
service: name=ntpd enabled=yes state=started

View File

@ -0,0 +1 @@
swap_size_mb: 1024

View File

@ -0,0 +1,18 @@
- name: Create swap space
command: dd if=/dev/zero of=/extraswap bs=1M count={{swap_size_mb}}
when: ansible_swaptotal_mb < 1
- name: Make swap
command: mkswap /extraswap
when: ansible_swaptotal_mb < 1
- name: Add to fstab
action: lineinfile dest=/etc/fstab regexp="extraswap" line="/extraswap none swap sw 0 0" state=present
when: ansible_swaptotal_mb < 1
- name: Turn swap on
command: swapon -a
when: ansible_swaptotal_mb < 1
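# A swappiness of 1 tells the kernel to use the new swap space only under heavy
# memory pressure; writing to /proc applies it immediately but does not persist
# across reboots.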
- name: Set swappiness
shell: echo 1 | tee /proc/sys/vm/swappiness

View File

@ -0,0 +1 @@
wkhtmltopdf_version: 0.12.2.1

View File

@ -0,0 +1,32 @@
---
- name: install base fonts
yum: name={{ item }} state=present
with_items:
- libXrender
- libXext
- xorg-x11-fonts-75dpi
- xorg-x11-fonts-Type1
when: ansible_os_family == 'RedHat'
- name: Install wkhtmltopdf rpm
yum: name=http://download.gna.org/wkhtmltopdf/0.12/{{ wkhtmltopdf_version }}/wkhtmltox-{{ wkhtmltopdf_version }}_linux-centos{{ ansible_distribution_major_version }}-{{ "amd64" if ansible_architecture == "x86_64" else "i386"}}.rpm
when: ansible_os_family == 'RedHat'
- name: install base fonts
apt: name={{ item }} state=present force=yes
with_items:
- libxrender1
- libxext6
- xfonts-75dpi
- xfonts-base
when: ansible_os_family == 'Debian'
- name: Download wkhtmltopdf
get_url:
url=http://download.gna.org/wkhtmltopdf/0.12/{{ wkhtmltopdf_version }}/wkhtmltox-{{ wkhtmltopdf_version }}_linux-{{ ansible_distribution_release }}-{{ "amd64" if ansible_architecture == "x86_64" else "i386"}}.deb
dest="/tmp/"
when: ansible_os_family == 'Debian'
- name: Install wkhtmltopdf deb
apt: deb=/tmp/wkhtmltox-{{ wkhtmltopdf_version }}_linux-{{ ansible_distribution_release }}-{{ "amd64" if ansible_architecture == "x86_64" else "i386"}}.deb
when: ansible_os_family == 'Debian'

View File

@ -0,0 +1,60 @@
[mysqld]
# GENERAL #
user = mysql
default-storage-engine = InnoDB
socket = /var/lib/mysql/mysql.sock
pid-file = /var/lib/mysql/mysql.pid
# MyISAM #
key-buffer-size = 32M
myisam-recover = FORCE,BACKUP
# SAFETY #
max-allowed-packet = 16M
max-connect-errors = 1000000
innodb = FORCE
# DATA STORAGE #
datadir = /var/lib/mysql/
# BINARY LOGGING #
log-bin = /var/lib/mysql/mysql-bin
expire-logs-days = 14
sync-binlog = 1
# REPLICATION #
server-id = 1
# CACHES AND LIMITS #
tmp-table-size = 32M
max-heap-table-size = 32M
query-cache-type = 0
query-cache-size = 0
max-connections = 500
thread-cache-size = 50
open-files-limit = 65535
table-definition-cache = 4096
table-open-cache = 10240
# INNODB #
innodb-flush-method = O_DIRECT
innodb-log-files-in-group = 2
innodb-log-file-size = 512M
innodb-flush-log-at-trx-commit = 1
innodb-file-per-table = 1
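# Sized to roughly 68.5% of total system memory (ansible_memtotal_mb), leaving headroom for the OS and other processes.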
innodb-buffer-pool-size = {{ (ansible_memtotal_mb*0.685)|round|int }}M
innodb-file-format = barracuda
innodb-large-prefix = 1
collation-server = utf8mb4_unicode_ci
character-set-server = utf8mb4
character-set-client-handshake = FALSE
# LOGGING #
log-error = /var/lib/mysql/mysql-error.log
log-queries-not-using-indexes = 0
slow-query-log = 1
slow-query-log-file = /var/lib/mysql/mysql-slow.log
[mysql]
default-character-set = utf8mb4

View File

@ -0,0 +1,59 @@
user nginx;
worker_processes 6;
worker_rlimit_nofile 65535;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 2048;
multi_accept on;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
server_tokens off;
#tcp_nopush on;
keepalive_timeout 10;
keepalive_requests 10;
gzip on;
gzip_disable "msie6";
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css application/json application/javascript image/svg+xml text/html "application/json; charset: utf-8" "text/html; charset: utf-8" application/font-woff;
server_names_hash_max_size 4096;
#server_names_hash_bucket_size 64;
open_file_cache max=65000 inactive=1m;
open_file_cache_valid 5s;
open_file_cache_min_uses 1;
open_file_cache_errors on;
ssl_protocols SSLv3 TLSv1;
ssl_ciphers ECDHE-RSA-AES256-SHA384:AES256-SHA256:RC4:HIGH:!MD5:!aNULL:!EDH:!AESGCM;
ssl_prefer_server_ciphers on;
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=web-cache:8m max_size=1000m inactive=600m;
include /etc/nginx/conf.d/*.conf;
}