road to swarm - still some mess to fix

This commit is contained in:
bunkerity 2021-03-16 17:56:24 +01:00
parent b8027d2bac
commit ceed904882
16 changed files with 188 additions and 42 deletions

View File

@ -11,6 +11,9 @@ class AutoConf :
self.__sites = {} self.__sites = {}
self.__config = Config(self.__swarm, api) self.__config = Config(self.__swarm, api)
def reload(self) :
return self.__config.reload(self.instances)
def pre_process(self, objs) : def pre_process(self, objs) :
for instance in objs : for instance in objs :
(id, name, labels) = self.__get_infos(instance) (id, name, labels) = self.__get_infos(instance)

View File

@ -92,8 +92,9 @@ class Config :
# Include the server conf # Include the server conf
utils.replace_in_file("/etc/nginx/nginx.conf", "}", "include /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf;\n}") utils.replace_in_file("/etc/nginx/nginx.conf", "}", "include /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf;\n}")
return self.__reload(instances) return self.reload(instances)
except Exception as e : except Exception as e :
traceback.print_exc()
utils.log("[!] Error while activating config : " + str(e)) utils.log("[!] Error while activating config : " + str(e))
return False return False
@ -107,9 +108,10 @@ class Config :
# Remove the include # Remove the include
utils.replace_in_file("/etc/nginx/nginx.conf", "include /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf;\n", "") utils.replace_in_file("/etc/nginx/nginx.conf", "include /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf;\n", "")
return self.__reload(instances) return self.reload(instances)
except Exception as e : except Exception as e :
traceback.print_exc()
utils.log("[!] Error while deactivating config : " + str(e)) utils.log("[!] Error while deactivating config : " + str(e))
return False return False
@ -127,13 +129,13 @@ class Config :
utils.log("[!] Error while deactivating config : " + str(e)) utils.log("[!] Error while deactivating config : " + str(e))
return False return False
def __reload(self, instances) : def reload(self, instances) :
return self.__api(instances, "/reload") return self.__api_call(instances, "/reload")
def __status(self, instances) : def __status(self, instances) :
return self.__api(instances, "/status") return self.__api_call(instances, "/status")
def __api(self, instances, path) : def __api_call(self, instances, path) :
ret = True ret = True
for instance_id, instance in instances.items() : for instance_id, instance in instances.items() :
# Reload the instance object just in case # Reload the instance object just in case
@ -146,7 +148,11 @@ class Config :
nodeID = task["NodeID"] nodeID = task["NodeID"]
taskID = task["ID"] taskID = task["ID"]
fqdn = name + "." + nodeID + "." + taskID fqdn = name + "." + nodeID + "." + taskID
req = requests.post("http://" + fqdn + ":8080" + self.__api + path) req = False
try :
req = requests.post("http://" + fqdn + ":8080" + self.__api + path)
except :
pass
if req and req.status_code == 200 : if req and req.status_code == 200 :
utils.log("[*] Sent reload order to instance " + fqdn + " (service.node.task)") utils.log("[*] Sent reload order to instance " + fqdn + " (service.node.task)")
else : else :

23
autoconf/ReloadServer.py Normal file
View File

@ -0,0 +1,23 @@
import os, socketserver, threading
class ReloadServerHandler(socketserver.BaseRequestHandler):
    """Handle one connection on the autoconf reload socket.

    Protocol: the client sends any non-empty payload (the reload.py
    client sends b"reload"); the handler triggers a configuration
    reload and answers b"ok" on success or b"ko" on failure.
    """

    def handle(self) :
        data = self.request.recv(512)
        if not data :
            # Client connected and closed without sending anything.
            return
        # Serialize with the Docker-event processing loop: both sides
        # take the same lock before touching the autoconf state.
        with self.server.lock :
            ret = self.server.autoconf.reload()
        # sendall() requires bytes on Python 3; the original passed a
        # str, which raised TypeError and never answered the client.
        if ret :
            self.request.sendall(b"ok")
        else :
            self.request.sendall(b"ko")
def run_reload_server(autoconf, lock) :
    """Start the UNIX-socket reload server in a background daemon thread.

    Binds /tmp/autoconf.sock — the path the reload.py client connects to
    and the cron jobs probe. (The original bound /tmp/autoconf.pid, so
    clients could never reach the server.)

    :param autoconf: object exposing reload(); stored on the server for the handler
    :param lock: threading.Lock shared with the Docker event loop
    :return: (server, thread) tuple so the caller can shut it down
    """
    sock_path = "/tmp/autoconf.sock"
    # A stale socket file from a previous run makes bind() fail; remove it.
    if os.path.exists(sock_path) :
        os.remove(sock_path)
    server = socketserver.UnixStreamServer(sock_path, ReloadServerHandler)
    server.autoconf = autoconf
    server.lock = lock
    thread = threading.Thread(target=server.serve_forever)
    # Daemon thread: don't block process exit on the serving loop.
    thread.daemon = True
    thread.start()
    return (server, thread)

View File

@ -1,8 +1,9 @@
#!/usr/bin/python3 #!/usr/bin/python3
from AutoConf import AutoConf from AutoConf import AutoConf
from ReloadServer import run_reload_server
import utils import utils
import docker, os, stat, sys import docker, os, stat, sys, select, threading
# Connect to the endpoint # Connect to the endpoint
endpoint = "/var/run/docker.sock" endpoint = "/var/run/docker.sock"
@ -23,6 +24,9 @@ api = ""
if swarm : if swarm :
api = os.getenv("API_URI") api = os.getenv("API_URI")
autoconf = AutoConf(swarm, api) autoconf = AutoConf(swarm, api)
lock = threading.Lock()
if swarm :
(server, thread) = run_reload_server(autoconf, lock)
# Get all bunkerized-nginx instances and web services created before # Get all bunkerized-nginx instances and web services created before
try : try :
@ -35,7 +39,8 @@ except docker.errors.APIError as e :
sys.exit(3) sys.exit(3)
# Process them before events # Process them before events
autoconf.pre_process(before) with lock :
autoconf.pre_process(before)
# Process events received from Docker # Process events received from Docker
try : try :
@ -55,7 +60,8 @@ try :
continue continue
# Process the event # Process the event
autoconf.process(server, event["Action"]) with lock :
autoconf.process(server, event["Action"])
except docker.errors.APIError as e : except docker.errors.APIError as e :
utils.log("[!] Docker API error " + str(e)) utils.log("[!] Docker API error " + str(e))

19
autoconf/reload.py Normal file
View File

@ -0,0 +1,19 @@
#!/usr/bin/python3
"""Client for the autoconf reload server.

Connects to the UNIX socket exposed by ReloadServer, requests a reload
and maps the outcome to an exit code:
    0 = reload acknowledged ("ok")
    1 = reload socket does not exist (autoconf not running in swarm mode)
    2 = connection/communication error
    3 = server answered but the reload failed
"""

import os, socket, sys

# Path must match the socket bound by ReloadServer.
SOCKET_PATH = "/tmp/autoconf.sock"

def main() :
    if not os.path.exists(SOCKET_PATH) :
        sys.exit(1)
    try :
        client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try :
            client.connect(SOCKET_PATH)
            client.send("reload".encode("utf-8"))
            data = client.recv(512)
        finally :
            # Close the socket even if connect/send/recv raised.
            client.close()
        if not data or data.decode("utf-8") != "ok" :
            sys.exit(3)
    except Exception :
        # Any socket-level failure is reported as exit code 2.
        # (SystemExit does not derive from Exception, so the sys.exit(3)
        # above is not swallowed here.)
        sys.exit(2)
    sys.exit(0)

if __name__ == "__main__" :
    main()

View File

@ -0,0 +1,26 @@
# Temporary API endpoint used in swarm mode; %API_URI% is substituted by
# the entrypoint (see replace_in_file on /tmp/api.conf).
location ~ ^/%API_URI% {
rewrite_by_lua_block {
-- "api" is a project Lua module; presumably it authenticates and
-- executes the requested action -- TODO confirm against lua/api.lua
local api = require "api"
local api_uri = "%API_URI%"
if api.is_api_call(api_uri) then
ngx.header.content_type = 'text/plain'
if api.do_api_call(api_uri) then
ngx.log(ngx.WARN, "[API] API call " .. ngx.var.request_uri .. " successfull from " .. ngx.var.remote_addr)
ngx.say("ok")
else
ngx.log(ngx.WARN, "[API] API call " .. ngx.var.request_uri .. " failed from " .. ngx.var.remote_addr)
ngx.say("ko")
end
-- The request was an API call: answer immediately with "ok"/"ko".
ngx.exit(ngx.HTTP_OK)
end
-- Not an API call: let nginx continue normal request processing.
ngx.exit(ngx.OK)
}
}

View File

@ -1,3 +1,5 @@
load_module /usr/lib/nginx/modules/ngx_http_lua_module.so;
daemon on; daemon on;
pid /tmp/nginx-temp.pid; pid /tmp/nginx-temp.pid;
@ -8,12 +10,19 @@ events {
} }
http { http {
proxy_temp_path /tmp/proxy_temp;
client_body_temp_path /tmp/client_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
lua_package_path "/usr/local/lib/lua/?.lua;;";
server { server {
listen 0.0.0.0:%HTTP_PORT% default_server; listen 0.0.0.0:%HTTP_PORT% default_server;
server_name _; server_name _;
location ~ ^/.well-known/acme-challenge/ { location ~ ^/.well-known/acme-challenge/ {
root /acme-challenge; root /acme-challenge;
} }
%USE_API%
location / { location / {
return 444; return 444;
} }

View File

@ -6,14 +6,21 @@
# load some functions # load some functions
. /opt/entrypoint/utils.sh . /opt/entrypoint/utils.sh
# start nginx with temp conf for let's encrypt challenges # start nginx with temp conf for let's encrypt challenges and API
if [ "$(has_value AUTO_LETS_ENCRYPT yes)" != "" ] ; then if [ "$(has_value AUTO_LETS_ENCRYPT yes)" != "" ] || [ "$SWARM_MODE" = "yes" ] ; then
cp /opt/confs/global/nginx-temp.conf /tmp/nginx-temp.conf cp /opt/confs/global/nginx-temp.conf /tmp/nginx-temp.conf
cp /opt/confs/global/api-temp.conf /tmp/api.conf
if [ "$SWARM_MODE" = "yes" ] ; then
replace_in_file "/tmp/nginx-temp.conf" "%USE_API%" "include /tmp/api.conf;"
replace_in_file "/tmp/api.conf" "%API_URI%" "$API_URI"
else
replace_in_file "/tmp/nginx-temp.conf" "%USE_API%" ""
fi
replace_in_file "/tmp/nginx-temp.conf" "%HTTP_PORT%" "$HTTP_PORT" replace_in_file "/tmp/nginx-temp.conf" "%HTTP_PORT%" "$HTTP_PORT"
nginx -c /tmp/nginx-temp.conf nginx -c /tmp/nginx-temp.conf
if [ "$?" -eq 0 ] ; then if [ "$?" -eq 0 ] ; then
echo "[*] Successfully started temp nginx to solve Let's Encrypt challenges" echo "[*] Successfully started temp nginx"
else else
echo "[!] Can't start temp nginx to solve Let's Encrypt challenges" echo "[!] Can't start temp nginx"
fi fi
fi fi

View File

@ -6,6 +6,14 @@
# copy old conf to cache # copy old conf to cache
cp /etc/nginx/block-abusers.conf /cache cp /etc/nginx/block-abusers.conf /cache
# if we are running nginx
if [ -f /tmp/nginx.pid ] ; then
RELOAD="/usr/sbin/nginx -s reload > /dev/null 2>&1"
# if we are in autoconf
elif [ -f /tmp/autoconf.sock ] ; then
RELOAD="/opt/entrypoint/reload.py"
fi
# generate the new conf # generate the new conf
curl -s "https://iplists.firehol.org/files/firehol_abusers_30d.netset" | grep -v "^\#.*" | curl -s "https://iplists.firehol.org/files/firehol_abusers_30d.netset" | grep -v "^\#.*" |
while read entry ; do while read entry ; do
@ -21,8 +29,8 @@ if [ "$lines" -gt 1 ] ; then
job_log "[BLACKLIST] abusers list updated ($lines entries)" job_log "[BLACKLIST] abusers list updated ($lines entries)"
# reload nginx with the new config # reload nginx with the new config
mv /tmp/block-abusers.conf /etc/nginx/block-abusers.conf mv /tmp/block-abusers.conf /etc/nginx/block-abusers.conf
if [ -f /tmp/nginx.pid ] ; then if [ "$RELOAD" != "" ] ; then
/usr/sbin/nginx -s reload > /dev/null 2>&1 $RELOAD
# new config is ok : save it in the cache # new config is ok : save it in the cache
if [ "$?" -eq 0 ] ; then if [ "$?" -eq 0 ] ; then
cp /etc/nginx/block-abusers.conf /cache cp /etc/nginx/block-abusers.conf /cache
@ -30,7 +38,7 @@ if [ "$lines" -gt 1 ] ; then
else else
job_log "[NGINX] failed nginx reload after abusers list update fallback to old list" job_log "[NGINX] failed nginx reload after abusers list update fallback to old list"
cp /cache/block-abusers.conf /etc/nginx cp /cache/block-abusers.conf /etc/nginx
/usr/sbin/nginx -s reload > /dev/null 2>&1 $RELOAD
fi fi
else else
cp /etc/nginx/block-abusers.conf /cache cp /etc/nginx/block-abusers.conf /cache

View File

@ -6,8 +6,4 @@ if [ "$?" -ne 0 ] ; then
exit 1 exit 1
fi fi
# fix rights
chown -R root:nginx /etc/letsencrypt
chmod -R 740 /etc/letsencrypt
find /etc/letsencrypt -type d -exec chmod 750 {} \;
exit 0 exit 0

View File

@ -5,14 +5,17 @@
job_log "[CERTBOT] certificates have been renewed" job_log "[CERTBOT] certificates have been renewed"
# fix rights # if we are running nginx
chown -R root:nginx /etc/letsencrypt if [ -f /tmp/nginx.pid ] ; then
chmod -R 740 /etc/letsencrypt RELOAD="/usr/sbin/nginx -s reload > /dev/null 2>&1"
find /etc/letsencrypt -type d -exec chmod 750 {} \; # if we are in autoconf
elif [ -f /tmp/autoconf.sock ] ; then
RELOAD="echo reload > /tmp/autoconf.sock"
fi
# reload nginx # reload nginx
if [ -f /tmp/nginx.pid ] ; then if [ "$RELOAD" != "" ] ; then
/usr/sbin/nginx -s reload > /dev/null 2>&1 $RELOAD
if [ "$?" -eq 0 ] ; then if [ "$?" -eq 0 ] ; then
job_log "[NGINX] successfull nginx reload after certbot renew" job_log "[NGINX] successfull nginx reload after certbot renew"
else else

View File

@ -6,6 +6,14 @@
# copy old conf to cache # copy old conf to cache
cp /etc/nginx/block-tor-exit-node.conf /cache cp /etc/nginx/block-tor-exit-node.conf /cache
# if we are running nginx
if [ -f /tmp/nginx.pid ] ; then
RELOAD="/usr/sbin/nginx -s reload > /dev/null 2>&1"
# if we are in autoconf
elif [ -f /tmp/autoconf.sock ] ; then
RELOAD="/opt/entrypoint/reload.py"
fi
# generate the new conf # generate the new conf
curl -s "https://iplists.firehol.org/files/tor_exits.ipset" | grep -v "^\#.*" | curl -s "https://iplists.firehol.org/files/tor_exits.ipset" | grep -v "^\#.*" |
while read entry ; do while read entry ; do
@ -21,8 +29,8 @@ if [ "$lines" -gt 1 ] ; then
job_log "[BLACKLIST] TOR exit node list updated ($lines entries)" job_log "[BLACKLIST] TOR exit node list updated ($lines entries)"
# reload nginx with the new config # reload nginx with the new config
mv /tmp/block-tor-exit-node.conf /etc/nginx/block-tor-exit-node.conf mv /tmp/block-tor-exit-node.conf /etc/nginx/block-tor-exit-node.conf
if [ -f /tmp/nginx.pid ] ; then if [ "$RELOAD" != "" ] ; then
/usr/sbin/nginx -s reload > /dev/null 2>&1 $RELOAD
# new config is ok : save it in the cache # new config is ok : save it in the cache
if [ "$?" -eq 0 ] ; then if [ "$?" -eq 0 ] ; then
cp /etc/nginx/block-tor-exit-node.conf /cache cp /etc/nginx/block-tor-exit-node.conf /cache
@ -30,7 +38,7 @@ if [ "$lines" -gt 1 ] ; then
else else
job_log "[NGINX] failed nginx reload after TOR exit node list update fallback to old list" job_log "[NGINX] failed nginx reload after TOR exit node list update fallback to old list"
cp /cache/block-tor-exit-node.conf /etc/nginx cp /cache/block-tor-exit-node.conf /etc/nginx
/usr/sbin/nginx -s reload > /dev/null 2>&1 $RELOAD
fi fi
else else
cp /etc/nginx/block-tor-exit-node.conf /cache cp /etc/nginx/block-tor-exit-node.conf /cache

View File

@ -3,6 +3,14 @@
# load some functions # load some functions
. /opt/scripts/utils.sh . /opt/scripts/utils.sh
# if we are running nginx
if [ -f /tmp/nginx.pid ] ; then
RELOAD="/usr/sbin/nginx -s reload > /dev/null 2>&1"
# if we are in autoconf
elif [ -f /tmp/autoconf.sock ] ; then
RELOAD="/opt/entrypoint/reload.py"
fi
# MMDB from https://db-ip.com/db/download/ip-to-country-lite # MMDB from https://db-ip.com/db/download/ip-to-country-lite
URL="https://download.db-ip.com/free/dbip-country-lite-$(date +%Y-%m).mmdb.gz" URL="https://download.db-ip.com/free/dbip-country-lite-$(date +%Y-%m).mmdb.gz"
wget -O /tmp/geoip.mmdb.gz "$URL" > /dev/null 2>&1 wget -O /tmp/geoip.mmdb.gz "$URL" > /dev/null 2>&1
@ -13,8 +21,8 @@ if [ "$?" -eq 0 ] && [ -f /tmp/geoip.mmdb.gz ] ; then
exit 1 exit 1
fi fi
mv /tmp/geoip.mmdb /etc/nginx mv /tmp/geoip.mmdb /etc/nginx
if [ -f /tmp/nginx.pid ] ; then if [ "$RELOAD" != "" ] ; then
/usr/sbin/nginx -s reload > /dev/null 2>&1 $RELOAD
if [ "$?" -eq 0 ] ; then if [ "$?" -eq 0 ] ; then
cp /etc/nginx/geoip.mmdb /cache cp /etc/nginx/geoip.mmdb /cache
job_log "[NGINX] successfull nginx reload after GeoIP DB update" job_log "[NGINX] successfull nginx reload after GeoIP DB update"
@ -22,7 +30,7 @@ if [ "$?" -eq 0 ] && [ -f /tmp/geoip.mmdb.gz ] ; then
job_log "[NGINX] failed nginx reload after GeoIP DB update" job_log "[NGINX] failed nginx reload after GeoIP DB update"
if [ -f /cache/geoip.mmdb ] ; then if [ -f /cache/geoip.mmdb ] ; then
cp /cache/geoip.mmdb /etc/nginx/geoip.mmdb cp /cache/geoip.mmdb /etc/nginx/geoip.mmdb
/usr/sbin/nginx -s reload > /dev/null 2>&1 $RELOAD
fi fi
fi fi
else else

View File

@ -6,6 +6,14 @@
# copy old conf to cache # copy old conf to cache
cp /etc/nginx/block-proxies.conf /cache cp /etc/nginx/block-proxies.conf /cache
# if we are running nginx
if [ -f /tmp/nginx.pid ] ; then
RELOAD="/usr/sbin/nginx -s reload > /dev/null 2>&1"
# if we are in autoconf
elif [ -f /tmp/autoconf.sock ] ; then
RELOAD="/opt/entrypoint/reload.py"
fi
# generate the new conf # generate the new conf
curl -s "https://iplists.firehol.org/files/firehol_proxies.netset" | grep -v "^\#.*" | curl -s "https://iplists.firehol.org/files/firehol_proxies.netset" | grep -v "^\#.*" |
while read entry ; do while read entry ; do
@ -21,8 +29,8 @@ if [ "$lines" -gt 1 ] ; then
job_log "[BLACKLIST] proxies list updated ($lines entries)" job_log "[BLACKLIST] proxies list updated ($lines entries)"
# reload nginx with the new config # reload nginx with the new config
mv /tmp/block-proxies.conf /etc/nginx/block-proxies.conf mv /tmp/block-proxies.conf /etc/nginx/block-proxies.conf
if [ -f /tmp/nginx.pid ] ; then if [ "$RELOAD" != "" ] ; then
/usr/sbin/nginx -s reload > /dev/null 2>&1 $RELOAD
# new config is ok : save it in the cache # new config is ok : save it in the cache
if [ "$?" -eq 0 ] ; then if [ "$?" -eq 0 ] ; then
cp /etc/nginx/block-proxies.conf /cache cp /etc/nginx/block-proxies.conf /cache
@ -30,7 +38,7 @@ if [ "$lines" -gt 1 ] ; then
else else
job_log "[NGINX] failed nginx reload after proxies list update fallback to old list" job_log "[NGINX] failed nginx reload after proxies list update fallback to old list"
cp /cache/block-proxies.conf /etc/nginx cp /cache/block-proxies.conf /etc/nginx
/usr/sbin/nginx -s reload > /dev/null 2>&1 $RELOAD
fi fi
else else
cp /etc/nginx/block-proxies.conf /cache cp /etc/nginx/block-proxies.conf /cache

View File

@ -6,6 +6,14 @@
# save old conf # save old conf
cp /etc/nginx/map-referrer.conf /cache cp /etc/nginx/map-referrer.conf /cache
# if we are running nginx
if [ -f /tmp/nginx.pid ] ; then
RELOAD="/usr/sbin/nginx -s reload > /dev/null 2>&1"
# if we are in autoconf
elif [ -f /tmp/autoconf.sock ] ; then
RELOAD="/opt/entrypoint/reload.py"
fi
# generate new conf # generate new conf
BLACKLIST="$(curl -s https://raw.githubusercontent.com/mitchellkrogza/nginx-ultimate-bad-bot-blocker/master/_generator_lists/bad-referrers.list)" BLACKLIST="$(curl -s https://raw.githubusercontent.com/mitchellkrogza/nginx-ultimate-bad-bot-blocker/master/_generator_lists/bad-referrers.list)"
if [ "$?" -ne 0 ] ; then if [ "$?" -ne 0 ] ; then
@ -23,15 +31,15 @@ lines="$(wc -l /tmp/map-referrer.conf | cut -d ' ' -f 1)"
if [ "$lines" -gt 1 ] ; then if [ "$lines" -gt 1 ] ; then
mv /tmp/map-referrer.conf /etc/nginx/map-referrer.conf mv /tmp/map-referrer.conf /etc/nginx/map-referrer.conf
job_log "[BLACKLIST] referrers list updated ($lines entries)" job_log "[BLACKLIST] referrers list updated ($lines entries)"
if [ -f /tmp/nginx.pid ] ; then if [ "$RELOAD" != "" ] ; then
/usr/sbin/nginx -s reload > /dev/null 2>&1 $RELOAD
if [ "$?" -eq 0 ] ; then if [ "$?" -eq 0 ] ; then
cp /etc/nginx/map-referrer.conf /cache cp /etc/nginx/map-referrer.conf /cache
job_log "[NGINX] successfull nginx reload after referrers list update" job_log "[NGINX] successfull nginx reload after referrers list update"
else else
cp /cache/map-referrer.conf /etc/nginx cp /cache/map-referrer.conf /etc/nginx
job_log "[NGINX] failed nginx reload after referrers list update fallback to old list" job_log "[NGINX] failed nginx reload after referrers list update fallback to old list"
/usr/sbin/nginx -s reload > /dev/null 2>&1 $RELOAD
fi fi
else else
cp /etc/nginx/map-referrer.conf /cache cp /etc/nginx/map-referrer.conf /cache

View File

@ -6,6 +6,14 @@
# save old conf # save old conf
cp /etc/nginx/map-user-agent.conf /cache cp /etc/nginx/map-user-agent.conf /cache
# if we are running nginx
if [ -f /tmp/nginx.pid ] ; then
RELOAD="/usr/sbin/nginx -s reload > /dev/null 2>&1"
# if we are in autoconf
elif [ -f /tmp/autoconf.sock ] ; then
RELOAD="/opt/entrypoint/reload.py"
fi
# generate new conf # generate new conf
BLACKLIST="$(curl -s https://raw.githubusercontent.com/mitchellkrogza/nginx-ultimate-bad-bot-blocker/master/_generator_lists/bad-user-agents.list) BLACKLIST="$(curl -s https://raw.githubusercontent.com/mitchellkrogza/nginx-ultimate-bad-bot-blocker/master/_generator_lists/bad-user-agents.list)
$(curl -s https://raw.githubusercontent.com/JayBizzle/Crawler-Detect/master/raw/Crawlers.txt)" $(curl -s https://raw.githubusercontent.com/JayBizzle/Crawler-Detect/master/raw/Crawlers.txt)"
@ -25,15 +33,15 @@ lines="$(wc -l /tmp/map-user-agent.conf | cut -d ' ' -f 1)"
if [ "$lines" -gt 1 ] ; then if [ "$lines" -gt 1 ] ; then
mv /tmp/map-user-agent.conf /etc/nginx/map-user-agent.conf mv /tmp/map-user-agent.conf /etc/nginx/map-user-agent.conf
job_log "[BLACKLIST] user-agent list updated ($lines entries)" job_log "[BLACKLIST] user-agent list updated ($lines entries)"
if [ -f /tmp/nginx.pid ] ; then if [ "$RELOAD" != "" ] ; then
/usr/sbin/nginx -s reload > /dev/null 2>&1 $RELOAD
if [ "$?" -eq 0 ] ; then if [ "$?" -eq 0 ] ; then
cp /etc/nginx/map-user-agent.conf /cache cp /etc/nginx/map-user-agent.conf /cache
job_log "[NGINX] successfull nginx reload after user-agent list update" job_log "[NGINX] successfull nginx reload after user-agent list update"
else else
cp /cache/map-user-agent.conf /etc/nginx cp /cache/map-user-agent.conf /etc/nginx
job_log "[NGINX] failed nginx reload after user-agent list update fallback to old list" job_log "[NGINX] failed nginx reload after user-agent list update fallback to old list"
/usr/sbin/nginx -s reload > /dev/null 2>&1 $RELOAD
fi fi
else else
cp /etc/nginx/map-user-agent.conf /cache cp /etc/nginx/map-user-agent.conf /cache