bunkerweb 1.4.0

This commit is contained in:
bunkerity
2022-06-03 17:24:14 +02:00
parent 3a078326c5
commit a9f886804a
5245 changed files with 1432051 additions and 27894 deletions

217
autoconf/Config.py Normal file
View File

@@ -0,0 +1,217 @@
from traceback import format_exc
from threading import Thread, Lock
from time import sleep
from subprocess import run, DEVNULL, STDOUT
from glob import glob
from shutil import rmtree
from os import makedirs
from os.path import dirname
from json import loads
from API import API
from JobScheduler import JobScheduler
from ApiCaller import ApiCaller
from ConfigCaller import ConfigCaller
from logger import log
class Config(ApiCaller, ConfigCaller) :
    """Aggregate the configuration of every discovered BunkerWeb instance and
    service, write custom configs to /data, run the config generator and push
    the result to each instance API.

    Also owns the background JobScheduler and its runner thread."""
    def __init__(self, ctrl_type, lock=None) :
        # ctrl_type : controller flavour (e.g. "docker", "swarm", "kubernetes")
        # lock : optional Lock shared with the controller, forwarded to the scheduler
        ApiCaller.__init__(self)
        ConfigCaller.__init__(self)
        self.__ctrl_type = ctrl_type
        self.__lock = lock
        self.__instances = []
        self.__services = []
        self.__configs = []
        self.__config = {}
        self.__scheduler = None
        self.__scheduler_thread = None
        # __schedule is the run flag of the scheduler thread ; it is always
        # read and written under __schedule_lock
        self.__schedule = False
        self.__schedule_lock = Lock()
    def __get_full_env(self) :
        """Merge instance variables and per-service variables (prefixed with
        the service's first server name) into a single environment dict."""
        env_instances = {}
        for instance in self.__instances :
            for variable, value in instance["env"].items() :
                env_instances[variable] = value
        env_services = {}
        if not "SERVER_NAME" in env_instances :
            env_instances["SERVER_NAME"] = ""
        for service in self.__services :
            for variable, value in service.items() :
                env_services[service["SERVER_NAME"].split(" ")[0] + "_" + variable] = value
            # append the service's first server name to the global SERVER_NAME list
            if env_instances["SERVER_NAME"] != "" :
                env_instances["SERVER_NAME"] += " "
            env_instances["SERVER_NAME"] += service["SERVER_NAME"].split(" ")[0]
        return self._full_env(env_instances, env_services)
    def __scheduler_run_pending(self) :
        # thread target : run pending jobs every second until stop_scheduler()
        # clears the __schedule flag
        schedule = True
        while schedule :
            self.__scheduler.run_pending()
            sleep(1)
            self.__schedule_lock.acquire()
            schedule = self.__schedule
            self.__schedule_lock.release()
    def update_needed(self, instances, services, configs=None) :
        """Return True when the given state differs from the cached one
        (configs are only compared when provided)."""
        if instances != self.__instances :
            return True
        if services != self.__services :
            return True
        if configs is not None and configs != self.__configs :
            return True
        return False
    def __get_config(self) :
        """Flatten instance and service variables into one dict ; SERVER_NAME
        becomes the space-separated list of first server names."""
        config = {}
        # extract instances variables
        for instance in self.__instances :
            for variable, value in instance["env"].items() :
                config[variable] = value
        # extract services variables
        server_names = []
        for service in self.__services :
            first_server = service["SERVER_NAME"].split(" ")[0]
            server_names.append(first_server)
            for variable, value in service.items() :
                config[first_server + "_" + variable] = value
        config["SERVER_NAME"] = " ".join(server_names)
        return config
    def __get_apis(self) :
        """Build one API object per instance (port 5000, Host header taken
        from API_SERVER_NAME or the "bwapi" default)."""
        apis = []
        for instance in self.__instances :
            endpoint = "http://" + instance["hostname"] + ":5000"
            host = "bwapi"
            if "API_SERVER_NAME" in instance["env"] :
                host = instance["env"]["API_SERVER_NAME"]
            apis.append(API(endpoint, host=host))
        return apis
    def __write_configs(self) :
        """Write custom configs under /data/configs/<type>/ (wiping each type
        folder first) ; returns False when at least one file failed."""
        ret = True
        for config_type in self.__configs :
            rmtree("/data/configs/" + config_type)
            makedirs("/data/configs/" + config_type, exist_ok=True)
            for file, data in self.__configs[config_type].items() :
                path = "/data/configs/" + config_type + "/" + file
                if not path.endswith(".conf") :
                    path += ".conf"
                makedirs(dirname(path), exist_ok=True)
                try :
                    # data may be bytes (e.g. base64-decoded Swarm configs)
                    mode = "w"
                    if type(data) is bytes :
                        mode = "wb"
                    with open(path, mode) as f :
                        f.write(data)
                except :
                    print(format_exc())
                    log("CONFIG", "", "Can't save file " + path)
                    ret = False
        return ret
    def apply(self, instances, services, configs=None) :
        """Full deployment : cache the new state, write custom configs, reload
        the scheduler, run the generator and push /etc/nginx + /data to every
        instance, then ask them to reload.

        Best-effort : returns False when any step failed but keeps going."""
        success = True
        # stop scheduler just in case caller didn't do it
        self.stop_scheduler()
        # update values
        # order here is important :
        # __get_scheduler needs apis
        # __get_apis needs __config
        # __get_full_env needs __instances and __services
        self.__instances = instances
        self.__services = services
        self.__configs = configs
        self.__config = self.__get_full_env()
        self._set_apis(self.__get_apis())
        # write configs
        ret = self.__write_configs()
        if not ret :
            success = False
            log("CONFIG", "", "saving custom configs failed, configuration will not work as expected...")
        # get env
        env = self.__get_full_env()
        # run jobs once
        # expose every instance endpoint to the scheduler as CLUSTER_INSTANCE_<i>
        i = 1
        for instance in self.__instances :
            endpoint = "http://" + instance["hostname"] + ":5000"
            host = "bwapi"
            if "API_SERVER_NAME" in instance["env"] :
                host = instance["env"]["API_SERVER_NAME"]
            env["CLUSTER_INSTANCE_" + str(i)] = endpoint + " " + host
            i += 1
        if self.__scheduler is None :
            self.__scheduler = JobScheduler(env=env, lock=self.__lock, apis=self._get_apis())
        ret = self.__scheduler.reload(env)
        if not ret :
            success = False
            log("CONFIG", "", "scheduler.reload() failed, configuration will not work as expected...")
        # write config to /tmp/variables.env
        with open("/tmp/variables.env", "w") as f :
            for variable, value in self.__config.items() :
                f.write(variable + "=" + value + "\n")
        # run the generator
        cmd = "python /opt/bunkerweb/gen/main.py --settings /opt/bunkerweb/settings.json --templates /opt/bunkerweb/confs --output /etc/nginx --variables /tmp/variables.env"
        proc = run(cmd.split(" "), stdin=DEVNULL, stderr=STDOUT)
        if proc.returncode != 0 :
            success = False
            log("CONFIG", "", "config generator failed, configuration will not work as expected...")
        # make the generated files usable by the nginx group (gid 101)
        cmd = "chown -R root:101 /etc/nginx"
        run(cmd.split(" "), stdin=DEVNULL, stdout=DEVNULL, stderr=STDOUT)
        cmd = "chmod -R 770 /etc/nginx"
        run(cmd.split(" "), stdin=DEVNULL, stdout=DEVNULL, stderr=STDOUT)
        # send nginx configs
        # send data folder
        # reload nginx
        ret = self._send_files("/etc/nginx", "/confs")
        if not ret :
            success = False
            log("CONFIG", "", "sending nginx configs failed, configuration will not work as expected...")
        ret = self._send_files("/data", "/data")
        if not ret :
            success = False
            log("CONFIG", "", "sending custom configs failed, configuration will not work as expected...")
        ret = self._send_to_apis("POST", "/reload")
        if not ret :
            success = False
            log("CONFIG", "", "reload failed, configuration will not work as expected...")
        return success
    def start_scheduler(self) :
        """Start the scheduler thread ; raises when it is already running."""
        if self.__scheduler_thread is not None and self.__scheduler_thread.is_alive() :
            raise Exception("scheduler is already running, can't run it twice")
        self.__schedule = True
        self.__scheduler_thread = Thread(target=self.__scheduler_run_pending)
        self.__scheduler_thread.start()
    def stop_scheduler(self) :
        """Ask the scheduler thread to stop and join it (no-op when it is not
        running)."""
        if self.__scheduler_thread is not None and self.__scheduler_thread.is_alive() :
            self.__schedule_lock.acquire()
            self.__schedule = False
            self.__schedule_lock.release()
            self.__scheduler_thread.join()
            self.__scheduler_thread = None
    def reload_scheduler(self, env) :
        # only reload when the scheduler thread is not running ; returns None
        # otherwise (callers are expected to check)
        if self.__scheduler_thread is None :
            return self.__scheduler.reload(env=env, apis=self._get_apis())
    def __get_scheduler(self, env) :
        """Create a fresh JobScheduler ; refuses while the previous one is
        still scheduled to run."""
        self.__schedule_lock.acquire()
        if self.__schedule :
            self.__schedule_lock.release()
            raise Exception("can't create new scheduler, old one is still running...")
        self.__schedule_lock.release()
        return JobScheduler(env=env, lock=self.__lock, apis=self._get_apis())

79
autoconf/Controller.py Normal file
View File

@@ -0,0 +1,79 @@
from abc import ABC, abstractmethod
from time import sleep
from Config import Config
from logger import log
class Controller(ABC) :
    """Base class of the autoconf controllers (docker / swarm / kubernetes).

    Concrete subclasses only translate controller-specific objects into
    generic instance/service dicts ; this class provides the shared waiting
    loop and the aggregation helpers."""
    def __init__(self, ctrl_type, lock=None) :
        self._type = ctrl_type
        self._instances = []
        self._services = []
        self._supported_config_types = ["http", "stream", "server-http", "server-stream", "default-server-http", "modsec", "modsec-crs"]
        # one empty mapping per supported custom config type
        self._configs = {config_type : {} for config_type in self._supported_config_types}
        self._config = Config(ctrl_type, lock)
    def wait(self, wait_time) :
        """Block until at least one instance exists and every instance is
        healthy, polling every wait_time seconds ; returns the instances."""
        while True :
            self._instances = self.get_instances()
            if not self._instances :
                log("CONTROLLER", "⚠️", "No instance found, waiting " + str(wait_time) + "s ...")
                sleep(wait_time)
                continue
            # first instance that is not healthy yet, if any
            unhealthy = next((instance for instance in self._instances if not instance["health"]), None)
            if unhealthy is None :
                return self._instances
            log("CONTROLLER", "⚠️", "Instance " + unhealthy["name"] + " is not ready, waiting " + str(wait_time) + "s ...")
            sleep(wait_time)
    @abstractmethod
    def _get_controller_instances(self) :
        pass
    @abstractmethod
    def _to_instances(self, controller_instance) :
        pass
    def get_instances(self) :
        """Flatten every controller object into generic instance dicts."""
        instances = []
        for controller_instance in self._get_controller_instances() :
            instances.extend(self._to_instances(controller_instance))
        return instances
    @abstractmethod
    def _get_controller_services(self) :
        pass
    @abstractmethod
    def _to_services(self, controller_service) :
        pass
    def get_services(self) :
        """Flatten every controller object into generic service dicts."""
        services = []
        for controller_service in self._get_controller_services() :
            services.extend(self._to_services(controller_service))
        return services
    @abstractmethod
    def get_configs(self) :
        pass
    @abstractmethod
    def apply_config(self) :
        pass
    @abstractmethod
    def process_events(self) :
        pass

View File

@@ -0,0 +1,68 @@
import traceback
from docker import DockerClient
from Controller import Controller
from logger import log
class DockerController(Controller) :
    """Controller implementation for standalone Docker : instances and
    services are containers discovered through their bunkerweb.* labels."""
    def __init__(self, docker_host) :
        # docker_host : URL of the Docker daemon (e.g. unix:///var/run/docker.sock)
        super().__init__("docker")
        self.__client = DockerClient(base_url=docker_host)
    def _get_controller_instances(self) :
        # BunkerWeb instances are containers labelled bunkerweb.AUTOCONF
        return self.__client.containers.list(filters={"label" : "bunkerweb.AUTOCONF"})
    def _to_instances(self, controller_instance) :
        """Convert a BunkerWeb container into a single generic instance dict."""
        instance = {}
        instance["name"] = controller_instance.name
        instance["hostname"] = controller_instance.name
        # healthy = running AND the Docker healthcheck reports "healthy"
        instance["health"] = controller_instance.status == "running" and controller_instance.attrs["State"]["Health"]["Status"] == "healthy"
        instance["env"] = {}
        for env in controller_instance.attrs["Config"]["Env"] :
            variable = env.split("=")[0]
            # skip variables inherited from the base image
            if variable in ["PATH", "NGINX_VERSION", "NJS_VERSION", "PKG_RELEASE"] :
                continue
            value = env.replace(variable + "=", "", 1)
            instance["env"][variable] = value
        # aggregate the first server name of every service container into SERVER_NAME
        server_name = []
        for controller_service in self._get_controller_services() :
            if "bunkerweb.SERVER_NAME" in controller_service.labels :
                server_name.append(controller_service.labels["bunkerweb.SERVER_NAME"].split(" ")[0])
        instance["env"]["SERVER_NAME"] = " ".join(server_name)
        return [instance]
    def _get_controller_services(self) :
        # service containers are the ones labelled bunkerweb.SERVER_NAME
        return self.__client.containers.list(filters={"label" : "bunkerweb.SERVER_NAME"})
    def _to_services(self, controller_service) :
        """Convert a container's bunkerweb.* labels into one service dict."""
        service = {}
        for variable, value in controller_service.labels.items() :
            if not variable.startswith("bunkerweb.") :
                continue
            service[variable.replace("bunkerweb.", "", 1)] = value
        return [service]
    def get_configs(self) :
        # BUGFIX : the previous code did `raise("...")` which raises a bare
        # str ; in Python 3 that is a TypeError ("exceptions must derive from
        # BaseException") and the intended message is lost. Raise a proper
        # exception type instead.
        raise NotImplementedError("get_configs is not supported with DockerController")
    def apply_config(self) :
        """Deploy the current instances/services (custom configs stay empty)."""
        return self._config.apply(self._instances, self._services, configs=self._configs)
    def process_events(self) :
        """Listen to Docker container events forever and redeploy whenever the
        discovered state changed."""
        for event in self.__client.events(decode=True, filters={"type": "container"}) :
            self._instances = self.get_instances()
            self._services = self.get_services()
            if not self._config.update_needed(self._instances, self._services) :
                continue
            log("DOCKER-CONTROLLER", "", "Catched docker event, deploying new configuration ...")
            try :
                ret = self.apply_config()
                if not ret :
                    log("DOCKER-CONTROLLER", "", "Error while deploying new configuration")
                else :
                    log("DOCKER-CONTROLLER", "", "Successfully deployed new configuration 🚀")
            except :
                log("DOCKER-CONTROLLER", "", "Exception while deploying new configuration :")
                print(traceback.format_exc())

View File

@@ -1,27 +1,54 @@
# NOTE(review): this chunk interleaves the old bunkerized-nginx Dockerfile
# with the new bunkerweb one (two FROM lines, two entrypoint declarations,
# duplicate dependency installs) — it looks like unresolved diff residue
# rather than a buildable file ; confirm which version is intended.
FROM alpine
FROM python:3-alpine
# old bunkerized-nginx layout (copies below target /opt/bunkerized-nginx)
COPY gen/ /opt/bunkerized-nginx/gen
COPY entrypoint/ /opt/bunkerized-nginx/entrypoint
COPY confs/global/ /opt/bunkerized-nginx/confs/global
COPY confs/site/ /opt/bunkerized-nginx/confs/site
COPY jobs/ /opt/bunkerized-nginx/jobs
COPY settings.json /opt/bunkerized-nginx/
COPY misc/cron-autoconf /etc/crontabs/root
COPY autoconf/entrypoint.sh /opt/bunkerized-nginx/entrypoint/
COPY autoconf/requirements.txt /opt/bunkerized-nginx/entrypoint/
COPY autoconf/src/* /opt/bunkerized-nginx/entrypoint/
COPY VERSION /opt/bunkerized-nginx
# Install dependencies
COPY deps/requirements.txt /opt/bunkerweb/deps/requirements.txt
# new layout : pinned/hashed deps installed into a dedicated target folder,
# build toolchain removed afterwards to keep the image small
RUN apk add --no-cache --virtual build gcc python3-dev musl-dev libffi-dev openssl-dev cargo && \
mkdir /opt/bunkerweb/deps/python && \
pip install --no-cache-dir --require-hashes --target /opt/bunkerweb/deps/python -r /opt/bunkerweb/deps/requirements.txt && \
apk del build
# old layout dependency install
RUN apk add --no-cache py3-pip bash certbot curl openssl socat && \
pip3 install -r /opt/bunkerized-nginx/gen/requirements.txt && \
pip3 install -r /opt/bunkerized-nginx/entrypoint/requirements.txt && \
pip3 install -r /opt/bunkerized-nginx/jobs/requirements.txt
# Copy files
# can't exclude specific files/dir from . so we are copying everything by hand
COPY api /opt/bunkerweb/api
COPY cli /opt/bunkerweb/cli
COPY confs /opt/bunkerweb/confs
COPY core /opt/bunkerweb/core
COPY gen /opt/bunkerweb/gen
COPY helpers /opt/bunkerweb/helpers
COPY job /opt/bunkerweb/job
COPY utils /opt/bunkerweb/utils
COPY settings.json /opt/bunkerweb/settings.json
COPY VERSION /opt/bunkerweb/VERSION
COPY autoconf /opt/bunkerweb/autoconf
COPY autoconf/prepare.sh /tmp
RUN chmod +x /tmp/prepare.sh && \
/tmp/prepare.sh && \
rm -f /tmp/prepare.sh
# Add nginx user, drop bwcli, setup data folders, permissions and logging
RUN apk add --no-cache git && \
ln -s /usr/local/bin/python3 /usr/bin/python3 && \
addgroup -g 101 nginx && \
adduser -h /var/cache/nginx -g nginx -s /bin/sh -G nginx -D -H -u 101 nginx && \
apk add --no-cache bash && \
cp /opt/bunkerweb/helpers/bwcli /usr/local/bin && \
mkdir /opt/bunkerweb/configs && \
for dir in $(echo "cache configs configs/http configs/stream configs/server-http configs/server-stream configs/default-server-http configs/default-server-stream configs/modsec configs/modsec-crs letsencrypt plugins www") ; do ln -s "/data/${dir}" "/opt/bunkerweb/${dir}" ; done && \
mkdir /opt/bunkerweb/tmp && \
chown -R root:nginx /opt/bunkerweb && \
find /opt/bunkerweb -type f -exec chmod 0740 {} \; && \
find /opt/bunkerweb -type d -exec chmod 0750 {} \; && \
chmod 770 /opt/bunkerweb/tmp && \
chmod 750 /opt/bunkerweb/gen/main.py /opt/bunkerweb/job/main.py /opt/bunkerweb/cli/main.py /usr/local/bin/bwcli /opt/bunkerweb/helpers/*.sh /opt/bunkerweb/autoconf/main.py /opt/bunkerweb/deps/python/bin/* && \
find /opt/bunkerweb/core/*/jobs/* -type f -exec chmod 750 {} \; && \
chown root:nginx /usr/local/bin/bwcli && \
mkdir /etc/nginx && \
chown -R nginx:nginx /etc/nginx && \
chmod -R 770 /etc/nginx && \
ln -s /data/letsencrypt /etc/letsencrypt && \
mkdir /var/log/letsencrypt /var/lib/letsencrypt && \
chown root:nginx /var/log/letsencrypt /var/lib/letsencrypt && \
chmod 770 /var/log/letsencrypt /var/lib/letsencrypt && \
ln -s /proc/1/fd/1 /var/log/letsencrypt/letsencrypt.log
#VOLUME /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache /etc/letsencrypt /acme-challenge
VOLUME /data /etc/nginx
# old entrypoint (bunkerized-nginx) vs new python autoconf entrypoint below
ENTRYPOINT ["/opt/bunkerized-nginx/entrypoint/entrypoint.sh"]
WORKDIR /opt/bunkerweb/autoconf
CMD ["python", "/opt/bunkerweb/autoconf/main.py"]

View File

@@ -0,0 +1,191 @@
from traceback import format_exc
from kubernetes import client, config, watch
from threading import Thread, Lock
from logger import log
from Controller import Controller
from ConfigCaller import ConfigCaller
class IngressController(Controller, ConfigCaller) :
    """Controller implementation for Kubernetes : BunkerWeb instances are
    annotated pods, services come from Ingress resources and custom configs
    from annotated ConfigMaps."""
    def __init__(self) :
        Controller.__init__(self, "kubernetes")
        ConfigCaller.__init__(self)
        # in-cluster service account credentials are expected
        config.load_incluster_config()
        self.__corev1 = client.CoreV1Api()
        self.__networkingv1 = client.NetworkingV1Api()
        # serializes event handling across the pod/ingress/configmap watch threads
        self.__internal_lock = Lock()
    def _get_controller_instances(self) :
        """Return every pod annotated with bunkerweb.io/AUTOCONF."""
        controller_instances = []
        for pod in self.__corev1.list_pod_for_all_namespaces(watch=False).items :
            if pod.metadata.annotations != None and "bunkerweb.io/AUTOCONF" in pod.metadata.annotations :
                controller_instances.append(pod)
        return controller_instances
    def _to_instances(self, controller_instance) :
        """Convert a BunkerWeb pod into a single generic instance dict."""
        instance = {}
        instance["name"] = controller_instance.metadata.name
        instance["hostname"] = controller_instance.status.pod_ip
        # healthy = pod condition "Ready" is True
        health = False
        if controller_instance.status.conditions is not None :
            for condition in controller_instance.status.conditions :
                if condition.type == "Ready" and condition.status == "True" :
                    health = True
                    break
        instance["health"] = health
        instance["env"] = {}
        # env is read from the first container of the pod spec
        for env in controller_instance.spec.containers[0].env :
            if env.value is not None :
                instance["env"][env.name] = env.value
            else :
                instance["env"][env.name] = ""
        # global (non-multisite) settings may also come from ingress annotations
        for controller_service in self._get_controller_services() :
            if controller_service.metadata.annotations is not None :
                for annotation, value in controller_service.metadata.annotations.items() :
                    if not annotation.startswith("bunkerweb.io/") :
                        continue
                    variable = annotation.replace("bunkerweb.io/", "", 1)
                    if self._is_setting(variable) :
                        instance["env"][variable] = value
        return [instance]
    def _get_controller_services(self) :
        # services are Ingress resources
        return self.__networkingv1.list_ingress_for_all_namespaces(watch=False).items
    def _to_services(self, controller_service) :
        """Convert an Ingress into one service dict per rule host, mapping
        HTTP paths to numbered REVERSE_PROXY_* settings."""
        if controller_service.spec is None or controller_service.spec.rules is None :
            return []
        services = []
        # parse rules
        for rule in controller_service.spec.rules :
            if rule.host is None :
                log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported ingress rule without host.")
                continue
            service = {}
            service["SERVER_NAME"] = rule.host
            if rule.http is None :
                services.append(service)
                continue
            # one REVERSE_PROXY_HOST/URL pair per path, numbered from 1
            location = 1
            for path in rule.http.paths :
                if path.path is None :
                    log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported ingress rule without path.")
                    continue
                if path.backend.service is None :
                    log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported ingress rule without backend service.")
                    continue
                if path.backend.service.port is None :
                    log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported ingress rule without backend service port.")
                    continue
                if path.backend.service.port.number is None :
                    log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported ingress rule without backend service port number.")
                    continue
                # the backend k8s Service must exist so its namespace can be resolved
                service_list = self.__corev1.list_service_for_all_namespaces(watch=False, field_selector="metadata.name=" + path.backend.service.name).items
                if len(service_list) == 0 :
                    log("INGRESS-CONTROLLER", "⚠️", "Ignoring ingress rule with service " + path.backend.service.name + " : service not found.")
                    continue
                reverse_proxy_host = "http://" + path.backend.service.name + "." + service_list[0].metadata.namespace + ".svc.cluster.local:" + str(path.backend.service.port.number)
                service["USE_REVERSE_PROXY"] = "yes"
                service["REVERSE_PROXY_HOST_" + str(location)] = reverse_proxy_host
                service["REVERSE_PROXY_URL_" + str(location)] = path.path
                location += 1
            services.append(service)
        # parse tls
        if controller_service.spec.tls is not None :
            log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported tls.")
        # parse annotations
        # multisite settings are annotations prefixed with the service's first server name
        if controller_service.metadata.annotations is not None :
            for service in services :
                for annotation, value in controller_service.metadata.annotations.items() :
                    if not annotation.startswith("bunkerweb.io/") :
                        continue
                    variable = annotation.replace("bunkerweb.io/", "", 1)
                    if not variable.startswith(service["SERVER_NAME"].split(" ")[0] + "_") :
                        continue
                    variable = variable.replace(service["SERVER_NAME"].split(" ")[0] + "_", "", 1)
                    if self._is_multisite_setting(variable) :
                        service[variable] = value
        return services
    def get_configs(self) :
        """Collect custom configs from ConfigMaps annotated with
        bunkerweb.io/CONFIG_TYPE (optionally scoped by CONFIG_SITE)."""
        configs = {}
        supported_config_types = ["http", "stream", "server-http", "server-stream", "default-server-http", "modsec", "modsec-crs"]
        for config_type in supported_config_types :
            configs[config_type] = {}
        for configmap in self.__corev1.list_config_map_for_all_namespaces(watch=False).items :
            if configmap.metadata.annotations is None or "bunkerweb.io/CONFIG_TYPE" not in configmap.metadata.annotations :
                continue
            config_type = configmap.metadata.annotations["bunkerweb.io/CONFIG_TYPE"]
            if config_type not in supported_config_types :
                log("INGRESS-CONTROLLER", "⚠️", "Ignoring unsupported CONFIG_TYPE " + config_type + " for ConfigMap " + configmap.metadata.name)
                continue
            if not configmap.data :
                log("INGRESS-CONTROLLER", "⚠️", "Ignoring blank ConfigMap " + configmap.metadata.name)
                continue
            config_site = ""
            if "bunkerweb.io/CONFIG_SITE" in configmap.metadata.annotations :
                config_site = configmap.metadata.annotations["bunkerweb.io/CONFIG_SITE"] + "/"
            for config_name, config_data in configmap.data.items() :
                configs[config_type][config_site + config_name] = config_data
        return configs
    def __watch(self, watch_type) :
        """Thread target : stream k8s events of the given resource type and
        redeploy when the discovered state changed."""
        w = watch.Watch()
        what = None
        if watch_type == "pod" :
            what = self.__corev1.list_pod_for_all_namespaces
        elif watch_type == "ingress" :
            what = self.__networkingv1.list_ingress_for_all_namespaces
        elif watch_type == "configmap" :
            what = self.__corev1.list_config_map_for_all_namespaces
        else :
            raise Exception("unsupported watch_type " + watch_type)
        while True :
            # locked tracks lock ownership so it can be released on exception
            locked = False
            try :
                for event in w.stream(what) :
                    self.__internal_lock.acquire()
                    locked = True
                    self._instances = self.get_instances()
                    self._services = self.get_services()
                    self._configs = self.get_configs()
                    if not self._config.update_needed(self._instances, self._services, configs=self._configs) :
                        self.__internal_lock.release()
                        locked = False
                        continue
                    log("INGRESS-CONTROLLER", "", "Catched kubernetes event, deploying new configuration ...")
                    try :
                        ret = self.apply_config()
                        if not ret :
                            log("INGRESS-CONTROLLER", "", "Error while deploying new configuration ...")
                        else :
                            log("INGRESS-CONTROLLER", "", "Successfully deployed new configuration 🚀")
                    except :
                        log("INGRESS-CONTROLLER", "", "Exception while deploying new configuration :")
                        print(format_exc())
                    self.__internal_lock.release()
                    locked = False
            except Exception as e :
                log("INGRESS-CONTROLLER", "", "Exception while reading k8s event (type = " + watch_type + ") : ")
                print(format_exc())
                if locked :
                    self.__internal_lock.release()
    def apply_config(self) :
        """Deploy the current state, pausing the job scheduler meanwhile."""
        self._config.stop_scheduler()
        ret = self._config.apply(self._instances, self._services, configs=self._configs)
        self._config.start_scheduler()
        return ret
    def process_events(self) :
        """Run one watch thread per resource type and wait for them forever."""
        watch_types = ["pod", "ingress", "configmap"]
        threads = []
        for watch_type in watch_types :
            threads.append(Thread(target=self.__watch, args=(watch_type,)))
        for thread in threads :
            thread.start()
        for thread in threads :
            thread.join()

View File

@@ -0,0 +1,97 @@
from traceback import format_exc
from threading import Thread, Lock
from docker import DockerClient
from logger import log
from base64 import b64decode
from Controller import Controller
class SwarmController(Controller) :
    """Controller implementation for Docker Swarm : instances and services
    are swarm services discovered through their bunkerweb.* labels, custom
    configs are swarm Config objects."""
    def __init__(self, docker_host) :
        super().__init__("swarm")
        self.__client = DockerClient(base_url=docker_host)
        self.__internal_lock = Lock()
    def _get_controller_instances(self) :
        # BunkerWeb instances are swarm services labelled bunkerweb.AUTOCONF
        return self.__client.services.list(filters={"label" : "bunkerweb.AUTOCONF"})
    def _to_instances(self, controller_instance) :
        """Convert a BunkerWeb swarm service into one instance dict per task."""
        # environment is declared once on the service spec and shared by all tasks
        shared_env = {}
        for env_entry in controller_instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"] :
            name = env_entry.split("=")[0]
            shared_env[name] = env_entry.replace(name + "=", "", 1)
        return [
            {
                "name" : task["ID"],
                # tasks are addressable as <service>.<node>.<task>
                "hostname" : controller_instance.name + "." + task["NodeID"] + "." + task["ID"],
                "health" : task["Status"]["State"] == "running",
                "env" : shared_env,
            }
            for task in controller_instance.tasks()
        ]
    def _get_controller_services(self) :
        # services are swarm services labelled bunkerweb.SERVER_NAME
        return self.__client.services.list(filters={"label" : "bunkerweb.SERVER_NAME"})
    def _to_services(self, controller_service) :
        """Convert a service's bunkerweb.* labels into a single service dict."""
        labels = controller_service.attrs["Spec"]["Labels"]
        return [{
            label.replace("bunkerweb.", "", 1) : value
            for label, value in labels.items()
            if label.startswith("bunkerweb.")
        }]
    def get_configs(self) :
        """Collect custom configs from swarm Config objects labelled with
        bunkerweb.CONFIG_TYPE (optionally scoped by bunkerweb.CONFIG_SITE)."""
        configs = {config_type : {} for config_type in self._supported_config_types}
        for config in self.__client.configs.list(filters={"label" : "bunkerweb.CONFIG_TYPE"}) :
            labels = config.attrs["Spec"]["Labels"]
            config_type = labels["bunkerweb.CONFIG_TYPE"]
            config_name = config.name
            if config_type not in self._supported_config_types :
                log("SWARM-CONTROLLER", "⚠️", "Ignoring unsupported CONFIG_TYPE " + config_type + " for Config " + config_name)
                continue
            config_site = ""
            if "bunkerweb.CONFIG_SITE" in labels :
                config_site = labels["bunkerweb.CONFIG_SITE"] + "/"
            # swarm stores config payloads base64-encoded
            configs[config_type][config_site + config_name] = b64decode(config.attrs["Spec"]["Data"])
        return configs
    def apply_config(self) :
        """Deploy the current state, pausing the job scheduler meanwhile."""
        self._config.stop_scheduler()
        ret = self._config.apply(self._instances, self._services, configs=self._configs)
        self._config.start_scheduler()
        return ret
    def __event(self, event_type) :
        """Thread target : stream swarm events of the given type and redeploy
        when the discovered state changed."""
        for event in self.__client.events(decode=True, filters={"type": event_type}) :
            self.__internal_lock.acquire()
            self._instances = self.get_instances()
            self._services = self.get_services()
            self._configs = self.get_configs()
            if not self._config.update_needed(self._instances, self._services, configs=self._configs) :
                self.__internal_lock.release()
                continue
            log("SWARM-CONTROLLER", "", "Catched Swarm event, deploying new configuration ...")
            try :
                ret = self.apply_config()
                if not ret :
                    log("SWARM-CONTROLLER", "", "Error while deploying new configuration ...")
                else :
                    log("SWARM-CONTROLLER", "", "Successfully deployed new configuration 🚀")
            except :
                log("SWARM-CONTROLLER", "", "Exception while deploying new configuration :")
                print(format_exc())
            self.__internal_lock.release()
    def process_events(self) :
        """Run one event thread per swarm object type and wait forever."""
        threads = [Thread(target=self.__event, args=(event_type,)) for event_type in ["service", "config"]]
        for thread in threads :
            thread.start()
        for thread in threads :
            thread.join()

View File

@@ -1,33 +0,0 @@
#!/bin/bash
# Autoconf container entrypoint : fix permissions, start cron, run the
# autoconf app and forward stop signals to both.
echo "[*] Starting autoconf ..."
# check permissions
# NOTE(review): `su -s` normally expects a shell binary ; here the
# permissions script is passed as the shell so su executes it directly as
# the nginx user — confirm this is intended.
su -s "/opt/bunkerized-nginx/entrypoint/permissions.sh" nginx
if [ "$?" -ne 0 ] ; then
exit 1
fi
# trap SIGTERM and SIGINT
# on stop : terminate crond and the python app so `wait` below returns
function trap_exit() {
echo "[*] Catched stop operation"
echo "[*] Stopping crond ..."
pkill -TERM crond
echo "[*] Stopping autoconf ..."
pkill -TERM python3
}
trap "trap_exit" TERM INT QUIT
# start cron
crond
# run autoconf app
/opt/bunkerized-nginx/entrypoint/app.py &
pid="$!"
# wait while app is up
wait "$pid"
# stop
echo "[*] autoconf stopped"
exit 0

69
autoconf/main.py Normal file
View File

@@ -0,0 +1,69 @@
#!/usr/bin/python3
import signal, os, traceback, time, subprocess
import sys
sys.path.append("/opt/bunkerweb/deps/python")
sys.path.append("/opt/bunkerweb/utils")
sys.path.append("/opt/bunkerweb/api")
sys.path.append("/opt/bunkerweb/job")
from SwarmController import SwarmController
from IngressController import IngressController
from DockerController import DockerController
from logger import log
# Get variables
# controller mode flags and connection settings all come from the environment
swarm = os.getenv("SWARM_MODE", "no") == "yes"
kubernetes = os.getenv("KUBERNETES_MODE", "no") == "yes"
docker_host = os.getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
wait_retry_interval = int(os.getenv("WAIT_RETRY_INTERVAL", "5"))
def exit_handler(signum, frame) :
    """Signal handler : log and terminate immediately (os._exit skips normal
    cleanup on purpose so blocked event threads can't delay shutdown)."""
    log("AUTOCONF", "", "Stop signal received, exiting...")
    os._exit(0)
signal.signal(signal.SIGINT, exit_handler)
signal.signal(signal.SIGTERM, exit_handler)
try :
    # Setup /data folder if needed
    #if swarm or kubernetes :
    proc = subprocess.run(["/opt/bunkerweb/helpers/data.sh", "AUTOCONF"], stdin=subprocess.DEVNULL, stderr=subprocess.STDOUT)
    if proc.returncode != 0 :
        os._exit(1)
    # Instantiate the controller
    # flavour is picked from SWARM_MODE / KUBERNETES_MODE, plain Docker is the fallback
    if swarm :
        log("AUTOCONF", "", "Swarm mode detected")
        controller = SwarmController(docker_host)
    elif kubernetes :
        log("AUTOCONF", "", "Kubernetes mode detected")
        controller = IngressController()
    else :
        log("AUTOCONF", "", "Docker mode detected")
        controller = DockerController(docker_host)
    # Wait for instances
    log("AUTOCONF", "", "Waiting for BunkerWeb instances ...")
    instances = controller.wait(wait_retry_interval)
    log("AUTOCONF", "", "BunkerWeb instances are ready 🚀")
    i = 1
    for instance in instances :
        log("AUTOCONF", "", "Instance #" + str(i) + " : " + instance["name"])
        i += 1
    # Run first configuration
    ret = controller.apply_config()
    if not ret :
        log("AUTOCONF", "", "Error while applying initial configuration")
        os._exit(1)
    # Process events
    # blocks forever ; termination happens through the signal handler above
    log("AUTOCONF", "", "Processing events ...")
    controller.process_events()
except :
    log("AUTOCONF", "", "Exception while running autoconf :")
    print(traceback.format_exc())
    sys.exit(1)

View File

@@ -1,81 +0,0 @@
#!/bin/sh
# Prepare the container filesystem for the unprivileged nginx (uid/gid 101)
# user : create the user, tighten ownership/permissions and pre-create or
# symlink every folder the runtime expects.
# create nginx user
addgroup -g 101 nginx
adduser -h /var/cache/nginx -g nginx -s /bin/sh -G nginx -D -H -u 101 nginx
# prepare /opt
chown root:nginx /opt
chmod 750 /opt
# prepare /opt/bunkerized-nginx
# default to group-readable files/dirs, then re-open the entrypoints
chown -R root:nginx /opt/bunkerized-nginx
find /opt/bunkerized-nginx -type f -exec chmod 0740 {} \;
find /opt/bunkerized-nginx -type d -exec chmod 0750 {} \;
chmod ugo+x /opt/bunkerized-nginx/entrypoint/* /opt/bunkerized-nginx/scripts/*
chmod ugo+x /opt/bunkerized-nginx/gen/main.py
chmod ugo+x /opt/bunkerized-nginx/jobs/main.py
chmod ugo+x /opt/bunkerized-nginx/jobs/reload.py
chmod ugo+x /opt/bunkerized-nginx/jobs/certbot-*.py
chmod 770 /opt/bunkerized-nginx
chmod 440 /opt/bunkerized-nginx/settings.json
# prepare /var/log
# jobs.log is redirected to the container's stdout via /proc/1/fd/1
mkdir /var/log/nginx
chown root:nginx /var/log/nginx
chmod 770 /var/log/nginx
ln -s /proc/1/fd/1 /var/log/nginx/jobs.log
mkdir /var/log/letsencrypt
chown nginx:nginx /var/log/letsencrypt
chmod 770 /var/log/letsencrypt
# prepare /etc/nginx
mkdir /etc/nginx
chown root:nginx /etc/nginx
chmod 770 /etc/nginx
# prepare /etc/letsencrypt
mkdir /etc/letsencrypt
chown root:nginx /etc/letsencrypt
chmod 770 /etc/letsencrypt
# prepare /var/lib/letsencrypt
mkdir /var/lib/letsencrypt
chown root:nginx /var/lib/letsencrypt
chmod 770 /var/lib/letsencrypt
# prepare /opt/bunkerized-nginx/cache
# each volume folder below is symlinked into the install tree before creation
ln -s /cache /opt/bunkerized-nginx/cache
mkdir /cache
chown root:nginx /cache
chmod 770 /cache
# prepare /acme-challenge
ln -s /acme-challenge /opt/bunkerized-nginx/acme-challenge
mkdir -p /acme-challenge/.well-known/acme-challenge
chown -R root:nginx /acme-challenge
chmod -R 770 /acme-challenge
# prepare /http-confs
ln -s /http-confs /opt/bunkerized-nginx/http-confs
mkdir /http-confs
chown root:nginx /http-confs
chmod 770 /http-confs
# prepare /server-confs
ln -s /server-confs /opt/bunkerized-nginx/server-confs
mkdir /server-confs
chown root:nginx /server-confs
chmod 770 /server-confs
# prepare /modsec-confs
ln -s /modsec-confs /opt/bunkerized-nginx/modsec-confs
mkdir /modsec-confs
chown root:nginx /modsec-confs
chmod 770 /modsec-confs
# prepare /modsec-crs-confs
ln -s /modsec-crs-confs /opt/bunkerized-nginx/modsec-crs-confs
mkdir /modsec-crs-confs
chown root:nginx /modsec-crs-confs
chmod 770 /modsec-crs-confs

View File

@@ -1,5 +0,0 @@
docker
requests
jinja2
kubernetes
dnspython

View File

@@ -1,195 +0,0 @@
#!/usr/bin/python3
import subprocess, shutil, os, traceback, requests, time, dns.resolver, io, tarfile
import Controller
from logger import log
# Mapping of config category name -> local folder synced with the instances
# (category names double as the API endpoint suffix used when sending them).
CONFIGS = {
    "conf": "/etc/nginx",
    "letsencrypt": "/etc/letsencrypt",
    "http": "/http-confs",
    "server": "/server-confs",
    "modsec": "/modsec-confs",
    "modsec-crs": "/modsec-crs-confs",
    "acme": "/acme-challenge"
}
class Config :
    """Generates the nginx configuration and delivers it to bunkerized-nginx
    instances: in Docker mode a reload is triggered with SIGHUP, in
    swarm/kubernetes mode the config directories are tarballed and POSTed
    to each instance's HTTP API (see __api_call)."""

    def __init__(self, type, api_uri, http_port="8080") :
        # type : Controller.Type value (DOCKER / SWARM / KUBERNETES)
        # api_uri : URI prefix expected by the instances' API server
        # http_port : port the instances' API server listens on
        self.__type = type
        self.__api_uri = api_uri
        self.__http_port = http_port

    def __jobs(self) :
        """Run the jobs script as the nginx user; return True on success."""
        log("config", "INFO", "starting jobs ...")
        proc = subprocess.run(["/bin/su", "-c", "/opt/bunkerized-nginx/entrypoint/jobs.sh", "nginx"], capture_output=True)
        stdout = proc.stdout.decode("ascii")
        stderr = proc.stderr.decode("ascii")
        if len(stdout) > 1 :
            log("config", "INFO", "jobs stdout :\n" + stdout)
        if stderr != "" :
            log("config", "ERROR", "jobs stderr :\n" + stderr)
        if proc.returncode != 0 :
            log("config", "ERROR", "jobs error (return code = " + str(proc.returncode) + ")")
            return False
        return True

    def gen(self, env) :
        """Write env to /tmp/variables.env and run the generator as nginx.

        In swarm/kubernetes mode the jobs script also runs after a successful
        generation. Returns True on success, False on any failure."""
        try :
            # Write environment variables to a file
            with open("/tmp/variables.env", "w") as f :
                for k, v in env.items() :
                    f.write(k + "=" + v + "\n")
            # Call the generator
            proc = subprocess.run(["/bin/su", "-c", "/opt/bunkerized-nginx/gen/main.py --settings /opt/bunkerized-nginx/settings.json --templates /opt/bunkerized-nginx/confs --output /etc/nginx --variables /tmp/variables.env", "nginx"], capture_output=True)
            # Print stdout/stderr
            stdout = proc.stdout.decode("ascii")
            stderr = proc.stderr.decode("ascii")
            if len(stdout) > 1 :
                log("config", "INFO", "generator output : " + stdout)
            if stderr != "" :
                log("config", "ERROR", "generator error : " + stderr)
            # We're done
            if proc.returncode == 0 :
                if self.__type == Controller.Type.SWARM or self.__type == Controller.Type.KUBERNETES :
                    return self.__jobs()
                return True
            log("config", "ERROR", "error while generating config (return code = " + str(proc.returncode) + ")")
        except Exception :
            log("config", "ERROR", "exception while generating site config : " + traceback.format_exc())
        return False

    def reload(self, instances) :
        """Ask instances to reload nginx (SIGHUP in Docker, /reload API otherwise)."""
        ret = True
        if self.__type == Controller.Type.DOCKER :
            for instance in instances :
                try :
                    instance.kill("SIGHUP")
                except Exception :
                    ret = False
        elif self.__type == Controller.Type.SWARM or self.__type == Controller.Type.KUBERNETES :
            ret = self.__api_call(instances, "/reload")
        return ret

    def send(self, instances, files="all") :
        """Tarball each config directory and POST it to the instances.

        files : "all" or a single CONFIGS key to restrict what is sent.
        Returns False if any upload failed."""
        ret = True
        fail = False
        for name, path in CONFIGS.items() :
            if files != "all" and name != files :
                continue
            file = self.__tarball(path)
            if not self.__api_call(instances, "/" + name, file=file) :
                log("config", "ERROR", "can't send config " + name + " to instance(s)")
                fail = True
            file.close()
        if fail :
            ret = False
        return ret

    def stop_temp(self, instances) :
        """Ask instances to stop their temporary (bootstrap) server."""
        return self.__api_call(instances, "/stop-temp")

    def __tarball(self, path) :
        """Return an in-memory gzipped tarball of path, contents rooted at "."."""
        file = io.BytesIO()
        with tarfile.open(mode="w:gz", fileobj=file) as tar :
            tar.add(path, arcname=".")
        file.seek(0, 0)
        return file

    def __ping(self, instances) :
        return self.__api_call(instances, "/ping")

    def wait(self, instances) :
        """Block until the given instances are up; return True on success."""
        ret = True
        if self.__type == Controller.Type.DOCKER :
            ret = self.__wait_docker(instances)
        elif self.__type == Controller.Type.SWARM or self.__type == Controller.Type.KUBERNETES :
            ret = self.__wait_api(instances)
        return ret

    def __wait_docker(self, instances) :
        """Poll Docker healthchecks for up to 120s until every instance is healthy."""
        all_healthy = False
        i = 0
        while i < 120 :
            one_not_healthy = False
            for instance in instances :
                instance.reload()
                if instance.attrs["State"]["Health"]["Status"] != "healthy" :
                    one_not_healthy = True
                    break
            if not one_not_healthy :
                all_healthy = True
                break
            time.sleep(1)
            i += 1
        return all_healthy

    def __wait_api(self, instances) :
        """Ping instances over the API with a growing delay (up to ~55s total)."""
        try :
            # signal the local temporary server that autoconf is ready
            with open("/etc/nginx/autoconf", "w") as f :
                f.write("ok")
            i = 1
            started = False
            while i <= 10 :
                time.sleep(i)
                if self.__ping(instances) :
                    started = True
                    break
                i += 1
                log("config", "INFO", "waiting " + str(i) + " seconds before retrying to contact bunkerized-nginx instances")
            if started :
                log("config", "INFO", "bunkerized-nginx instances started")
                return True
            else :
                log("config", "ERROR", "bunkerized-nginx instances are not started")
        except Exception :
            log("config", "ERROR", "exception while waiting for bunkerized-nginx instances : " + traceback.format_exc())
        return False

    def __api_call(self, instances, path, file=None) :
        """POST to http://<ip>:<port><api_uri><path> on every resolved task IP.

        Returns True only if every call succeeded AND at least one was made."""
        ret = True
        nb = 0
        urls = []
        if self.__type == Controller.Type.SWARM :
            for instance in instances :
                name = instance.name
                try :
                    # tasks.<service> resolves to the IPs of all service tasks
                    dns_result = dns.resolver.query("tasks." + name)
                    for ip in dns_result :
                        urls.append("http://" + ip.to_text() + ":" + self.__http_port + self.__api_uri + path)
                except Exception :
                    ret = False
        elif self.__type == Controller.Type.KUBERNETES :
            for instance in instances :
                name = instance.metadata.name
                try :
                    dns_result = dns.resolver.query(name + "." + instance.metadata.namespace + ".svc.cluster.local")
                    for ip in dns_result :
                        urls.append("http://" + ip.to_text() + ":" + self.__http_port + self.__api_uri + path)
                except Exception :
                    ret = False
        for url in urls :
            req = None
            try :
                if file is None :
                    req = requests.post(url)
                else :
                    # rewind before every upload so each instance gets the full tarball
                    file.seek(0, 0)
                    req = requests.post(url, files={'file': file})
            except Exception :
                pass
            if req and req.status_code == 200 and req.text == "ok" :
                log("config", "INFO", "successfully sent API order to " + url)
                nb += 1
            else :
                log("config", "INFO", "failed API order to " + url)
                ret = False
        return ret and nb > 0

View File

@@ -1,68 +0,0 @@
import traceback
from abc import ABC, abstractmethod
from enum import Enum
from Config import Config
class Type(Enum) :
    """Orchestration mode the autoconf controller runs against."""
    DOCKER = 1      # local Docker: instances reloaded with SIGHUP (see Config.reload)
    SWARM = 2       # Docker Swarm: config pushed over the instances' HTTP API
    KUBERNETES = 3  # Kubernetes: config pushed over the instances' HTTP API
class Controller(ABC) :
    """Base class shared by the Docker / Swarm / Ingress controllers.

    Owns the Config helper and wraps its operations so that any exception
    raised underneath is turned into a plain False return value.
    """

    def __init__(self, type, api_uri=None, lock=None, http_port="8080") :
        self._config = Config(type, api_uri, http_port=http_port)
        self.lock = lock

    @abstractmethod
    def get_env(self) :
        """Return the generator environment built from the orchestrator state."""
        pass

    def _fix_env(self, env) :
        """Return a copy of env without the variables injected by the base image."""
        dropped = ("NGINX_VERSION", "NJS_VERSION", "PATH", "PKG_RELEASE")
        return {name: value for name, value in env.items() if name not in dropped}

    def gen_conf(self, env) :
        """Generate the configuration; False on any failure."""
        try :
            return self._config.gen(env)
        except :
            return False

    @abstractmethod
    def wait(self) :
        pass

    @abstractmethod
    def process_events(self, current_env) :
        pass

    @abstractmethod
    def reload(self) :
        pass

    def _reload(self, instances) :
        """Ask the given instances to reload; False on any failure."""
        try :
            return self._config.reload(instances)
        except :
            return False

    def _send(self, instances, files="all") :
        """Send configuration files to the given instances; False on failure."""
        try :
            return self._config.send(instances, files=files)
        except Exception :
            return False

    def _stop_temp(self, instances) :
        """Ask the given instances to stop their temporary server; False on failure."""
        try :
            return self._config.stop_temp(instances)
        except Exception :
            return False

View File

@@ -1,78 +0,0 @@
import docker, time
import Controller
from logger import log
class DockerController(Controller.Controller) :
    """Controller for plain Docker mode: builds the generator environment
    from container env vars and bunkerized-nginx.* labels, regenerates the
    config on container events and reloads instances (see Controller)."""
    def __init__(self, docker_host) :
        super().__init__(Controller.Type.DOCKER)
        self.__client = docker.DockerClient(base_url=docker_host)
    def __get_instances(self) :
        # bunkerized-nginx instances carry the bunkerized-nginx.AUTOCONF label
        return self.__client.containers.list(filters={"label" : "bunkerized-nginx.AUTOCONF"})
    def __get_containers(self) :
        # protected web services carry the bunkerized-nginx.SERVER_NAME label
        return self.__client.containers.list(filters={"label" : "bunkerized-nginx.SERVER_NAME"})
    def get_env(self) :
        """Aggregate instance env vars and service labels into one env dict;
        SERVER_NAME collects the first server name of every service."""
        env = {}
        for instance in self.__get_instances() :
            for variable in instance.attrs["Config"]["Env"] :
                # split only on the first "=" so values may themselves contain "="
                env[variable.split("=")[0]] = variable.replace(variable.split("=")[0] + "=", "", 1)
        first_servers = []
        if "SERVER_NAME" in env and env["SERVER_NAME"] != "" :
            first_servers = env["SERVER_NAME"].split(" ")
        for container in self.__get_containers() :
            first_server = container.labels["bunkerized-nginx.SERVER_NAME"].split(" ")[0]
            first_servers.append(first_server)
            for variable, value in container.labels.items() :
                if variable.startswith("bunkerized-nginx.") and variable != "bunkerized-nginx.AUTOCONF" :
                    # labels become per-site variables prefixed by the first server name
                    env[first_server + "_" + variable.replace("bunkerized-nginx.", "", 1)] = value
        if len(first_servers) == 0 :
            env["SERVER_NAME"] = ""
        else :
            env["SERVER_NAME"] = " ".join(first_servers)
        return self._fix_env(env)
    def process_events(self, current_env) :
        """Regenerate and reload whenever a container event changes the env."""
        old_env = current_env
        # TODO : check why filter isn't working as expected
        #for event in self.__client.events(decode=True, filters={"type": "container", "label": ["bunkerized-nginx.AUTOCONF", "bunkerized-nginx.SERVER_NAME"]}) :
        for event in self.__client.events(decode=True, filters={"type": "container"}) :
            new_env = self.get_env()
            if new_env != old_env :
                try :
                    log("controller", "INFO", "generating new configuration")
                    if self.gen_conf(new_env) :
                        # remember the applied env so identical events are skipped
                        old_env = new_env.copy()
                        log("controller", "INFO", "successfully generated new configuration")
                        if self.reload() :
                            log("controller", "INFO", "successful reload")
                        else :
                            log("controller", "ERROR", "failed reload")
                    else :
                        log("controller", "ERROR", "can't generate new configuration")
                except :
                    log("controller", "ERROR", "exception while receiving event")
    def reload(self) :
        # Docker mode: Config.reload sends SIGHUP to each instance container
        return self._reload(self.__get_instances())
    def wait(self) :
        """Block until at least one instance exists and the first config is
        generated; returns (ok, env). Any exception yields (False, {})."""
        try :
            # Wait for a container
            instances = self.__get_instances()
            while len(instances) == 0 :
                time.sleep(1)
                instances = self.__get_instances()
            # Generate first config
            env = self.get_env()
            if not self.gen_conf(env) :
                return False, env
            # Wait for nginx
            return self._config.wait(instances), env
        except :
            pass
        return False, {}

View File

@@ -1,191 +0,0 @@
from kubernetes import client, config, watch
from threading import Thread, Lock
import time
import Controller
from logger import log
class IngressController(Controller.Controller) :
    """Kubernetes controller: watches pods, ingresses and services labelled
    "bunkerized-nginx", builds the generator environment from them and pushes
    the resulting configuration to the instances over the HTTP API."""

    def __init__(self, api_uri, http_port) :
        super().__init__(Controller.Type.KUBERNETES, api_uri=api_uri, lock=Lock(), http_port=http_port)
        config.load_incluster_config()
        self.__api = client.CoreV1Api()
        # NOTE(review): ExtensionsV1beta1Api was removed from recent kubernetes
        # client/cluster versions — confirm the pinned versions support it
        self.__extensions_api = client.ExtensionsV1beta1Api()
        self.__old_env = {}

    def __get_pods(self) :
        return self.__api.list_pod_for_all_namespaces(watch=False, label_selector="bunkerized-nginx").items

    def __get_ingresses(self) :
        return self.__extensions_api.list_ingress_for_all_namespaces(watch=False, label_selector="bunkerized-nginx").items

    def __get_services(self, autoconf=False) :
        """List labelled services; with autoconf=True keep only those that also
        carry the bunkerized-nginx.AUTOCONF annotation (i.e. the instances)."""
        services = self.__api.list_service_for_all_namespaces(watch=False, label_selector="bunkerized-nginx").items
        if not autoconf :
            return services
        return [service for service in services if service.metadata.annotations is not None and "bunkerized-nginx.AUTOCONF" in service.metadata.annotations]

    def __pod_to_env(self, pod_env) :
        """Convert a pod's env var list to a dict; None values become ""."""
        env = {}
        for env_var in pod_env :
            env[env_var.name] = env_var.value if env_var.value is not None else ""
        return env

    def __annotations_to_env(self, annotations) :
        """Turn bunkerized-nginx.* annotations into generator variables,
        prefixed with the first server name when SERVER_NAME is annotated."""
        env = {}
        prefix = ""
        if "bunkerized-nginx.SERVER_NAME" in annotations :
            prefix = annotations["bunkerized-nginx.SERVER_NAME"].split(" ")[0] + "_"
        for annotation in annotations :
            name = annotation.replace("bunkerized-nginx.", "", 1)
            if annotation.startswith("bunkerized-nginx.") and name != "" and name != "AUTOCONF" :
                env[prefix + name] = annotations[annotation]
        return env

    def __rules_to_env(self, rules, namespace="default") :
        """Translate ingress rules into reverse-proxy variables; one numbered
        REVERSE_PROXY_URL/HOST pair per path, counted per host."""
        env = {}
        first_servers = []
        numbers = {}
        for rule in rules :
            rule = rule.to_dict()
            prefix = ""
            number = 1
            if "host" in rule :
                # NOTE(review): to_dict() keeps "host" even when it is None —
                # confirm host-less rules cannot reach this point
                prefix = rule["host"] + "_"
                first_servers.append(rule["host"])
                if not rule["host"] in numbers :
                    numbers[rule["host"]] = 1
                number = numbers[rule["host"]]
            if not "http" in rule or not "paths" in rule["http"] :
                continue
            env[prefix + "USE_REVERSE_PROXY"] = "yes"
            for path in rule["http"]["paths"] :
                suffix = "_" + str(number)
                env[prefix + "REVERSE_PROXY_URL" + suffix] = path["path"]
                env[prefix + "REVERSE_PROXY_HOST" + suffix] = "http://" + path["backend"]["service_name"] + "." + namespace + ".svc.cluster.local:" + str(path["backend"]["service_port"])
                number += 1
            numbers[rule["host"]] = number
        env["SERVER_NAME"] = " ".join(first_servers)
        return env

    def get_env(self) :
        """Aggregate pod env vars, ingress rules/annotations and service
        annotations into a single generator environment."""
        pods = self.__get_pods()
        ingresses = self.__get_ingresses()
        services = self.__get_services()
        env = {}
        first_servers = []
        for pod in pods :
            env.update(self.__pod_to_env(pod.spec.containers[0].env))
            if "SERVER_NAME" in env and env["SERVER_NAME"] != "" :
                first_servers.extend(env["SERVER_NAME"].split(" "))
        for ingress in ingresses :
            env.update(self.__rules_to_env(ingress.spec.rules, namespace=ingress.metadata.namespace))
            if ingress.metadata.annotations is not None :
                env.update(self.__annotations_to_env(ingress.metadata.annotations))
            if ingress.spec.tls :
                # TLS hosts get automatic Let's Encrypt certificates
                for tls_entry in ingress.spec.tls :
                    for host in tls_entry.hosts :
                        env[host + "_AUTO_LETS_ENCRYPT"] = "yes"
            if "SERVER_NAME" in env and env["SERVER_NAME"] != "" :
                first_servers.extend(env["SERVER_NAME"].split(" "))
        for service in services :
            if service.metadata.annotations is not None and "bunkerized-nginx.SERVER_NAME" in service.metadata.annotations :
                env.update(self.__annotations_to_env(service.metadata.annotations))
                first_servers.append(service.metadata.annotations["bunkerized-nginx.SERVER_NAME"])
        # dedupe while preserving first-seen order
        first_servers = list(dict.fromkeys(first_servers))
        if len(first_servers) == 0 :
            env["SERVER_NAME"] = ""
        else :
            env["SERVER_NAME"] = " ".join(first_servers)
        return self._fix_env(env)

    def process_events(self, current_env) :
        """Watch pods, ingresses and services concurrently; reconfigure on change."""
        self.__old_env = current_env
        t_pod = Thread(target=self.__watch, args=("pod",))
        t_ingress = Thread(target=self.__watch, args=("ingress",))
        t_service = Thread(target=self.__watch, args=("service",))
        t_pod.start()
        t_ingress.start()
        t_service.start()
        t_pod.join()
        t_ingress.join()
        t_service.join()

    def __watch(self, type) :
        """Stream events for one resource type; on env change regenerate,
        send and reload under the shared lock."""
        w = watch.Watch()
        what = None
        if type == "pod" :
            what = self.__api.list_pod_for_all_namespaces
        elif type == "ingress" :
            what = self.__extensions_api.list_ingress_for_all_namespaces
        elif type == "service" :
            what = self.__api.list_service_for_all_namespaces
        for event in w.stream(what, label_selector="bunkerized-nginx") :
            self.lock.acquire()
            # release in finally so an exception in get_env() cannot leave the
            # lock held and deadlock the other watcher threads
            try :
                new_env = self.get_env()
                if new_env != self.__old_env :
                    try :
                        if not self.gen_conf(new_env) :
                            raise Exception("can't generate configuration")
                        if not self.send() :
                            raise Exception("can't send configuration")
                        if not self.reload() :
                            raise Exception("can't reload configuration")
                        self.__old_env = new_env.copy()
                        log("CONTROLLER", "INFO", "successfully loaded new configuration")
                    except Exception as e :
                        log("controller", "ERROR", "error while computing new event : " + str(e))
            finally :
                self.lock.release()

    def reload(self) :
        return self._reload(self.__get_services(autoconf=True))

    def send(self, files="all") :
        return self._send(self.__get_services(autoconf=True), files=files)

    def stop_temp(self) :
        return self._stop_temp(self.__get_services(autoconf=True))

    def wait(self) :
        """Block until instance pods/services exist, push the first config and
        wait for the instances; returns (ok, env)."""
        self.lock.acquire()
        try :
            # Wait for at least one bunkerized-nginx pod
            pods = self.__get_pods()
            while len(pods) == 0 :
                time.sleep(1)
                pods = self.__get_pods()
            # Wait for at least one bunkerized-nginx service
            services = self.__get_services(autoconf=True)
            while len(services) == 0 :
                time.sleep(1)
                services = self.__get_services(autoconf=True)
            # Generate first config
            env = self.get_env()
            if not self.gen_conf(env) :
                return False, env
            # Send the config
            if not self.send() :
                return False, env
            # Stop the temporary server
            if not self.stop_temp() :
                return False, env
            # Wait for bunkerized-nginx (fix: the old code referenced an
            # undefined "instances" variable here, so the NameError was
            # swallowed and wait() always returned (False, {}))
            return self._config.wait(services), env
        except Exception :
            return False, {}
        finally :
            self.lock.release()

View File

@@ -1,51 +0,0 @@
import socketserver, threading, os, stat
from logger import log
class ReloadServerHandler(socketserver.BaseRequestHandler):
    """Handles one client connection on the autoconf unix socket.

    Protocol: the client sends one of b"lock", b"unlock", b"reload" or
    b"acme" (<= 512 bytes) and receives b"ok" / b"ko"; any other payload
    (or an empty read) ends the session.
    """
    def handle(self) :
        # tracks whether this connection currently holds the controller lock,
        # so it can be released if the session dies while locked
        locked = False
        try :
            while True :
                data = self.request.recv(512)
                if not data or not data in [b"lock", b"reload", b"unlock", b"acme"] :
                    break
                if data == b"lock" :
                    self.server.controller.lock.acquire()
                    locked = True
                    self.request.sendall(b"ok")
                elif data == b"unlock" :
                    self.server.controller.lock.release()
                    locked = False
                    self.request.sendall(b"ok")
                elif data == b"acme" :
                    # resend only the ACME challenge files to the instances
                    ret = self.server.controller.send(files="acme")
                    if ret :
                        self.request.sendall(b"ok")
                    else :
                        self.request.sendall(b"ko")
                elif data == b"reload" :
                    ret = self.server.controller.reload()
                    if ret :
                        self.request.sendall(b"ok")
                    else :
                        self.request.sendall(b"ko")
        except Exception as e :
            log("RELOADSERVER", "ERROR", "exception : " + str(e))
        if locked :
            self.server.controller.lock.release()
class ThreadingUnixServer(socketserver.ThreadingMixIn, socketserver.UnixStreamServer) :
    # unix stream server that handles each client connection in its own thread
    pass
def run_reload_server(controller) :
    """Start the unix-socket reload server in a background daemon thread.

    Returns a (server, thread) pair so the caller can keep both alive.
    """
    srv = ThreadingUnixServer("/tmp/autoconf.sock", ReloadServerHandler)
    # restrict the socket to root and gid 101 (presumably the nginx group —
    # verify against the base image)
    os.chown("/tmp/autoconf.sock", 0, 101)
    os.chmod("/tmp/autoconf.sock", 0o770)
    srv.controller = controller
    worker = threading.Thread(target=srv.serve_forever, daemon=True)
    worker.start()
    return (srv, worker)

View File

@@ -1,104 +0,0 @@
import docker, time
from threading import Lock
from logger import log
import Controller
class SwarmController(Controller.Controller) :
    """Docker Swarm controller: builds the generator environment from service
    env vars and labels, and pushes the generated configuration to the
    instances over the HTTP API."""

    def __init__(self, docker_host, api_uri, http_port) :
        super().__init__(Controller.Type.SWARM, api_uri=api_uri, lock=Lock(), http_port=http_port)
        self.__client = docker.DockerClient(base_url=docker_host)

    def __get_instances(self) :
        # bunkerized-nginx instances carry the bunkerized-nginx.AUTOCONF label
        return self.__client.services.list(filters={"label" : "bunkerized-nginx.AUTOCONF"})

    def __get_services(self) :
        # protected web services carry the bunkerized-nginx.SERVER_NAME label
        return self.__client.services.list(filters={"label" : "bunkerized-nginx.SERVER_NAME"})

    def get_env(self) :
        """Aggregate instance env vars and service labels into one env dict;
        SERVER_NAME collects the first server name of every service."""
        env = {}
        for instance in self.__get_instances() :
            for variable in instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"] :
                # split only on the first "=" so values may themselves contain "="
                env[variable.split("=")[0]] = variable.replace(variable.split("=")[0] + "=", "", 1)
        first_servers = []
        if "SERVER_NAME" in env and env["SERVER_NAME"] != "" :
            first_servers = env["SERVER_NAME"].split(" ")
        for service in self.__get_services() :
            first_server = service.attrs["Spec"]["Labels"]["bunkerized-nginx.SERVER_NAME"].split(" ")[0]
            first_servers.append(first_server)
            for variable, value in service.attrs["Spec"]["Labels"].items() :
                if variable.startswith("bunkerized-nginx.") and variable != "bunkerized-nginx.AUTOCONF" :
                    # labels become per-site variables prefixed by the first server name
                    env[first_server + "_" + variable.replace("bunkerized-nginx.", "", 1)] = value
        if len(first_servers) == 0 :
            env["SERVER_NAME"] = ""
        else :
            env["SERVER_NAME"] = " ".join(first_servers)
        return self._fix_env(env)

    def process_events(self, current_env) :
        """Regenerate, send and reload whenever a service event changes the env."""
        old_env = current_env
        # TODO : check why filter isn't working as expected
        #for event in self.__client.events(decode=True, filters={"type": "service", "label": ["bunkerized-nginx.AUTOCONF", "bunkerized-nginx.SERVER_NAME"]}) :
        for event in self.__client.events(decode=True, filters={"type": "service"}) :
            new_env = self.get_env()
            if new_env != old_env :
                self.lock.acquire()
                try :
                    if not self.gen_conf(new_env) :
                        raise Exception("can't generate configuration")
                    if not self.send() :
                        raise Exception("can't send configuration")
                    if not self.reload() :
                        raise Exception("can't reload configuration")
                    # fix: update the local tracker (the old code assigned
                    # self.__old_env, which this class never reads, so every
                    # subsequent event re-applied the same configuration)
                    old_env = new_env.copy()
                    log("CONTROLLER", "INFO", "successfully loaded new configuration")
                except Exception as e :
                    log("controller", "ERROR", "error while computing new event : " + str(e))
                finally :
                    self.lock.release()

    def reload(self) :
        return self._reload(self.__get_instances())

    def send(self, files="all") :
        return self._send(self.__get_instances(), files=files)

    def stop_temp(self) :
        return self._stop_temp(self.__get_instances())

    def wait(self) :
        """Block until an instance service exists, push the first config and
        wait for the instances; returns (ok, env)."""
        self.lock.acquire()
        try :
            # Wait for a service
            instances = self.__get_instances()
            while len(instances) == 0 :
                time.sleep(1)
                instances = self.__get_instances()
            # Wait for temporary bunkerized-nginx (fix: "env" did not exist
            # yet at this point in the old code, so this branch raised a
            # NameError that the bare except silently turned into (False, {}))
            if not self._config.wait(instances) :
                return False, {}
            # Generate first config
            env = self.get_env()
            if not self.gen_conf(env) :
                return False, env
            # Send the config
            if not self.send() :
                return False, env
            # Stop the temporary server
            if not self.stop_temp() :
                return False, env
            # Wait for bunkerized-nginx
            if not self._config.wait(instances) :
                return False, env
            return True, env
        except Exception :
            return False, {}
        finally :
            self.lock.release()

View File

@@ -1,46 +0,0 @@
#!/usr/bin/python3
# Entry point of the autoconf service: picks the controller matching the
# orchestrator, optionally starts the reload server, waits for instances
# and then processes orchestrator events forever.
from ReloadServer import run_reload_server
import docker, os, stat, sys, select, threading
from DockerController import DockerController
from SwarmController import SwarmController
from IngressController import IngressController
from logger import log
# Get variables
swarm = os.getenv("SWARM_MODE", "no") == "yes"
kubernetes = os.getenv("KUBERNETES_MODE", "no") == "yes"
api_uri = os.getenv("API_URI", "")
docker_host = os.getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
http_port = os.getenv("HTTP_PORT", "8080")
# Instantiate the controller (swarm takes precedence over kubernetes)
if swarm :
    log("autoconf", "INFO", "swarm mode detected")
    controller = SwarmController(docker_host, api_uri, http_port)
elif kubernetes :
    log("autoconf", "INFO", "kubernetes mode detected")
    controller = IngressController(api_uri, http_port)
else :
    log("autoconf", "INFO", "docker mode detected")
    controller = DockerController(docker_host)
# Run the reload server in background if needed
# (only swarm/kubernetes accept lock/reload/acme orders over the unix socket)
if swarm or kubernetes :
    log("autoconf", "INFO", "start reload server in background")
    (server, thread) = run_reload_server(controller)
# Wait for instances
log("autoconf", "INFO", "wait until a bunkerized-nginx instance is started ...")
ret, env = controller.wait()
if ret :
    log("autoconf", "INFO", "bunkerized-nginx instances started")
else :
    log("autoconf", "ERROR", "bunkerized-nginx instances not started")
# NOTE(review): execution falls through to process_events even when wait()
# reported failure — confirm this is intended
# Process events
log("autoconf", "INFO", "waiting for events ...")
controller.process_events(env)

View File

@@ -1,6 +0,0 @@
import datetime
def log(title, severity, msg) :
    """Print a timestamped log line: "[Y-m-d H:M:S] title - severity - msg"."""
    stamp = datetime.datetime.today().strftime("%Y-%m-%d %H:%M:%S")
    # flush immediately so container log collectors see lines in real time
    print(f"[{stamp}] {title} - {severity} - {msg}", flush=True)