autoconf - init refactoring before k8s integration

bunkerity
2021-07-28 11:56:45 +02:00
parent 0597074438
commit 01bba1d3f6
8 changed files with 79 additions and 24 deletions

195
autoconf/src/AutoConf.py Normal file

@@ -0,0 +1,195 @@
from Config import Config
import utils
import os, time

class AutoConf :

    def __init__(self, swarm, api) :
        self.__swarm = swarm
        self.__servers = {}
        self.__instances = {}
        self.__env = {}
        self.__config = Config(self.__swarm, api)

    def get_server(self, id) :
        if id in self.__servers :
            return self.__servers[id]
        return False

    def reload(self) :
        return self.__config.reload(self.__instances)

    def __gen_env(self) :
        self.__env.clear()
        # TODO : check actual state (e.g. : running, stopped ?)
        for id, instance in self.__instances.items() :
            env = []
            if self.__swarm :
                env = instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"]
            else :
                env = instance.attrs["Config"]["Env"]
            for entry in env :
                self.__env[entry.split("=")[0]] = entry.replace(entry.split("=")[0] + "=", "", 1)
        blacklist = ["NGINX_VERSION", "NJS_VERSION", "PATH", "PKG_RELEASE"]
        for entry in blacklist :
            if entry in self.__env :
                del self.__env[entry]
        if not "SERVER_NAME" in self.__env or self.__env["SERVER_NAME"] == "" :
            self.__env["SERVER_NAME"] = []
        else :
            self.__env["SERVER_NAME"] = self.__env["SERVER_NAME"].split(" ")
        for server in self.__servers :
            (id, name, labels) = self.__get_infos(self.__servers[server])
            first_server = labels["bunkerized-nginx.SERVER_NAME"].split(" ")[0]
            for label in labels :
                if label.startswith("bunkerized-nginx.") :
                    self.__env[first_server + "_" + label.replace("bunkerized-nginx.", "", 1)] = labels[label]
            for server_name in labels["bunkerized-nginx.SERVER_NAME"].split(" ") :
                if not server_name in self.__env["SERVER_NAME"] :
                    self.__env["SERVER_NAME"].append(server_name)
        self.__env["SERVER_NAME"] = " ".join(self.__env["SERVER_NAME"])

    def pre_process(self, objs) :
        for instance in objs :
            (id, name, labels) = self.__get_infos(instance)
            if "bunkerized-nginx.AUTOCONF" in labels :
                if self.__swarm :
                    self.__process_instance(instance, "create", id, name, labels)
                else :
                    if instance.status in ("restarting", "running", "created", "exited") :
                        self.__process_instance(instance, "create", id, name, labels)
                    if instance.status == "running" :
                        self.__process_instance(instance, "start", id, name, labels)
        for server in objs :
            (id, name, labels) = self.__get_infos(server)
            if "bunkerized-nginx.SERVER_NAME" in labels :
                if self.__swarm :
                    self.__process_server(server, "create", id, name, labels)
                else :
                    if server.status in ("restarting", "running", "created", "exited") :
                        self.__process_server(server, "create", id, name, labels)
                    if server.status == "running" :
                        self.__process_server(server, "start", id, name, labels)

    def process(self, obj, event) :
        (id, name, labels) = self.__get_infos(obj)
        if "bunkerized-nginx.AUTOCONF" in labels :
            self.__process_instance(obj, event, id, name, labels)
        elif "bunkerized-nginx.SERVER_NAME" in labels :
            self.__process_server(obj, event, id, name, labels)

    def __get_infos(self, obj) :
        if self.__swarm :
            id = obj.id
            name = obj.name
            labels = obj.attrs["Spec"]["Labels"]
        else :
            id = obj.id
            name = obj.name
            labels = obj.labels
        return (id, name, labels)

    def __process_instance(self, instance, event, id, name, labels) :
        if event == "create" :
            self.__instances[id] = instance
            self.__gen_env()
            utils.log("[*] bunkerized-nginx instance created : " + name + " / " + id)
            if self.__swarm and len(self.__instances) == 1 :
                if self.__config.generate(self.__env) :
                    utils.log("[*] Initial config succeeded")
                    if not self.__config.swarm_wait(self.__instances) :
                        utils.log("[!] Removing bunkerized-nginx instances from list (API not available)")
                        del self.__instances[id]
                else :
                    utils.log("[!] Initial config failed")
            elif not self.__swarm and len(self.__instances) == 1 :
                utils.log("[*] Wait until bunkerized-nginx is healthy (timeout = 120s) ...")
                i = 0
                healthy = False
                while i < 120 :
                    self.__instances[id].reload()
                    if self.__instances[id].attrs["State"]["Health"]["Status"] == "healthy" :
                        healthy = True
                        break
                    time.sleep(1)
                    i = i + 1
                if not healthy :
                    utils.log("[!] Removing bunkerized-nginx instances from list (unhealthy)")
                    del self.__instances[id]
        elif event == "start" :
            self.__instances[id].reload()
            self.__gen_env()
            utils.log("[*] bunkerized-nginx instance started : " + name + " / " + id)
        elif event == "die" :
            self.__instances[id].reload()
            self.__gen_env()
            utils.log("[*] bunkerized-nginx instance stopped : " + name + " / " + id)
        elif event == "destroy" or event == "remove" :
            del self.__instances[id]
            self.__gen_env()
            utils.log("[*] bunkerized-nginx instance removed : " + name + " / " + id)

    def __process_server(self, instance, event, id, name, labels) :
        vars = { k.replace("bunkerized-nginx.", "", 1) : v for k, v in labels.items() if k.startswith("bunkerized-nginx.")}
        if event == "create" :
            utils.log("[*] Generating config for " + vars["SERVER_NAME"] + " ...")
            self.__servers[id] = instance
            self.__gen_env()
            if self.__config.generate(self.__env) :
                utils.log("[*] Generated config for " + vars["SERVER_NAME"])
                if self.__swarm :
                    utils.log("[*] Activating config for " + vars["SERVER_NAME"] + " ...")
                    if self.__config.reload(self.__instances) :
                        utils.log("[*] Activated config for " + vars["SERVER_NAME"])
                    else :
                        utils.log("[!] Can't activate config for " + vars["SERVER_NAME"])
            else :
                utils.log("[!] Can't generate config for " + vars["SERVER_NAME"])
                del self.__servers[id]
                self.__gen_env()
                self.__config.generate(self.__env)
        elif event == "start" :
            if id in self.__servers :
                self.__servers[id].reload()
                utils.log("[*] Activating config for " + vars["SERVER_NAME"] + " ...")
                self.__gen_env()
                if self.__config.reload(self.__instances) :
                    utils.log("[*] Activated config for " + vars["SERVER_NAME"])
                else :
                    utils.log("[!] Can't activate config for " + vars["SERVER_NAME"])
        elif event == "die" :
            if id in self.__servers :
                self.__servers[id].reload()
                utils.log("[*] Deactivating config for " + vars["SERVER_NAME"])
                self.__gen_env()
                if self.__config.reload(self.__instances) :
                    utils.log("[*] Deactivated config for " + vars["SERVER_NAME"])
                else :
                    utils.log("[!] Can't deactivate config for " + vars["SERVER_NAME"])
        elif event == "destroy" or event == "remove" :
            if id in self.__servers :
                utils.log("[*] Removing config for " + vars["SERVER_NAME"])
                del self.__servers[id]
                self.__gen_env()
                if self.__config.generate(self.__env) :
                    utils.log("[*] Removed config for " + vars["SERVER_NAME"])
                else :
                    utils.log("[!] Can't remove config for " + vars["SERVER_NAME"])
                utils.log("[*] Deactivating config for " + vars["SERVER_NAME"])
                if self.__config.reload(self.__instances) :
                    utils.log("[*] Deactivated config for " + vars["SERVER_NAME"])
                else :
                    utils.log("[!] Can't deactivate config for " + vars["SERVER_NAME"])

121
autoconf/src/Config.py Normal file

@@ -0,0 +1,121 @@
#!/usr/bin/python3

import utils
import subprocess, shutil, os, traceback, requests, time
import docker

class Config :

    def __init__(self, swarm, api) :
        self.__swarm = swarm
        self.__api = api

    def __jobs(self) :
        utils.log("[*] Starting jobs")
        proc = subprocess.run(["/bin/su", "-c", "/opt/bunkerized-nginx/entrypoint/jobs.sh", "nginx"], capture_output=True)
        stdout = proc.stdout.decode("ascii")
        stderr = proc.stderr.decode("ascii")
        if len(stdout) > 1 :
            utils.log("[*] Jobs stdout :")
            utils.log(stdout)
        if stderr != "" :
            utils.log("[!] Jobs stderr :")
            utils.log(stderr)
        if proc.returncode != 0 :
            utils.log("[!] Jobs error : return code != 0")
            return False
        return True

    def swarm_wait(self, instances) :
        try :
            with open("/etc/nginx/autoconf", "w") as f :
                f.write("ok")
            utils.log("[*] Waiting for bunkerized-nginx tasks ...")
            i = 1
            started = False
            while i <= 10 :
                time.sleep(i)
                if self.__ping(instances) :
                    started = True
                    break
                i = i + 1
                utils.log("[!] Waiting " + str(i) + " seconds before retrying to contact bunkerized-nginx tasks")
            if started :
                utils.log("[*] bunkerized-nginx tasks started")
                return True
            else :
                utils.log("[!] bunkerized-nginx tasks are not started")
        except Exception as e :
            utils.log("[!] Error while waiting for Swarm tasks : " + str(e))
        return False

    def generate(self, env) :
        try :
            # Write environment variables to a file
            with open("/tmp/variables.env", "w") as f :
                for k, v in env.items() :
                    f.write(k + "=" + v + "\n")
            # Call the generator
            proc = subprocess.run(["/bin/su", "-c", "/opt/bunkerized-nginx/gen/main.py --settings /opt/bunkerized-nginx/settings.json --templates /opt/bunkerized-nginx/confs --output /etc/nginx --variables /tmp/variables.env", "nginx"], capture_output=True)
            # Print stdout/stderr
            stdout = proc.stdout.decode("ascii")
            stderr = proc.stderr.decode("ascii")
            if len(stdout) > 1 :
                utils.log("[*] Generator output :")
                utils.log(stdout)
            if stderr != "" :
                utils.log("[!] Generator error :")
                utils.log(stderr)
            # We're done
            if proc.returncode == 0 :
                if self.__swarm :
                    return self.__jobs()
                return True
            utils.log("[!] Error while generating site config for " + env["SERVER_NAME"] + " : return code = " + str(proc.returncode))
        except Exception as e :
            utils.log("[!] Exception while generating site config : " + str(e))
        return False

    def reload(self, instances) :
        return self.__api_call(instances, "/reload")

    def __ping(self, instances) :
        return self.__api_call(instances, "/ping")

    def __api_call(self, instances, path) :
        ret = True
        for instance_id, instance in instances.items() :
            # Reload the instance object just in case
            instance.reload()
            # Reload via API
            if self.__swarm :
                # Send POST request on http://serviceName.NodeID.TaskID:8080/action
                name = instance.name
                for task in instance.tasks() :
                    if task["Status"]["State"] != "running" :
                        continue
                    nodeID = task["NodeID"]
                    taskID = task["ID"]
                    fqdn = name + "." + nodeID + "." + taskID
                    req = False
                    try :
                        req = requests.post("http://" + fqdn + ":8080" + self.__api + path)
                    except :
                        pass
                    if req and req.status_code == 200 and req.text == "ok" :
                        utils.log("[*] Sent API order " + path + " to instance " + fqdn + " (service.node.task)")
                    else :
                        utils.log("[!] Can't send API order " + path + " to instance " + fqdn + " (service.node.task)")
                        ret = False
            # Send SIGHUP to running instance
            elif instance.status == "running" :
                try :
                    instance.kill("SIGHUP")
                    utils.log("[*] Sent SIGHUP signal to bunkerized-nginx instance " + instance.name + " / " + instance.id)
                except docker.errors.APIError as e :
                    utils.log("[!] Docker error while sending SIGHUP signal : " + str(e))
                    ret = False
        return ret
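
As a usage sketch, Config is driven with a flat variables dict and the map of known instances, assuming it runs inside the autoconf container where /opt/bunkerized-nginx/gen/main.py exists ; the API URI and variable values below are purely illustrative :

# Hypothetical driver for Config (illustrative values, not part of the commit)
from Config import Config

config = Config(swarm=True, api="/some-secret-uri")
env = {
    "SERVER_NAME" : "app.example.com",
    "app.example.com_USE_REVERSE_PROXY" : "yes",
}
if config.generate(env) :        # writes /tmp/variables.env then runs gen/main.py as the nginx user
    config.reload(instances={})  # sends the /reload API order to every known instance (none here)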

23
autoconf/src/Controller.py Normal file

@@ -0,0 +1,23 @@
from abc import ABC, abstractmethod
from enum import Enum

from Config import Config

class ControllerType(Enum) :
    DOCKER = 1
    SWARM = 2
    KUBERNETES = 3

class Controller(ABC) :

    def __init__(self, type) :
        # Config.from_controller_type() / gen() belong to the ongoing refactoring (not in Config.py yet)
        self.__config = Config.from_controller_type(type)

    @abstractmethod
    def get_env(self) :
        pass

    def gen_conf(self, env) :
        return self.__config.gen(env)

    @abstractmethod
    def process_events(self) :
        pass

32
autoconf/src/DockerController.py Normal file

@@ -0,0 +1,32 @@
import docker

from Controller import Controller, ControllerType
import utils

class DockerController(Controller) :

    def __init__(self) :
        super().__init__(ControllerType.DOCKER)
        # TODO : honor env vars like DOCKER_HOST
        self.__client = docker.DockerClient(base_url='unix:///var/run/docker.sock')

    def __get_instances(self) :
        return self.__client.containers.list(filters={"label" : "bunkerized-nginx.AUTOCONF"})

    def __get_containers(self) :
        return self.__client.containers.list(filters={"label" : "bunkerized-nginx.SERVER_NAME"})

    def get_env(self) :
        env = {}
        for instance in self.__get_instances() :
            for variable in instance.attrs["Config"]["Env"] :
                env[variable.split("=")[0]] = variable.replace(variable.split("=")[0] + "=", "", 1)
        return env

    def process_events(self, current_env) :
        old_env = current_env
        for event in self.__client.events(decode=True, filters={"type": "container", "label": ["bunkerized-nginx.AUTOCONF", "bunkerized-nginx.SERVER_NAME"]}) :
            new_env = self.get_env()
            if new_env != old_env :
                if self.gen_conf(new_env) :
                    old_env = new_env
                    utils.log("[*] Successfully generated new configuration")

100
autoconf/src/IngressController.py Normal file

@@ -0,0 +1,100 @@
from kubernetes import client, config, watch
from threading import Thread, Lock

from Controller import Controller, ControllerType

class IngressController(Controller) :

    def __init__(self) :
        super().__init__(ControllerType.KUBERNETES)
        config.load_incluster_config()
        self.__api = client.CoreV1Api()
        self.__extensions_api = client.ExtensionsV1beta1Api()
        self.__lock = Lock()

    def __annotations_to_env(self, annotations, service=False) :
        env = {}
        prefix = ""
        if service :
            if not "bunkerized-nginx.SERVER_NAME" in annotations :
                raise Exception("Missing bunkerized-nginx.SERVER_NAME annotation in Service.")
            prefix = annotations["bunkerized-nginx.SERVER_NAME"].split(" ")[0] + "_"
        for annotation in annotations :
            if annotation.startswith("bunkerized-nginx.") and annotation.replace("bunkerized-nginx.", "", 1) != "" and annotation.replace("bunkerized-nginx.", "", 1) != "AUTOCONF" :
                env[prefix + annotation.replace("bunkerized-nginx.", "", 1)] = annotations[annotation]
        return env

    def __rules_to_env(self, rules) :
        env = {}
        for rule in rules :
            prefix = ""
            if rule.host :
                prefix = rule.host + "_"
            if rule.http is None or rule.http.paths is None :
                continue
            for path in rule.http.paths :
                env[prefix + "USE_REVERSE_PROXY"] = "yes"
                env[prefix + "REVERSE_PROXY_URL"] = path.path
                env[prefix + "REVERSE_PROXY_HOST"] = "http://" + path.backend.service_name + ":" + str(path.backend.service_port)
        return env

    def get_env(self) :
        ingresses = self.get_ingresses()
        services = self.get_services()
        env = {}
        for ingress in ingresses :
            if ingress.metadata.annotations is None :
                continue
            if "bunkerized-nginx.AUTOCONF" in ingress.metadata.annotations :
                env.update(self.__annotations_to_env(ingress.metadata.annotations))
                env.update(self.__rules_to_env(ingress.spec.rules))
        for service in services :
            if service.metadata.annotations is None :
                continue
            if "bunkerized-nginx.AUTOCONF" in service.metadata.annotations :
                env.update(self.__annotations_to_env(service.metadata.annotations, service=True))
        return env

    def get_ingresses(self) :
        return self.__extensions_api.list_ingress_for_all_namespaces(watch=False).items

    def get_services(self) :
        return self.__api.list_service_for_all_namespaces(watch=False).items

    def watch_ingress(self) :
        w = watch.Watch()
        for event in w.stream(self.__extensions_api.list_ingress_for_all_namespaces) :
            self.__lock.acquire()
            # print("*** NEW INGRESS EVENT ***", flush=True)
            # for k, v in event.items() :
            #     print(k + " :", flush=True)
            #     print(v, flush=True)
            self.gen_conf(self.get_env())
            self.__lock.release()

    def watch_service(self) :
        w = watch.Watch()
        for event in w.stream(self.__api.list_service_for_all_namespaces) :
            self.__lock.acquire()
            self.gen_conf(self.get_env())
            # print("*** NEW SERVICE EVENT ***", flush=True)
            # for k, v in event.items() :
            #     print(k + " :", flush=True)
            #     print(v, flush=True)
            self.__lock.release()

ic = IngressController()
print("*** INGRESSES ***")
print(ic.get_ingresses())
print("*** SERVICES ***")
print(ic.get_services())
print("*** LISTENING FOR EVENTS ***")
t_ingress = Thread(target=ic.watch_ingress)
t_service = Thread(target=ic.watch_service)
t_ingress.start()
t_service.start()
t_ingress.join()
t_service.join()
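
For a hypothetical Ingress annotated with bunkerized-nginx.AUTOCONF, host app.example.com and a single path / pointing to a service named myapp on port 8000, get_env() would be expected to return something like the dict below (values are illustrative only) :

# Expected shape of IngressController.get_env() output for the hypothetical Ingress described above
{
    "app.example.com_USE_REVERSE_PROXY" : "yes",
    "app.example.com_REVERSE_PROXY_URL" : "/",
    "app.example.com_REVERSE_PROXY_HOST" : "http://myapp:8000",
}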

28
autoconf/src/ReloadServer.py Normal file

@@ -0,0 +1,28 @@
import socketserver, threading, utils, os, stat

class ReloadServerHandler(socketserver.StreamRequestHandler):

    def handle(self) :
        try :
            data = self.request.recv(512)
            if not data :
                return
            with self.server.lock :
                ret = self.server.autoconf.reload()
            if ret :
                self.request.sendall("ok".encode("utf-8"))
            else :
                self.request.sendall("ko".encode("utf-8"))
        except Exception as e :
            utils.log("Exception " + str(e))

def run_reload_server(autoconf, lock) :
    server = socketserver.UnixStreamServer("/tmp/autoconf.sock", ReloadServerHandler)
    os.chown("/tmp/autoconf.sock", 0, 101)
    os.chmod("/tmp/autoconf.sock", 0o770)
    server.autoconf = autoconf
    server.lock = lock
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
    thread.start()
    return (server, thread)
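
The handler answers any payload received on the UNIX socket with ok or ko depending on whether AutoConf.reload() succeeded. A minimal client sketch (assuming the caller can reach /tmp/autoconf.sock) :

# Minimal sketch of a client for the reload server
import socket

with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock :
    sock.connect("/tmp/autoconf.sock")
    sock.sendall(b"reload")                    # the payload content is ignored, it only triggers a reload
    answer = sock.recv(512).decode("utf-8")    # "ok" on success, "ko" on failure
    print("reload " + ("succeeded" if answer == "ok" else "failed"))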

71
autoconf/src/app.py Normal file

@@ -0,0 +1,71 @@
#!/usr/bin/python3

from AutoConf import AutoConf
from ReloadServer import run_reload_server
import utils

import docker, os, stat, sys, select, threading

# Check if we are in Swarm mode
swarm = os.getenv("SWARM_MODE") == "yes"
if swarm :
    # Connect to the endpoint
    endpoint = "/var/run/docker.sock"
    if not os.path.exists(endpoint) or not stat.S_ISSOCK(os.stat(endpoint).st_mode) :
        utils.log("[!] /var/run/docker.sock not found (is it mounted ?)")
        sys.exit(1)
try :
    client = docker.DockerClient(base_url='unix:///var/run/docker.sock')
except Exception as e :
    utils.log("[!] Can't instantiate DockerClient : " + str(e))
    sys.exit(2)

# Our object to process events
api = ""
if swarm :
    api = os.getenv("API_URI")
autoconf = AutoConf(swarm, api)
lock = threading.Lock()
if swarm :
    (server, thread) = run_reload_server(autoconf, lock)

# Get all bunkerized-nginx instances and web services created before
try :
    if swarm :
        before = client.services.list(filters={"label" : "bunkerized-nginx.AUTOCONF"}) + client.services.list(filters={"label" : "bunkerized-nginx.SERVER_NAME"})
    else :
        before = client.containers.list(all=True, filters={"label" : "bunkerized-nginx.AUTOCONF"}) + client.containers.list(filters={"label" : "bunkerized-nginx.SERVER_NAME"})
except docker.errors.APIError as e :
    utils.log("[!] Docker API error " + str(e))
    sys.exit(3)

# Process them before events
autoconf.pre_process(before)

# Process events received from Docker
try :
    utils.log("[*] Listening for Docker events ...")
    for event in client.events(decode=True) :
        # Process only container/service events
        if (swarm and event["Type"] != "service") or (not swarm and event["Type"] != "container") :
            continue
        # Get the Container/Service object
        try :
            if swarm :
                id = event["Actor"]["ID"]
                server = client.services.get(id)
            else :
                id = event["id"]
                server = client.containers.get(id)
        except docker.errors.NotFound as e :
            server = autoconf.get_server(id)
            if not server :
                continue
        # Process the event
        autoconf.process(server, event["Action"])
except docker.errors.APIError as e :
    utils.log("[!] Docker API error " + str(e))
    sys.exit(4)
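
For context, the event loop above only reacts to objects carrying the bunkerized-nginx labels. A hypothetical web service that would be picked up in non-Swarm mode could be started like this (image name and values are illustrative) :

# Hypothetical container that the autoconf event loop would detect
import docker

client = docker.from_env()
client.containers.run(
    "myapp:latest",    # illustrative image
    detach=True,
    labels={
        "bunkerized-nginx.SERVER_NAME" : "app.example.com",
        "bunkerized-nginx.USE_REVERSE_PROXY" : "yes",
        "bunkerized-nginx.REVERSE_PROXY_HOST" : "http://myapp:8000",
    },
)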

24
autoconf/src/utils.py Normal file

@@ -0,0 +1,24 @@
#!/usr/bin/python3

import datetime

def log(event) :
    print("[" + str(datetime.datetime.now().replace(microsecond=0)) + "] " + event, flush=True)

def replace_in_file(file, old_str, new_str) :
    with open(file) as f :
        data = f.read()
    # Reverse the string so that only the last occurrence of old_str is replaced
    data = data[::-1].replace(old_str[::-1], new_str[::-1], 1)[::-1]
    with open(file, "w") as f :
        f.write(data)

def install_cron(service, vars, crons) :
    for var in vars :
        if var in crons :
            with open("/etc/crontabs/root", "a+") as f :
                f.write(vars[var] + " /opt/cron/" + crons[var] + ".py " + service["Actor"]["ID"] + "\n")

def uninstall_cron(service, vars, crons) :
    for var in vars :
        if var in crons :
            replace_in_file("/etc/crontabs/root", vars[var] + " /opt/cron/" + crons[var] + ".py " + service["Actor"]["ID"] + "\n", "")
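
Note that replace_in_file removes the last occurrence of old_str rather than the first : the content is reversed, replaced once, then reversed back. A quick standalone check of that logic on a string instead of a file :

# Standalone check of the reversed-replace trick used by replace_in_file
data = "keep\ncron-a\nkeep\ncron-a\n"
old_str, new_str = "cron-a\n", ""
result = data[::-1].replace(old_str[::-1], new_str[::-1], 1)[::-1]
print(repr(result))  # 'keep\ncron-a\nkeep\n' : only the last cron-a line was removed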