35 Commits

Author SHA1 Message Date
Florian Pitance
3a078326c5 Merge pull request #199 from Myzel394/patch-1
Fix typo
2021-10-24 18:25:37 +02:00
florian
d43b82b757 remote API - only do action if 403 2021-10-24 18:24:10 +02:00
florian
3850cacb9c prepare for v1.3.2 2021-10-23 16:56:10 +02:00
florian
c00c7f46a0 lua - verify certs when doing HTTPS requests 2021-10-23 10:10:24 +02:00
bunkerity
163af4a49d prepare for v1.3.2 2021-10-22 21:16:36 +02:00
bunkerity
98e85eb99f docs - update security tuning sections : distributed blacklist and request limit 2021-10-21 21:31:15 +02:00
bunkerity
2e63bb0256 docs - reflect kubernetes/swarm changes into the doc 2021-10-21 16:47:08 +02:00
bunkerity
6546a0edb7 disable country ban if IP is local, update default values of PERMISSIONS_POLICY and FEATURE_POLICY, upgrade archlinux packages before testing 2021-10-21 15:40:20 +02:00
bunkerity
ab00381746 ui - fix ROOT_FOLDER bug in serve-files.conf 2021-10-21 11:30:49 +02:00
bunkerity
9f7097de0d request limit - fix some LUA code 2021-10-19 17:21:30 +02:00
bunkerity
24d6337a57 limit req - multiple url support 2021-10-18 16:48:06 +02:00
bunkerity
bfb5319c16 limit req - add burst and delay parameters 2021-10-13 20:53:10 +02:00
bunkerity
4c77a14825 use annotations as env var in Ingress definition, fix cidr parsing for reserved ips, fix missing empty when job is external, fix ping check for remote api and init work hour/day support for request limit 2021-10-13 17:21:25 +02:00
bunkerity
4e45fa3874 integrations - acme without shared folder when using k8s/swarm 2021-10-12 16:58:13 +02:00
Myzel394
a9a26b82d9 fixed typo 2021-10-12 10:22:25 +00:00
bunkerity
00d91dcaaa jobs - move certbot hooks to python 2021-10-11 20:57:13 +02:00
bunkerity
650ad7ea49 integrations - fix missing acme folder when using Swarm or Kubernetes 2021-10-11 17:24:19 +02:00
bunkerity
7045c0c2b6 jobs - fix encoding error on CentOS 2021-10-08 17:10:01 +02:00
bunkerity
f0f432487b remote API - ban IP from distributed DB 2021-10-07 16:57:37 +02:00
bunkerity
fdc02be051 remote API - basic send of bad IPs 2021-10-07 12:00:20 +02:00
bunkerity
fb799765a4 jobs - fix str/bytes hell 2021-10-06 21:09:27 +02:00
bunkerity
d53f02b5b3 api - client side (untested) 2021-10-06 15:41:55 +02:00
bunkerity
7b9722fac4 jobs - add remote API 2021-10-06 12:13:13 +02:00
bunkerity
31ed4ff834 centos - update ca-certificates in install script 2021-10-05 16:06:35 +02:00
bunkerity
bc5f3ee88e fix CVEs and add init to Debian test image 2021-10-05 15:01:43 +02:00
bunkerity
a6b21aae8c fix typo in settings.json, bump Debian to bullseyes, init support of Arch Linux 2021-10-05 14:32:19 +02:00
bunkerity
64aa9c2530 init work remote API 2021-10-02 20:29:50 +02:00
bunkerity
5d94cc8f43 docs - init changes about storageless 2021-09-14 16:41:39 +02:00
bunkerity
e7ee21cbb5 antibot - fix path for templates and data 2021-09-14 11:30:33 +02:00
florian
a0f8cbdac1 antibot - fix LUA typo in recaptcha mode 2021-09-13 21:26:09 +02:00
Florian Pitance
178d7a6849 Merge pull request #182 from Nakinox/patch-2
Update docker-compose.yml
2021-09-13 21:20:26 +02:00
florian
ca81535bb3 swarm/k8s - less storage, more API 2021-09-05 00:36:15 +02:00
florian
062fa3e78a integration - continue work on storageless config for k8s and swarm 2021-09-03 22:40:37 +02:00
Nakinox
95f2d2af9c Update docker-compose.yml 2021-09-03 17:21:36 +02:00
bunkerity
e55dff8128 api - init work on storageless configuration 2021-09-03 12:04:30 +02:00
64 changed files with 4585 additions and 547 deletions

View File

@@ -20,7 +20,6 @@ jobs:
- name: Run Trivy security scanner
uses: aquasecurity/trivy-action@master
with:
token: ${{ secrets.GITHUB_TOKEN }}
image-ref: 'bunkerized-nginx-autoconf'
format: 'table'
exit-code: '1'

View File

@@ -20,7 +20,6 @@ jobs:
- name: Run Trivy security scanner
uses: aquasecurity/trivy-action@master
with:
token: ${{ secrets.GITHUB_TOKEN }}
image-ref: 'bunkerized-nginx-ui'
format: 'table'
exit-code: '1'

View File

@@ -20,7 +20,6 @@ jobs:
- name: Run Trivy security scanner
uses: aquasecurity/trivy-action@master
with:
token: ${{ secrets.GITHUB_TOKEN }}
image-ref: 'bunkerized-nginx'
format: 'table'
exit-code: '1'

View File

@@ -20,6 +20,8 @@ jobs:
run: docker build -t centos-systemd -f tests/Dockerfile-centos .
- name: Build Fedora with systemd
run: docker build -t fedora-systemd -f tests/Dockerfile-fedora .
- name: Build Arch Linux with systemd
run: docker build -t archlinux-systemd -f tests/Dockerfile-archlinux .
- name: Debian test
run: ./tests/linux-run.sh debian-systemd test-debian
- name: Ubuntu test
@@ -28,3 +30,5 @@ jobs:
run: ./tests/linux-run.sh centos-systemd test-centos
- name: Fedora test
run: ./tests/linux-run.sh fedora-systemd test-fedora
- name: Arch Linux test
run: ./tests/linux-run.sh archlinux-systemd test-archlinux

View File

@@ -1,5 +1,17 @@
# Changelog
## v1.3.2 -
- Use API instead of a shared folder for Swarm and Kubernetes integrations
- Beta integration of distributed bad IPs database through a remote API
- Improvement of the request limiting feature : hour/day rate and multiple URL support
- Various bug fixes related to antibot feature
- Init support of Arch Linux
- Fix Moodle example
- Fix ROOT_FOLDER bug in serve-files.conf when using the UI
- Update default values for PERMISSIONS_POLICY and FEATURE_POLICY
- Disable COUNTRY ban if IP is local
## v1.3.1 - 2021/09/02
- Use ModSecurity v3.0.4 instead of v3.0.5 to fix memory leak
@@ -7,6 +19,7 @@
- Fix bug when LISTEN_HTTP=no and MULTISITE=yes
- Add CUSTOM_HEADER variable
- Add REVERSE_PROXY_BUFFERING variable
- Add REVERSE_PROXY_KEEPALIVE variable
- Fix documentation for modsec and modsec-crs special folders
## v1.3.0 - 2021/08/23

View File

@@ -12,8 +12,8 @@ RUN chmod +x /tmp/docker.sh && \
/tmp/docker.sh && \
rm -f /tmp/docker.sh
# Fix CVE-2021-22901, CVE-2021-22898, CVE-2021-22897, CVE-2021-33560 and CVE-2021-36159
RUN apk add "curl>=7.77.0-r0" "libgcrypt>=1.8.8-r0" "apk-tools>=2.12.6-r0"
# Fix CVE-2021-22945, CVE-2021-22946, CVE-2021-22947 and CVE-2021-40528
RUN apk add "curl>=7.79.0-r0" "libgcrypt>=1.8.8-r1"
VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache /pre-server-confs /acme-challenge /plugins

View File

@@ -3,7 +3,7 @@
</p>
<p align="center">
<img src="https://img.shields.io/badge/bunkerized--nginx-1.3.1-blue" />
<img src="https://img.shields.io/badge/bunkerized--nginx-1.3.2-blue" />
<img src="https://img.shields.io/badge/nginx-1.20.1-blue" />
<img src="https://img.shields.io/github/last-commit/bunkerity/bunkerized-nginx" />
<img src="https://img.shields.io/github/workflow/status/bunkerity/bunkerized-nginx/Automatic%20test?label=automatic%20test" />
@@ -38,8 +38,8 @@ Non-exhaustive list of features :
- Automatic ban of strange behaviors
- Antibot challenge through cookie, javascript, captcha or recaptcha v3
- Block TOR, proxies, bad user-agents, countries, ...
- Block known bad IP with DNSBL
- Prevent bruteforce attacks with rate limiting
- Block known bad IP with DNSBL and distributed blacklist
- Prevent bruteforce attacks and protect API resources with rate limiting
- Plugins system for external security checks (ClamAV, CrowdSec, ...)
- Easy to configure with environment variables or web UI
- Seamless integration into existing environments : Linux, Docker, Swarm, Kubernetes, ...
@@ -105,7 +105,7 @@ You will find more information about Docker autoconf feature in the [documentati
## Swarm
Using bunkerized-nginx in a Docker Swarm cluster requires a shared folder accessible from both managers and workers (anything like NFS, GlusterFS, CephFS or even SSHFS will work). The deployment and configuration is very similar to the "Docker autoconf" one but with services instead of containers. A service based on the bunkerized-nginx-autoconf image needs to be scheduled on a manager node (don't worry it doesn't expose any network port for obvious security reasons). This service will listen for Docker Swarm events like service creation or deletion and generate the configuration according to the labels of each service. Once configuration generation is done, the bunkerized-nginx-autoconf service will send a reload order to all the bunkerized-nginx tasks so they can load the new configuration.
The deployment and configuration is very similar to the "Docker autoconf" one but with services instead of containers. A service based on the bunkerized-nginx-autoconf image needs to be scheduled on a manager node (don't worry it doesn't expose any network port for obvious security reasons). This service will listen for Docker Swarm events like service creation or deletion and generate the configuration according to the labels of each service. Once configuration generation is done, the bunkerized-nginx-autoconf service will send the configuration files and a reload order to all the bunkerized-nginx tasks so they can apply the new configuration. If you need to deliver static files (e.g., html, images, css, js, ...) a shared folder accessible from all bunkerized-nginx instances is needed (you can use a storage system like NFS, GlusterFS, CephFS on the host or a [Docker volume plugin](https://docs.docker.com/engine/extend/)).
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/swarm.png?raw=true" />
@@ -115,7 +115,7 @@ You will find more information about Docker Swarm integration in the [documentat
**This integration is still in beta, please fill an issue if you find a bug or have an idea on how to improve it.**
Using bunkerized-nginx in a Kubernetes cluster requires a shared folder accessible from the nodes (anything like NFS, GlusterFS, CephFS or even SSHFS will work). The bunkerized-nginx-autoconf acts as an Ingress Controller and connects to the k8s API to get cluster events and generate a new configuration when it's needed. Once the configuration is generated, the Ingress Controller sends a reload order to the bunkerized-nginx instances running in the cluster.
The bunkerized-nginx-autoconf acts as an Ingress Controller and connects to the k8s API to get cluster events and generate a new configuration when it's needed. Once the configuration is generated, the Ingress Controller sends the configuration files and a reload order to the bunkerized-nginx instances running in the cluster. If you need to deliver static files (e.g., html, images, css, js, ...) a shared folder accessible from all bunkerized-nginx instances is needed (you can use a storage system like NFS, GlusterFS, CephFS on the host or a [Kubernetes Volume that supports ReadOnlyMany access](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)).
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/kubernetes.png?raw=true" />
@@ -130,6 +130,7 @@ List of supported Linux distributions :
- Ubuntu focal (20.04)
- CentOS 7
- Fedora 34
- Arch Linux
Unlike containers, Linux integration can be tedious because bunkerized-nginx has a bunch of dependencies that need to be installed before we can use it. Fortunately, we provide a [helper script](https://github.com/bunkerity/bunkerized-nginx/blob/master/helpers/install.sh) to make the process easier and automatic. Once installed, the configuration is really simple, all you have to do is to edit the `/opt/bunkerized-nginx/variables.env` configuration file and run the `bunkerized-nginx` command to apply it.

View File

@@ -1 +1 @@
1.3.1
1.3.2

View File

@@ -10,8 +10,9 @@ COPY misc/cron-autoconf /etc/crontabs/root
COPY autoconf/entrypoint.sh /opt/bunkerized-nginx/entrypoint/
COPY autoconf/requirements.txt /opt/bunkerized-nginx/entrypoint/
COPY autoconf/src/* /opt/bunkerized-nginx/entrypoint/
COPY VERSION /opt/bunkerized-nginx
RUN apk add --no-cache py3-pip bash certbot curl openssl && \
RUN apk add --no-cache py3-pip bash certbot curl openssl socat && \
pip3 install -r /opt/bunkerized-nginx/gen/requirements.txt && \
pip3 install -r /opt/bunkerized-nginx/entrypoint/requirements.txt && \
pip3 install -r /opt/bunkerized-nginx/jobs/requirements.txt
@@ -21,7 +22,6 @@ RUN chmod +x /tmp/prepare.sh && \
/tmp/prepare.sh && \
rm -f /tmp/prepare.sh
# Fix CVE-2021-36159
RUN apk add "apk-tools>=2.12.6-r0"
#VOLUME /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache /etc/letsencrypt /acme-challenge
ENTRYPOINT ["/opt/bunkerized-nginx/entrypoint/entrypoint.sh"]

View File

@@ -16,6 +16,7 @@ chmod ugo+x /opt/bunkerized-nginx/entrypoint/* /opt/bunkerized-nginx/scripts/*
chmod ugo+x /opt/bunkerized-nginx/gen/main.py
chmod ugo+x /opt/bunkerized-nginx/jobs/main.py
chmod ugo+x /opt/bunkerized-nginx/jobs/reload.py
chmod ugo+x /opt/bunkerized-nginx/jobs/certbot-*.py
chmod 770 /opt/bunkerized-nginx
chmod 440 /opt/bunkerized-nginx/settings.json
@@ -28,6 +29,11 @@ mkdir /var/log/letsencrypt
chown nginx:nginx /var/log/letsencrypt
chmod 770 /var/log/letsencrypt
# prepare /etc/nginx
mkdir /etc/nginx
chown root:nginx /etc/nginx
chmod 770 /etc/nginx
# prepare /etc/letsencrypt
mkdir /etc/letsencrypt
chown root:nginx /etc/letsencrypt
@@ -46,9 +52,21 @@ chmod 770 /cache
# prepare /acme-challenge
ln -s /acme-challenge /opt/bunkerized-nginx/acme-challenge
mkdir /acme-challenge
chown root:nginx /acme-challenge
chmod 770 /acme-challenge
mkdir -p /acme-challenge/.well-known/acme-challenge
chown -R root:nginx /acme-challenge
chmod -R 770 /acme-challenge
# prepare /http-confs
ln -s /http-confs /opt/bunkerized-nginx/http-confs
mkdir /http-confs
chown root:nginx /http-confs
chmod 770 /http-confs
# prepare /server-confs
ln -s /server-confs /opt/bunkerized-nginx/server-confs
mkdir /server-confs
chown root:nginx /server-confs
chmod 770 /server-confs
# prepare /modsec-confs
ln -s /modsec-confs /opt/bunkerized-nginx/modsec-confs

View File

@@ -1,11 +1,21 @@
#!/usr/bin/python3
import subprocess, shutil, os, traceback, requests, time, dns.resolver
import subprocess, shutil, os, traceback, requests, time, dns.resolver, io, tarfile
import Controller
from logger import log
CONFIGS = {
"conf": "/etc/nginx",
"letsencrypt": "/etc/letsencrypt",
"http": "/http-confs",
"server": "/server-confs",
"modsec": "/modsec-confs",
"modsec-crs": "/modsec-crs-confs",
"acme": "/acme-challenge"
}
class Config :
def __init__(self, type, api_uri, http_port="8080") :
@@ -19,9 +29,9 @@ class Config :
stdout = proc.stdout.decode("ascii")
stderr = proc.stderr.decode("ascii")
if len(stdout) > 1 :
log("config", "INFO", "jobs stdout : " + stdout)
log("config", "INFO", "jobs stdout :\n" + stdout)
if stderr != "" :
log("config", "ERROR", "jobs stderr : " + stderr)
log("config", "ERROR", "jobs stderr :\n" + stderr)
if proc.returncode != 0 :
log("config", "ERROR", "jobs error (return code = " + str(proc.returncode) + ")")
return False
@@ -64,12 +74,35 @@ class Config :
instance.kill("SIGHUP")
except :
ret = False
elif self.__type == Controller.Type.SWARM :
ret = self.__api_call(instances, "/reload")
elif self.__type == Controller.Type.KUBERNETES :
elif self.__type == Controller.Type.SWARM or self.__type == Controller.Type.KUBERNETES :
ret = self.__api_call(instances, "/reload")
return ret
def send(self, instances, files="all") :
ret = True
fail = False
for name, path in CONFIGS.items() :
if files != "all" and name != files :
continue
file = self.__tarball(path)
if not self.__api_call(instances, "/" + name, file=file) :
log("config", "ERROR", "can't send config " + name + " to instance(s)")
fail = True
file.close()
if fail :
ret = False
return ret
def stop_temp(self, instances) :
return self.__api_call(instances, "/stop-temp")
def __tarball(self, path) :
file = io.BytesIO()
with tarfile.open(mode="w:gz", fileobj=file) as tar :
tar.add(path, arcname=".")
file.seek(0, 0)
return file
def __ping(self, instances) :
return self.__api_call(instances, "/ping")
@@ -120,7 +153,7 @@ class Config :
log("config", "ERROR", "exception while waiting for bunkerized-nginx instances : " + traceback.format_exc())
return False
def __api_call(self, instances, path) :
def __api_call(self, instances, path, file=None) :
ret = True
nb = 0
urls = []
@@ -146,7 +179,11 @@ class Config :
for url in urls :
req = None
try :
req = requests.post(url)
if file == None :
req = requests.post(url)
else :
file.seek(0, 0)
req = requests.post(url, files={'file': file})
except :
pass
if req and req.status_code == 200 and req.text == "ok" :

View File

@@ -1,3 +1,4 @@
import traceback
from abc import ABC, abstractmethod
from enum import Enum
@@ -51,3 +52,17 @@ class Controller(ABC) :
except :
ret = False
return ret
def _send(self, instances, files="all") :
try :
ret = self._config.send(instances, files=files)
except Exception as e :
ret = False
return ret
def _stop_temp(self, instances) :
try :
ret = self._config.stop_temp(instances)
except Exception as e :
ret = False
return ret

View File

@@ -41,7 +41,9 @@ class IngressController(Controller.Controller) :
def __annotations_to_env(self, annotations) :
env = {}
prefix = annotations["bunkerized-nginx.SERVER_NAME"].split(" ")[0] + "_"
prefix = ""
if "bunkerized-nginx.SERVER_NAME" in annotations :
prefix = annotations["bunkerized-nginx.SERVER_NAME"].split(" ")[0] + "_"
for annotation in annotations :
if annotation.startswith("bunkerized-nginx.") and annotation.replace("bunkerized-nginx.", "", 1) != "" and annotation.replace("bunkerized-nginx.", "", 1) != "AUTOCONF" :
env[prefix + annotation.replace("bunkerized-nginx.", "", 1)] = annotations[annotation]
@@ -50,18 +52,26 @@ class IngressController(Controller.Controller) :
def __rules_to_env(self, rules, namespace="default") :
env = {}
first_servers = []
numbers = {}
for rule in rules :
rule = rule.to_dict()
prefix = ""
number = 1
if "host" in rule :
prefix = rule["host"] + "_"
first_servers.append(rule["host"])
if not rule["host"] in numbers :
numbers[rule["host"]] = 1
number = numbers[rule["host"]]
if not "http" in rule or not "paths" in rule["http"] :
continue
env[prefix + "USE_REVERSE_PROXY"] = "yes"
for path in rule["http"]["paths"] :
env[prefix + "USE_REVERSE_PROXY"] = "yes"
env[prefix + "REVERSE_PROXY_URL"] = path["path"]
env[prefix + "REVERSE_PROXY_HOST"] = "http://" + path["backend"]["service_name"] + "." + namespace + ".svc.cluster.local:" + str(path["backend"]["service_port"])
suffix = "_" + str(number)
env[prefix + "REVERSE_PROXY_URL" + suffix] = path["path"]
env[prefix + "REVERSE_PROXY_HOST" + suffix] = "http://" + path["backend"]["service_name"] + "." + namespace + ".svc.cluster.local:" + str(path["backend"]["service_port"])
number += 1
numbers[rule["host"]] = number
env["SERVER_NAME"] = " ".join(first_servers)
return env
@@ -77,6 +87,8 @@ class IngressController(Controller.Controller) :
first_servers.extend(env["SERVER_NAME"].split(" "))
for ingress in ingresses :
env.update(self.__rules_to_env(ingress.spec.rules, namespace=ingress.metadata.namespace))
if ingress.metadata.annotations != None :
env.update(self.__annotations_to_env(ingress.metadata.annotations))
if ingress.spec.tls :
for tls_entry in ingress.spec.tls :
for host in tls_entry.hosts :
@@ -96,9 +108,9 @@ class IngressController(Controller.Controller) :
def process_events(self, current_env) :
self.__old_env = current_env
t_pod = Thread(target=self.__watch_pod)
t_ingress = Thread(target=self.__watch_ingress)
t_service = Thread(target=self.__watch_service)
t_pod = Thread(target=self.__watch, args=("pod",))
t_ingress = Thread(target=self.__watch, args=("ingress",))
t_service = Thread(target=self.__watch, args=("service",))
t_pod.start()
t_ingress.start()
t_service.start()
@@ -106,63 +118,41 @@ class IngressController(Controller.Controller) :
t_ingress.join()
t_service.join()
def __watch_pod(self) :
def __watch(self, type) :
w = watch.Watch()
for event in w.stream(self.__api.list_pod_for_all_namespaces, label_selector="bunkerized-nginx") :
what = None
if type == "pod" :
what = self.__api.list_pod_for_all_namespaces
elif type == "ingress" :
what = self.__extensions_api.list_ingress_for_all_namespaces
elif type == "service" :
what = self.__api.list_service_for_all_namespaces
for event in w.stream(what, label_selector="bunkerized-nginx") :
self.lock.acquire()
new_env = self.get_env()
if new_env != self.__old_env :
try :
if self.gen_conf(new_env) :
self.__old_env = new_env.copy()
log("CONTROLLER", "INFO", "successfully generated new configuration")
if self.reload() :
log("controller", "INFO", "successful reload")
else :
log("controller", "ERROR", "failed reload")
except :
log("controller", "ERROR", "exception while receiving event")
self.lock.release()
def __watch_ingress(self) :
w = watch.Watch()
for event in w.stream(self.__extensions_api.list_ingress_for_all_namespaces, label_selector="bunkerized-nginx") :
self.lock.acquire()
new_env = self.get_env()
if new_env != self.__old_env :
try :
if self.gen_conf(new_env) :
self.__old_env = new_env.copy()
log("CONTROLLER", "INFO", "successfully generated new configuration")
if self.reload() :
log("controller", "INFO", "successful reload")
else :
log("controller", "ERROR", "failed reload")
except :
log("controller", "ERROR", "exception while receiving event")
self.lock.release()
def __watch_service(self) :
w = watch.Watch()
for event in w.stream(self.__api.list_service_for_all_namespaces, label_selector="bunkerized-nginx") :
self.lock.acquire()
new_env = self.get_env()
if new_env != self.__old_env :
try :
if self.gen_conf(new_env) :
self.__old_env = new_env.copy()
log("CONTROLLER", "INFO", "successfully generated new configuration")
if self.reload() :
log("controller", "INFO", "successful reload")
else :
log("controller", "ERROR", "failed reload")
except :
log("controller", "ERROR", "exception while receiving event")
if not self.gen_conf(new_env) :
raise Exception("can't generate configuration")
if not self.send() :
raise Exception("can't send configuration")
if not self.reload() :
raise Exception("can't reload configuration")
self.__old_env = new_env.copy()
log("CONTROLLER", "INFO", "successfully loaded new configuration")
except Exception as e :
log("controller", "ERROR", "error while computing new event : " + str(e))
self.lock.release()
def reload(self) :
return self._reload(self.__get_services(autoconf=True))
def send(self, files="all") :
return self._send(self.__get_services(autoconf=True), files=files)
def stop_temp(self) :
return self._stop_temp(self.__get_services(autoconf=True))
def wait(self) :
self.lock.acquire()
try :
@@ -171,20 +161,28 @@ class IngressController(Controller.Controller) :
while len(pods) == 0 :
time.sleep(1)
pods = self.__get_pods()
# Wait for at least one bunkerized-nginx service
services = self.__get_services(autoconf=True)
while len(services) == 0 :
time.sleep(1)
services = self.__get_services(autoconf=True)
# Generate first config
env = self.get_env()
if not self.gen_conf(env) :
self.lock.release()
return False, env
# Send the config
if not self.send() :
self.lock.release()
return False, env
# Stop the temporary server
if not self.stop_temp() :
self.lock.release()
return False, env
# Wait for bunkerized-nginx
if not self._config.wait(instances) :
self.lock.release()
return False, env
self.lock.release()
return self._config.wait(services), env
except :

View File

@@ -2,7 +2,7 @@ import socketserver, threading, os, stat
from logger import log
class ReloadServerHandler(socketserver.StreamRequestHandler):
class ReloadServerHandler(socketserver.BaseRequestHandler):
def handle(self) :
locked = False
@@ -10,7 +10,7 @@ class ReloadServerHandler(socketserver.StreamRequestHandler):
while True :
data = self.request.recv(512)
if not data or not data in [b"lock", b"reload", b"unlock"] :
if not data or not data in [b"lock", b"reload", b"unlock", b"acme"] :
break
if data == b"lock" :
self.server.controller.lock.acquire()
@@ -20,6 +20,12 @@ class ReloadServerHandler(socketserver.StreamRequestHandler):
self.server.controller.lock.release()
locked = False
self.request.sendall(b"ok")
elif data == b"acme" :
ret = self.server.controller.send(files="acme")
if ret :
self.request.sendall(b"ok")
else :
self.request.sendall(b"ko")
elif data == b"reload" :
ret = self.server.controller.reload()
if ret :
@@ -31,8 +37,11 @@ class ReloadServerHandler(socketserver.StreamRequestHandler):
if locked :
self.server.controller.lock.release()
class ThreadingUnixServer(socketserver.ThreadingMixIn, socketserver.UnixStreamServer) :
pass
def run_reload_server(controller) :
server = socketserver.UnixStreamServer("/tmp/autoconf.sock", ReloadServerHandler)
server = ThreadingUnixServer("/tmp/autoconf.sock", ReloadServerHandler)
os.chown("/tmp/autoconf.sock", 0, 101)
os.chmod("/tmp/autoconf.sock", 0o770)
server.controller = controller

View File

@@ -46,23 +46,27 @@ class SwarmController(Controller.Controller) :
if new_env != old_env :
self.lock.acquire()
try :
log("controller", "INFO", "generating new configuration")
if self.gen_conf(new_env) :
old_env = new_env.copy()
log("controller", "INFO", "successfully generated new configuration")
if self.reload() :
log("controller", "INFO", "successful reload")
else :
log("controller", "ERROR", "failed reload")
else :
log("controller", "ERROR", "can't generate new configuration")
except :
log("controller", "ERROR", "exception while receiving event")
if not self.gen_conf(new_env) :
raise Exception("can't generate configuration")
if not self.send() :
raise Exception("can't send configuration")
if not self.reload() :
raise Exception("can't reload configuration")
self.__old_env = new_env.copy()
log("CONTROLLER", "INFO", "successfully loaded new configuration")
except Exception as e :
log("controller", "ERROR", "error while computing new event : " + str(e))
self.lock.release()
def reload(self) :
return self._reload(self.__get_instances())
def send(self, files="all") :
return self._send(self.__get_instances(), files=files)
def stop_temp(self) :
return self._stop_temp(self.__get_instances())
def wait(self) :
self.lock.acquire()
try :
@@ -71,14 +75,29 @@ class SwarmController(Controller.Controller) :
while len(instances) == 0 :
time.sleep(1)
instances = self.__get_instances()
# Wait for temporary bunkerized-nginx
if not self._config.wait(instances) :
self.lock.release()
return False, env
# Generate first config
env = self.get_env()
if not self.gen_conf(env) :
self.lock.release()
return False, env
# Wait for nginx
# Send the config
if not self.send() :
self.lock.release()
return False, env
# Stop the temporary server
if not self.stop_temp() :
self.lock.release()
return False, env
# Wait for bunkerized-nginx
if not self._config.wait(instances) :
self.lock.release()
return False, env
self.lock.release()
return self._config.wait(instances), env
return True, env
except :
pass
self.lock.release()

View File

@@ -1,6 +1,4 @@
location ~ ^%API_URI%/ping {
return 444;
}
client_max_body_size 1G;
location ~ %API_URI% {
@@ -15,10 +13,10 @@ rewrite_by_lua_block {
ngx.header.content_type = 'text/plain'
if api.do_api_call(api_uri) then
logger.log(ngx.NOTICE, "API", "API call " .. ngx.var.request_uri .. " successfull from " .. ngx.var.remote_addr)
ngx.say("ok")
ngx.print("ok")
else
logger.log(ngx.WARN, "API", "API call " .. ngx.var.request_uri .. " failed from " .. ngx.var.remote_addr)
ngx.say("ko")
ngx.print("ko")
end
ngx.exit(ngx.HTTP_OK)
@@ -29,3 +27,4 @@ rewrite_by_lua_block {
}
}

View File

@@ -1,4 +1,5 @@
# todo : if api_uri == "random"
client_max_body_size 1G;
rewrite_by_lua_block {
local api = require "api"

View File

@@ -3,6 +3,8 @@ init_by_lua_block {
local dataloader = require "dataloader"
local logger = require "logger"
local cjson = require "cjson"
local remoteapi = require "remoteapi"
local iputils = require "resty.iputils"
local use_redis = {% if USE_REDIS == "yes" %}true{% else %}false{% endif +%}
@@ -12,6 +14,35 @@ local use_tor_exit_nodes = {% if has_value("BLOCK_TOR_EXIT_NODE", "yes") %}true{
local use_user_agents = {% if has_value("BLOCK_USER_AGENT", "yes") %}true{% else %}false{% endif +%}
local use_referrers = {% if has_value("BLOCK_REFERRER", "yes") %}true{% else %}false{% endif +%}
local use_remote_api = {% if has_value("USE_REMOTE_API", "yes") %}true{% else %}false{% endif +%}
-- Load reserved IPs
local reserved_ips = {
"0.0.0.0/8",
"10.0.0.0/8",
"100.64.0.0/10",
"127.0.0.0/8",
"169.254.0.0/16",
"172.16.0.0/12",
"192.0.0.0/24",
"192.0.2.0/24",
"192.88.99.0/24",
"192.168.0.0/16",
"198.18.0.0/15",
"198.51.100.0/24",
"203.0.113.0/24",
"224.0.0.0/4",
"233.252.0.0/24",
"240.0.0.0/4",
"255.255.255.255/32"
}
local success, err, forcible = ngx.shared.reserved_ips:set("data", cjson.encode(iputils.parse_cidrs(reserved_ips)), 0)
if not success then
logger.log(ngx.ERR, "INIT", "Can't load reserved IPs : " .. err)
end
-- Load blacklists
if not use_redis then
if use_proxies then
dataloader.load_ip("/etc/nginx/proxies.list", ngx.shared.proxies_data)
@@ -72,4 +103,45 @@ for dir in p:lines() do
end
p:close()
-- Remote API
if use_remote_api then
-- Save server
ngx.shared.remote_api:set("server", "{{ REMOTE_API_SERVER }}", 0)
-- Save version
local f = io.open("/opt/bunkerized-nginx/VERSION", "r")
ngx.shared.remote_api:set("version", f:read("*all"):gsub("[\r\n]", ""), 0)
f:close()
-- Save machine ID
local id = "empty"
local f = io.open("/etc/nginx/machine.id", "r")
if f == nil then
logger.log(ngx.ERR, "REMOTE API", "USE_REMOTE_API is set to yes but machine ID is not generated - communication with {{ REMOTE_API_SERVER }} won't work")
else
id = f:read("*all"):gsub("[\r\n]", "")
logger.log(ngx.ERR, "REMOTE API", "*NOT AN ERROR* Using existing machine ID (" .. id .. ")")
f:close()
end
ngx.shared.remote_api:set("id", id, 0)
-- Ping the remote API
local ping = "ko"
if id ~= "empty" then
if remoteapi.ping2() then
ping = "ok"
logger.log(ngx.ERR, "REMOTE API", "*NOT AN ERROR* Successfully requested the remote API")
else
logger.log(ngx.ERR, "REMOTE API", "Can't contact the remote API, feature will be disabled")
end
end
ngx.shared.remote_api:set("ping", ping, 0)
-- Load the database
if ping ~= "ko" then
dataloader.load_ip("/etc/nginx/remote-api.db", ngx.shared.remote_api_db)
end
end
}

View File

@@ -1,6 +1,6 @@
load_module /usr/lib/nginx/modules/ngx_http_lua_module.so;
daemon on;
#daemon on;
pid /tmp/nginx-temp.pid;

View File

@@ -76,6 +76,8 @@ http {
# lua path and dicts
lua_package_path "/opt/bunkerized-nginx/lua/?.lua;/opt/bunkerized-nginx/plugins/?.lua;/opt/bunkerized-nginx/deps/lib/lua/?.lua;;";
lua_package_cpath "/opt/bunkerized-nginx/deps/lib/?.so;/opt/bunkerized-nginx/deps/lib/lua/?.so;;";
lua_ssl_trusted_certificate "/opt/bunkerized-nginx/lua/misc/root-ca.pem";
lua_ssl_verify_depth 2;
{% if has_value("USE_WHITELIST_IP", "yes") %}lua_shared_dict whitelist_ip_cache 10m;{% endif +%}
{% if has_value("USE_WHITELIST_REVERSE", "yes") %}lua_shared_dict whitelist_reverse_cache 10m;{% endif +%}
{% if has_value("USE_BLACKLIST_IP", "yes") %}lua_shared_dict blacklist_ip_cache 10m;{% endif +%}
@@ -90,10 +92,14 @@ http {
{% if has_value("BLOCK_REFERRER", "yes") %}lua_shared_dict referrers_cache 10m;{% endif +%}
{% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_ban 10m;{% endif +%}
{% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_count 10m;{% endif +%}
{% if has_value("USE_LIMIT_REQ", "yes") %}lua_shared_dict limit_req {{ LIMIT_REQ_CACHE }};{% endif +%}
lua_shared_dict plugins_data 10m;
lua_shared_dict reserved_ips 1m;
{% if has_value("USE_REMOTE_API", "yes") %}lua_shared_dict remote_api 1m;{% endif +%}
{% if has_value("USE_REMOTE_API", "yes") %}lua_shared_dict remote_api_db 10m;{% endif +%}
# shared memory zone for limit_req
{% if has_value("USE_LIMIT_REQ", "yes") %}limit_req_zone $binary_remote_addr$uri zone=limit:{{ LIMIT_REQ_CACHE }} rate={{ LIMIT_REQ_RATE }};{% endif +%}
#{% if has_value("USE_LIMIT_REQ", "yes") %}limit_req_zone $binary_remote_addr$uri zone=limit:{{ LIMIT_REQ_CACHE }} rate={{ LIMIT_REQ_RATE }};{% endif +%}
# shared memory zone for limit_conn
{% if has_value("USE_LIMIT_CONN", "yes") %}limit_conn_zone $binary_remote_addr zone=ddos:{{ LIMIT_CONN_CACHE }};{% endif +%}

View File

@@ -6,7 +6,7 @@ location = {{ ANTIBOT_URI }} {
content_by_lua_block {
local cookie = require "cookie"
local recaptcha = require "recaptcha"
local loggger = require "logger"
local logger = require "logger"
if not cookie.is_set("uri") then
logger.log(ngx.WARN, "ANTIBOT", "recaptcha fail (1) for " .. ngx.var.remote_addr)
return ngx.exit(ngx.HTTP_FORBIDDEN)

View File

@@ -1,5 +1,8 @@
log_by_lua_block {
local logger = require "logger"
local cjson = require "cjson"
-- bad behavior
local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif +%}
local behavior = require "behavior"
@@ -9,7 +12,47 @@ local bad_behavior_count_time = {{ BAD_BEHAVIOR_COUNT_TIME }}
local bad_behavior_ban_time = {{ BAD_BEHAVIOR_BAN_TIME }}
if use_bad_behavior then
behavior.count(bad_behavior_status_codes, bad_behavior_threshold, bad_behavior_count_time, bad_behavior_ban_time)
local new_bad_behavior_ban = false
if not behavior.is_banned() then
new_bad_behavior_ban = behavior.count(bad_behavior_status_codes, bad_behavior_threshold, bad_behavior_count_time, bad_behavior_ban_time)
end
end
-- remote API
local use_remote_api = {% if USE_REMOTE_API == "yes" %}true{% else %}false{% endif +%}
local remoteapi = require "remoteapi"
local iputils = require "resty.iputils"
if use_remote_api and ngx.status == ngx.HTTP_FORBIDDEN and not iputils.ip_in_cidrs(ngx.var.remote_addr, cjson.decode(ngx.shared.reserved_ips:get("data"))) and ngx.shared.remote_api:get("id") ~= "empty" then
if ngx.shared.remote_api:get("ping") == "ko" then
if remoteapi.ping2() then
ngx.shared.remote_api:set("ping", "ok", 0)
logger.log(ngx.NOTICE, "REMOTE API", "Successfully requested the remote API again")
else
logger.log(ngx.ERR, "REMOTE API", "Can't contact the remote API, feature will be disabled")
end
end
if ngx.shared.remote_api:get("ping") ~= "ko" then
local reason = "other"
if use_bad_behavior and new_bad_behavior_ban then
reason = "behavior"
end
local report_ip = function (premature, ip, reason)
if premature then
return
end
local remoteapi = require "remoteapi"
local logger = require "logger"
local res, data = remoteapi.ip(ip, reason)
-- TODO : find a way to log
end
local ok, err = ngx.timer.at(0, report_ip, ngx.var.remote_addr, reason)
if not ok then
logger.log(ngx.ERR, "REMOTE API", "Error while creating report timer " .. err)
else
logger.log(ngx.NOTICE, "REMOTE API", "Reporting " .. ngx.var.remote_addr .. "(reason: " .. reason .. ") to the remote API")
end
end
end
}

View File

@@ -57,19 +57,27 @@ local dnsbl_list = {% raw %}{{% endraw %}{% if DNSBL_LIST != "" %}{% set elemen
-- bad behavior
local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif +%}
-- limit req
local use_limit_req = {% if USE_LIMIT_REQ == "yes" %}true{% else %}false{% endif +%}
-- remote API
local use_remote_api = {% if USE_REMOTE_API == "yes" %}true{% else %}false{% endif +%}
-- include LUA code
local whitelist = require "whitelist"
local blacklist = require "blacklist"
local dnsbl = require "dnsbl"
local dnsbl = require "dnsbl"
local cookie = require "cookie"
local cjson = require "cjson"
local javascript = require "javascript"
local captcha = require "captcha"
local recaptcha = require "recaptcha"
local iputils = require "resty.iputils"
local behavior = require "behavior"
local logger = require "logger"
local redis = require "resty.redis"
local redis = require "resty.redis"
local checker = require "checker"
local limitreq = require "limitreq"
-- user variables
local antibot_uri = "{{ ANTIBOT_URI }}"
@@ -145,6 +153,30 @@ if use_bad_behavior and behavior.is_banned() then
ngx.exit(ngx.HTTP_FORBIDDEN)
end
-- check if IP is banned because of "request limit"
if use_limit_req then
{% if USE_LIMIT_REQ == "yes" %}
{% for k, v in all.items() %}
{% if k.startswith("LIMIT_REQ_URL") and v != "" +%}
{% set url = v %}
{% set rate = all[k.replace("URL", "RATE")] if k.replace("URL", "RATE") in all else "1r/s" %}
{% set burst = all[k.replace("URL", "BURST")] if k.replace("URL", "BURST") in all else "5" %}
{% set delay = all[k.replace("URL", "DELAY")] if k.replace("URL", "DELAY") in all else "1" %}
{% if url == "/" %}
if limitreq.check("{{ rate }}", {{ burst }}, {{ delay }}) then
ngx.exit(ngx.HTTP_TOO_MANY_REQUESTS)
end
{% else %}
if ngx.var.uri == "{{ url }}" and limitreq.check("{{ rate }}", {{ burst }}, {{ delay }}) then
ngx.exit(ngx.HTTP_TOO_MANY_REQUESTS)
end
{% endif %}
{% endif %}
{% endfor %}
{% endif %}
end
-- our redis client
local redis_client = nil
if use_redis then
@@ -212,7 +244,7 @@ if use_referrer and ngx.var.http_referer ~= nil then
end
-- check if country is allowed
if use_country and ngx.var.allowed_country == "no" then
if use_country and ngx.var.allowed_country == "no" and not iputils.ip_in_cidrs(ngx.var.remote_addr, cjson.decode(ngx.shared.reserved_ips:get("data"))) then
logger.log(ngx.WARN, "COUNTRY", "Country of " .. ngx.var.remote_addr .. " is blacklisted")
ngx.exit(ngx.HTTP_FORBIDDEN)
end
@@ -224,6 +256,15 @@ if use_dnsbl and not dnsbl.cached() then
end
end
-- check if IP is in distributed DB
if use_remote_api then
local checker = checker:new("remote-api-db", ngx.shared.remote_api_db, redis_client, "simple")
if checker:check(iputils.ip2bin(ngx.var.remote_addr)) then
logger.log(ngx.WARN, "REMOTE API", "IP " .. ngx.var.remote_addr .. " is in the distributed DB")
ngx.exit(ngx.HTTP_FORBIDDEN)
end
end
-- cookie check
if use_antibot_cookie and ngx.var.uri ~= "/favicon.ico" then
if not cookie.is_set("uri") then

View File

@@ -65,9 +65,9 @@ server {
}
# requests limiting
{% if USE_LIMIT_REQ == "yes" +%}
include {{ NGINX_PREFIX }}limit-req.conf;
{% endif %}
#{% if USE_LIMIT_REQ == "yes" +%}
# include {{ NGINX_PREFIX }}limit-req.conf;
#{% endif %}
# connections limiting
{% if USE_LIMIT_CONN == "yes" +%}

View File

@@ -50,7 +50,7 @@ copyright = '2021, bunkerity'
author = 'bunkerity'
# The full version, including alpha/beta/rc tags
release = 'v1.3.1'
release = 'v1.3.2'
# -- General configuration ---------------------------------------------------

View File

@@ -238,7 +238,7 @@ You can set multiple url/host by adding a suffix number to the variable name lik
Values : *yes* | *no*
Default value : *no*
Context : *global*, *multisite*
Set this environment variable to *yes* if you're using bunkerized-nginx behind a reverse proxy. This means you will see the real client address instead of the proxy one inside your logs. Ssecurity tools will also then work correctly.
Set this environment variable to *yes* if you're using bunkerized-nginx behind a reverse proxy. This means you will see the real client address instead of the proxy one inside your logs. Security tools will also then work correctly.
`PROXY_REAL_IP_FROM`
Values : *\<list of trusted IP addresses and/or networks separated with spaces\>*
@@ -603,14 +603,14 @@ More info [here](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Refer
`FEATURE_POLICY`
Values : *&lt;directive&gt; &lt;allow list&gt;*
Default value : *accelerometer 'none'; ambient-light-sensor 'none'; autoplay 'none'; camera 'none'; display-capture 'none'; document-domain 'none'; encrypted-media 'none'; fullscreen 'none'; geolocation 'none'; gyroscope 'none'; magnetometer 'none'; microphone 'none'; midi 'none'; payment 'none'; picture-in-picture 'none'; speaker 'none'; sync-xhr 'none'; usb 'none'; vibrate 'none'; vr 'none'*
Default value : *accelerometer 'none'; ambient-light-sensor 'none'; autoplay 'none'; battery 'none'; camera 'none'; display-capture 'none'; document-domain 'none'; encrypted-media 'none'; fullscreen 'none'; geolocation 'none'; gyroscope 'none'; magnetometer 'none'; microphone 'none'; midi 'none'; payment 'none'; picture-in-picture 'none'; publickey-credentials-get 'none'; sync-xhr 'none'; usb 'none'; wake-lock 'none'; web-share 'none'; xr-spatial-tracking 'none"*
Context : *global*, *multisite*
Tells the browser which features can be used on the website.
More info [here](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Feature-Policy).
`PERMISSIONS_POLICY`
Values : *feature=(allow list)*
Default value : accelerometer=(), ambient-light-sensor=(), autoplay=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), fullscreen=(), geolocation=(), gyroscope=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), speaker=(), sync-xhr=(), usb=(), vibrate=(), vr=()
Default value : *accelerometer=(), ambient-light-sensor=(), autoplay=(), battery=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), fullscreen=(), geolocation=(), gyroscope=(), interest-cohort=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), publickey-credentials-get=(), screen-wake-lock=(), sync-xhr=(), usb=(), web-share=(), xr-spatial-tracking=()*
Context : *global*, *multisite*
Tells the browser which features can be used on the website.
More info [here](https://www.w3.org/TR/permissions-policy-1/).
@@ -686,6 +686,20 @@ Default value :
Context : *global*, *multisite*
The secret given by Google when `USE_ANTIBOT` is set to *recaptcha*.
### Distributed blacklist
`USE_REMOTE_API`
Values : *yes* | *no*
Default value : *yes*
Context : *global*, *multisite*
If set to yes, the instance will participate in the distributed blacklist shared among all other instances. The blacklist will be automatically downloaded on a periodic basis.
`REMOTE_API_SERVER`
Values : *\<any valid full URL\>*
Default value :
Context : *global*
Full URL of the remote API used for the distributed blacklist.
### External blacklists
`BLOCK_USER_AGENT`
@@ -828,19 +842,34 @@ Values : *yes* | *no*
Default value : *yes*
Context : *global*, *multisite*
If set to yes, the amount of HTTP requests made by a user for a given resource will be limited during a period of time.
More info rate limiting [here](https://www.nginx.com/blog/rate-limiting-nginx/) (the key used is $binary_remote_addr$uri).
`LIMIT_REQ_URL`
Values : *\<any valid url\>*
Default value :
Context : *global*, *multisite*
The URL where you want to apply the request limiting. Use special value of `/` to apply it globally for all URL.
You can set multiple rules by adding a suffix number to the variable name like this : `LIMIT_REQ_URL_1`, `LIMIT_REQ_URL_2`, `LIMIT_REQ_URL_3`, ...
`LIMIT_REQ_RATE`
Values : *Xr/s* | *Xr/m*
Values : *Xr/s* | *Xr/m* | *Xr/h* | *Xr/d*
Default value : *1r/s*
Context : *global*, *multisite*
The rate limit to apply when `USE_LIMIT_REQ` is set to *yes*. Default is 1 request to the same URI and from the same IP per second.
The rate limit to apply when `USE_LIMIT_REQ` is set to *yes*. Default is 1 request to the same URI and from the same IP per second. Possible values are : `s` (second), `m` (minute), `h` (hour) and `d` (day).
You can set multiple rules by adding a suffix number to the variable name like this : `LIMIT_REQ_RATE_1`, `LIMIT_REQ_RATE_2`, `LIMIT_REQ_RATE_3`, ...
`LIMIT_REQ_BURST`
Values : *<any valid integer\>*
Default value : *2*
Values : *\<any valid integer\>*
Default value : *5*
Context : *global*, *multisite*
The number of requests to put in queue before rejecting requests.
The number of requests to put in queue before rejecting requests.
You can set multiple rules by adding a suffix number to the variable name like this : `LIMIT_REQ_BURST_1`, `LIMIT_REQ_BURST_2`, `LIMIT_REQ_BURST_3`, ...
`LIMIT_REQ_DELAY`
Values : *\<any valid float\>*
Default value : *1*
Context : *global*, *multisite*
The number of seconds to wait before requests in queue are processed. Values like `0.1`, `0.01` or `0.001` are also accepted.
You can set multiple rules by adding a suffix number to the variable name like this : `LIMIT_REQ_DELAY_1`, `LIMIT_REQ_DELAY_2`, `LIMIT_REQ_DELAY_3`, ...
`LIMIT_REQ_CACHE`
Values : *Xm* | *Xk*

Binary file not shown.

Before

Width:  |  Height:  |  Size: 128 KiB

After

Width:  |  Height:  |  Size: 78 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 148 KiB

After

Width:  |  Height:  |  Size: 89 KiB

View File

@@ -241,23 +241,13 @@ When your container is not needed anymore, you can delete it as usual. The autoc
## Docker Swarm
Using bunkerized-nginx in a Docker Swarm cluster requires a shared folder accessible from both managers and workers (anything like NFS, GlusterFS, CephFS or even SSHFS will work). The deployment and configuration is very similar to the "Docker autoconf" one but with services instead of containers. A service based on the bunkerized-nginx-autoconf image needs to be scheduled on a manager node (don't worry it doesn't expose any network port for obvious security reasons). This service will listen for Docker Swarm events like service creation or deletion and generate the configuration according to the labels of each service. Once configuration generation is done, the bunkerized-nginx-autoconf service will send a reload order to all the bunkerized-nginx tasks so they can load the new configuration.
The deployment and configuration is very similar to the "Docker autoconf" one but with services instead of containers. A service based on the bunkerized-nginx-autoconf image needs to be scheduled on a manager node (don't worry it doesn't expose any network port for obvious security reasons). This service will listen for Docker Swarm events like service creation or deletion and generate the configuration according to the labels of each service. Once configuration generation is done, the bunkerized-nginx-autoconf service will send the configuration files and a reload order to all the bunkerized-nginx tasks so they can apply the new configuration. If you need to deliver static files (e.g., html, images, css, js, ...) a shared folder accessible from all bunkerized-nginx instances is needed (you can use a storage system like NFS, GlusterFS, CephFS on the host or a [Docker volume plugin](https://docs.docker.com/engine/extend/)).
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/swarm.png?raw=true" />
**We will assume that a shared directory is mounted at the /shared location on both your managers and workers. Keep in mind that bunkerized-nginx and autoconf are running as unprivileged users with UID and GID 101. You must set the rights and permissions of the subfolders in /shared accordingly.**
In this setup we will deploy bunkerized-nginx in global mode on all workers and autoconf as a single replica on a manager.
First of all, you will need to setup the shared folders :
```shell
$ cd /shared
$ mkdir www confs letsencrypt acme-challenge
$ chown root:101 www confs letsencrypt acme-challenge
$ chmod 770 www confs letsencrypt acme-challenge
```
Then you will need to create 2 networks, one for the communication between bunkerized-nginx and autoconf and the other one for the communication between bunkerized-nginx and the web services :
First of all, you will need to create 2 networks, one for the communication between bunkerized-nginx and autoconf and the other one for the communication between bunkerized-nginx and the web services :
```shell
$ docker network create -d overlay --attachable bunkerized-net
$ docker network create -d overlay --attachable services-net
@@ -273,10 +263,6 @@ $ docker service create \
--network bunkerized-net \
-p published=80,target=8080,mode=host \
-p published=443,target=8443,mode=host \
--mount type=bind,source=/shared/confs,destination=/etc/nginx,ro \
--mount type=bind,source=/shared/www,destination=/www,ro \
--mount type=bind,source=/shared/letsencrypt,destination=/etc/letsencrypt,ro \
--mount type=bind,source=/shared/acme-challenge,destination=/acme-challenge,ro \
-e SWARM_MODE=yes \
-e USE_API=yes \
-e API_URI=/ChangeMeToSomethingHardToGuess \
@@ -297,9 +283,8 @@ $ docker service create \
--constraint node.role==manager \
--network bunkerized-net \
--mount type=bind,source=/var/run/docker.sock,destination=/var/run/docker.sock,ro \
--mount type=bind,source=/shared/confs,destination=/etc/nginx \
--mount type=bind,source=/shared/letsencrypt,destination=/etc/letsencrypt \
--mount type=bind,source=/shared/acme-challenge,destination=/acme-challenge \
--mount type=volume,source=cache-vol,destination=/cache \
--mount type=volume,source=certs-vol,destination=/etc/letsencrypt \
-e SWARM_MODE=yes \
-e API_URI=/ChangeMeToSomethingHardToGuess \
bunkerity/bunkerized-nginx-autoconf
@@ -322,11 +307,6 @@ services:
target: 8443
mode: host
protocol: tcp
volumes:
- /shared/confs:/etc/nginx:ro
- /shared/www:/www:ro
- /shared/letsencrypt:/etc/letsencrypt:ro
- /shared/acme-challenge:/acme-challenge:ro
environment:
- SWARM_MODE=yes
- USE_API=yes
@@ -350,9 +330,8 @@ services:
image: bunkerity/bunkerized-nginx-autoconf
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- /shared/confs:/etc/nginx
- /shared/letsencrypt:/etc/letsencrypt
- /shared/acme-challenge:/acme-challenge
- cache-vol:/cache
- certs-vol:/etc/letsencrypt
environment:
- SWARM_MODE=yes
- API_URI=/ChangeMeToSomethingHardToGuess # must match API_URI from nginx
@@ -374,6 +353,10 @@ networks:
driver: overlay
attachable: true
name: services-net
# And the volumes too
volumes:
cache-vol:
certs-vol:
```
Check the logs of both autoconf and bunkerized-nginx services to see if everything is working as expected.
@@ -427,20 +410,10 @@ When your service is not needed anymore, you can delete it as usual. The autocon
**This integration is still in beta, please fill an issue if you find a bug or have an idea on how to improve it.**
Using bunkerized-nginx in a Kubernetes cluster requires a shared folder accessible from the nodes (anything like NFS, GlusterFS, CephFS or even SSHFS will work). The bunkerized-nginx-autoconf acts as an Ingress Controller and connects to the k8s API to get cluster events and generate a new configuration when it's needed. Once the configuration is generated, the Ingress Controller sends a reload order to the bunkerized-nginx instances running in the cluster.
The bunkerized-nginx-autoconf acts as an Ingress Controller and connects to the k8s API to get cluster events and generate a new configuration when it's needed. Once the configuration is generated, the Ingress Controller sends the configuration files and a reload order to the bunkerized-nginx instances running in the cluster. If you need to deliver static files (e.g., html, images, css, js, ...) a shared folder accessible from all bunkerized-nginx instances is needed (you can use a storage system like NFS, GlusterFS, CephFS on the host or a [Kubernetes Volume that supports ReadOnlyMany access](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)).
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/kubernetes.png?raw=true" />
**We will assume that a shared directory is mounted at the /shared location on your nodes. Keep in mind that bunkerized-nginx and autoconf are running as unprivileged users with UID and GID 101. You must set the rights and permissions of the subfolders in /shared accordingly.**
First of all, you will need to setup the shared folders :
```shell
$ cd /shared
$ mkdir www confs letsencrypt acme-challenge
$ chown root:nginx www confs letsencrypt acme-challenge
$ chmod 770 www confs letsencrypt acme-challenge
```
The first step to do is to declare the RBAC authorization that will be used by the Ingress Controller to access the Kubernetes API. A ready-to-use declaration is available here :
```yaml
apiVersion: rbac.authorization.k8s.io/v1
@@ -506,7 +479,7 @@ spec:
- name: KUBERNETES_MODE
value: "yes"
- name: DNS_RESOLVERS
value: "kube-dns.kube-system.svc.cluster.local"
value: "coredns.kube-system.svc.cluster.local"
- name: USE_API
value: "yes"
- name: API_URI
@@ -515,36 +488,6 @@ spec:
value: ""
- name: MULTISITE
value: "yes"
volumeMounts:
- name: confs
mountPath: /etc/nginx
readOnly: true
- name: letsencrypt
mountPath: /etc/letsencrypt
readOnly: true
- name: acme-challenge
mountPath: /acme-challenge
readOnly: true
- name: www
mountPath: /www
readOnly: true
volumes:
- name: confs
hostPath:
path: /shared/confs
type: Directory
- name: letsencrypt
hostPath:
path: /shared/letsencrypt
type: Directory
- name: acme-challenge
hostPath:
path: /shared/acme-challenge
type: Directory
- name: www
hostPath:
path: /shared/www
type: Directory
---
apiVersion: v1
kind: Service
@@ -562,10 +505,19 @@ spec:
name: bunkerized-nginx
```
Important thing to note, labels and annotations defined are mandatory for autoconf to work.
You can now deploy the autoconf which will act as the ingress controller :
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-nginx
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -583,6 +535,30 @@ spec:
app: bunkerized-nginx-autoconf
spec:
serviceAccountName: bunkerized-nginx-ingress-controller
volumes:
- name: vol-nginx
persistentVolumeClaim:
claimName: pvc-nginx
initContainers:
- name: change-data-dir-permissions
command:
- chown
- -R
- 101:101
- /etc/letsencrypt
- /cache
image: busybox
volumeMounts:
- name: vol-nginx
mountPath: /etc/letsencrypt
subPath: letsencrypt
- name: vol-nginx
mountPath: /cache
subPath: cache
securityContext:
runAsNonRoot: false
runAsUser: 0
runAsGroup: 0
containers:
- name: bunkerized-nginx-autoconf
image: bunkerity/bunkerized-nginx-autoconf
@@ -592,25 +568,12 @@ spec:
- name: API_URI
value: "/ChangeMeToSomethingHardToGuess"
volumeMounts:
- name: confs
mountPath: /etc/nginx
- name: letsencrypt
- name: vol-nginx
mountPath: /etc/letsencrypt
- name: acme-challenge
mountPath: /acme-challenge
volumes:
- name: confs
hostPath:
path: /shared/confs
type: Directory
- name: letsencrypt
hostPath:
path: /shared/letsencrypt
type: Directory
- name: acme-challenge
hostPath:
path: /shared/acme-challenge
type: Directory
subPath: letsencrypt
- name: vol-nginx
mountPath: /cache
subPath: cache
```
Check the logs of both bunkerized-nginx and autoconf deployments to see if everything is working as expected.
@@ -721,17 +684,18 @@ List of supported Linux distributions :
- Ubuntu focal (20.04)
- CentOS 7
- Fedora 34
- Arch Linux
Unlike containers, Linux integration can be tedious because bunkerized-nginx has a bunch of dependencies that need to be installed before we can use it. Fortunately, we provide a helper script to make the process easier and automatic. Once installed, the configuration is really simple, all you have to do is to edit the `/opt/bunkerized-nginx/variables.env` configuration file and run the `bunkerized-nginx` command to apply it.
First of all you will need to install bunkerized-nginx. The recommended way is to use the official installer script :
```shell
$ curl -fsSL https://github.com/bunkerity/bunkerized-nginx/releases/download/v1.3.1/linux-install.sh -o /tmp/bunkerized-nginx.sh
$ curl -fsSL https://github.com/bunkerity/bunkerized-nginx/releases/download/v1.3.2/linux-install.sh -o /tmp/bunkerized-nginx.sh
```
Before executing it, you should also check the signature :
```shell
$ curl -fsSL https://github.com/bunkerity/bunkerized-nginx/releases/download/v1.3.1/linux-install.sh.asc -o /tmp/bunkerized-nginx.sh.asc
$ curl -fsSL https://github.com/bunkerity/bunkerized-nginx/releases/download/v1.3.2/linux-install.sh.asc -o /tmp/bunkerized-nginx.sh.asc
$ gpg --auto-key-locate hkps://keys.openpgp.org --locate-keys contact@bunkerity.com
$ gpg --verify /tmp/bunkerized-nginx.sh.asc /tmp/bunkerized-nginx.sh
```

View File

@@ -62,7 +62,7 @@ docker kill --signal=SIGHUP my-container
Swarm and Kubernetes reload (repeat for each node) :
```shell
$ curl http://node-local-ip:80/reload
$ curl http://node-local-ip:80/api-uri/reload
```
Linux reload :
@@ -141,6 +141,18 @@ You can use the `USE_ANTIBOT` environment variable to add that kind of checks wh
## External blacklists
### Distributed
**This feature is in beta and will be improved regularly.**
You can benefit from a distributed blacklist shared among all of the bunkerized-nginx users.
Each time a bunkerized-nginx instance detects a bad request, the offending IP is sent to a remote API and will enrich a database. An extract of the top malicious IPs is downloaded on a periodic basis and integrated into bunkerized-nginx as a blacklist.
This feature is controlled with the `USE_REMOTE_API=yes` environment variable.
**To avoid poisoning, in addition to the various security checks made by the API we only mark IP as bad in the database if it has been seen by one of our honeypots under our control.**
### DNSBL
Automatic checks on external DNS BlackLists are enabled by default with the `USE_DNSBL=yes` environment variable. The list of DNSBL zones is also configurable, you just need to edit the `DNSBL_LIST` environment variable which contains the following value by default `bl.blocklist.de problems.dnsbl.sorbs.net sbl.spamhaus.org xbl.spamhaus.org`.
@@ -173,12 +185,16 @@ This list contains bad referrers domains known for spamming (downloaded from [he
### Requests
To limit bruteforce attacks we decided to use the [rate limiting feature in nginx](https://www.nginx.com/blog/rate-limiting-nginx/) so attackers will be limited to X request(s)/s for the same resource. That kind of protection might be useful against other attacks too (e.g., blind SQL injection).
To limit bruteforce attacks or rate limit access to your API you can use the "request limit" feature so attackers will be limited to X request(s) within a period of time for the same resource. That kind of protection might be useful against other attacks too (e.g., blind SQL injection).
Here is the list of related environment variables and their default value :
- `USE_LIMIT_REQ=yes` : enable/disable request limiting
- `LIMIT_REQ_RATE=1r/s` : the rate to apply for the same resource
- `LIMIT_REQ_BURST=2` : the number of request tu put in a queue before effectively rejecting requests
- `LIMIT_REQ_URL=` : the URL you want to protect, use `/` to apply the limit for all URL
- `LIMIT_REQ_RATE=1r/s` : the rate to apply for the resource, valid period are : `s` (second), `m` (minute), `h` (hour) and `d` (day)
- `LIMIT_REQ_BURST=5` : the number of requests to put in a queue before effectively rejecting requests
- `LIMIT_REQ_DELAY=1` : the number of seconds to wait before we process requests in the queue
Please note that you can apply different rate to different URL by appending a number as suffix (more info [here](https://bunkerized-nginx.readthedocs.io/en/latest/environment_variables.html#requests-limiting)).
### Connections

View File

@@ -49,7 +49,7 @@ if [ ! -f "/etc/nginx/global.env" ] ; then
exit 1
fi
# start temp nginx to solve Let's Encrypt challenges if needed
# start temp nginx to solve Let's Encrypt challenges if needed and serve API
/opt/bunkerized-nginx/entrypoint/nginx-temp.sh
# only do config if we are not in swarm/kubernetes mode
@@ -75,15 +75,16 @@ else
fi
# start crond
crond
# wait until config has been generated if we are in swarm mode
if [ "$SWARM_MODE" = "yes" ] || [ "$KUBERNETES_MODE" = "yes" ] ; then
log "entrypoint" "INFO" "waiting until config has been generated ..."
while [ ! -f "/etc/nginx/autoconf" ] ; do
sleep 1
done
if [ "$SWARM_MODE" != "yes" ] && [ "$KUBERNETES_MODE" != "yes" ] ; then
crond
fi
# wait until config has been generated if we are in swarm mode
#if [ "$SWARM_MODE" = "yes" ] || [ "$KUBERNETES_MODE" = "yes" ] ; then
# log "entrypoint" "INFO" "waiting until config has been generated ..."
# while [ ! -f "/etc/nginx/autoconf" ] ; do
# sleep 1
# done
#fi
# stop temp config if needed
if [ -f "/tmp/nginx-temp.pid" ] ; then

View File

@@ -87,3 +87,11 @@ fi
if [ "$(has_value BLOCK_ABUSERS yes)" != "" ] ; then
/opt/bunkerized-nginx/jobs/main.py --name abusers --cache
fi
# remote API
if [ "$(has_value USE_REMOTE_API yes)" != "" ] ; then
/opt/bunkerized-nginx/jobs/main.py --name remote-api-register --cache --server "$(grep '^REMOTE_API_SERVER=' /etc/nginx/global.env | cut -d '=' -f 2)" --version "$(cat /opt/bunkerized-nginx/VERSION)"
if [ $? -eq 0 ] ; then
/opt/bunkerized-nginx/jobs/main.py --name remote-api-database --cache --server "$(grep '^REMOTE_API_SERVER=' /etc/nginx/global.env | cut -d '=' -f 2)" --version "$(cat /opt/bunkerized-nginx/VERSION)" --id "$(cat /opt/bunkerized-nginx/cache/machine.id)"
fi
fi

View File

@@ -7,7 +7,7 @@
if [ "$(has_value AUTO_LETS_ENCRYPT yes)" != "" ] || [ "$SWARM_MODE" = "yes" ] || [ "$AUTO_LETS_ENCRYPT" = "yes" ] || [ "$KUBERNETES_MODE" = "yes" ] ; then
cp /opt/bunkerized-nginx/confs/global/nginx-temp.conf /tmp/nginx-temp.conf
cp /opt/bunkerized-nginx/confs/global/api-temp.conf /tmp/api.conf
if [ "$SWARM_MODE" = "yes" ] ; then
if [ "$SWARM_MODE" = "yes" ] || [ "$KUBERNETES_MODE" = "yes" ] ; then
replace_in_file "/tmp/nginx-temp.conf" "%USE_API%" "include /tmp/api.conf;"
replace_in_file "/tmp/api.conf" "%API_URI%" "$API_URI"
API_WHITELIST_IP="${API_WHITELIST_IP-192.168.0.0/16 172.16.0.0/12 10.0.0.0/8}"
@@ -18,10 +18,15 @@ if [ "$(has_value AUTO_LETS_ENCRYPT yes)" != "" ] || [ "$SWARM_MODE" = "yes" ] |
fi
HTTP_PORT="${HTTP_PORT-8080}"
replace_in_file "/tmp/nginx-temp.conf" "%HTTP_PORT%" "$HTTP_PORT"
nginx -c /tmp/nginx-temp.conf
if [ "$?" -eq 0 ] ; then
echo "[*] Successfully started temp nginx"
if [ "$SWARM_MODE" = "yes" ] || [ "$KUBERNETES_MODE" = "yes" ] ; then
log "nginx-temp" "INFO" "start temporary nginx server and wait for autoconf events..."
nginx -c /tmp/nginx-temp.conf -g 'daemon off;'
else
echo "[!] Can't start temp nginx"
nginx -c /tmp/nginx-temp.conf -g 'daemon on;'
if [ "$?" -eq 0 ] ; then
log "nginx-temp" "INFO" "successfully started temp nginx"
else
log "nginx-temp" "ERROR" "can't start temp nginx"
fi
fi
fi

View File

@@ -29,7 +29,7 @@ services:
- REVERSE_PROXY_HOST=https://mymoodle:8443
mymoodle:
image: bitnami/moodle
image: bitnami/moodle:latest
restart: always
volumes:
- ./moodle-files:/bitnami/moodle
@@ -43,9 +43,10 @@ services:
- MOODLE_DATABASE_NAME=moodle
- MOODLE_DATABASE_USER=user
- MOODLE_DATABASE_PASSWORD=db-user-pwd # replace with a stronger password (must match MYSQL_PASSWORD)
depends_on:
- mydb
mydb:
image: mariadb
image: mariadb:10.5
restart: always
volumes:
- ./db-data:/var/lib/mysql
@@ -54,3 +55,5 @@ services:
- MYSQL_DATABASE=moodle
- MYSQL_USER=user
- MYSQL_PASSWORD=db-user-pwd # replace with a stronger password (must match MOODLE_DATABASE_PASSWORD)
- MARIADB_CHARACTER_SET=utf8mb4
- MARIADB_COLLATE=utf8mb4_unicode_ci

View File

@@ -28,7 +28,7 @@ services:
- admin.example.com_SERVE_FILES=no
- admin.example.com_USE_REVERSE_PROXY=yes
- admin.example.com_REVERSE_PROXY_URL=/admin/ # change it to something hard to guess
- admin.example.com_REVERSE_PROXY_HOST=http://myui:5000/
- admin.example.com_REVERSE_PROXY_HOST=http://myui:5000
- admin.example.com_REVERSE_PROXY_HEADERS=X-Script-Name /admin # must match REVERSE_PROXY_URL
- admin.example.com_USE_MODSECURITY=no
labels:

View File

@@ -45,7 +45,8 @@ class Templator :
real_config["NGINX_PREFIX"] = self.__target_path
if self.__config_global["MULTISITE"] == "yes" and type == "site" :
real_config["NGINX_PREFIX"] += first_server + "/"
real_config["ROOT_FOLDER"] += "/" + first_server
if not real_config["ROOT_FOLDER"].endswith("/" + first_server) :
real_config["ROOT_FOLDER"] += "/" + first_server
if real_config["ROOT_SITE_SUBFOLDER"] != "" :
real_config["ROOT_FOLDER"] += "/" + real_config["ROOT_SITE_SUBFOLDER"]
return real_config

View File

@@ -1,33 +1,19 @@
#!/bin/sh
# prepare /www
mkdir /www
chown -R root:nginx /www
chmod -R 770 /www
# prepare /acme-challenge
mkdir /acme-challenge
chown root:nginx /acme-challenge
chmod 770 /acme-challenge
# prepare /cache
mkdir /cache
chown root:nginx /cache
chmod 770 /cache
# prepare /plugins
mkdir /plugins
chown root:nginx /plugins
chmod 770 /plugins
# prepare symlinks
# prepare folders
folders="www http-confs server-confs modsec-confs modsec-crs-confs cache pre-server-confs acme-challenge plugins"
for folder in $folders ; do
if [ -e "/opt/bunkerized-nginx/$folder" ] ; then
rm -rf "/opt/bunkerized-nginx/$folder"
if [ -e "/opt/bunkerized-nginx/${folder}" ] ; then
rm -rf "/opt/bunkerized-nginx/${folder}"
fi
mkdir "/${folder}"
chown root:nginx "/${folder}"
chmod 770 "/${folder}"
ln -s "/$folder" "/opt/bunkerized-nginx/$folder"
done
mkdir -p /acme-challenge/.well-known/acme-challenge
chown -R root:nginx /acme-challenge
chmod -R 770 /acme-challenge
# prepare /var/log
rm -f /var/log/nginx/*

View File

@@ -290,6 +290,8 @@ elif [ "$(grep CentOS /etc/os-release)" != "" ] ; then
OS="centos"
elif [ "$(grep Fedora /etc/os-release)" != "" ] ; then
OS="fedora"
elif [ "$(grep Arch /etc/os-release)" != "" ] ; then
OS="archlinux"
elif [ "$(grep Alpine /etc/os-release)" != "" ] ; then
OS="alpine"
fi
@@ -346,6 +348,11 @@ module_hotfixes=true"
elif [ "$OS" = "fedora" ] ; then
echo "[*] Install nginx"
do_and_check_cmd dnf install -y nginx
elif [ "$OS" = "archlinux" ] ; then
echo "[*] Update pacman DB"
do_and_check_cmd pacman -Sy
echo "[*] Install nginx"
do_and_check_cmd pacman -S --noconfirm nginx
elif [ "$OS" = "alpine" ] ; then
echo "[*] Add nginx official repository"
get_sign_repo_key_rsa > /etc/apk/keys/nginx_signing.rsa.pub
@@ -373,6 +380,8 @@ fi
echo "[*] Update packet list"
if [ "$OS" = "debian" ] || [ "$OS" = "ubuntu" ] ; then
do_and_check_cmd apt update
elif [ "$OS" = "archlinux" ] ; then
do_and_check_cmd pacman -Sy
fi
echo "[*] Install compilation dependencies"
if [ "$OS" = "debian" ] || [ "$OS" = "ubuntu" ] ; then
@@ -380,11 +389,14 @@ if [ "$OS" = "debian" ] || [ "$OS" = "ubuntu" ] ; then
DEBIAN_FRONTEND=noninteractive do_and_check_cmd apt install -y $DEBIAN_DEPS
elif [ "$OS" = "centos" ] ; then
do_and_check_cmd yum install -y epel-release
CENTOS_DEPS="git autoconf pkg-config pcre-devel automake libtool gcc-c++ make gd-devel openssl-devel wget brotli-devel gnupg patch readline-devel"
CENTOS_DEPS="git autoconf pkg-config pcre-devel automake libtool gcc-c++ make gd-devel openssl-devel wget brotli-devel gnupg patch readline-devel ca-certificates"
do_and_check_cmd yum install -y $CENTOS_DEPS
elif [ "$OS" = "fedora" ] ; then
FEDORA_DEPS="git autoconf pkg-config pcre-devel automake libtool gcc-c++ make gd-devel openssl-devel wget brotli-devel gnupg libxslt-devel perl-ExtUtils-Embed gperftools-devel patch readline-devel"
do_and_check_cmd dnf install -y $FEDORA_DEPS
elif [ "$OS" = "archlinux" ] ; then
ARCHLINUX_DEPS="git autoconf pkgconf pcre2 automake libtool gcc make gd openssl wget brotli gnupg libxslt patch readline"
do_and_check_cmd pacman -S --noconfirm $ARCHLINUX_DEPS
elif [ "$OS" = "alpine" ] ; then
ALPINE_DEPS="git build autoconf libtool automake git geoip-dev yajl-dev g++ gcc curl-dev libxml2-dev pcre-dev make linux-headers musl-dev gd-dev gnupg brotli-dev openssl-dev patch readline-dev"
do_and_check_cmd apk add --no-cache --virtual build $ALPINE_DEPS
@@ -610,6 +622,12 @@ git_secure_clone https://github.com/openresty/lua-resty-redis.git 91585affcd9a8d
echo "[*] Install lua-resty-redis"
CHANGE_DIR="/tmp/bunkerized-nginx/lua-resty-redis" do_and_check_cmd make PREFIX=/opt/bunkerized-nginx/deps LUA_LIB_DIR=/opt/bunkerized-nginx/deps/lib/lua install
# Download and install lua-resty-upload
echo "[*] Clone openresty/lua-resty-upload"
git_secure_clone https://github.com/openresty/lua-resty-upload.git 7baca92c7e741979ae5857989bbf6cc0402d6126
echo "[*] Install lua-resty-upload"
CHANGE_DIR="/tmp/bunkerized-nginx/lua-resty-upload" do_and_check_cmd make PREFIX=/opt/bunkerized-nginx/deps LUA_LIB_DIR=/opt/bunkerized-nginx/deps/lib/lua install
# Download nginx and decompress sources
echo "[*] Download nginx-${NGINX_VERSION}.tar.gz"
do_and_check_cmd wget -O "/tmp/bunkerized-nginx/nginx-${NGINX_VERSION}.tar.gz" "https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz"
@@ -666,6 +684,9 @@ elif [ "$OS" = "fedora" ] ; then
do_and_check_cmd dnf install -y $FEDORA_DEPS
# Temp fix
do_and_check_cmd cp /usr/lib64/nginx/modules/ngx_stream_module.so /usr/lib/nginx/modules/ngx_stream_module.so
elif [ "$OS" = "archlinux" ] ; then
ARCHLINUX_DEPS="certbot git cronie curl python python-pip procps sudo"
do_and_check_cmd pacman -S --noconfirm $ARCHLINUX_DEPS
elif [ "$OS" = "alpine" ] ; then
ALPINE_DEPS="certbot bash libgcc yajl libstdc++ openssl py3-pip git"
do_and_check_cmd apk add --no-cache $ALPINE_DEPS
@@ -743,6 +764,10 @@ fi
echo "[*] Copy bunkerized-nginx"
do_and_check_cmd cp /tmp/bunkerized-nginx/helpers/bunkerized-nginx /usr/local/bin
# Copy VERSION
echo "[*] Copy VERSION"
do_and_check_cmd cp /tmp/bunkerized-nginx/VERSION /opt/bunkerized-nginx
# Replace old nginx.service file
if [ "$OS" != "alpine" ] ; then
do_and_check_cmd mv /lib/systemd/system/nginx.service /lib/systemd/system/nginx.service.bak
@@ -800,7 +825,7 @@ fi
# Create acme-challenge folder
if [ ! -d "/opt/bunkerized-nginx/acme-challenge" ] ; then
echo "[*] Create /opt/bunkerized-nginx/acme-challenge folder"
do_and_check_cmd mkdir /opt/bunkerized-nginx/acme-challenge
do_and_check_cmd mkdir -p /opt/bunkerized-nginx/acme-challenge/.well-known/acme-challenge
fi
# Create plugins folder
@@ -815,11 +840,12 @@ do_and_check_cmd chown -R root:nginx /opt/bunkerized-nginx
do_and_check_cmd find /opt/bunkerized-nginx -type f -exec chmod 0740 {} \;
do_and_check_cmd find /opt/bunkerized-nginx -type d -exec chmod 0750 {} \;
do_and_check_cmd chmod 770 /opt/bunkerized-nginx/cache
do_and_check_cmd chmod 770 /opt/bunkerized-nginx/acme-challenge
do_and_check_cmd chmod -R 770 /opt/bunkerized-nginx/acme-challenge
do_and_check_cmd chmod 750 /opt/bunkerized-nginx/entrypoint/*
do_and_check_cmd chmod 750 /opt/bunkerized-nginx/gen/main.py
do_and_check_cmd chmod 750 /opt/bunkerized-nginx/jobs/main.py
do_and_check_cmd chmod 750 /opt/bunkerized-nginx/jobs/reload.py
do_and_check_cmd chmod 750 /opt/bunkerized-nginx/jobs/certbot-*.py
# Set permissions for /usr/local/bin/bunkerized-nginx
do_and_check_cmd chown root:root /usr/local/bin/bunkerized-nginx
do_and_check_cmd chmod 750 /usr/local/bin/bunkerized-nginx

View File

@@ -27,7 +27,7 @@ spec:
- name: KUBERNETES_MODE
value: "yes"
- name: DNS_RESOLVERS
value: "kube-dns.kube-system.svc.cluster.local"
value: "coredns.kube-system.svc.cluster.local"
- name: USE_API
value: "yes"
- name: API_URI
@@ -36,36 +36,6 @@ spec:
value: ""
- name: MULTISITE
value: "yes"
volumeMounts:
- name: confs
mountPath: /etc/nginx
readOnly: true
- name: letsencrypt
mountPath: /etc/letsencrypt
readOnly: true
- name: acme-challenge
mountPath: /acme-challenge
readOnly: true
- name: www
mountPath: /www
readOnly: true
volumes:
- name: confs
hostPath:
path: /shared/confs
type: Directory
- name: letsencrypt
hostPath:
path: /shared/letsencrypt
type: Directory
- name: acme-challenge
hostPath:
path: /shared/acme-challenge
type: Directory
- name: www
hostPath:
path: /shared/www
type: Directory
---
apiVersion: v1
kind: Service
@@ -82,6 +52,17 @@ spec:
selector:
name: bunkerized-nginx
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-nginx
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -99,6 +80,30 @@ spec:
app: bunkerized-nginx-autoconf
spec:
serviceAccountName: bunkerized-nginx-ingress-controller
volumes:
- name: vol-nginx
persistentVolumeClaim:
claimName: pvc-nginx
initContainers:
- name: change-data-dir-permissions
command:
- chown
- -R
- 101:101
- /etc/letsencrypt
- /cache
image: busybox
volumeMounts:
- name: vol-nginx
mountPath: /etc/letsencrypt
subPath: letsencrypt
- name: vol-nginx
mountPath: /cache
subPath: cache
securityContext:
runAsNonRoot: false
runAsUser: 0
runAsGroup: 0
containers:
- name: bunkerized-nginx-autoconf
image: bunkerity/bunkerized-nginx-autoconf
@@ -108,22 +113,9 @@ spec:
- name: API_URI
value: "/ChangeMeToSomethingHardToGuess"
volumeMounts:
- name: confs
mountPath: /etc/nginx
- name: letsencrypt
- name: vol-nginx
mountPath: /etc/letsencrypt
- name: acme-challenge
mountPath: /acme-challenge
volumes:
- name: confs
hostPath:
path: /shared/confs
type: Directory
- name: letsencrypt
hostPath:
path: /shared/letsencrypt
type: Directory
- name: acme-challenge
hostPath:
path: /shared/acme-challenge
type: Directory
subPath: letsencrypt
- name: vol-nginx
mountPath: /cache
subPath: cache

View File

@@ -13,11 +13,6 @@ services:
target: 8443
mode: host
protocol: tcp
volumes:
- /shared/confs:/etc/nginx:ro
- /shared/www:/www:ro
- /shared/letsencrypt:/etc/letsencrypt:ro
- /shared/acme-challenge:/acme-challenge:ro
environment:
- SWARM_MODE=yes
- USE_API=yes
@@ -41,9 +36,8 @@ services:
image: bunkerity/bunkerized-nginx-autoconf
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- /shared/confs:/etc/nginx
- /shared/letsencrypt:/etc/letsencrypt
- /shared/acme-challenge:/acme-challenge
- cache-vol:/cache
- certs-vol:/etc/letsencrypt
environment:
- SWARM_MODE=yes
- API_URI=/ChangeMeToSomethingHardToGuess # must match API_URI from nginx
@@ -65,3 +59,7 @@ networks:
driver: overlay
attachable: true
name: services-net
# And the volumes too
volumes:
cache-vol:
certs-vol:

View File

@@ -6,7 +6,7 @@ class CertbotNew(Job) :
def __init__(self, redis_host=None, copy_cache=False, domain="", email="", staging=False) :
name = "certbot-new"
data = ["certbot", "certonly", "--webroot", "-w", "/opt/bunkerized-nginx/acme-challenge", "-n", "-d", domain, "--email", email, "--agree-tos"]
data = ["certbot", "certonly", "--manual", "--preferred-challenges=http", "--manual-auth-hook", "/opt/bunkerized-nginx/jobs/certbot-auth.py", "--manual-cleanup-hook", "/opt/bunkerized-nginx/jobs/certbot-cleanup.py", "-n", "-d", domain, "--email", email, "--agree-tos"]
if staging :
data.append("--staging")
type = "exec"

View File

@@ -77,7 +77,7 @@ class JobManagement() :
class Job(abc.ABC) :
def __init__(self, name, data, filename=None, redis_host=None, redis_ex=86400, type="line", regex=r"^.+$", copy_cache=False) :
def __init__(self, name, data, filename=None, redis_host=None, redis_ex=86400, type="line", regex=r"^.+$", copy_cache=False, json_data=None, method="GET") :
self._name = name
self._data = data
self._filename = filename
@@ -92,11 +92,13 @@ class Job(abc.ABC) :
self._type = type
self._regex = regex
self._copy_cache = copy_cache
self._json_data = json_data
self._method = method
def run(self) :
ret = JobRet.KO
try :
if self._type == "line" or self._type == "file" :
if self._type in ["line", "file", "json"] :
if self._copy_cache :
ret = self.__from_cache()
if ret != JobRet.KO :
@@ -114,7 +116,11 @@ class Job(abc.ABC) :
if self._redis == None :
if os.path.isfile("/tmp/" + self._filename) :
os.remove("/tmp/" + self._filename)
file = open("/tmp/" + self._filename, "ab")
# mode = "a"
# if self._type == "file" :
# mode = "ab"
# file = open("/tmp/" + self._filename, mode)
file = open("/tmp/" + self._filename, "wb")
elif self._redis != None :
pipe = self._redis.pipeline()
@@ -123,28 +129,32 @@ class Job(abc.ABC) :
for url in self._data :
data = self.__download_data(url)
for chunk in data :
if self._type == "line" :
if not re.match(self._regex, chunk.decode("utf-8")) :
if isinstance(chunk, bytes) and self._type in ["line", "json"] :
chunk = chunk.decode("utf-8")
if self._type in ["line", "json"] :
if not re.match(self._regex, chunk) :
#log(self._name, "WARN", chunk + " doesn't match regex " + self._regex)
continue
chunks = self._edit(chunk)
if self._redis == None :
if self._type == "line" :
for chunk in chunks :
file.write(chunk + b"\n")
if self._type in ["line", "json"] :
chunks = self._edit(chunk)
for more_chunk in chunks :
file.write(more_chunk.encode("utf-8") + b"\n")
else :
file.write(chunk)
else :
if self._type == "line" :
for chunk in chunks :
pipe.set(self._name + "_" + chunk, "1", ex=self._redis_ex)
if self._type in ["line", "json"] :
chunks = self._edit(chunk)
for more_chunk in chunks :
pipe.set(self._name + "_" + more_chunk, "1", ex=self._redis_ex)
else :
pipe.set(self._name + "_" + chunk, "1", ex=self._redis_ex)
count += 1
if self._redis == None :
file.close()
if count > 0 :
shutil.copyfile("/tmp/" + self._filename, "/etc/nginx/" + self._filename)
#if count > 0 :
shutil.copyfile("/tmp/" + self._filename, "/etc/nginx/" + self._filename)
os.remove("/tmp/" + self._filename)
return JobRet.OK_RELOAD
@@ -155,11 +165,16 @@ class Job(abc.ABC) :
return JobRet.KO
def __download_data(self, url) :
r = requests.get(url, stream=True)
r = requests.request(self._method, url, stream=True, json=self._json_data)
if not r or r.status_code != 200 :
raise Exception("can't download data at " + url)
if self._type == "line" :
return r.iter_lines()
return r.iter_lines(decode_unicode=True)
if self._type == "json" :
try :
return self._json(r.json())
except :
raise Exception("can't decode json from " + url)
return r.iter_content(chunk_size=8192)
def __exec(self) :
@@ -177,6 +192,9 @@ class Job(abc.ABC) :
self._callback(True)
return JobRet.OK_RELOAD
def _json(self, data) :
return data
def _edit(self, chunk) :
return [chunk]
@@ -193,7 +211,7 @@ class Job(abc.ABC) :
return JobRet.OK_RELOAD
return JobRet.OK_NO_RELOAD
if self._redis != None and self._type == "line" :
if self._redis != None and self._type in ["line", "json"] :
with open("/opt/bunkerized-nginx/cache/" + self._filename) as f :
pipe = self._redis.pipeline()
while True :
@@ -210,7 +228,7 @@ class Job(abc.ABC) :
def __to_cache(self) :
if self._redis == None or self._type == "file" :
shutil.copyfile("/etc/nginx/" + self._filename, "/opt/bunkerized-nginx/cache/" + self._filename)
elif self._redis != None and self._type == "line" :
elif self._redis != None and self._type in ["line", "json"] :
if os.path.isfile("/opt/bunkerized-nginx/cache/" + self._filename) :
os.remove("/opt/bunkerized-nginx/cache/" + self._filename)
with open("/opt/bunkerized-nginx/cache/" + self._filename, "a") as f :

View File

@@ -12,4 +12,4 @@ class Referrers(Job) :
super().__init__(name, data, filename, redis_host=redis_host, redis_ex=redis_ex, type=type, regex=regex, copy_cache=copy_cache)
def _edit(self, chunk) :
return [chunk.replace(b".", b"%.").replace(b"-", b"%-")]
return [chunk.replace(".", "%.").replace("-", "%-")]

16
jobs/RemoteApiDatabase.py Normal file
View File

@@ -0,0 +1,16 @@
from Job import Job
class RemoteApiDatabase(Job) :
    """Job that downloads the distributed IP blacklist from the remote API /db endpoint.

    The result is stored as a "json"-type job payload (one IPv4 address per
    entry, validated by the regex) and refreshed with a 1 hour redis TTL.
    """
    def __init__(self, server="", version="", id="", redis_host=None, copy_cache=False) :
        # Payload identifying this machine to the remote API
        json_data = {"version": version, "id": id}
        super().__init__(
            "remote-api-database",
            [server + "/db"],
            "remote-api.db",
            type="json",
            redis_host=redis_host,
            redis_ex=3600,
            regex=r"^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$",
            copy_cache=copy_cache,
            json_data=json_data
        )
    def _json(self, data) :
        # The API wraps the IP list inside a "data" key
        return data["data"]

16
jobs/RemoteApiRegister.py Normal file
View File

@@ -0,0 +1,16 @@
from Job import Job
class RemoteApiRegister(Job) :
    """Job that registers this instance against the remote API /register endpoint.

    The API answers with a 256-hex-char machine id (validated by the regex),
    which is cached to machine.id for later authenticated calls.
    """
    def __init__(self, server="", version="") :
        super().__init__(
            "remote-api-register",
            [server + "/register"],
            "machine.id",
            type="json",
            regex=r"^[0-9a-f]{256}$",
            copy_cache=True,
            json_data={"version": version},
            method="POST"
        )
    def _json(self, data) :
        # Wrap the single id in a list so the generic job loop can iterate it
        return [data["data"]]

View File

@@ -12,4 +12,4 @@ class UserAgents(Job) :
super().__init__(name, data, filename, redis_host=redis_host, redis_ex=redis_ex, type=type, regex=regex, copy_cache=copy_cache)
def _edit(self, chunk) :
return [chunk.replace(b"\\ ", b" ").replace(b"\\.", b"%.").replace(b"\\\\", b"\\").replace(b"-", b"%-")]
return [chunk.replace("\\ ", " ").replace("\\.", "%.").replace("\\\\", "\\").replace("-", "%-")]

28
jobs/certbot-auth.py Normal file
View File

@@ -0,0 +1,28 @@
#!/usr/bin/python3
# certbot --manual-auth-hook : publish the HTTP-01 challenge file and, when an
# autoconf daemon is running, ask it to propagate the challenge.
# Exit codes : 0 = ok, 1 = missing env vars, 2 = can't write challenge file,
# 3 = autoconf notification failed.
import os, socket, sys, stat
# certbot exports the challenge value and token via these environment variables
VALIDATION = os.getenv("CERTBOT_VALIDATION", None)
TOKEN = os.getenv("CERTBOT_TOKEN", None)
if VALIDATION == None or TOKEN == None :
    sys.exit(1)
try :
    # Write the challenge where nginx serves /.well-known/acme-challenge/
    with open("/opt/bunkerized-nginx/acme-challenge/.well-known/acme-challenge/" + TOKEN, "w") as f :
        f.write(VALIDATION)
except :
    sys.exit(2)
try :
    # If the autoconf unix socket exists, tell it an ACME challenge is pending
    # NOTE(review): protocol appears to be send "acme", expect b"ok", then send
    # "close" — confirm against the autoconf daemon implementation.
    if os.path.exists("/tmp/autoconf.sock") and stat.S_ISSOCK(os.stat("/tmp/autoconf.sock").st_mode) :
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect("/tmp/autoconf.sock")
        sock.sendall(b"acme")
        data = sock.recv(512)
        if data != b"ok" :
            raise Exception("can't acme")
        sock.sendall(b"close")
except :
    sys.exit(3)
sys.exit(0)

14
jobs/certbot-cleanup.py Normal file
View File

@@ -0,0 +1,14 @@
#!/usr/bin/python3
# certbot --manual-cleanup-hook : remove the HTTP-01 challenge file that the
# auth hook created. Exit codes : 0 = ok, 1 = missing token, 2 = remove failed.
import os, sys

# certbot exports the challenge token via this environment variable
token = os.getenv("CERTBOT_TOKEN", None)
if token is None :
    sys.exit(1)
try :
    os.remove("/opt/bunkerized-nginx/acme-challenge/.well-known/acme-challenge/" + token)
except :
    # Best effort : file already gone or not removable
    sys.exit(2)
sys.exit(0)

View File

@@ -4,7 +4,7 @@ import argparse, sys, re
sys.path.append("/opt/bunkerized-nginx/jobs")
import Abusers, CertbotNew, CertbotRenew, ExitNodes, GeoIP, Proxies, Referrers, SelfSignedCert, UserAgents
import Abusers, CertbotNew, CertbotRenew, ExitNodes, GeoIP, Proxies, Referrers, SelfSignedCert, UserAgents, RemoteApiDatabase, RemoteApiRegister
from Job import JobRet, JobManagement, ReloadRet
from logger import log
@@ -17,8 +17,11 @@ JOBS = {
"geoip": GeoIP.GeoIP,
"proxies": Proxies.Proxies,
"referrers": Referrers.Referrers,
"remote-api-database": RemoteApiDatabase.RemoteApiDatabase,
"remote-api-register": RemoteApiRegister.RemoteApiRegister,
"self-signed-cert": SelfSignedCert.SelfSignedCert,
"user-agents": UserAgents.UserAgents
}
if __name__ == "__main__" :
@@ -36,6 +39,9 @@ if __name__ == "__main__" :
parser.add_argument("--dst_key", default="", type=str, help="key path for self-signed-cert job (e.g. : /etc/nginx/default-key.pem)")
parser.add_argument("--expiry", default="", type=str, help="number of validity days for self-signed-cert job (e.g. : 365)")
parser.add_argument("--subj", default="", type=str, help="certificate subject for self-signed-cert job (e.g. : OU=X/CN=Y...)")
parser.add_argument("--server", default="", type=str, help="address of the server for remote-api jobs")
parser.add_argument("--id", default="", type=str, help="machine id for remote-api jobs")
parser.add_argument("--version", default="", type=str, help="bunkerized-nginx version for remote-api jobs")
args = parser.parse_args()
# Check job name
@@ -68,6 +74,10 @@ if __name__ == "__main__" :
instance = JOBS[job](redis_host=redis_host, copy_cache=args.cache, domain=args.domain, email=args.email, staging=args.staging)
elif job == "self-signed-cert" :
instance = JOBS[job](redis_host=redis_host, copy_cache=args.cache, dst_cert=args.dst_cert, dst_key=args.dst_key, expiry=args.expiry, subj=args.subj)
elif job == "remote-api-database" :
instance = JOBS[job](server=args.server, version=args.version, id=args.id, redis_host=redis_host, copy_cache=args.cache)
elif job == "remote-api-register" :
instance = JOBS[job](server=args.server, version=args.version)
else :
instance = JOBS[job](redis_host=redis_host, copy_cache=args.cache)
ret = instance.run()

View File

@@ -1,6 +1,8 @@
local M = {}
local api_list = {}
local iputils = require "resty.iputils"
local upload = require "resty.upload"
local logger = require "logger"
api_list["^/ping$"] = function ()
return true
@@ -26,6 +28,90 @@ api_list["^/stop$"] = function ()
return os.execute("/usr/sbin/nginx -s quit") == 0
end
api_list["^/stop%-temp$"] = function ()
return os.execute("/usr/sbin/nginx -c /tmp/nginx-temp.conf -s stop") == 0
end
api_list["^/conf$"] = function ()
if not M.save_file("/tmp/conf.tar.gz") then
return false
end
return M.extract_file("/tmp/conf.tar.gz", "/etc/nginx/")
end
api_list["^/letsencrypt$"] = function ()
if not M.save_file("/tmp/letsencrypt.tar.gz") then
return false
end
return M.extract_file("/tmp/letsencrypt.tar.gz", "/etc/letsencrypt/")
end
api_list["^/acme$"] = function ()
if not M.save_file("/tmp/acme.tar.gz") then
return false
end
return M.extract_file("/tmp/acme.tar.gz", "/acme-challenge")
end
api_list["^/http$"] = function ()
if not M.save_file("/tmp/http.tar.gz") then
return false
end
return M.extract_file("/tmp/http.tar.gz", "/http-confs/")
end
api_list["^/server$"] = function ()
if not M.save_file("/tmp/server.tar.gz") then
return false
end
return M.extract_file("/tmp/server.tar.gz", "/server-confs/")
end
api_list["^/modsec$"] = function ()
if not M.save_file("/tmp/modsec.tar.gz") then
return false
end
return M.extract_file("/tmp/modsec.tar.gz", "/modsec-confs/")
end
api_list["^/modsec%-crs$"] = function ()
if not M.save_file("/tmp/modsec-crs.tar.gz") then
return false
end
return M.extract_file("/tmp/modsec-crs.tar.gz", "/modsec-crs-confs/")
end
-- Read a multipart/form-data upload from the current request and stream its
-- body chunks into the local file `name`. Returns true on success, false on
-- any upload or filesystem error (errors are logged).
function M.save_file (name)
	local form, err = upload:new(4096)
	if not form then
		logger.log(ngx.ERR, "API", err)
		return false
	end
	form:set_timeout(1000)
	local file = io.open(name, "w")
	if not file then
		-- io.open returns nil on failure : bail out instead of crashing on file:write
		logger.log(ngx.ERR, "API", "can't open " .. name)
		return false
	end
	while true do
		local typ, res, err = form:read()
		if not typ then
			-- read error : close the partial file and report failure
			file:close()
			logger.log(ngx.ERR, "API", "not typ")
			return false
		end
		if typ == "eof" then
			break
		end
		if typ == "body" then
			file:write(res)
		end
	end
	file:flush()
	file:close()
	return true
end
-- Extract the tar.gz `archive` into `destination` by shelling out to tar.
-- NOTE(review): both arguments are concatenated into a shell command line ;
-- today they are internal constants set by the API handlers, but quote or
-- validate them if they ever become caller-controlled.
function M.extract_file(archive, destination)
	return os.execute("tar xzf " .. archive .. " -C " .. destination) == 0
end
function M.is_api_call (api_uri, api_whitelist_ip)
local whitelist = iputils.parse_cidrs(api_whitelist_ip)
if iputils.ip_in_cidrs(ngx.var.remote_addr, whitelist) and ngx.var.request_uri:sub(1, #api_uri) .. "/" == api_uri .. "/" then

View File

@@ -16,17 +16,18 @@ function M.count (status_codes, threshold, count_time, ban_time)
local ok, err = ngx.shared.behavior_count:set(ngx.var.remote_addr, count, count_time)
if not ok then
logger.log(ngx.ERR, "BEHAVIOR", "not enough memory allocated to behavior_ip_count")
return
return false
end
if count >= threshold then
logger.log(ngx.WARN, "BEHAVIOR", "threshold reached for " .. ngx.var.remote_addr .. " (" .. count .. " / " .. threshold .. ") : IP is banned for " .. ban_time .. " seconds")
local ok, err = ngx.shared.behavior_ban:safe_set(ngx.var.remote_addr, true, ban_time)
if not ok then
logger.log(ngx.ERR, "BEHAVIOR", "not enough memory allocated to behavior_ip_ban")
return
return false
end
return true
end
break
return false
end
end
end

View File

@@ -4,19 +4,19 @@ local base64 = require "misc.base64"
function M.get_challenge ()
local cap = captcha.new()
cap:font("/usr/local/lib/lua/misc/Vera.ttf")
cap:font("/opt/bunkerized-nginx/lua/misc/Vera.ttf")
cap:generate()
return cap:jpegStr(70), cap:getStr()
end
function M.get_code (img, antibot_uri)
-- get template
local f = io.open("/antibot/captcha.html", "r")
local f = io.open("/opt/bunkerized-nginx/antibot/captcha.html", "r")
local template = f:read("*all")
f:close()
-- get captcha code
f = io.open("/antibot/captcha.data", "r")
f = io.open("/opt/bunkerized-nginx/antibot/captcha.data", "r")
local captcha_data = f:read("*all")
f:close()

View File

@@ -15,12 +15,12 @@ end
function M.get_code (challenge, antibot_uri, original_uri)
-- get template
local f = io.open("/antibot/javascript.html", "r")
local f = io.open("/opt/bunkerized-nginx/antibot/javascript.html", "r")
local template = f:read("*all")
f:close()
-- get JS code
f = io.open("/antibot/javascript.data", "r")
f = io.open("/opt/bunkerized-nginx/antibot/javascript.data", "r")
local javascript = f:read("*all")
f:close()
@@ -32,14 +32,11 @@ function M.get_code (challenge, antibot_uri, original_uri)
end
function M.check (challenge, user)
ngx.log(ngx.ERR, "debug challenge = " .. challenge)
ngx.log(ngx.ERR, "debug user = " .. user)
local resty_sha256 = require "resty.sha256"
local str = require "resty.string"
local sha256 = resty_sha256:new()
sha256:update(challenge .. user)
local digest = sha256:final()
ngx.log(ngx.ERR, "debug digest = " .. str.to_hex(digest))
return str.to_hex(digest):find("^0000") ~= nil
end

72
lua/limitreq.lua Normal file
View File

@@ -0,0 +1,72 @@
local M = {}
local logger = require "logger"
-- Schedule an asynchronous decrement of the shared-dict counter `key` after
-- `delay` seconds, so each request only counts toward one rate window.
-- Returns true when the timer was created, false otherwise.
function M.decr (key, delay)
	-- Runs inside an ngx.timer : `premature` is true when nginx is shutting
	-- down before the delay elapsed.
	local function callback (premature, key)
		if premature then
			-- NOTE(review): on premature shutdown the whole counter is dropped
			-- instead of decremented by one — presumably acceptable because the
			-- worker is exiting anyway ; confirm.
			ngx.shared.limit_req:delete(key)
			return
		end
		local value, flags = ngx.shared.limit_req:get(key)
		if value ~= nil then
			if value - 1 == 0 then
				-- Last pending hit : remove the key entirely
				ngx.shared.limit_req:delete(key)
				return
			end
			-- exptime 0 = no expiration ; lifetime is managed by these timers
			ngx.shared.limit_req:set(key, value-1, 0)
		end
	end
	local ok, err = ngx.timer.at(delay, callback, key)
	if not ok then
		logger.log(ngx.ERR, "REQ LIMIT", "can't setup decrement timer : " .. err)
		return false
	end
	return true
end
-- Atomically add one hit to the shared-dict counter for `key`, initializing
-- it at 0 when absent. Returns true on success, false on error (logged).
function M.incr (key)
	local updated, err = ngx.shared.limit_req:incr(key, 1, 0, 0)
	if updated then
		return true
	end
	logger.log(ngx.ERR, "REQ LIMIT", "can't increment counter : " .. err)
	return false
end
-- Check the per-IP/per-URI request counter against `rate` (e.g. "10r/s").
-- Returns true when the client must be blocked, false otherwise. Requests
-- between max and max+burst are allowed but delayed by `sleep` seconds.
function M.check (rate, burst, sleep)
	local key = ngx.var.remote_addr .. ngx.var.uri
	-- Split "<max>r/<unit>" on the 'r' and '/' separators
	local parts = {}
	for token in rate:gmatch("([^r/]+)") do
		parts[#parts + 1] = token
	end
	local max = tonumber(parts[1])
	local unit = parts[2]
	-- Window length in seconds, used as the decrement-timer delay
	local window_seconds = { s = 1, m = 60, h = 3600, d = 86400 }
	local delay = window_seconds[unit] or 0
	if M.incr(key) then
		local current, flags = ngx.shared.limit_req:get(key)
		if not M.decr(key, delay) then
			-- Timer setup failed : undo our increment so the counter stays sane
			ngx.shared.limit_req:set(key, current-1, 0)
		else
			if current > max + burst then
				logger.log(ngx.WARN, "REQ LIMIT", "ip " .. ngx.var.remote_addr .. " has reached the limit for uri " .. ngx.var.uri .. " : " .. current .. "r/" .. unit .. " (max = " .. rate .. ")")
				return true
			end
			if current > max and sleep > 0 then
				-- Over the base rate but within the burst : slow the client down
				ngx.sleep(sleep)
			end
		end
	end
	return false
end
return M

3314
lua/misc/root-ca.pem Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -4,15 +4,15 @@ local cjson = require "cjson"
function M.get_code (antibot_uri, recaptcha_sitekey)
-- get template
local f = io.open("/antibot/recaptcha.html", "r")
local f = io.open("/opt/bunkerized-nginx/antibot/recaptcha.html", "r")
local template = f:read("*all")
f:close()
-- get recaptcha code
f = io.open("/antibot/recaptcha-head.data", "r")
f = io.open("/opt/bunkerized-nginx/antibot/recaptcha-head.data", "r")
local recaptcha_head = f:read("*all")
f:close()
f = io.open("/antibot/recaptcha-body.data", "r")
f = io.open("/opt/bunkerized-nginx/antibot/recaptcha-body.data", "r")
local recaptcha_body = f:read("*all")
f:close()
@@ -27,7 +27,6 @@ end
function M.check (token, recaptcha_secret)
local httpc = http.new()
local res, err = httpc:request_uri("https://www.google.com/recaptcha/api/siteverify", {
ssl_verify = false,
method = "POST",
body = "secret=" .. recaptcha_secret .. "&response=" .. token .. "&remoteip=" .. ngx.var.remote_addr,
headers = { ["Content-Type"] = "application/x-www-form-urlencoded" }

104
lua/remoteapi.lua Normal file
View File

@@ -0,0 +1,104 @@
local M = {}
local http = require "resty.http"
local cjson = require "cjson"
local logger = require "logger"
-- Send a JSON request to the remote API endpoint `url`. Returns three
-- values : ok (boolean), the HTTP status (or nil when no response), and the
-- decoded "data" field of the JSON body (or nil). Non-200 statuses are
-- logged but still decoded so callers can react to them (e.g. 403 handling).
function M.send(method, url, data)
	local httpc, err = http.new()
	if not httpc then
		logger.log(ngx.ERR, "REMOTE API", "Can't instantiate HTTP object : " .. err)
		return false, nil, nil
	end
	local res, err = httpc:request_uri(ngx.shared.remote_api:get("server") .. url, {
		method = method,
		body = cjson.encode(data),
		headers = {
			["Content-Type"] = "application/json",
			["User-Agent"] = "bunkerized-nginx/" .. data["version"]
		}
	})
	if not res then
		logger.log(ngx.ERR, "REMOTE API", "Can't send HTTP request : " .. err)
		return false, nil, nil
	end
	if res.status ~= 200 then
		logger.log(ngx.WARN, "REMOTE API", "Received status " .. res.status .. " from API : " .. res.body)
	end
	-- cjson.decode() raises on invalid input : protect against non-JSON or
	-- empty bodies instead of crashing the handler
	local ok, decoded = pcall(cjson.decode, res.body)
	if not ok or type(decoded) ~= "table" then
		logger.log(ngx.ERR, "REMOTE API", "Can't decode JSON from API response")
		return false, res.status, nil
	end
	return true, res.status, decoded["data"]
end
-- Build the request payload for the remote API : the version (and, when
-- `use_id` is true, the machine id) from the shared dict, merged with the
-- caller-supplied fields of `data`.
function M.gen_data(use_id, data)
	local payload = {}
	payload["version"] = ngx.shared.remote_api:get("version")
	if use_id then
		payload["id"] = ngx.shared.remote_api:get("id")
	end
	for key, value in pairs(data) do
		payload[key] = value
	end
	return payload
end
-- Alternative ping using luasec (blocking ssl.https + ltn12) instead of
-- resty.http. NOTE(review): presumably needed in contexts where cosockets
-- are unavailable (e.g. init phase) — confirm.
function M.ping2()
	local https = require "ssl.https"
	local ltn12 = require "ltn12"
	local request_body = cjson.encode(M.gen_data(true, {}))
	local response_body = {}
	local res, code, headers, status = https.request {
		url = ngx.shared.remote_api:get("server") .. "/ping",
		method = "GET",
		headers = {
			["Content-Type"] = "application/json",
			["User-Agent"] = "bunkerized-nginx/" .. ngx.shared.remote_api:get("version"),
			["Content-Length"] = request_body:len()
		},
		source = ltn12.source.string(request_body),
		sink = ltn12.sink.table(response_body)
	}
	-- `status` is the full status line (e.g. "HTTP/1.1 200 OK") ; the pattern
	-- matches a literal " 200 " in it ("% " is an escaped space in Lua patterns)
	if res and status:match("^.*% 200% .*$") then
		response_body = cjson.decode(response_body[1])
		return response_body["data"] == "pong"
	end
	return false
end
-- Register this instance against the remote API. Returns (success, data).
function M.register()
	local ok, status, data = M.send("POST", "/register", M.gen_data(false, {}))
	return (ok and status == 200), data
end
-- Ping the remote API with our machine id. Returns (success, data).
function M.ping()
	local ok, status, data = M.send("GET", "/ping", M.gen_data(true, {}))
	return (ok and status == 200), data
end
-- Report a banned `ip` (with its ban `reason`) to the remote API.
-- Returns (success, data).
function M.ip(ip, reason)
	local payload = M.gen_data(true, {
		["ip"] = ip,
		["reason"] = reason
	})
	local ok, status, data = M.send("POST", "/ip", payload)
	return (ok and status == 200), data
end
-- Fetch the distributed blacklist from the remote API. Returns (success, data).
function M.db()
	local ok, status, data = M.send("GET", "/db", M.gen_data(true, {}))
	return (ok and status == 200), data
end
return M

View File

@@ -3,5 +3,6 @@
45 0 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_REFERRER yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name referrers >> /var/log/nginx/jobs.log 2>&1
0 1 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_ABUSERS yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name abusers >> /var/log/nginx/jobs.log 2>&1
0 2 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_PROXIES yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name proxies >> /var/log/nginx/jobs.log 2>&1
30 */1 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value USE_REMOTE_API yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name remote-api-database --server "$(grep '^REMOTE_API_SERVER=' /etc/nginx/global.env | cut -d '=' -f 2)" --version "$(cat /opt/bunkerized-nginx/VERSION)" --id "$(cat /opt/bunkerized-nginx/cache/machine.id)" >> /var/log/nginx/jobs.log 2>&1
0 */1 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_TOR_EXIT_NODE yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name exit-nodes >> /var/log/nginx/jobs.log 2>&1
0 3 2 * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ [ "$(has_value BLACKLIST_COUNTRY ".\+")" != "" ] || [ "$(has_value WHITELIST_COUNTRY ".\+")" != "" ] ] && /opt/bunkerized-nginx/jobs/main.py --reload --name geoip >> /var/log/nginx/jobs.log 2>&1

View File

@@ -3,5 +3,6 @@
45 0 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_REFERRER yes)" != "" ] && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name referrers" nginx >> /var/log/nginx/jobs.log 2>&1
0 1 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_ABUSERS yes)" != "" ] && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name abusers" nginx >> /var/log/nginx/jobs.log 2>&1
0 2 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_PROXIES yes)" != "" ] && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name proxies" nginx >> /var/log/nginx/jobs.log 2>&1
30 */1 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value USE_REMOTE_API yes)" != "" ] && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name remote-api-database --server $(grep '^REMOTE_API_SERVER=' /etc/nginx/global.env | cut -d '=' -f 2) --version $(cat /opt/bunkerized-nginx/VERSION) --id $(cat /opt/bunkerized-nginx/cache/machine.id)" nginx >> /var/log/nginx/jobs.log 2>&1
0 */1 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_TOR_EXIT_NODE yes)" != "" ] && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name exit-nodes" nginx >> /var/log/nginx/jobs.log 2>&1
0 3 2 * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ [ "$(has_value BLACKLIST_COUNTRY ".\+")" != "" ] || [ "$(has_value WHITELIST_COUNTRY ".\+")" != "" ] ] && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name geoip" nginx >> /var/log/nginx/jobs.log 2>&1

View File

@@ -3,5 +3,6 @@
# System crontab variant (/etc/cron.d style): sixth field is the run-as user (nginx).
# Daily 00:45: refresh the bad-referrers blacklist when BLOCK_REFERRER=yes.
45 0 * * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_REFERRER yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name referrers >> /var/log/nginx/jobs.log 2>&1
# Daily 01:00: refresh the abusers IP blacklist when BLOCK_ABUSERS=yes.
0 1 * * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_ABUSERS yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name abusers >> /var/log/nginx/jobs.log 2>&1
# Daily 02:00: refresh the open-proxies blacklist when BLOCK_PROXIES=yes.
0 2 * * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_PROXIES yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name proxies >> /var/log/nginx/jobs.log 2>&1
# Every hour at :30: sync the distributed ban database from the remote API when USE_REMOTE_API=yes.
30 */1 * * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value USE_REMOTE_API yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name remote-api-database --server "$(grep '^REMOTE_API_SERVER=' /etc/nginx/global.env | cut -d '=' -f 2)" --version "$(cat /opt/bunkerized-nginx/VERSION)" --id "$(cat /opt/bunkerized-nginx/cache/machine.id)" >> /var/log/nginx/jobs.log 2>&1
# Hourly: refresh the TOR exit-node blacklist when BLOCK_TOR_EXIT_NODE=yes.
0 */1 * * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_TOR_EXIT_NODE yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name exit-nodes >> /var/log/nginx/jobs.log 2>&1
# Monthly (2nd day, 03:00): refresh the GeoIP database when country black/whitelisting is in use.
# FIX: "[ [ cond ] || [ cond ] ]" is not valid sh — the outer brackets become stray arguments to
# test(1), so both tests always failed and the geoip job never ran; group with ( ... ) instead.
0 3 2 * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && ( [ "$(has_value BLACKLIST_COUNTRY ".\+")" != "" ] || [ "$(has_value WHITELIST_COUNTRY ".\+")" != "" ] ) && /opt/bunkerized-nginx/jobs/main.py --reload --name geoip >> /var/log/nginx/jobs.log 2>&1

View File

@@ -99,6 +99,56 @@
}
]
},
"Bad behavior": {
"id": "bad-behavior",
"params": [
{
"context": "multisite",
"default": "yes",
"env": "USE_BAD_BEHAVIOR",
"id": "use-bad-behavior",
"label": "Use bad behavior",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "multisite",
"default": "86400",
"env": "BAD_BEHAVIOR_BAN_TIME",
"id": "bad-behavior-ban-time",
"label": "Ban duration time",
"regex": "^[0-9]+$",
"type": "text"
},
{
"context": "multisite",
"default": "60",
"env": "BAD_BEHAVIOR_COUNT_TIME",
"id": "bad-behavior-count-time",
"label": "Count time",
"regex": "^[0-9]+$",
"type": "text"
},
{
"context": "multisite",
"default": "400 401 403 404 405 429 444",
"env": "BAD_BEHAVIOR_STATUS_CODES",
"id": "bad-behavior-status-codes",
"label": "Status codes",
"regex": "^([0-9]{3} ?)*$",
"type": "text"
},
{
"context": "multisite",
"default": "10",
"env": "BAD_BEHAVIOR_THRESHOLD",
"id": "bad-behavior-threshold",
"label": "Threshold",
"regex": "^[0-9]+$",
"type": "text"
}
]
},
"Basic auth": {
"id": "auth-basic",
"params": [
@@ -724,7 +774,7 @@
},
{
"context": "multisite",
"default": "accelerometer 'none'; ambient-light-sensor 'none'; autoplay 'none'; camera 'none'; display-capture 'none'; document-domain 'none'; encrypted-media 'none'; fullscreen 'none'; geolocation 'none'; gyroscope 'none'; magnetometer 'none'; microphone 'none'; midi 'none'; payment 'none'; picture-in-picture 'none'; speaker 'none'; sync-xhr 'none'; usb 'none'; vibrate 'none'; vr 'none'",
"default": "accelerometer 'none'; ambient-light-sensor 'none'; autoplay 'none'; battery 'none'; camera 'none'; display-capture 'none'; document-domain 'none'; encrypted-media 'none'; fullscreen 'none'; geolocation 'none'; gyroscope 'none'; magnetometer 'none'; microphone 'none'; midi 'none'; payment 'none'; picture-in-picture 'none'; publickey-credentials-get 'none'; sync-xhr 'none'; usb 'none'; wake-lock 'none'; web-share 'none'; xr-spatial-tracking 'none'",
"env": "FEATURE_POLICY",
"id": "feature-policy",
"label": "Feature policy",
@@ -733,7 +783,7 @@
},
{
"context": "multisite",
"default": "accelerometer=(), ambient-light-sensor=(), autoplay=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), fullscreen=(), geolocation=(), gyroscope=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), speaker=(), sync-xhr=(), usb=(), vibrate=(), vr=()",
"default": "accelerometer=(), ambient-light-sensor=(), autoplay=(), battery=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), fullscreen=(), geolocation=(), gyroscope=(), interest-cohort=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), publickey-credentials-get=(), screen-wake-lock=(), sync-xhr=(), usb=(), web-share=(), xr-spatial-tracking=()",
"env": "PERMISSIONS_POLICY",
"id": "permissions-policy",
"label": "Permissions policy",
@@ -779,7 +829,7 @@
{
"id": "custom-headers",
"label": "Custom headers",
"params" : [
"params": [
{
"context": "multisite",
"default": "",
@@ -827,6 +877,92 @@
}
]
},
"Internal": {
"id": "internal",
"params": [
{
"context": "global",
"default": "no",
"env": "USE_API",
"id": "use-api",
"label": "Enable API",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "192.168.0.0/16 172.16.0.0/12 10.0.0.0/8",
"env": "API_WHITELIST_IP",
"id": "api-whitelist-ip",
"label": "API whitelist IP",
					"regex": "^(\\d+\\.\\d+\\.\\d+\\.\\d+(/\\d+)? ?)*$",
"type": "text"
},
{
"context": "global",
"default": "random",
"env": "API_URI",
"id": "api-uri",
"label": "API URI",
"regex": "^(random|\\/[A-Za-z0-9\\-\\/]+)$",
"type": "text"
},
{
"context": "global",
"default": "no",
"env": "SWARM_MODE",
"id": "swarm-mode",
"label": "Swarm mode",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "no",
"env": "KUBERNETES_MODE",
"id": "kubernetes-mode",
"label": "Kubernetes mode",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "no",
"env": "USE_REDIS",
"id": "use-redis",
"label": "Use external redis when coupled with autoconf",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "",
"env": "REDIS_HOST",
"id": "redis-host",
"label": "Hostname/IP of the Redis service",
"regex": "^([A-Za-z0-9\\-\\.\\_]+|.{0})$",
"type": "text"
},
{
"context": "multisite",
"default": "yes",
"env": "USE_REMOTE_API",
"id": "use-remote-api",
"label": "Use a remote service for enhanced security",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "https://api.bunkerity.com/bunkerized",
"env": "REMOTE_API_SERVER",
"id": "remote-api-server",
"label": "The URL of the remote service",
"regex": "^.*$",
"type": "text"
}
]
},
"Limit conn": {
"id": "limit-conn",
"params": [
@@ -872,22 +1008,51 @@
"type": "checkbox"
},
{
"context": "multisite",
"default": "1r/s",
"env": "LIMIT_REQ_RATE",
"id": "limit-req-rate",
"label": "Limit req rate",
"regex": "^\\d+r/(ms|s|m|h|d)$",
"type": "text"
},
{
"context": "multisite",
"default": "2",
"env": "LIMIT_REQ_BURST",
"id": "limit-req-burst",
"label": "Limit req burst",
"regex": "^\\d+$",
"type": "text"
"id": "limit-req-params",
"label": "Limit request",
"params": [
{
"context": "multisite",
"default": "",
"env": "LIMIT_REQ_URL",
"id": "limit-req-url",
"label": "Limit req url",
"multiple": "Limit request",
"regex": "^.*$",
"type": "text"
},
{
"context": "multisite",
"default": "1r/s",
"env": "LIMIT_REQ_RATE",
"id": "limit-req-rate",
"label": "Limit req rate",
"multiple": "Limit request",
"regex": "^\\d+r/(s|m|h|d)$",
"type": "text"
},
{
"context": "multisite",
"default": "5",
"env": "LIMIT_REQ_BURST",
"id": "limit-req-burst",
"label": "Limit req burst",
"multiple": "Limit request",
"regex": "^\\d+$",
"type": "text"
},
{
"context": "multisite",
"default": "1",
"env": "LIMIT_REQ_DELAY",
"id": "limit-req-delay",
"label": "Limit req delay",
"multiple": "Limit request",
"regex": "^\\d+(\\.\\d+)?$",
"type": "text"
}
],
"type": "multiple"
},
{
"context": "global",
@@ -1158,120 +1323,70 @@
}
]
},
"Bad behavior": {
"id": "bad-behavior",
"Whitelist": {
"id": "whitelist",
"params": [
{
"context": "multisite",
"default": "yes",
"env": "USE_BAD_BEHAVIOR",
"id": "use-bad-behavior",
"label": "Use bad behavior",
"env": "USE_WHITELIST_IP",
"id": "use-whitelist-ip",
"label": "Use whitelist ip",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "multisite",
"default": "86400",
"env": "BAD_BEHAVIOR_BAN_TIME",
"id": "bad-behavior-ban-time",
"label": "Ban duration time",
"regex": "^[0-9]+$",
"type": "text"
},
{
"context": "multisite",
"default": "60",
"env": "BAD_BEHAVIOR_COUNT_TIME",
"id": "bad-behavior-count-time",
"label": "Count time",
"regex": "^[0-9]+$",
"type": "text"
},
{
"context": "multisite",
"default": "400 401 403 404 405 429 444",
"env": "BAD_BEHAVIOR_STATUS_CODES",
"id": "bad-behavior-status-codes",
"label": "Status codes",
"regex": "^([0-9]{3} ?)*$",
"type": "text"
},
{
"context": "multisite",
"default": "10",
"env": "BAD_BEHAVIOR_THRESHOLD",
"id": "bad-behavior-threshold",
"label": "Threshold",
"regex": "^[0-9]+$",
"type": "text"
}
]
},
"Internal": {
"id": "internal",
"params": [
{
"context": "global",
"default": "no",
"env": "USE_API",
"id": "use-api",
"label": "Enable API",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "192.168.0.0/16 172.16.0.0/12 10.0.0.0/8",
"env": "API_WHITELIST_IP",
"id": "api-whitelist-ip",
"label": "API whitelist IP",
"default": "23.21.227.69 40.88.21.235 50.16.241.113 50.16.241.114 50.16.241.117 50.16.247.234 52.204.97.54 52.5.190.19 54.197.234.188 54.208.100.253 54.208.102.37 107.21.1.8",
"env": "WHITELIST_IP_LIST",
"id": "whitelist-ip-list",
"label": "Whitelist ip list",
					"regex": "^(\\d+\\.\\d+\\.\\d+\\.\\d+(/\\d+)? ?)*$",
"type": "text"
},
{
"context": "global",
"default": "random",
"env": "API_URI",
"id": "api-uri",
"label": "API URI",
"regex": "^(random|\\/[A-Za-z0-9\\-\\/]+)$",
"context": "multisite",
"default": "yes",
"env": "USE_WHITELIST_REVERSE",
"id": "use-whitelist-reverse",
"label": "Use whitelist reverse",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "multisite",
"default": ".googlebot.com .google.com .search.msn.com .crawl.yahoo.net .crawl.baidu.jp .crawl.baidu.com .yandex.com .yandex.ru .yandex.net",
"env": "WHITELIST_REVERSE_LIST",
"id": "whitelist-reverse-list",
"label": "Whitelist reverse list",
"regex": "^([a-z\\-0-9\\.]+ ?)*$",
"type": "text"
},
{
"context": "global",
"default": "no",
"env": "SWARM_MODE",
"id": "swarm-mode",
"label": "Swarm mode",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "no",
"env": "KUBERNETES_MODE",
"id": "kubernetes-mode",
"label": "Kubernetes mode",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "no",
"env": "USE_REDIS",
"id": "use-redis",
"label": "Use external redis when coupled with autoconf",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"context": "multisite",
"default": "",
"env": "REDIS_HOST",
"id": "redis-host",
"label": "Hostname/IP of the Redis service",
"regex": "^([A-Za-z0-9\\-\\.\\_]+|.{0})$",
"env": "WHITELIST_COUNTRY",
"id": "whitelist-country",
"label": "Whitelist country",
"regex": "^([A-Z]{2} ?)*$",
"type": "text"
},
{
"context": "multisite",
"default": "",
"env": "WHITELIST_USER_AGENT",
"id": "whitelist-user-agent",
"label": "Whitelist user agent",
"regex": ".*",
"type": "text"
},
{
"context": "multisite",
"default": "",
"env": "WHITELIST_URI",
"id": "whitelist-uri",
"label": "Whitelist URI",
"regex": "^(\\S ?)*$",
"type": "text"
}
]
@@ -1345,6 +1460,7 @@
{
"context": "global",
"default": "8080",
"env": "HTTP_PORT",
"id": "http-port",
"label": "HTTP port",
@@ -1388,73 +1504,5 @@
"type": "text"
}
]
},
"Whitelist": {
"id": "whitelist",
"params": [
{
"context": "multisite",
"default": "yes",
"env": "USE_WHITELIST_IP",
"id": "use-whitelist-ip",
"label": "Use whitelist ip",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "multisite",
"default": "23.21.227.69 40.88.21.235 50.16.241.113 50.16.241.114 50.16.241.117 50.16.247.234 52.204.97.54 52.5.190.19 54.197.234.188 54.208.100.253 54.208.102.37 107.21.1.8",
"env": "WHITELIST_IP_LIST",
"id": "whitelist-ip-list",
"label": "Whitelist ip list",
					"regex": "^(\\d+\\.\\d+\\.\\d+\\.\\d+(/\\d+)? ?)*$",
"type": "text"
},
{
"context": "multisite",
"default": "yes",
"env": "USE_WHITELIST_REVERSE",
"id": "use-whitelist-reverse",
"label": "Use whitelist reverse",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "multisite",
"default": ".googlebot.com .google.com .search.msn.com .crawl.yahoo.net .crawl.baidu.jp .crawl.baidu.com .yandex.com .yandex.ru .yandex.net",
"env": "WHITELIST_REVERSE_LIST",
"id": "whitelist-reverse-list",
"label": "Whitelist reverse list",
"regex": "^([a-z\\-0-9\\.]+ ?)*$",
"type": "text"
},
{
"context": "multisite",
"default": "",
"env": "WHITELIST_COUNTRY",
"id": "whitelist-country",
"label": "Whitelist country",
"regex": "^([A-Z]{2} ?)*$",
"type": "text"
},
{
"context": "multisite",
"default": "",
"env": "WHITELIST_USER_AGENT",
"id": "whitelist-user-agent",
"label": "Whitelist user agent",
"regex": ".*",
"type": "text"
},
{
"context": "multisite",
"default": "",
"env": "WHITELIST_URI",
"id": "whitelist-uri",
"label": "Whitelist URI",
"regex": "^(\\S ?)*$",
"type": "text"
}
]
}
}

View File

@@ -0,0 +1,12 @@
FROM archlinux:base
# Strip every sysinit unit except tmpfiles setup (plus the other default target wants) so that
# systemd can boot as a minimal PID 1 inside a test container.
# FIX: guard the cd with && (never rm in the wrong directory if cd fails), quote $i, and use the
# portable "=" comparison instead of the bashism "==".
RUN (cd /lib/systemd/system/sysinit.target.wants/ && for i in *; do [ "$i" = systemd-tmpfiles-setup.service ] || rm -f "$i"; done); \
rm -f /lib/systemd/system/multi-user.target.wants/*;\
rm -f /etc/systemd/system/*.wants/*;\
rm -f /lib/systemd/system/local-fs.target.wants/*; \
rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
rm -f /lib/systemd/system/basic.target.wants/*;\
rm -f /lib/systemd/system/anaconda.target.wants/*;
# Upgrade all packages before testing (intentional for this CI image), and drop the pacman
# package cache in the same layer so it does not bloat the image.
RUN pacman -Syu --noconfirm && rm -rf /var/cache/pacman/pkg/*

View File

@@ -1,6 +1,6 @@
FROM debian:buster-slim
FROM debian:bullseye-slim
RUN apt update && apt install -y systemd
RUN apt update && apt install -y systemd init
RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
rm -f /lib/systemd/system/multi-user.target.wants/*;\

View File

@@ -9,15 +9,13 @@ COPY confs/site/ /opt/bunkerized-nginx/confs/site
# Bundle the global nginx config templates, the web UI sources, the settings schema and the
# version marker into the image. (NOTE(review): this is the tail of a diff hunk — the earlier
# COPY lines of this Dockerfile are not visible here.)
COPY confs/global/ /opt/bunkerized-nginx/confs/global
COPY ui/ /opt/bunkerized-nginx/ui
COPY settings.json /opt/bunkerized-nginx
COPY VERSION /opt/bunkerized-nginx
# Run the UI preparation script once at build time and remove it in the same layer so it does
# not linger in the final image.
COPY ui/prepare.sh /tmp
RUN chmod +x /tmp/prepare.sh && \
/tmp/prepare.sh && \
rm -f /tmp/prepare.sh
# Fix CVE-2021-36159
RUN apk add "apk-tools>=2.12.6-r0"
# Documentation only: the UI listens on port 5000 (publishing still requires -p/--publish).
EXPOSE 5000
WORKDIR /opt/bunkerized-nginx/ui