51 Commits

Author SHA1 Message Date
Florian Pitance
3a078326c5 Merge pull request #199 from Myzel394/patch-1
Fix typo
2021-10-24 18:25:37 +02:00
florian
d43b82b757 remote API - only do action if 403 2021-10-24 18:24:10 +02:00
florian
3850cacb9c prepare for v1.3.2 2021-10-23 16:56:10 +02:00
florian
c00c7f46a0 lua - verify certs when doing HTTPS requests 2021-10-23 10:10:24 +02:00
bunkerity
163af4a49d prepare for v1.3.2 2021-10-22 21:16:36 +02:00
bunkerity
98e85eb99f docs - update security tuning sections : distributed blacklist and request limit 2021-10-21 21:31:15 +02:00
bunkerity
2e63bb0256 docs - reflect kubernetes/swarm changes into the doc 2021-10-21 16:47:08 +02:00
bunkerity
6546a0edb7 disable country ban if IP is local, update default values of PERMISSIONS_POLICY and FEATURE_POLICY, upgrade archlinux packages before testing 2021-10-21 15:40:20 +02:00
bunkerity
ab00381746 ui - fix ROOT_FOLDER bug in serve-files.conf 2021-10-21 11:30:49 +02:00
bunkerity
9f7097de0d request limit - fix some LUA code 2021-10-19 17:21:30 +02:00
bunkerity
24d6337a57 limit req - multiple url support 2021-10-18 16:48:06 +02:00
bunkerity
bfb5319c16 limit req - add burst and delay parameters 2021-10-13 20:53:10 +02:00
bunkerity
4c77a14825 use annotations as env var in Ingress definition, fix cidr parsing for reserved ips, fix missing empty when job is external, fix ping check for remote api and init work hour/day support for request limit 2021-10-13 17:21:25 +02:00
bunkerity
4e45fa3874 integrations - acme without shared folder when using k8s/swarm 2021-10-12 16:58:13 +02:00
Myzel394
a9a26b82d9 fixed typo 2021-10-12 10:22:25 +00:00
bunkerity
00d91dcaaa jobs - move certbot hooks to python 2021-10-11 20:57:13 +02:00
bunkerity
650ad7ea49 integrations - fix missing acme folder when using Swarm or Kubernetes 2021-10-11 17:24:19 +02:00
bunkerity
7045c0c2b6 jobs - fix encoding error on CentOS 2021-10-08 17:10:01 +02:00
bunkerity
f0f432487b remote API - ban IP from distributed DB 2021-10-07 16:57:37 +02:00
bunkerity
fdc02be051 remote API - basic send of bad IPs 2021-10-07 12:00:20 +02:00
bunkerity
fb799765a4 jobs - fix str/bytes hell 2021-10-06 21:09:27 +02:00
bunkerity
d53f02b5b3 api - client side (untested) 2021-10-06 15:41:55 +02:00
bunkerity
7b9722fac4 jobs - add remote API 2021-10-06 12:13:13 +02:00
bunkerity
31ed4ff834 centos - update ca-certificates in install script 2021-10-05 16:06:35 +02:00
bunkerity
bc5f3ee88e fix CVEs and add init to Debian test image 2021-10-05 15:01:43 +02:00
bunkerity
a6b21aae8c fix typo in settings.json, bump Debian to bullseyes, init support of Arch Linux 2021-10-05 14:32:19 +02:00
bunkerity
64aa9c2530 init work remote API 2021-10-02 20:29:50 +02:00
bunkerity
5d94cc8f43 docs - init changes about storageless 2021-09-14 16:41:39 +02:00
bunkerity
e7ee21cbb5 antibot - fix path for templates and data 2021-09-14 11:30:33 +02:00
florian
a0f8cbdac1 antibot - fix LUA typo in recaptcha mode 2021-09-13 21:26:09 +02:00
Florian Pitance
178d7a6849 Merge pull request #182 from Nakinox/patch-2
Update docker-compose.yml
2021-09-13 21:20:26 +02:00
florian
ca81535bb3 swarm/k8s - less storage, more API 2021-09-05 00:36:15 +02:00
florian
062fa3e78a integration - continue work on storageless config for k8s and swarm 2021-09-03 22:40:37 +02:00
Nakinox
95f2d2af9c Update docker-compose.yml 2021-09-03 17:21:36 +02:00
bunkerity
e55dff8128 api - init work on storageless configuration 2021-09-03 12:04:30 +02:00
bunkerity
f0f1c79d40 v1.3.1 release 2021-09-02 17:18:57 +02:00
bunkerity
3d2f5e2389 conf - add REVERSE_PROXY_KEEPALIVE 2021-09-02 12:03:56 +02:00
bunkerity
b079c99fb9 Merge branch 'patch-15' of github.com:thelittlefireman/bunkerized-nginx into keepalive 2021-09-02 11:52:38 +02:00
bunkerity
2e403c6ebc config - add CUSTOM_HEADER 2021-09-02 10:34:58 +02:00
bunkerity
f75a05584e config - add REVERSE_PROXY_BUFFERING 2021-09-02 09:36:28 +02:00
florian
148edf6814 tests - add github token to trivy scanner 2021-08-30 22:34:19 +02:00
Florian Pitance
a19d8aa041 Merge pull request #180 from vepito/vepito-patch-1
Fix typo related to non-HTTP configuration
2021-08-30 21:17:34 +02:00
Florian Pitance
480cff86bc Merge pull request #179 from thelittlefireman/patch-16
Mismatch in docs with modsec folder
2021-08-30 21:15:15 +02:00
Florian Pitance
35df3423d0 missing blank line 2021-08-30 21:14:04 +02:00
Florian Pitance
29f4069de7 switch the use cases 2021-08-30 21:12:59 +02:00
vepito
72e4384596 Fix typo related to non-HTTP configuration
Fix typo that prevents non-HTTP configuration to be working when MULTISITE is used
2021-08-29 02:59:24 +02:00
bunkerity
a4a2647737 jobs - fix docker reload and only do cron jobs when necessary 2021-08-26 15:48:38 +02:00
thelittlefireman
892e533694 Missmatch in docs with modsec folder 2021-08-25 22:32:47 +02:00
bunkerity
a056141609 deps - use ModSecurity v3.0.4 instead of v3.0.5 to avoid memory leak 2021-08-25 17:12:38 +02:00
thelittlefireman
9de628f3eb Missing proxy_set_header for keep alive 2021-08-18 23:47:01 +02:00
thelittlefireman
6cc1abc893 Allow keep alive connection when ws is off
This help improves performance.
2021-08-18 23:42:18 +02:00
65 changed files with 4717 additions and 573 deletions

View File

@@ -20,6 +20,8 @@ jobs:
run: docker build -t centos-systemd -f tests/Dockerfile-centos . run: docker build -t centos-systemd -f tests/Dockerfile-centos .
- name: Build Fedora with systemd - name: Build Fedora with systemd
run: docker build -t fedora-systemd -f tests/Dockerfile-fedora . run: docker build -t fedora-systemd -f tests/Dockerfile-fedora .
- name: Build Arch Linux with systemd
run: docker build -t archlinux-systemd -f tests/Dockerfile-archlinux .
- name: Debian test - name: Debian test
run: ./tests/linux-run.sh debian-systemd test-debian run: ./tests/linux-run.sh debian-systemd test-debian
- name: Ubuntu test - name: Ubuntu test
@@ -28,3 +30,5 @@ jobs:
run: ./tests/linux-run.sh centos-systemd test-centos run: ./tests/linux-run.sh centos-systemd test-centos
- name: Fedora test - name: Fedora test
run: ./tests/linux-run.sh fedora-systemd test-fedora run: ./tests/linux-run.sh fedora-systemd test-fedora
- name: Arch Linux test
run: ./tests/linux-run.sh archlinux-systemd test-archlinux

View File

@@ -1,6 +1,28 @@
# Changelog # Changelog
## v1.3.0 ## v1.3.2 -
- Use API instead of a shared folder for Swarm and Kubernetes integrations
- Beta integration of distributed bad IPs database through a remote API
- Improvement of the request limiting feature : hour/day rate and multiple URL support
- Various bug fixes related to antibot feature
- Init support of Arch Linux
- Fix Moodle example
- Fix ROOT_FOLDER bug in serve-files.conf when using the UI
- Update default values for PERMISSIONS_POLICY and FEATURE_POLICY
- Disable COUNTRY ban if IP is local
## v1.3.1 - 2021/09/02
- Use ModSecurity v3.0.4 instead of v3.0.5 to fix memory leak
- Fix ignored variables to control jobs
- Fix bug when LISTEN_HTTP=no and MULTISITE=yes
- Add CUSTOM_HEADER variable
- Add REVERSE_PROXY_BUFFERING variable
- Add REVERSE_PROXY_KEEPALIVE variable
- Fix documentation for modsec and modsec-crs special folders
## v1.3.0 - 2021/08/23
- Kubernetes integration in beta - Kubernetes integration in beta
- Linux integration in beta - Linux integration in beta

View File

@@ -12,8 +12,8 @@ RUN chmod +x /tmp/docker.sh && \
/tmp/docker.sh && \ /tmp/docker.sh && \
rm -f /tmp/docker.sh rm -f /tmp/docker.sh
# Fix CVE-2021-22901, CVE-2021-22898, CVE-2021-22897, CVE-2021-33560 and CVE-2021-36159 # Fix CVE-2021-22945, CVE-2021-22946, CVE-2021-22947 and CVE-2021-40528
RUN apk add "curl>=7.77.0-r0" "libgcrypt>=1.8.8-r0" "apk-tools>=2.12.6-r0" RUN apk add "curl>=7.79.0-r0" "libgcrypt>=1.8.8-r1"
VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache /pre-server-confs /acme-challenge /plugins VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache /pre-server-confs /acme-challenge /plugins

View File

@@ -3,7 +3,7 @@
</p> </p>
<p align="center"> <p align="center">
<img src="https://img.shields.io/badge/bunkerized--nginx-1.3.0-blue" /> <img src="https://img.shields.io/badge/bunkerized--nginx-1.3.2-blue" />
<img src="https://img.shields.io/badge/nginx-1.20.1-blue" /> <img src="https://img.shields.io/badge/nginx-1.20.1-blue" />
<img src="https://img.shields.io/github/last-commit/bunkerity/bunkerized-nginx" /> <img src="https://img.shields.io/github/last-commit/bunkerity/bunkerized-nginx" />
<img src="https://img.shields.io/github/workflow/status/bunkerity/bunkerized-nginx/Automatic%20test?label=automatic%20test" /> <img src="https://img.shields.io/github/workflow/status/bunkerity/bunkerized-nginx/Automatic%20test?label=automatic%20test" />
@@ -38,8 +38,8 @@ Non-exhaustive list of features :
- Automatic ban of strange behaviors - Automatic ban of strange behaviors
- Antibot challenge through cookie, javascript, captcha or recaptcha v3 - Antibot challenge through cookie, javascript, captcha or recaptcha v3
- Block TOR, proxies, bad user-agents, countries, ... - Block TOR, proxies, bad user-agents, countries, ...
- Block known bad IP with DNSBL - Block known bad IP with DNSBL and distributed blacklist
- Prevent bruteforce attacks with rate limiting - Prevent bruteforce attacks and protect API resources with rate limiting
- Plugins system for external security checks (ClamAV, CrowdSec, ...) - Plugins system for external security checks (ClamAV, CrowdSec, ...)
- Easy to configure with environment variables or web UI - Easy to configure with environment variables or web UI
- Seamless integration into existing environments : Linux, Docker, Swarm, Kubernetes, ... - Seamless integration into existing environments : Linux, Docker, Swarm, Kubernetes, ...
@@ -105,7 +105,7 @@ You will find more information about Docker autoconf feature in the [documentati
## Swarm ## Swarm
Using bunkerized-nginx in a Docker Swarm cluster requires a shared folder accessible from both managers and workers (anything like NFS, GlusterFS, CephFS or even SSHFS will work). The deployment and configuration is very similar to the "Docker autoconf" one but with services instead of containers. A service based on the bunkerized-nginx-autoconf image needs to be scheduled on a manager node (don't worry it doesn't expose any network port for obvious security reasons). This service will listen for Docker Swarm events like service creation or deletion and generate the configuration according to the labels of each service. Once configuration generation is done, the bunkerized-nginx-autoconf service will send a reload order to all the bunkerized-nginx tasks so they can load the new configuration. The deployment and configuration is very similar to the "Docker autoconf" one but with services instead of containers. A service based on the bunkerized-nginx-autoconf image needs to be scheduled on a manager node (don't worry it doesn't expose any network port for obvious security reasons). This service will listen for Docker Swarm events like service creation or deletion and generate the configuration according to the labels of each service. Once configuration generation is done, the bunkerized-nginx-autoconf service will send the configuration files and a reload order to all the bunkerized-nginx tasks so they can apply the new configuration. If you need to deliver static files (e.g., html, images, css, js, ...) a shared folder accessible from all bunkerized-nginx instances is needed (you can use a storage system like NFS, GlusterFS, CephFS on the host or a [Docker volume plugin](https://docs.docker.com/engine/extend/)).
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/swarm.png?raw=true" /> <img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/swarm.png?raw=true" />
@@ -115,7 +115,7 @@ You will find more information about Docker Swarm integration in the [documentat
**This integration is still in beta, please fill an issue if you find a bug or have an idea on how to improve it.** **This integration is still in beta, please fill an issue if you find a bug or have an idea on how to improve it.**
Using bunkerized-nginx in a Kubernetes cluster requires a shared folder accessible from the nodes (anything like NFS, GlusterFS, CephFS or even SSHFS will work). The bunkerized-nginx-autoconf acts as an Ingress Controller and connects to the k8s API to get cluster events and generate a new configuration when it's needed. Once the configuration is generated, the Ingress Controller sends a reload order to the bunkerized-nginx instances running in the cluster. The bunkerized-nginx-autoconf acts as an Ingress Controller and connects to the k8s API to get cluster events and generate a new configuration when it's needed. Once the configuration is generated, the Ingress Controller sends the configuration files and a reload order to the bunkerized-nginx instances running in the cluster. If you need to deliver static files (e.g., html, images, css, js, ...) a shared folder accessible from all bunkerized-nginx instances is needed (you can use a storage system like NFS, GlusterFS, CephFS on the host or a [Kubernetes Volume that supports ReadOnlyMany access](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)).
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/kubernetes.png?raw=true" /> <img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/kubernetes.png?raw=true" />
@@ -130,6 +130,7 @@ List of supported Linux distributions :
- Ubuntu focal (20.04) - Ubuntu focal (20.04)
- CentOS 7 - CentOS 7
- Fedora 34 - Fedora 34
- Arch Linux
Unlike containers, Linux integration can be tedious because bunkerized-nginx has a bunch of dependencies that need to be installed before we can use it. Fortunately, we provide a [helper script](https://github.com/bunkerity/bunkerized-nginx/blob/master/helpers/install.sh) to make the process easier and automatic. Once installed, the configuration is really simple, all you have to do is to edit the `/opt/bunkerized-nginx/variables.env` configuration file and run the `bunkerized-nginx` command to apply it. Unlike containers, Linux integration can be tedious because bunkerized-nginx has a bunch of dependencies that need to be installed before we can use it. Fortunately, we provide a [helper script](https://github.com/bunkerity/bunkerized-nginx/blob/master/helpers/install.sh) to make the process easier and automatic. Once installed, the configuration is really simple, all you have to do is to edit the `/opt/bunkerized-nginx/variables.env` configuration file and run the `bunkerized-nginx` command to apply it.

View File

@@ -1 +1 @@
1.3.0 1.3.2

View File

@@ -10,8 +10,9 @@ COPY misc/cron-autoconf /etc/crontabs/root
COPY autoconf/entrypoint.sh /opt/bunkerized-nginx/entrypoint/ COPY autoconf/entrypoint.sh /opt/bunkerized-nginx/entrypoint/
COPY autoconf/requirements.txt /opt/bunkerized-nginx/entrypoint/ COPY autoconf/requirements.txt /opt/bunkerized-nginx/entrypoint/
COPY autoconf/src/* /opt/bunkerized-nginx/entrypoint/ COPY autoconf/src/* /opt/bunkerized-nginx/entrypoint/
COPY VERSION /opt/bunkerized-nginx
RUN apk add --no-cache py3-pip bash certbot curl openssl && \ RUN apk add --no-cache py3-pip bash certbot curl openssl socat && \
pip3 install -r /opt/bunkerized-nginx/gen/requirements.txt && \ pip3 install -r /opt/bunkerized-nginx/gen/requirements.txt && \
pip3 install -r /opt/bunkerized-nginx/entrypoint/requirements.txt && \ pip3 install -r /opt/bunkerized-nginx/entrypoint/requirements.txt && \
pip3 install -r /opt/bunkerized-nginx/jobs/requirements.txt pip3 install -r /opt/bunkerized-nginx/jobs/requirements.txt
@@ -21,7 +22,6 @@ RUN chmod +x /tmp/prepare.sh && \
/tmp/prepare.sh && \ /tmp/prepare.sh && \
rm -f /tmp/prepare.sh rm -f /tmp/prepare.sh
# Fix CVE-2021-36159 #VOLUME /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache /etc/letsencrypt /acme-challenge
RUN apk add "apk-tools>=2.12.6-r0"
ENTRYPOINT ["/opt/bunkerized-nginx/entrypoint/entrypoint.sh"] ENTRYPOINT ["/opt/bunkerized-nginx/entrypoint/entrypoint.sh"]

View File

@@ -16,6 +16,7 @@ chmod ugo+x /opt/bunkerized-nginx/entrypoint/* /opt/bunkerized-nginx/scripts/*
chmod ugo+x /opt/bunkerized-nginx/gen/main.py chmod ugo+x /opt/bunkerized-nginx/gen/main.py
chmod ugo+x /opt/bunkerized-nginx/jobs/main.py chmod ugo+x /opt/bunkerized-nginx/jobs/main.py
chmod ugo+x /opt/bunkerized-nginx/jobs/reload.py chmod ugo+x /opt/bunkerized-nginx/jobs/reload.py
chmod ugo+x /opt/bunkerized-nginx/jobs/certbot-*.py
chmod 770 /opt/bunkerized-nginx chmod 770 /opt/bunkerized-nginx
chmod 440 /opt/bunkerized-nginx/settings.json chmod 440 /opt/bunkerized-nginx/settings.json
@@ -28,6 +29,11 @@ mkdir /var/log/letsencrypt
chown nginx:nginx /var/log/letsencrypt chown nginx:nginx /var/log/letsencrypt
chmod 770 /var/log/letsencrypt chmod 770 /var/log/letsencrypt
# prepare /etc/nginx
mkdir /etc/nginx
chown root:nginx /etc/nginx
chmod 770 /etc/nginx
# prepare /etc/letsencrypt # prepare /etc/letsencrypt
mkdir /etc/letsencrypt mkdir /etc/letsencrypt
chown root:nginx /etc/letsencrypt chown root:nginx /etc/letsencrypt
@@ -46,9 +52,21 @@ chmod 770 /cache
# prepare /acme-challenge # prepare /acme-challenge
ln -s /acme-challenge /opt/bunkerized-nginx/acme-challenge ln -s /acme-challenge /opt/bunkerized-nginx/acme-challenge
mkdir /acme-challenge mkdir -p /acme-challenge/.well-known/acme-challenge
chown root:nginx /acme-challenge chown -R root:nginx /acme-challenge
chmod 770 /acme-challenge chmod -R 770 /acme-challenge
# prepare /http-confs
ln -s /http-confs /opt/bunkerized-nginx/http-confs
mkdir /http-confs
chown root:nginx /http-confs
chmod 770 /http-confs
# prepare /server-confs
ln -s /server-confs /opt/bunkerized-nginx/server-confs
mkdir /server-confs
chown root:nginx /server-confs
chmod 770 /server-confs
# prepare /modsec-confs # prepare /modsec-confs
ln -s /modsec-confs /opt/bunkerized-nginx/modsec-confs ln -s /modsec-confs /opt/bunkerized-nginx/modsec-confs

View File

@@ -1,11 +1,21 @@
#!/usr/bin/python3 #!/usr/bin/python3
import subprocess, shutil, os, traceback, requests, time, dns.resolver import subprocess, shutil, os, traceback, requests, time, dns.resolver, io, tarfile
import Controller import Controller
from logger import log from logger import log
CONFIGS = {
"conf": "/etc/nginx",
"letsencrypt": "/etc/letsencrypt",
"http": "/http-confs",
"server": "/server-confs",
"modsec": "/modsec-confs",
"modsec-crs": "/modsec-crs-confs",
"acme": "/acme-challenge"
}
class Config : class Config :
def __init__(self, type, api_uri, http_port="8080") : def __init__(self, type, api_uri, http_port="8080") :
@@ -19,9 +29,9 @@ class Config :
stdout = proc.stdout.decode("ascii") stdout = proc.stdout.decode("ascii")
stderr = proc.stderr.decode("ascii") stderr = proc.stderr.decode("ascii")
if len(stdout) > 1 : if len(stdout) > 1 :
log("config", "INFO", "jobs stdout : " + stdout) log("config", "INFO", "jobs stdout :\n" + stdout)
if stderr != "" : if stderr != "" :
log("config", "ERROR", "jobs stderr : " + stderr) log("config", "ERROR", "jobs stderr :\n" + stderr)
if proc.returncode != 0 : if proc.returncode != 0 :
log("config", "ERROR", "jobs error (return code = " + str(proc.returncode) + ")") log("config", "ERROR", "jobs error (return code = " + str(proc.returncode) + ")")
return False return False
@@ -64,12 +74,35 @@ class Config :
instance.kill("SIGHUP") instance.kill("SIGHUP")
except : except :
ret = False ret = False
elif self.__type == Controller.Type.SWARM : elif self.__type == Controller.Type.SWARM or self.__type == Controller.Type.KUBERNETES :
ret = self.__api_call(instances, "/reload")
elif self.__type == Controller.Type.KUBERNETES :
ret = self.__api_call(instances, "/reload") ret = self.__api_call(instances, "/reload")
return ret return ret
def send(self, instances, files="all") :
ret = True
fail = False
for name, path in CONFIGS.items() :
if files != "all" and name != files :
continue
file = self.__tarball(path)
if not self.__api_call(instances, "/" + name, file=file) :
log("config", "ERROR", "can't send config " + name + " to instance(s)")
fail = True
file.close()
if fail :
ret = False
return ret
def stop_temp(self, instances) :
return self.__api_call(instances, "/stop-temp")
def __tarball(self, path) :
file = io.BytesIO()
with tarfile.open(mode="w:gz", fileobj=file) as tar :
tar.add(path, arcname=".")
file.seek(0, 0)
return file
def __ping(self, instances) : def __ping(self, instances) :
return self.__api_call(instances, "/ping") return self.__api_call(instances, "/ping")
@@ -120,7 +153,7 @@ class Config :
log("config", "ERROR", "exception while waiting for bunkerized-nginx instances : " + traceback.format_exc()) log("config", "ERROR", "exception while waiting for bunkerized-nginx instances : " + traceback.format_exc())
return False return False
def __api_call(self, instances, path) : def __api_call(self, instances, path, file=None) :
ret = True ret = True
nb = 0 nb = 0
urls = [] urls = []
@@ -146,7 +179,11 @@ class Config :
for url in urls : for url in urls :
req = None req = None
try : try :
if file == None :
req = requests.post(url) req = requests.post(url)
else :
file.seek(0, 0)
req = requests.post(url, files={'file': file})
except : except :
pass pass
if req and req.status_code == 200 and req.text == "ok" : if req and req.status_code == 200 and req.text == "ok" :

View File

@@ -1,3 +1,4 @@
import traceback
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from enum import Enum from enum import Enum
@@ -51,3 +52,17 @@ class Controller(ABC) :
except : except :
ret = False ret = False
return ret return ret
def _send(self, instances, files="all") :
try :
ret = self._config.send(instances, files=files)
except Exception as e :
ret = False
return ret
def _stop_temp(self, instances) :
try :
ret = self._config.stop_temp(instances)
except Exception as e :
ret = False
return ret

View File

@@ -41,6 +41,8 @@ class IngressController(Controller.Controller) :
def __annotations_to_env(self, annotations) : def __annotations_to_env(self, annotations) :
env = {} env = {}
prefix = ""
if "bunkerized-nginx.SERVER_NAME" in annotations :
prefix = annotations["bunkerized-nginx.SERVER_NAME"].split(" ")[0] + "_" prefix = annotations["bunkerized-nginx.SERVER_NAME"].split(" ")[0] + "_"
for annotation in annotations : for annotation in annotations :
if annotation.startswith("bunkerized-nginx.") and annotation.replace("bunkerized-nginx.", "", 1) != "" and annotation.replace("bunkerized-nginx.", "", 1) != "AUTOCONF" : if annotation.startswith("bunkerized-nginx.") and annotation.replace("bunkerized-nginx.", "", 1) != "" and annotation.replace("bunkerized-nginx.", "", 1) != "AUTOCONF" :
@@ -50,18 +52,26 @@ class IngressController(Controller.Controller) :
def __rules_to_env(self, rules, namespace="default") : def __rules_to_env(self, rules, namespace="default") :
env = {} env = {}
first_servers = [] first_servers = []
numbers = {}
for rule in rules : for rule in rules :
rule = rule.to_dict() rule = rule.to_dict()
prefix = "" prefix = ""
number = 1
if "host" in rule : if "host" in rule :
prefix = rule["host"] + "_" prefix = rule["host"] + "_"
first_servers.append(rule["host"]) first_servers.append(rule["host"])
if not rule["host"] in numbers :
numbers[rule["host"]] = 1
number = numbers[rule["host"]]
if not "http" in rule or not "paths" in rule["http"] : if not "http" in rule or not "paths" in rule["http"] :
continue continue
for path in rule["http"]["paths"] :
env[prefix + "USE_REVERSE_PROXY"] = "yes" env[prefix + "USE_REVERSE_PROXY"] = "yes"
env[prefix + "REVERSE_PROXY_URL"] = path["path"] for path in rule["http"]["paths"] :
env[prefix + "REVERSE_PROXY_HOST"] = "http://" + path["backend"]["service_name"] + "." + namespace + ".svc.cluster.local:" + str(path["backend"]["service_port"]) suffix = "_" + str(number)
env[prefix + "REVERSE_PROXY_URL" + suffix] = path["path"]
env[prefix + "REVERSE_PROXY_HOST" + suffix] = "http://" + path["backend"]["service_name"] + "." + namespace + ".svc.cluster.local:" + str(path["backend"]["service_port"])
number += 1
numbers[rule["host"]] = number
env["SERVER_NAME"] = " ".join(first_servers) env["SERVER_NAME"] = " ".join(first_servers)
return env return env
@@ -77,6 +87,8 @@ class IngressController(Controller.Controller) :
first_servers.extend(env["SERVER_NAME"].split(" ")) first_servers.extend(env["SERVER_NAME"].split(" "))
for ingress in ingresses : for ingress in ingresses :
env.update(self.__rules_to_env(ingress.spec.rules, namespace=ingress.metadata.namespace)) env.update(self.__rules_to_env(ingress.spec.rules, namespace=ingress.metadata.namespace))
if ingress.metadata.annotations != None :
env.update(self.__annotations_to_env(ingress.metadata.annotations))
if ingress.spec.tls : if ingress.spec.tls :
for tls_entry in ingress.spec.tls : for tls_entry in ingress.spec.tls :
for host in tls_entry.hosts : for host in tls_entry.hosts :
@@ -96,9 +108,9 @@ class IngressController(Controller.Controller) :
def process_events(self, current_env) : def process_events(self, current_env) :
self.__old_env = current_env self.__old_env = current_env
t_pod = Thread(target=self.__watch_pod) t_pod = Thread(target=self.__watch, args=("pod",))
t_ingress = Thread(target=self.__watch_ingress) t_ingress = Thread(target=self.__watch, args=("ingress",))
t_service = Thread(target=self.__watch_service) t_service = Thread(target=self.__watch, args=("service",))
t_pod.start() t_pod.start()
t_ingress.start() t_ingress.start()
t_service.start() t_service.start()
@@ -106,63 +118,41 @@ class IngressController(Controller.Controller) :
t_ingress.join() t_ingress.join()
t_service.join() t_service.join()
def __watch_pod(self) : def __watch(self, type) :
w = watch.Watch() w = watch.Watch()
for event in w.stream(self.__api.list_pod_for_all_namespaces, label_selector="bunkerized-nginx") : what = None
if type == "pod" :
what = self.__api.list_pod_for_all_namespaces
elif type == "ingress" :
what = self.__extensions_api.list_ingress_for_all_namespaces
elif type == "service" :
what = self.__api.list_service_for_all_namespaces
for event in w.stream(what, label_selector="bunkerized-nginx") :
self.lock.acquire() self.lock.acquire()
new_env = self.get_env() new_env = self.get_env()
if new_env != self.__old_env : if new_env != self.__old_env :
try : try :
if self.gen_conf(new_env) : if not self.gen_conf(new_env) :
raise Exception("can't generate configuration")
if not self.send() :
raise Exception("can't send configuration")
if not self.reload() :
raise Exception("can't reload configuration")
self.__old_env = new_env.copy() self.__old_env = new_env.copy()
log("CONTROLLER", "INFO", "successfully generated new configuration") log("CONTROLLER", "INFO", "successfully loaded new configuration")
if self.reload() : except Exception as e :
log("controller", "INFO", "successful reload") log("controller", "ERROR", "error while computing new event : " + str(e))
else :
log("controller", "ERROR", "failed reload")
except :
log("controller", "ERROR", "exception while receiving event")
self.lock.release()
def __watch_ingress(self) :
w = watch.Watch()
for event in w.stream(self.__extensions_api.list_ingress_for_all_namespaces, label_selector="bunkerized-nginx") :
self.lock.acquire()
new_env = self.get_env()
if new_env != self.__old_env :
try :
if self.gen_conf(new_env) :
self.__old_env = new_env.copy()
log("CONTROLLER", "INFO", "successfully generated new configuration")
if self.reload() :
log("controller", "INFO", "successful reload")
else :
log("controller", "ERROR", "failed reload")
except :
log("controller", "ERROR", "exception while receiving event")
self.lock.release()
def __watch_service(self) :
w = watch.Watch()
for event in w.stream(self.__api.list_service_for_all_namespaces, label_selector="bunkerized-nginx") :
self.lock.acquire()
new_env = self.get_env()
if new_env != self.__old_env :
try :
if self.gen_conf(new_env) :
self.__old_env = new_env.copy()
log("CONTROLLER", "INFO", "successfully generated new configuration")
if self.reload() :
log("controller", "INFO", "successful reload")
else :
log("controller", "ERROR", "failed reload")
except :
log("controller", "ERROR", "exception while receiving event")
self.lock.release() self.lock.release()
def reload(self) : def reload(self) :
return self._reload(self.__get_services(autoconf=True)) return self._reload(self.__get_services(autoconf=True))
def send(self, files="all") :
return self._send(self.__get_services(autoconf=True), files=files)
def stop_temp(self) :
return self._stop_temp(self.__get_services(autoconf=True))
def wait(self) : def wait(self) :
self.lock.acquire() self.lock.acquire()
try : try :
@@ -171,20 +161,28 @@ class IngressController(Controller.Controller) :
while len(pods) == 0 : while len(pods) == 0 :
time.sleep(1) time.sleep(1)
pods = self.__get_pods() pods = self.__get_pods()
# Wait for at least one bunkerized-nginx service # Wait for at least one bunkerized-nginx service
services = self.__get_services(autoconf=True) services = self.__get_services(autoconf=True)
while len(services) == 0 : while len(services) == 0 :
time.sleep(1) time.sleep(1)
services = self.__get_services(autoconf=True) services = self.__get_services(autoconf=True)
# Generate first config # Generate first config
env = self.get_env() env = self.get_env()
if not self.gen_conf(env) : if not self.gen_conf(env) :
self.lock.release() self.lock.release()
return False, env return False, env
# Send the config
if not self.send() :
self.lock.release()
return False, env
# Stop the temporary server
if not self.stop_temp() :
self.lock.release()
return False, env
# Wait for bunkerized-nginx # Wait for bunkerized-nginx
if not self._config.wait(instances) :
self.lock.release()
return False, env
self.lock.release() self.lock.release()
return self._config.wait(services), env return self._config.wait(services), env
except : except :

View File

@@ -2,7 +2,7 @@ import socketserver, threading, os, stat
from logger import log from logger import log
class ReloadServerHandler(socketserver.StreamRequestHandler): class ReloadServerHandler(socketserver.BaseRequestHandler):
def handle(self) : def handle(self) :
locked = False locked = False
@@ -10,7 +10,7 @@ class ReloadServerHandler(socketserver.StreamRequestHandler):
while True : while True :
data = self.request.recv(512) data = self.request.recv(512)
if not data or not data in [b"lock", b"reload", b"unlock"] : if not data or not data in [b"lock", b"reload", b"unlock", b"acme"] :
break break
if data == b"lock" : if data == b"lock" :
self.server.controller.lock.acquire() self.server.controller.lock.acquire()
@@ -20,6 +20,12 @@ class ReloadServerHandler(socketserver.StreamRequestHandler):
self.server.controller.lock.release() self.server.controller.lock.release()
locked = False locked = False
self.request.sendall(b"ok") self.request.sendall(b"ok")
elif data == b"acme" :
ret = self.server.controller.send(files="acme")
if ret :
self.request.sendall(b"ok")
else :
self.request.sendall(b"ko")
elif data == b"reload" : elif data == b"reload" :
ret = self.server.controller.reload() ret = self.server.controller.reload()
if ret : if ret :
@@ -31,8 +37,11 @@ class ReloadServerHandler(socketserver.StreamRequestHandler):
if locked : if locked :
self.server.controller.lock.release() self.server.controller.lock.release()
class ThreadingUnixServer(socketserver.ThreadingMixIn, socketserver.UnixStreamServer) :
pass
def run_reload_server(controller) : def run_reload_server(controller) :
server = socketserver.UnixStreamServer("/tmp/autoconf.sock", ReloadServerHandler) server = ThreadingUnixServer("/tmp/autoconf.sock", ReloadServerHandler)
os.chown("/tmp/autoconf.sock", 0, 101) os.chown("/tmp/autoconf.sock", 0, 101)
os.chmod("/tmp/autoconf.sock", 0o770) os.chmod("/tmp/autoconf.sock", 0o770)
server.controller = controller server.controller = controller

View File

@@ -46,23 +46,27 @@ class SwarmController(Controller.Controller) :
if new_env != old_env : if new_env != old_env :
self.lock.acquire() self.lock.acquire()
try : try :
log("controller", "INFO", "generating new configuration") if not self.gen_conf(new_env) :
if self.gen_conf(new_env) : raise Exception("can't generate configuration")
old_env = new_env.copy() if not self.send() :
log("controller", "INFO", "successfully generated new configuration") raise Exception("can't send configuration")
if self.reload() : if not self.reload() :
log("controller", "INFO", "successful reload") raise Exception("can't reload configuration")
else : self.__old_env = new_env.copy()
log("controller", "ERROR", "failed reload") log("CONTROLLER", "INFO", "successfully loaded new configuration")
else : except Exception as e :
log("controller", "ERROR", "can't generate new configuration") log("controller", "ERROR", "error while computing new event : " + str(e))
except :
log("controller", "ERROR", "exception while receiving event")
self.lock.release() self.lock.release()
def reload(self) : def reload(self) :
return self._reload(self.__get_instances()) return self._reload(self.__get_instances())
def send(self, files="all") :
return self._send(self.__get_instances(), files=files)
def stop_temp(self) :
return self._stop_temp(self.__get_instances())
def wait(self) : def wait(self) :
self.lock.acquire() self.lock.acquire()
try : try :
@@ -71,14 +75,29 @@ class SwarmController(Controller.Controller) :
while len(instances) == 0 : while len(instances) == 0 :
time.sleep(1) time.sleep(1)
instances = self.__get_instances() instances = self.__get_instances()
# Wait for temporary bunkerized-nginx
if not self._config.wait(instances) :
self.lock.release()
return False, env
# Generate first config # Generate first config
env = self.get_env() env = self.get_env()
if not self.gen_conf(env) : if not self.gen_conf(env) :
self.lock.release() self.lock.release()
return False, env return False, env
# Wait for nginx # Send the config
if not self.send() :
self.lock.release() self.lock.release()
return self._config.wait(instances), env return False, env
# Stop the temporary server
if not self.stop_temp() :
self.lock.release()
return False, env
# Wait for bunkerized-nginx
if not self._config.wait(instances) :
self.lock.release()
return False, env
self.lock.release()
return True, env
except : except :
pass pass
self.lock.release() self.lock.release()

View File

@@ -1,6 +1,4 @@
location ~ ^%API_URI%/ping { client_max_body_size 1G;
return 444;
}
location ~ %API_URI% { location ~ %API_URI% {
@@ -15,10 +13,10 @@ rewrite_by_lua_block {
ngx.header.content_type = 'text/plain' ngx.header.content_type = 'text/plain'
if api.do_api_call(api_uri) then if api.do_api_call(api_uri) then
logger.log(ngx.NOTICE, "API", "API call " .. ngx.var.request_uri .. " successfull from " .. ngx.var.remote_addr) logger.log(ngx.NOTICE, "API", "API call " .. ngx.var.request_uri .. " successfull from " .. ngx.var.remote_addr)
ngx.say("ok") ngx.print("ok")
else else
logger.log(ngx.WARN, "API", "API call " .. ngx.var.request_uri .. " failed from " .. ngx.var.remote_addr) logger.log(ngx.WARN, "API", "API call " .. ngx.var.request_uri .. " failed from " .. ngx.var.remote_addr)
ngx.say("ko") ngx.print("ko")
end end
ngx.exit(ngx.HTTP_OK) ngx.exit(ngx.HTTP_OK)
@@ -29,3 +27,4 @@ rewrite_by_lua_block {
} }
} }

View File

@@ -1,4 +1,5 @@
# todo : if api_uri == "random" # todo : if api_uri == "random"
client_max_body_size 1G;
rewrite_by_lua_block { rewrite_by_lua_block {
local api = require "api" local api = require "api"

View File

@@ -3,6 +3,8 @@ init_by_lua_block {
local dataloader = require "dataloader" local dataloader = require "dataloader"
local logger = require "logger" local logger = require "logger"
local cjson = require "cjson" local cjson = require "cjson"
local remoteapi = require "remoteapi"
local iputils = require "resty.iputils"
local use_redis = {% if USE_REDIS == "yes" %}true{% else %}false{% endif +%} local use_redis = {% if USE_REDIS == "yes" %}true{% else %}false{% endif +%}
@@ -12,6 +14,35 @@ local use_tor_exit_nodes = {% if has_value("BLOCK_TOR_EXIT_NODE", "yes") %}true{
local use_user_agents = {% if has_value("BLOCK_USER_AGENT", "yes") %}true{% else %}false{% endif +%} local use_user_agents = {% if has_value("BLOCK_USER_AGENT", "yes") %}true{% else %}false{% endif +%}
local use_referrers = {% if has_value("BLOCK_REFERRER", "yes") %}true{% else %}false{% endif +%} local use_referrers = {% if has_value("BLOCK_REFERRER", "yes") %}true{% else %}false{% endif +%}
local use_remote_api = {% if has_value("USE_REMOTE_API", "yes") %}true{% else %}false{% endif +%}
-- Load reserved IPs
local reserved_ips = {
"0.0.0.0/8",
"10.0.0.0/8",
"100.64.0.0/10",
"127.0.0.0/8",
"169.254.0.0/16",
"172.16.0.0/12",
"192.0.0.0/24",
"192.0.2.0/24",
"192.88.99.0/24",
"192.168.0.0/16",
"198.18.0.0/15",
"198.51.100.0/24",
"203.0.113.0/24",
"224.0.0.0/4",
"233.252.0.0/24",
"240.0.0.0/4",
"255.255.255.255/32"
}
local success, err, forcible = ngx.shared.reserved_ips:set("data", cjson.encode(iputils.parse_cidrs(reserved_ips)), 0)
if not success then
logger.log(ngx.ERR, "INIT", "Can't load reserved IPs : " .. err)
end
-- Load blacklists
if not use_redis then if not use_redis then
if use_proxies then if use_proxies then
dataloader.load_ip("/etc/nginx/proxies.list", ngx.shared.proxies_data) dataloader.load_ip("/etc/nginx/proxies.list", ngx.shared.proxies_data)
@@ -72,4 +103,45 @@ for dir in p:lines() do
end end
p:close() p:close()
-- Remote API
if use_remote_api then
-- Save server
ngx.shared.remote_api:set("server", "{{ REMOTE_API_SERVER }}", 0)
-- Save version
local f = io.open("/opt/bunkerized-nginx/VERSION", "r")
ngx.shared.remote_api:set("version", f:read("*all"):gsub("[\r\n]", ""), 0)
f:close()
-- Save machine ID
local id = "empty"
local f = io.open("/etc/nginx/machine.id", "r")
if f == nil then
logger.log(ngx.ERR, "REMOTE API", "USE_REMOTE_API is set to yes but machine ID is not generated - communication with {{ REMOTE_API_SERVER }} won't work")
else
id = f:read("*all"):gsub("[\r\n]", "")
logger.log(ngx.ERR, "REMOTE API", "*NOT AN ERROR* Using existing machine ID (" .. id .. ")")
f:close()
end
ngx.shared.remote_api:set("id", id, 0)
-- Ping the remote API
local ping = "ko"
if id ~= "empty" then
if remoteapi.ping2() then
ping = "ok"
logger.log(ngx.ERR, "REMOTE API", "*NOT AN ERROR* Successfully requested the remote API")
else
logger.log(ngx.ERR, "REMOTE API", "Can't contact the remote API, feature will be disabled")
end
end
ngx.shared.remote_api:set("ping", ping, 0)
-- Load the database
if ping ~= "ko" then
dataloader.load_ip("/etc/nginx/remote-api.db", ngx.shared.remote_api_db)
end
end
} }

View File

@@ -1,5 +1,5 @@
server { server {
{% if LISTEN_HTTP == "yes" %}listen 0.0.0.0:{{ HTTP_PORT }} default_server{% endif +%}; {% if LISTEN_HTTP == "yes" %}listen 0.0.0.0:{{ HTTP_PORT }} default_server;{% endif +%}
server_name _; server_name _;
{% if has_value("AUTO_LETS_ENCRYPT", "yes") %}include /etc/nginx/multisite-default-server-https.conf;{% endif +%} {% if has_value("AUTO_LETS_ENCRYPT", "yes") %}include /etc/nginx/multisite-default-server-https.conf;{% endif +%}
include /etc/nginx/multisite-default-server-lets-encrypt-webroot.conf; include /etc/nginx/multisite-default-server-lets-encrypt-webroot.conf;

View File

@@ -1,6 +1,6 @@
load_module /usr/lib/nginx/modules/ngx_http_lua_module.so; load_module /usr/lib/nginx/modules/ngx_http_lua_module.so;
daemon on; #daemon on;
pid /tmp/nginx-temp.pid; pid /tmp/nginx-temp.pid;

View File

@@ -76,6 +76,8 @@ http {
# lua path and dicts # lua path and dicts
lua_package_path "/opt/bunkerized-nginx/lua/?.lua;/opt/bunkerized-nginx/plugins/?.lua;/opt/bunkerized-nginx/deps/lib/lua/?.lua;;"; lua_package_path "/opt/bunkerized-nginx/lua/?.lua;/opt/bunkerized-nginx/plugins/?.lua;/opt/bunkerized-nginx/deps/lib/lua/?.lua;;";
lua_package_cpath "/opt/bunkerized-nginx/deps/lib/?.so;/opt/bunkerized-nginx/deps/lib/lua/?.so;;"; lua_package_cpath "/opt/bunkerized-nginx/deps/lib/?.so;/opt/bunkerized-nginx/deps/lib/lua/?.so;;";
lua_ssl_trusted_certificate "/opt/bunkerized-nginx/lua/misc/root-ca.pem";
lua_ssl_verify_depth 2;
{% if has_value("USE_WHITELIST_IP", "yes") %}lua_shared_dict whitelist_ip_cache 10m;{% endif +%} {% if has_value("USE_WHITELIST_IP", "yes") %}lua_shared_dict whitelist_ip_cache 10m;{% endif +%}
{% if has_value("USE_WHITELIST_REVERSE", "yes") %}lua_shared_dict whitelist_reverse_cache 10m;{% endif +%} {% if has_value("USE_WHITELIST_REVERSE", "yes") %}lua_shared_dict whitelist_reverse_cache 10m;{% endif +%}
{% if has_value("USE_BLACKLIST_IP", "yes") %}lua_shared_dict blacklist_ip_cache 10m;{% endif +%} {% if has_value("USE_BLACKLIST_IP", "yes") %}lua_shared_dict blacklist_ip_cache 10m;{% endif +%}
@@ -90,10 +92,14 @@ http {
{% if has_value("BLOCK_REFERRER", "yes") %}lua_shared_dict referrers_cache 10m;{% endif +%} {% if has_value("BLOCK_REFERRER", "yes") %}lua_shared_dict referrers_cache 10m;{% endif +%}
{% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_ban 10m;{% endif +%} {% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_ban 10m;{% endif +%}
{% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_count 10m;{% endif +%} {% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_count 10m;{% endif +%}
{% if has_value("USE_LIMIT_REQ", "yes") %}lua_shared_dict limit_req {{ LIMIT_REQ_CACHE }};{% endif +%}
lua_shared_dict plugins_data 10m; lua_shared_dict plugins_data 10m;
lua_shared_dict reserved_ips 1m;
{% if has_value("USE_REMOTE_API", "yes") %}lua_shared_dict remote_api 1m;{% endif +%}
{% if has_value("USE_REMOTE_API", "yes") %}lua_shared_dict remote_api_db 10m;{% endif +%}
# shared memory zone for limit_req # shared memory zone for limit_req
{% if has_value("USE_LIMIT_REQ", "yes") %}limit_req_zone $binary_remote_addr$uri zone=limit:{{ LIMIT_REQ_CACHE }} rate={{ LIMIT_REQ_RATE }};{% endif +%} #{% if has_value("USE_LIMIT_REQ", "yes") %}limit_req_zone $binary_remote_addr$uri zone=limit:{{ LIMIT_REQ_CACHE }} rate={{ LIMIT_REQ_RATE }};{% endif +%}
# shared memory zone for limit_conn # shared memory zone for limit_conn
{% if has_value("USE_LIMIT_CONN", "yes") %}limit_conn_zone $binary_remote_addr zone=ddos:{{ LIMIT_CONN_CACHE }};{% endif +%} {% if has_value("USE_LIMIT_CONN", "yes") %}limit_conn_zone $binary_remote_addr zone=ddos:{{ LIMIT_CONN_CACHE }};{% endif +%}

View File

@@ -6,7 +6,7 @@ location = {{ ANTIBOT_URI }} {
content_by_lua_block { content_by_lua_block {
local cookie = require "cookie" local cookie = require "cookie"
local recaptcha = require "recaptcha" local recaptcha = require "recaptcha"
local loggger = require "logger" local logger = require "logger"
if not cookie.is_set("uri") then if not cookie.is_set("uri") then
logger.log(ngx.WARN, "ANTIBOT", "recaptcha fail (1) for " .. ngx.var.remote_addr) logger.log(ngx.WARN, "ANTIBOT", "recaptcha fail (1) for " .. ngx.var.remote_addr)
return ngx.exit(ngx.HTTP_FORBIDDEN) return ngx.exit(ngx.HTTP_FORBIDDEN)

View File

@@ -0,0 +1,5 @@
{% for k, v in all.items() +%}
{% if k.startswith("CUSTOM_HEADER") and v != "" +%}
more_set_header "{{ v }}";
{% endif %}
{% endfor %}

View File

@@ -1,5 +1,8 @@
log_by_lua_block { log_by_lua_block {
local logger = require "logger"
local cjson = require "cjson"
-- bad behavior -- bad behavior
local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif +%} local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif +%}
local behavior = require "behavior" local behavior = require "behavior"
@@ -9,7 +12,47 @@ local bad_behavior_count_time = {{ BAD_BEHAVIOR_COUNT_TIME }}
local bad_behavior_ban_time = {{ BAD_BEHAVIOR_BAN_TIME }} local bad_behavior_ban_time = {{ BAD_BEHAVIOR_BAN_TIME }}
if use_bad_behavior then if use_bad_behavior then
behavior.count(bad_behavior_status_codes, bad_behavior_threshold, bad_behavior_count_time, bad_behavior_ban_time) local new_bad_behavior_ban = false
if not behavior.is_banned() then
new_bad_behavior_ban = behavior.count(bad_behavior_status_codes, bad_behavior_threshold, bad_behavior_count_time, bad_behavior_ban_time)
end
end
-- remote API
local use_remote_api = {% if USE_REMOTE_API == "yes" %}true{% else %}false{% endif +%}
local remoteapi = require "remoteapi"
local iputils = require "resty.iputils"
if use_remote_api and ngx.status == ngx.HTTP_FORBIDDEN and not iputils.ip_in_cidrs(ngx.var.remote_addr, cjson.decode(ngx.shared.reserved_ips:get("data"))) and ngx.shared.remote_api:get("id") ~= "empty" then
if ngx.shared.remote_api:get("ping") == "ko" then
if remoteapi.ping2() then
ngx.shared.remote_api:set("ping", "ok", 0)
logger.log(ngx.NOTICE, "REMOTE API", "Successfully requested the remote API again")
else
logger.log(ngx.ERR, "REMOTE API", "Can't contact the remote API, feature will be disabled")
end
end
if ngx.shared.remote_api:get("ping") ~= "ko" then
local reason = "other"
if use_bad_behavior and new_bad_behavior_ban then
reason = "behavior"
end
local report_ip = function (premature, ip, reason)
if premature then
return
end
local remoteapi = require "remoteapi"
local logger = require "logger"
local res, data = remoteapi.ip(ip, reason)
-- TODO : find a way to log
end
local ok, err = ngx.timer.at(0, report_ip, ngx.var.remote_addr, reason)
if not ok then
logger.log(ngx.ERR, "REMOTE API", "Error while creating report timer " .. err)
else
logger.log(ngx.NOTICE, "REMOTE API", "Reporting " .. ngx.var.remote_addr .. "(reason: " .. reason .. ") to the remote API")
end
end
end end
} }

View File

@@ -57,11 +57,18 @@ local dnsbl_list = {% raw %}{{% endraw %}{% if DNSBL_LIST != "" %}{% set elemen
-- bad behavior -- bad behavior
local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif +%} local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif +%}
-- limit req
local use_limit_req = {% if USE_LIMIT_REQ == "yes" %}true{% else %}false{% endif +%}
-- remote API
local use_remote_api = {% if USE_REMOTE_API == "yes" %}true{% else %}false{% endif +%}
-- include LUA code -- include LUA code
local whitelist = require "whitelist" local whitelist = require "whitelist"
local blacklist = require "blacklist" local blacklist = require "blacklist"
local dnsbl = require "dnsbl" local dnsbl = require "dnsbl"
local cookie = require "cookie" local cookie = require "cookie"
local cjson = require "cjson"
local javascript = require "javascript" local javascript = require "javascript"
local captcha = require "captcha" local captcha = require "captcha"
local recaptcha = require "recaptcha" local recaptcha = require "recaptcha"
@@ -70,6 +77,7 @@ local behavior = require "behavior"
local logger = require "logger" local logger = require "logger"
local redis = require "resty.redis" local redis = require "resty.redis"
local checker = require "checker" local checker = require "checker"
local limitreq = require "limitreq"
-- user variables -- user variables
local antibot_uri = "{{ ANTIBOT_URI }}" local antibot_uri = "{{ ANTIBOT_URI }}"
@@ -145,6 +153,30 @@ if use_bad_behavior and behavior.is_banned() then
ngx.exit(ngx.HTTP_FORBIDDEN) ngx.exit(ngx.HTTP_FORBIDDEN)
end end
-- check if IP is banned because of "request limit"
if use_limit_req then
{% if USE_LIMIT_REQ == "yes" %}
{% for k, v in all.items() %}
{% if k.startswith("LIMIT_REQ_URL") and v != "" +%}
{% set url = v %}
{% set rate = all[k.replace("URL", "RATE")] if k.replace("URL", "RATE") in all else "1r/s" %}
{% set burst = all[k.replace("URL", "BURST")] if k.replace("URL", "BURST") in all else "5" %}
{% set delay = all[k.replace("URL", "DELAY")] if k.replace("URL", "DELAY") in all else "1" %}
{% if url == "/" %}
if limitreq.check("{{ rate }}", {{ burst }}, {{ delay }}) then
ngx.exit(ngx.HTTP_TOO_MANY_REQUESTS)
end
{% else %}
if ngx.var.uri == "{{ url }}" and limitreq.check("{{ rate }}", {{ burst }}, {{ delay }}) then
ngx.exit(ngx.HTTP_TOO_MANY_REQUESTS)
end
{% endif %}
{% endif %}
{% endfor %}
{% endif %}
end
-- our redis client -- our redis client
local redis_client = nil local redis_client = nil
if use_redis then if use_redis then
@@ -212,7 +244,7 @@ if use_referrer and ngx.var.http_referer ~= nil then
end end
-- check if country is allowed -- check if country is allowed
if use_country and ngx.var.allowed_country == "no" then if use_country and ngx.var.allowed_country == "no" and not iputils.ip_in_cidrs(ngx.var.remote_addr, cjson.decode(ngx.shared.reserved_ips:get("data"))) then
logger.log(ngx.WARN, "COUNTRY", "Country of " .. ngx.var.remote_addr .. " is blacklisted") logger.log(ngx.WARN, "COUNTRY", "Country of " .. ngx.var.remote_addr .. " is blacklisted")
ngx.exit(ngx.HTTP_FORBIDDEN) ngx.exit(ngx.HTTP_FORBIDDEN)
end end
@@ -224,6 +256,15 @@ if use_dnsbl and not dnsbl.cached() then
end end
end end
-- check if IP is in distributed DB
if use_remote_api then
local checker = checker:new("remote-api-db", ngx.shared.remote_api_db, redis_client, "simple")
if checker:check(iputils.ip2bin(ngx.var.remote_addr)) then
logger.log(ngx.WARN, "REMOTE API", "IP " .. ngx.var.remote_addr .. " is in the distributed DB")
ngx.exit(ngx.HTTP_FORBIDDEN)
end
end
-- cookie check -- cookie check
if use_antibot_cookie and ngx.var.uri ~= "/favicon.ico" then if use_antibot_cookie and ngx.var.uri ~= "/favicon.ico" then
if not cookie.is_set("uri") then if not cookie.is_set("uri") then

View File

@@ -5,10 +5,17 @@
{% set host = all[k.replace("URL", "HOST")] if k.replace("URL", "HOST") in all else "" %} {% set host = all[k.replace("URL", "HOST")] if k.replace("URL", "HOST") in all else "" %}
{% set ws = all[k.replace("URL", "WS")] if k.replace("URL", "WS") in all else "" %} {% set ws = all[k.replace("URL", "WS")] if k.replace("URL", "WS") in all else "" %}
{% set headers = all[k.replace("URL", "HEADERS")] if k.replace("URL", "HEADERS") in all else "" %} {% set headers = all[k.replace("URL", "HEADERS")] if k.replace("URL", "HEADERS") in all else "" %}
{% set buffering = all[k.replace("URL", "BUFFERING")] if k.replace("URL", "BUFFERING") in all else "yes" %}
{% set keepalive = all[k.replace("URL", "KEEPALIVE")] if k.replace("URL", "KEEPALIVE") in all else "yes" %}
location {{ url }} {% raw %}{{% endraw +%} location {{ url }} {% raw %}{{% endraw +%}
etag off; etag off;
set $backend "{{ host }}"; set $backend "{{ host }}";
proxy_pass $backend; proxy_pass $backend;
{% if buffering == "yes" +%}
proxy_buffering on;
{% else +%}
proxy_buffering off;
{% endif %}
{% if USE_AUTHELIA == "yes" +%} {% if USE_AUTHELIA == "yes" +%}
include {{ NGINX_PREFIX }}authelia-auth-request.conf; include {{ NGINX_PREFIX }}authelia-auth-request.conf;
{% endif %} {% endif %}
@@ -17,6 +24,9 @@ location {{ url }} {% raw %}{{% endraw +%}
proxy_http_version 1.1; proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade; proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade"; proxy_set_header Connection "Upgrade";
{% elif keepalive == "yes" +%}
proxy_http_version 1.1;
proxy_set_header Connection "";
{% endif %} {% endif %}
{% if headers != "" %} {% if headers != "" %}
{% for header in headers.split(";") +%} {% for header in headers.split(";") +%}

View File

@@ -65,9 +65,9 @@ server {
} }
# requests limiting # requests limiting
{% if USE_LIMIT_REQ == "yes" +%} #{% if USE_LIMIT_REQ == "yes" +%}
include {{ NGINX_PREFIX }}limit-req.conf; # include {{ NGINX_PREFIX }}limit-req.conf;
{% endif %} #{% endif %}
# connections limiting # connections limiting
{% if USE_LIMIT_CONN == "yes" +%} {% if USE_LIMIT_CONN == "yes" +%}
@@ -180,6 +180,9 @@ server {
include {{ NGINX_PREFIX }}redirect-to.conf; include {{ NGINX_PREFIX }}redirect-to.conf;
{% endif %} {% endif %}
# custom headers
include {{ NGINX_PREFIX }}custom-headers.conf;
# reverse proxy # reverse proxy
{% if USE_REVERSE_PROXY == "yes" +%} {% if USE_REVERSE_PROXY == "yes" +%}
include {{ NGINX_PREFIX }}reverse-proxy.conf; include {{ NGINX_PREFIX }}reverse-proxy.conf;

View File

@@ -50,7 +50,7 @@ copyright = '2021, bunkerity'
author = 'bunkerity' author = 'bunkerity'
# The full version, including alpha/beta/rc tags # The full version, including alpha/beta/rc tags
release = 'v1.3.0' release = 'v1.3.2'
# -- General configuration --------------------------------------------------- # -- General configuration ---------------------------------------------------

View File

@@ -123,6 +123,12 @@ Default value : *no*
Context : *global*, *multisite* Context : *global*, *multisite*
When set to yes and `REDIRECT_TO` is set it will append the requested path to the redirection (e.g., https://example.com/something redirects to https://www.example.com/something). When set to yes and `REDIRECT_TO` is set it will append the requested path to the redirection (e.g., https://example.com/something redirects to https://www.example.com/something).
`CUSTOM_HEADER`
Values : *\<HeaderName: HeaderValue\>*
Default value :
Context : *global*, *multisite*
Add custom HTTP header of your choice to clients. You can add multiple headers by appending a number as a suffix of the environment variable : `CUSTOM_HEADER_1`, `CUSTOM_HEADER_2`, `CUSTOM_HEADER_3`, ...
### Information leak ### Information leak
`SERVER_TOKENS` `SERVER_TOKENS`
@@ -207,6 +213,20 @@ Context : *global*, *multisite*
Only valid when `USE_REVERSE_PROXY` is set to *yes*. Set it to *yes* when the corresponding `REVERSE_PROXY_HOST` is a WebSocket server. Only valid when `USE_REVERSE_PROXY` is set to *yes*. Set it to *yes* when the corresponding `REVERSE_PROXY_HOST` is a WebSocket server.
You can set multiple url/host by adding a suffix number to the variable name like this : `REVERSE_PROXY_WS_1`, `REVERSE_PROXY_WS_2`, `REVERSE_PROXY_WS_3`, ... You can set multiple url/host by adding a suffix number to the variable name like this : `REVERSE_PROXY_WS_1`, `REVERSE_PROXY_WS_2`, `REVERSE_PROXY_WS_3`, ...
`REVERSE_PROXY_BUFFERING`
Values : *yes* | *no*
Default value : *yes*
Context : *global*, *multisite*
Only valid when `USE_REVERSE_PROXY` is set to *yes*. Set it to *yes* then the [proxy_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering) directive will be set to `on` or `off` otherwise.
You can set multiple url/host by adding a suffix number to the variable name like this : `REVERSE_PROXY_BUFFERING_1`, `REVERSE_PROXY_BUFFERING_2`, `REVERSE_PROXY_BUFFERING_3`, ...
`REVERSE_PROXY_KEEPALIVE`
Values : *yes* | *no*
Default value : *yes*
Context : *global*, *multisite*
Only valid when `USE_REVERSE_PROXY` is set to *yes*. Set it to *yes* to enable keepalive connections with the backend (needs a HTTP 1.1 backend) or *no* otherwise.
You can set multiple url/host by adding a suffix number to the variable name like this : `REVERSE_PROXY_KEEPALIVE_1`, `REVERSE_PROXY_KEEPALIVE_2`, `REVERSE_PROXY_KEEPALIVE_3`, ...
`REVERSE_PROXY_HEADERS` `REVERSE_PROXY_HEADERS`
Values : *\<list of custom headers separated with a semicolon like this : header1 value1;header2 value2...\>* Values : *\<list of custom headers separated with a semicolon like this : header1 value1;header2 value2...\>*
Default value : Default value :
@@ -218,7 +238,7 @@ You can set multiple url/host by adding a suffix number to the variable name lik
Values : *yes* | *no* Values : *yes* | *no*
Default value : *no* Default value : *no*
Context : *global*, *multisite* Context : *global*, *multisite*
Set this environment variable to *yes* if you're using bunkerized-nginx behind a reverse proxy. This means you will see the real client address instead of the proxy one inside your logs. Ssecurity tools will also then work correctly. Set this environment variable to *yes* if you're using bunkerized-nginx behind a reverse proxy. This means you will see the real client address instead of the proxy one inside your logs. Security tools will also then work correctly.
`PROXY_REAL_IP_FROM` `PROXY_REAL_IP_FROM`
Values : *\<list of trusted IP addresses and/or networks separated with spaces\>* Values : *\<list of trusted IP addresses and/or networks separated with spaces\>*
@@ -583,14 +603,14 @@ More info [here](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Refer
`FEATURE_POLICY` `FEATURE_POLICY`
Values : *&lt;directive&gt; &lt;allow list&gt;* Values : *&lt;directive&gt; &lt;allow list&gt;*
Default value : *accelerometer 'none'; ambient-light-sensor 'none'; autoplay 'none'; camera 'none'; display-capture 'none'; document-domain 'none'; encrypted-media 'none'; fullscreen 'none'; geolocation 'none'; gyroscope 'none'; magnetometer 'none'; microphone 'none'; midi 'none'; payment 'none'; picture-in-picture 'none'; speaker 'none'; sync-xhr 'none'; usb 'none'; vibrate 'none'; vr 'none'* Default value : *accelerometer 'none'; ambient-light-sensor 'none'; autoplay 'none'; battery 'none'; camera 'none'; display-capture 'none'; document-domain 'none'; encrypted-media 'none'; fullscreen 'none'; geolocation 'none'; gyroscope 'none'; magnetometer 'none'; microphone 'none'; midi 'none'; payment 'none'; picture-in-picture 'none'; publickey-credentials-get 'none'; sync-xhr 'none'; usb 'none'; wake-lock 'none'; web-share 'none'; xr-spatial-tracking 'none"*
Context : *global*, *multisite* Context : *global*, *multisite*
Tells the browser which features can be used on the website. Tells the browser which features can be used on the website.
More info [here](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Feature-Policy). More info [here](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Feature-Policy).
`PERMISSIONS_POLICY` `PERMISSIONS_POLICY`
Values : *feature=(allow list)* Values : *feature=(allow list)*
Default value : accelerometer=(), ambient-light-sensor=(), autoplay=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), fullscreen=(), geolocation=(), gyroscope=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), speaker=(), sync-xhr=(), usb=(), vibrate=(), vr=() Default value : *accelerometer=(), ambient-light-sensor=(), autoplay=(), battery=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), fullscreen=(), geolocation=(), gyroscope=(), interest-cohort=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), publickey-credentials-get=(), screen-wake-lock=(), sync-xhr=(), usb=(), web-share=(), xr-spatial-tracking=()*
Context : *global*, *multisite* Context : *global*, *multisite*
Tells the browser which features can be used on the website. Tells the browser which features can be used on the website.
More info [here](https://www.w3.org/TR/permissions-policy-1/). More info [here](https://www.w3.org/TR/permissions-policy-1/).
@@ -666,6 +686,20 @@ Default value :
Context : *global*, *multisite* Context : *global*, *multisite*
The secret given by Google when `USE_ANTIBOT` is set to *recaptcha*. The secret given by Google when `USE_ANTIBOT` is set to *recaptcha*.
### Distributed blacklist
`USE_REMOTE_API`
Values : *yes* | *no*
Default value : *yes*
Context : *global*, *multisite*
If set to yes, the instance will participate into the distributed blacklist shared among all other instances. The blacklist will be automaticaly downloaded on a periodic basis.
`REMOTE_API_SERVER`
Values : *\<any valid full URL\>*
Default value :
Context : *global*
Full URL of the remote API used for the distributed blacklist.
### External blacklists ### External blacklists
`BLOCK_USER_AGENT` `BLOCK_USER_AGENT`
@@ -808,19 +842,34 @@ Values : *yes* | *no*
Default value : *yes* Default value : *yes*
Context : *global*, *multisite* Context : *global*, *multisite*
If set to yes, the amount of HTTP requests made by a user for a given resource will be limited during a period of time. If set to yes, the amount of HTTP requests made by a user for a given resource will be limited during a period of time.
More info rate limiting [here](https://www.nginx.com/blog/rate-limiting-nginx/) (the key used is $binary_remote_addr$uri).
`LIMIT_REQ_URL`
Values : *\<any valid url\>*
Default value :
Context : *global*, *multisite*
The URL where you want to apply the request limiting. Use special value of `/` to apply it globally for all URL.
You can set multiple rules by adding a suffix number to the variable name like this : `LIMIT_REQ_URL_1`, `LIMIT_REQ_URL_2`, `LIMIT_REQ_URL_3`, ...
`LIMIT_REQ_RATE` `LIMIT_REQ_RATE`
Values : *Xr/s* | *Xr/m* Values : *Xr/s* | *Xr/m* | *Xr/h* | *Xr/d*
Default value : *1r/s* Default value : *1r/s*
Context : *global*, *multisite* Context : *global*, *multisite*
The rate limit to apply when `USE_LIMIT_REQ` is set to *yes*. Default is 1 request to the same URI and from the same IP per second. The rate limit to apply when `USE_LIMIT_REQ` is set to *yes*. Default is 1 request to the same URI and from the same IP per second. Possible value are : `s` (second), `m` (minute), `h` (hour) and `d` (day)).
You can set multiple rules by adding a suffix number to the variable name like this : `LIMIT_REQ_RATE_1`, `LIMIT_REQ_RATE_2`, `LIMIT_REQ_RATE_3`, ...
`LIMIT_REQ_BURST` `LIMIT_REQ_BURST`
Values : *<any valid integer\>* Values : *\<any valid integer\>*
Default value : *2* Default value : *5*
Context : *global*, *multisite* Context : *global*, *multisite*
The number of requests to put in queue before rejecting requests. The number of requests to put in queue before rejecting requests.
You can set multiple rules by adding a suffix number to the variable name like this : `LIMIT_REQ_BURST_1`, `LIMIT_REQ_BURST_2`, `LIMIT_REQ_BURST_3`, ...
`LIMIT_REQ_DELAY`
Values : *\<any valid float\>*
Default value : *1*
Context : *global*, *multisite*
The number of seconds to wait before requests in queue are processed. Values like `0.1`, `0.01` or `0.001` are also accepted.
You can set multiple rules by adding a suffix number to the variable name like this : `LIMIT_REQ_DELAY_1`, `LIMIT_REQ_DELAY_2`, `LIMIT_REQ_DELAY_3`, ...
`LIMIT_REQ_CACHE` `LIMIT_REQ_CACHE`
Values : *Xm* | *Xk* Values : *Xm* | *Xk*

Binary file not shown.

Before

Width:  |  Height:  |  Size: 128 KiB

After

Width:  |  Height:  |  Size: 78 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 148 KiB

After

Width:  |  Height:  |  Size: 89 KiB

View File

@@ -241,23 +241,13 @@ When your container is not needed anymore, you can delete it as usual. The autoc
## Docker Swarm ## Docker Swarm
Using bunkerized-nginx in a Docker Swarm cluster requires a shared folder accessible from both managers and workers (anything like NFS, GlusterFS, CephFS or even SSHFS will work). The deployment and configuration is very similar to the "Docker autoconf" one but with services instead of containers. A service based on the bunkerized-nginx-autoconf image needs to be scheduled on a manager node (don't worry it doesn't expose any network port for obvious security reasons). This service will listen for Docker Swarm events like service creation or deletion and generate the configuration according to the labels of each service. Once configuration generation is done, the bunkerized-nginx-autoconf service will send a reload order to all the bunkerized-nginx tasks so they can load the new configuration. The deployment and configuration is very similar to the "Docker autoconf" one but with services instead of containers. A service based on the bunkerized-nginx-autoconf image needs to be scheduled on a manager node (don't worry it doesn't expose any network port for obvious security reasons). This service will listen for Docker Swarm events like service creation or deletion and generate the configuration according to the labels of each service. Once configuration generation is done, the bunkerized-nginx-autoconf service will send the configuration files and a reload order to all the bunkerized-nginx tasks so they can apply the new configuration. If you need to deliver static files (e.g., html, images, css, js, ...) a shared folder accessible from all bunkerized-nginx instances is needed (you can use a storage system like NFS, GlusterFS, CephFS on the host or a [Docker volume plugin](https://docs.docker.com/engine/extend/)).
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/swarm.png?raw=true" /> <img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/swarm.png?raw=true" />
**We will assume that a shared directory is mounted at the /shared location on both your managers and workers. Keep in mind that bunkerized-nginx and autoconf are running as unprivileged users with UID and GID 101. You must set the rights and permissions of the subfolders in /shared accordingly.**
In this setup we will deploy bunkerized-nginx in global mode on all workers and autoconf as a single replica on a manager. In this setup we will deploy bunkerized-nginx in global mode on all workers and autoconf as a single replica on a manager.
First of all, you will need to setup the shared folders : First of all, you will need to create 2 networks, one for the communication between bunkerized-nginx and autoconf and the other one for the communication between bunkerized-nginx and the web services :
```shell
$ cd /shared
$ mkdir www confs letsencrypt acme-challenge
$ chown root:101 www confs letsencrypt acme-challenge
$ chmod 770 www confs letsencrypt acme-challenge
```
Then you will need to create 2 networks, one for the communication between bunkerized-nginx and autoconf and the other one for the communication between bunkerized-nginx and the web services :
```shell ```shell
$ docker network create -d overlay --attachable bunkerized-net $ docker network create -d overlay --attachable bunkerized-net
$ docker network create -d overlay --attachable services-net $ docker network create -d overlay --attachable services-net
@@ -273,10 +263,6 @@ $ docker service create \
--network bunkerized-net \ --network bunkerized-net \
-p published=80,target=8080,mode=host \ -p published=80,target=8080,mode=host \
-p published=443,target=8443,mode=host \ -p published=443,target=8443,mode=host \
--mount type=bind,source=/shared/confs,destination=/etc/nginx,ro \
--mount type=bind,source=/shared/www,destination=/www,ro \
--mount type=bind,source=/shared/letsencrypt,destination=/etc/letsencrypt,ro \
--mount type=bind,source=/shared/acme-challenge,destination=/acme-challenge,ro \
-e SWARM_MODE=yes \ -e SWARM_MODE=yes \
-e USE_API=yes \ -e USE_API=yes \
-e API_URI=/ChangeMeToSomethingHardToGuess \ -e API_URI=/ChangeMeToSomethingHardToGuess \
@@ -297,9 +283,8 @@ $ docker service create \
--constraint node.role==manager \ --constraint node.role==manager \
--network bunkerized-net \ --network bunkerized-net \
--mount type=bind,source=/var/run/docker.sock,destination=/var/run/docker.sock,ro \ --mount type=bind,source=/var/run/docker.sock,destination=/var/run/docker.sock,ro \
--mount type=bind,source=/shared/confs,destination=/etc/nginx \ --mount type=volume,source=cache-vol,destination=/cache \
--mount type=bind,source=/shared/letsencrypt,destination=/etc/letsencrypt \ --mount type=volume,source=certs-vol,destination=/etc/letsencrypt \
--mount type=bind,source=/shared/acme-challenge,destination=/acme-challenge \
-e SWARM_MODE=yes \ -e SWARM_MODE=yes \
-e API_URI=/ChangeMeToSomethingHardToGuess \ -e API_URI=/ChangeMeToSomethingHardToGuess \
bunkerity/bunkerized-nginx-autoconf bunkerity/bunkerized-nginx-autoconf
@@ -322,11 +307,6 @@ services:
target: 8443 target: 8443
mode: host mode: host
protocol: tcp protocol: tcp
volumes:
- /shared/confs:/etc/nginx:ro
- /shared/www:/www:ro
- /shared/letsencrypt:/etc/letsencrypt:ro
- /shared/acme-challenge:/acme-challenge:ro
environment: environment:
- SWARM_MODE=yes - SWARM_MODE=yes
- USE_API=yes - USE_API=yes
@@ -350,9 +330,8 @@ services:
image: bunkerity/bunkerized-nginx-autoconf image: bunkerity/bunkerized-nginx-autoconf
volumes: volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro - /var/run/docker.sock:/var/run/docker.sock:ro
- /shared/confs:/etc/nginx - cache-vol:/cache
- /shared/letsencrypt:/etc/letsencrypt - certs-vol:/etc/letsencrypt
- /shared/acme-challenge:/acme-challenge
environment: environment:
- SWARM_MODE=yes - SWARM_MODE=yes
- API_URI=/ChangeMeToSomethingHardToGuess # must match API_URI from nginx - API_URI=/ChangeMeToSomethingHardToGuess # must match API_URI from nginx
@@ -374,6 +353,10 @@ networks:
driver: overlay driver: overlay
attachable: true attachable: true
name: services-net name: services-net
# And the volumes too
volumes:
cache-vol:
certs-vol:
``` ```
Check the logs of both autoconf and bunkerized-nginx services to see if everything is working as expected. Check the logs of both autoconf and bunkerized-nginx services to see if everything is working as expected.
@@ -427,20 +410,10 @@ When your service is not needed anymore, you can delete it as usual. The autocon
**This integration is still in beta, please file an issue if you find a bug or have an idea on how to improve it.** **This integration is still in beta, please file an issue if you find a bug or have an idea on how to improve it.**
Using bunkerized-nginx in a Kubernetes cluster requires a shared folder accessible from the nodes (anything like NFS, GlusterFS, CephFS or even SSHFS will work). The bunkerized-nginx-autoconf acts as an Ingress Controller and connects to the k8s API to get cluster events and generate a new configuration when it's needed. Once the configuration is generated, the Ingress Controller sends a reload order to the bunkerized-nginx instances running in the cluster. The bunkerized-nginx-autoconf acts as an Ingress Controller and connects to the k8s API to get cluster events and generate a new configuration when it's needed. Once the configuration is generated, the Ingress Controller sends the configuration files and a reload order to the bunkerized-nginx instances running in the cluster. If you need to deliver static files (e.g., html, images, css, js, ...) a shared folder accessible from all bunkerized-nginx instances is needed (you can use a storage system like NFS, GlusterFS, CephFS on the host or a [Kubernetes Volume that supports ReadOnlyMany access](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)).
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/kubernetes.png?raw=true" /> <img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/kubernetes.png?raw=true" />
**We will assume that a shared directory is mounted at the /shared location on your nodes. Keep in mind that bunkerized-nginx and autoconf are running as unprivileged users with UID and GID 101. You must set the rights and permissions of the subfolders in /shared accordingly.**
First of all, you will need to setup the shared folders :
```shell
$ cd /shared
$ mkdir www confs letsencrypt acme-challenge
$ chown root:nginx www confs letsencrypt acme-challenge
$ chmod 770 www confs letsencrypt acme-challenge
```
The first step to do is to declare the RBAC authorization that will be used by the Ingress Controller to access the Kubernetes API. A ready-to-use declaration is available here : The first step to do is to declare the RBAC authorization that will be used by the Ingress Controller to access the Kubernetes API. A ready-to-use declaration is available here :
```yaml ```yaml
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
@@ -506,7 +479,7 @@ spec:
- name: KUBERNETES_MODE - name: KUBERNETES_MODE
value: "yes" value: "yes"
- name: DNS_RESOLVERS - name: DNS_RESOLVERS
value: "kube-dns.kube-system.svc.cluster.local" value: "coredns.kube-system.svc.cluster.local"
- name: USE_API - name: USE_API
value: "yes" value: "yes"
- name: API_URI - name: API_URI
@@ -515,36 +488,6 @@ spec:
value: "" value: ""
- name: MULTISITE - name: MULTISITE
value: "yes" value: "yes"
volumeMounts:
- name: confs
mountPath: /etc/nginx
readOnly: true
- name: letsencrypt
mountPath: /etc/letsencrypt
readOnly: true
- name: acme-challenge
mountPath: /acme-challenge
readOnly: true
- name: www
mountPath: /www
readOnly: true
volumes:
- name: confs
hostPath:
path: /shared/confs
type: Directory
- name: letsencrypt
hostPath:
path: /shared/letsencrypt
type: Directory
- name: acme-challenge
hostPath:
path: /shared/acme-challenge
type: Directory
- name: www
hostPath:
path: /shared/www
type: Directory
--- ---
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
@@ -562,10 +505,19 @@ spec:
name: bunkerized-nginx name: bunkerized-nginx
``` ```
Important thing to note, labels and annotations defined are mandatory for autoconf to work.
You can now deploy the autoconf which will act as the ingress controller : You can now deploy the autoconf which will act as the ingress controller :
```yaml ```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-nginx
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
---
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
@@ -583,6 +535,30 @@ spec:
app: bunkerized-nginx-autoconf app: bunkerized-nginx-autoconf
spec: spec:
serviceAccountName: bunkerized-nginx-ingress-controller serviceAccountName: bunkerized-nginx-ingress-controller
volumes:
- name: vol-nginx
persistentVolumeClaim:
claimName: pvc-nginx
initContainers:
- name: change-data-dir-permissions
command:
- chown
- -R
- 101:101
- /etc/letsencrypt
- /cache
image: busybox
volumeMounts:
- name: vol-nginx
mountPath: /etc/letsencrypt
subPath: letsencrypt
- name: vol-nginx
mountPath: /cache
subPath: cache
securityContext:
runAsNonRoot: false
runAsUser: 0
runAsGroup: 0
containers: containers:
- name: bunkerized-nginx-autoconf - name: bunkerized-nginx-autoconf
image: bunkerity/bunkerized-nginx-autoconf image: bunkerity/bunkerized-nginx-autoconf
@@ -592,25 +568,12 @@ spec:
- name: API_URI - name: API_URI
value: "/ChangeMeToSomethingHardToGuess" value: "/ChangeMeToSomethingHardToGuess"
volumeMounts: volumeMounts:
- name: confs - name: vol-nginx
mountPath: /etc/nginx
- name: letsencrypt
mountPath: /etc/letsencrypt mountPath: /etc/letsencrypt
- name: acme-challenge subPath: letsencrypt
mountPath: /acme-challenge - name: vol-nginx
volumes: mountPath: /cache
- name: confs subPath: cache
hostPath:
path: /shared/confs
type: Directory
- name: letsencrypt
hostPath:
path: /shared/letsencrypt
type: Directory
- name: acme-challenge
hostPath:
path: /shared/acme-challenge
type: Directory
``` ```
Check the logs of both bunkerized-nginx and autoconf deployments to see if everything is working as expected. Check the logs of both bunkerized-nginx and autoconf deployments to see if everything is working as expected.
@@ -721,17 +684,18 @@ List of supported Linux distributions :
- Ubuntu focal (20.04) - Ubuntu focal (20.04)
- CentOS 7 - CentOS 7
- Fedora 34 - Fedora 34
- Arch Linux
Unlike containers, Linux integration can be tedious because bunkerized-nginx has a bunch of dependencies that need to be installed before we can use it. Fortunately, we provide a helper script to make the process easier and automatic. Once installed, the configuration is really simple, all you have to do is to edit the `/opt/bunkerized-nginx/variables.env` configuration file and run the `bunkerized-nginx` command to apply it. Unlike containers, Linux integration can be tedious because bunkerized-nginx has a bunch of dependencies that need to be installed before we can use it. Fortunately, we provide a helper script to make the process easier and automatic. Once installed, the configuration is really simple, all you have to do is to edit the `/opt/bunkerized-nginx/variables.env` configuration file and run the `bunkerized-nginx` command to apply it.
First of all you will need to install bunkerized-nginx. The recommended way is to use the official installer script : First of all you will need to install bunkerized-nginx. The recommended way is to use the official installer script :
```shell ```shell
$ curl -fsSL https://github.com/bunkerity/bunkerized-nginx/releases/download/v1.3.0/linux-install.sh -o /tmp/bunkerized-nginx.sh $ curl -fsSL https://github.com/bunkerity/bunkerized-nginx/releases/download/v1.3.2/linux-install.sh -o /tmp/bunkerized-nginx.sh
``` ```
Before executing it, you should also check the signature : Before executing it, you should also check the signature :
```shell ```shell
$ curl -fsSL https://github.com/bunkerity/bunkerized-nginx/releases/download/v1.3.0/linux-install.sh.asc -o /tmp/bunkerized-nginx.sh.asc $ curl -fsSL https://github.com/bunkerity/bunkerized-nginx/releases/download/v1.3.2/linux-install.sh.asc -o /tmp/bunkerized-nginx.sh.asc
$ gpg --auto-key-locate hkps://keys.openpgp.org --locate-keys contact@bunkerity.com $ gpg --auto-key-locate hkps://keys.openpgp.org --locate-keys contact@bunkerity.com
$ gpg --verify /tmp/bunkerized-nginx.sh.asc /tmp/bunkerized-nginx.sh $ gpg --verify /tmp/bunkerized-nginx.sh.asc /tmp/bunkerized-nginx.sh
``` ```

View File

@@ -62,7 +62,7 @@ docker kill --signal=SIGHUP my-container
Swarm and Kubernetes reload (repeat for each node) : Swarm and Kubernetes reload (repeat for each node) :
```shell ```shell
$ curl http://node-local-ip:80/reload $ curl http://node-local-ip:80/api-uri/reload
``` ```
Linux reload : Linux reload :
@@ -141,6 +141,18 @@ You can use the `USE_ANTIBOT` environment variable to add that kind of checks wh
## External blacklists ## External blacklists
### Distributed
**This feature is in beta and will be improved regularly.**
You can benefit from a distributed blacklist shared among all of the bunkerized-nginx users.
Each time a bunkerized-nginx instance detects a bad request, the offending IP is sent to a remote API and will enrich a database. An extract of the top malicious IPs is downloaded on a periodic basis and integrated into bunkerized-nginx as a blacklist.
This feature is controlled with the `USE_REMOTE_API=yes` environment variable.
**To avoid poisoning, in addition to the various security checks made by the API we only mark an IP as bad in the database if it has been seen by one of the honeypots under our control.**
### DNSBL ### DNSBL
Automatic checks on external DNS BlackLists are enabled by default with the `USE_DNSBL=yes` environment variable. The list of DNSBL zones is also configurable, you just need to edit the `DNSBL_LIST` environment variable which contains the following value by default `bl.blocklist.de problems.dnsbl.sorbs.net sbl.spamhaus.org xbl.spamhaus.org`. Automatic checks on external DNS BlackLists are enabled by default with the `USE_DNSBL=yes` environment variable. The list of DNSBL zones is also configurable, you just need to edit the `DNSBL_LIST` environment variable which contains the following value by default `bl.blocklist.de problems.dnsbl.sorbs.net sbl.spamhaus.org xbl.spamhaus.org`.
@@ -173,12 +185,16 @@ This list contains bad referrers domains known for spamming (downloaded from [he
### Requests ### Requests
To limit bruteforce attacks we decided to use the [rate limiting feature in nginx](https://www.nginx.com/blog/rate-limiting-nginx/) so attackers will be limited to X request(s)/s for the same resource. That kind of protection might be useful against other attacks too (e.g., blind SQL injection). To limit bruteforce attacks or rate limit access to your API you can use the "request limit" feature so attackers will be limited to X request(s) within a period of time for the same resource. That kind of protection might be useful against other attacks too (e.g., blind SQL injection).
Here is the list of related environment variables and their default value : Here is the list of related environment variables and their default value :
- `USE_LIMIT_REQ=yes` : enable/disable request limiting - `USE_LIMIT_REQ=yes` : enable/disable request limiting
- `LIMIT_REQ_RATE=1r/s` : the rate to apply for the same resource - `LIMIT_REQ_URL=` : the URL you want to protect, use `/` to apply the limit for all URLs
- `LIMIT_REQ_BURST=2` : the number of requests to put in a queue before effectively rejecting requests - `LIMIT_REQ_RATE=1r/s` : the rate to apply for the resource, valid periods are : `s` (second), `m` (minute), `h` (hour) and `d` (day)
- `LIMIT_REQ_BURST=5` : the number of requests to put in a queue before effectively rejecting requests
- `LIMIT_REQ_DELAY=1` : the number of seconds to wait before we process requests in the queue
Please note that you can apply different rates to different URLs by appending a number as suffix (more info [here](https://bunkerized-nginx.readthedocs.io/en/latest/environment_variables.html#requests-limiting)).
### Connections ### Connections

View File

@@ -46,7 +46,7 @@ Examples :
## ModSecurity configurations ## ModSecurity configurations
This special folder contains .conf files that will be loaded by ModSecurity before the OWASP Core Rule Set is loaded. The typical use case is when you want to specify exclusions for the CRS. This special folder contains .conf files that will be loaded by ModSecurity after the OWASP Core Rule Set is loaded. The typical use case is to edit loaded CRS rules to avoid false positives.
Location (container) : `/modsec-confs` Location (container) : `/modsec-confs`
Location (Linux) : `/opt/bunkerized-nginx/modsec-confs` Location (Linux) : `/opt/bunkerized-nginx/modsec-confs`
@@ -59,7 +59,7 @@ Examples :
## CRS configurations ## CRS configurations
This special folder contains .conf file that will be loaded by ModSecurity after the OWASP Core Rule Set is loaded. The typical use case is to edit loaded CRS rules to avoid false positives. This special folder contains .conf file that will be loaded by ModSecurity before the OWASP Core Rule Set is loaded. The typical use case is when you want to specify exclusions for the CRS.
Location (container) : `/modsec-crs-confs` Location (container) : `/modsec-crs-confs`
Location (Linux) : `/opt/bunkerized-nginx/modsec-crs-confs` Location (Linux) : `/opt/bunkerized-nginx/modsec-crs-confs`

View File

@@ -49,7 +49,7 @@ if [ ! -f "/etc/nginx/global.env" ] ; then
exit 1 exit 1
fi fi
# start temp nginx to solve Let's Encrypt challenges if needed # start temp nginx to solve Let's Encrypt challenges if needed and serve API
/opt/bunkerized-nginx/entrypoint/nginx-temp.sh /opt/bunkerized-nginx/entrypoint/nginx-temp.sh
# only do config if we are not in swarm/kubernetes mode # only do config if we are not in swarm/kubernetes mode
@@ -75,15 +75,16 @@ else
fi fi
# start crond # start crond
crond if [ "$SWARM_MODE" != "yes" ] && [ "$KUBERNETES_MODE" != "yes" ] ; then
crond
# wait until config has been generated if we are in swarm mode
if [ "$SWARM_MODE" = "yes" ] || [ "$KUBERNETES_MODE" = "yes" ] ; then
log "entrypoint" "INFO" "waiting until config has been generated ..."
while [ ! -f "/etc/nginx/autoconf" ] ; do
sleep 1
done
fi fi
# wait until config has been generated if we are in swarm mode
#if [ "$SWARM_MODE" = "yes" ] || [ "$KUBERNETES_MODE" = "yes" ] ; then
# log "entrypoint" "INFO" "waiting until config has been generated ..."
# while [ ! -f "/etc/nginx/autoconf" ] ; do
# sleep 1
# done
#fi
# stop temp config if needed # stop temp config if needed
if [ -f "/tmp/nginx-temp.pid" ] ; then if [ -f "/tmp/nginx-temp.pid" ] ; then

View File

@@ -87,3 +87,11 @@ fi
if [ "$(has_value BLOCK_ABUSERS yes)" != "" ] ; then if [ "$(has_value BLOCK_ABUSERS yes)" != "" ] ; then
/opt/bunkerized-nginx/jobs/main.py --name abusers --cache /opt/bunkerized-nginx/jobs/main.py --name abusers --cache
fi fi
# remote API
if [ "$(has_value USE_REMOTE_API yes)" != "" ] ; then
/opt/bunkerized-nginx/jobs/main.py --name remote-api-register --cache --server "$(grep '^REMOTE_API_SERVER=' /etc/nginx/global.env | cut -d '=' -f 2)" --version "$(cat /opt/bunkerized-nginx/VERSION)"
if [ $? -eq 0 ] ; then
/opt/bunkerized-nginx/jobs/main.py --name remote-api-database --cache --server "$(grep '^REMOTE_API_SERVER=' /etc/nginx/global.env | cut -d '=' -f 2)" --version "$(cat /opt/bunkerized-nginx/VERSION)" --id "$(cat /opt/bunkerized-nginx/cache/machine.id)"
fi
fi

View File

@@ -7,7 +7,7 @@
if [ "$(has_value AUTO_LETS_ENCRYPT yes)" != "" ] || [ "$SWARM_MODE" = "yes" ] || [ "$AUTO_LETS_ENCRYPT" = "yes" ] || [ "$KUBERNETES_MODE" = "yes" ] ; then if [ "$(has_value AUTO_LETS_ENCRYPT yes)" != "" ] || [ "$SWARM_MODE" = "yes" ] || [ "$AUTO_LETS_ENCRYPT" = "yes" ] || [ "$KUBERNETES_MODE" = "yes" ] ; then
cp /opt/bunkerized-nginx/confs/global/nginx-temp.conf /tmp/nginx-temp.conf cp /opt/bunkerized-nginx/confs/global/nginx-temp.conf /tmp/nginx-temp.conf
cp /opt/bunkerized-nginx/confs/global/api-temp.conf /tmp/api.conf cp /opt/bunkerized-nginx/confs/global/api-temp.conf /tmp/api.conf
if [ "$SWARM_MODE" = "yes" ] ; then if [ "$SWARM_MODE" = "yes" ] || [ "$KUBERNETES_MODE" = "yes" ] ; then
replace_in_file "/tmp/nginx-temp.conf" "%USE_API%" "include /tmp/api.conf;" replace_in_file "/tmp/nginx-temp.conf" "%USE_API%" "include /tmp/api.conf;"
replace_in_file "/tmp/api.conf" "%API_URI%" "$API_URI" replace_in_file "/tmp/api.conf" "%API_URI%" "$API_URI"
API_WHITELIST_IP="${API_WHITELIST_IP-192.168.0.0/16 172.16.0.0/12 10.0.0.0/8}" API_WHITELIST_IP="${API_WHITELIST_IP-192.168.0.0/16 172.16.0.0/12 10.0.0.0/8}"
@@ -18,10 +18,15 @@ if [ "$(has_value AUTO_LETS_ENCRYPT yes)" != "" ] || [ "$SWARM_MODE" = "yes" ] |
fi fi
HTTP_PORT="${HTTP_PORT-8080}" HTTP_PORT="${HTTP_PORT-8080}"
replace_in_file "/tmp/nginx-temp.conf" "%HTTP_PORT%" "$HTTP_PORT" replace_in_file "/tmp/nginx-temp.conf" "%HTTP_PORT%" "$HTTP_PORT"
nginx -c /tmp/nginx-temp.conf if [ "$SWARM_MODE" = "yes" ] || [ "$KUBERNETES_MODE" = "yes" ] ; then
if [ "$?" -eq 0 ] ; then log "nginx-temp" "INFO" "start temporary nginx server and wait for autoconf events..."
echo "[*] Successfully started temp nginx" nginx -c /tmp/nginx-temp.conf -g 'daemon off;'
else else
echo "[!] Can't start temp nginx" nginx -c /tmp/nginx-temp.conf -g 'daemon on;'
if [ "$?" -eq 0 ] ; then
log "nginx-temp" "INFO" "successfully started temp nginx"
else
log "nginx-temp" "ERROR" "can't start temp nginx"
fi
fi fi
fi fi

View File

@@ -29,7 +29,7 @@ services:
- REVERSE_PROXY_HOST=https://mymoodle:8443 - REVERSE_PROXY_HOST=https://mymoodle:8443
mymoodle: mymoodle:
image: bitnami/moodle image: bitnami/moodle:latest
restart: always restart: always
volumes: volumes:
- ./moodle-files:/bitnami/moodle - ./moodle-files:/bitnami/moodle
@@ -43,9 +43,10 @@ services:
- MOODLE_DATABASE_NAME=moodle - MOODLE_DATABASE_NAME=moodle
- MOODLE_DATABASE_USER=user - MOODLE_DATABASE_USER=user
- MOODLE_DATABASE_PASSWORD=db-user-pwd # replace with a stronger password (must match MYSQL_PASSWORD) - MOODLE_DATABASE_PASSWORD=db-user-pwd # replace with a stronger password (must match MYSQL_PASSWORD)
depends_on:
- mydb
mydb: mydb:
image: mariadb image: mariadb:10.5
restart: always restart: always
volumes: volumes:
- ./db-data:/var/lib/mysql - ./db-data:/var/lib/mysql
@@ -54,3 +55,5 @@ services:
- MYSQL_DATABASE=moodle - MYSQL_DATABASE=moodle
- MYSQL_USER=user - MYSQL_USER=user
- MYSQL_PASSWORD=db-user-pwd # replace with a stronger password (must match MOODLE_DATABASE_PASSWORD) - MYSQL_PASSWORD=db-user-pwd # replace with a stronger password (must match MOODLE_DATABASE_PASSWORD)
- MARIADB_CHARACTER_SET=utf8mb4
- MARIADB_COLLATE=utf8mb4_unicode_ci

View File

@@ -28,7 +28,7 @@ services:
- admin.example.com_SERVE_FILES=no - admin.example.com_SERVE_FILES=no
- admin.example.com_USE_REVERSE_PROXY=yes - admin.example.com_USE_REVERSE_PROXY=yes
- admin.example.com_REVERSE_PROXY_URL=/admin/ # change it to something hard to guess - admin.example.com_REVERSE_PROXY_URL=/admin/ # change it to something hard to guess
- admin.example.com_REVERSE_PROXY_HOST=http://myui:5000/ - admin.example.com_REVERSE_PROXY_HOST=http://myui:5000
- admin.example.com_REVERSE_PROXY_HEADERS=X-Script-Name /admin # must match REVERSE_PROXY_URL - admin.example.com_REVERSE_PROXY_HEADERS=X-Script-Name /admin # must match REVERSE_PROXY_URL
- admin.example.com_USE_MODSECURITY=no - admin.example.com_USE_MODSECURITY=no
labels: labels:

View File

@@ -45,6 +45,7 @@ class Templator :
real_config["NGINX_PREFIX"] = self.__target_path real_config["NGINX_PREFIX"] = self.__target_path
if self.__config_global["MULTISITE"] == "yes" and type == "site" : if self.__config_global["MULTISITE"] == "yes" and type == "site" :
real_config["NGINX_PREFIX"] += first_server + "/" real_config["NGINX_PREFIX"] += first_server + "/"
if not real_config["ROOT_FOLDER"].endswith("/" + first_server) :
real_config["ROOT_FOLDER"] += "/" + first_server real_config["ROOT_FOLDER"] += "/" + first_server
if real_config["ROOT_SITE_SUBFOLDER"] != "" : if real_config["ROOT_SITE_SUBFOLDER"] != "" :
real_config["ROOT_FOLDER"] += "/" + real_config["ROOT_SITE_SUBFOLDER"] real_config["ROOT_FOLDER"] += "/" + real_config["ROOT_SITE_SUBFOLDER"]

View File

@@ -1,33 +1,19 @@
#!/bin/sh #!/bin/sh
# prepare /www # prepare folders
mkdir /www
chown -R root:nginx /www
chmod -R 770 /www
# prepare /acme-challenge
mkdir /acme-challenge
chown root:nginx /acme-challenge
chmod 770 /acme-challenge
# prepare /cache
mkdir /cache
chown root:nginx /cache
chmod 770 /cache
# prepare /plugins
mkdir /plugins
chown root:nginx /plugins
chmod 770 /plugins
# prepare symlinks
folders="www http-confs server-confs modsec-confs modsec-crs-confs cache pre-server-confs acme-challenge plugins" folders="www http-confs server-confs modsec-confs modsec-crs-confs cache pre-server-confs acme-challenge plugins"
for folder in $folders ; do for folder in $folders ; do
if [ -e "/opt/bunkerized-nginx/$folder" ] ; then if [ -e "/opt/bunkerized-nginx/${folder}" ] ; then
rm -rf "/opt/bunkerized-nginx/$folder" rm -rf "/opt/bunkerized-nginx/${folder}"
fi fi
mkdir "/${folder}"
chown root:nginx "/${folder}"
chmod 770 "/${folder}"
ln -s "/$folder" "/opt/bunkerized-nginx/$folder" ln -s "/$folder" "/opt/bunkerized-nginx/$folder"
done done
mkdir -p /acme-challenge/.well-known/acme-challenge
chown -R root:nginx /acme-challenge
chmod -R 770 /acme-challenge
# prepare /var/log # prepare /var/log
rm -f /var/log/nginx/* rm -f /var/log/nginx/*

View File

@@ -290,6 +290,8 @@ elif [ "$(grep CentOS /etc/os-release)" != "" ] ; then
OS="centos" OS="centos"
elif [ "$(grep Fedora /etc/os-release)" != "" ] ; then elif [ "$(grep Fedora /etc/os-release)" != "" ] ; then
OS="fedora" OS="fedora"
elif [ "$(grep Arch /etc/os-release)" != "" ] ; then
OS="archlinux"
elif [ "$(grep Alpine /etc/os-release)" != "" ] ; then elif [ "$(grep Alpine /etc/os-release)" != "" ] ; then
OS="alpine" OS="alpine"
fi fi
@@ -346,6 +348,11 @@ module_hotfixes=true"
elif [ "$OS" = "fedora" ] ; then elif [ "$OS" = "fedora" ] ; then
echo "[*] Install nginx" echo "[*] Install nginx"
do_and_check_cmd dnf install -y nginx do_and_check_cmd dnf install -y nginx
elif [ "$OS" = "archlinux" ] ; then
echo "[*] Update pacman DB"
do_and_check_cmd pacman -Sy
echo "[*] Install nginx"
do_and_check_cmd pacman -S --noconfirm nginx
elif [ "$OS" = "alpine" ] ; then elif [ "$OS" = "alpine" ] ; then
echo "[*] Add nginx official repository" echo "[*] Add nginx official repository"
get_sign_repo_key_rsa > /etc/apk/keys/nginx_signing.rsa.pub get_sign_repo_key_rsa > /etc/apk/keys/nginx_signing.rsa.pub
@@ -373,6 +380,8 @@ fi
echo "[*] Update packet list" echo "[*] Update packet list"
if [ "$OS" = "debian" ] || [ "$OS" = "ubuntu" ] ; then if [ "$OS" = "debian" ] || [ "$OS" = "ubuntu" ] ; then
do_and_check_cmd apt update do_and_check_cmd apt update
elif [ "$OS" = "archlinux" ] ; then
do_and_check_cmd pacman -Sy
fi fi
echo "[*] Install compilation dependencies" echo "[*] Install compilation dependencies"
if [ "$OS" = "debian" ] || [ "$OS" = "ubuntu" ] ; then if [ "$OS" = "debian" ] || [ "$OS" = "ubuntu" ] ; then
@@ -380,11 +389,14 @@ if [ "$OS" = "debian" ] || [ "$OS" = "ubuntu" ] ; then
DEBIAN_FRONTEND=noninteractive do_and_check_cmd apt install -y $DEBIAN_DEPS DEBIAN_FRONTEND=noninteractive do_and_check_cmd apt install -y $DEBIAN_DEPS
elif [ "$OS" = "centos" ] ; then elif [ "$OS" = "centos" ] ; then
do_and_check_cmd yum install -y epel-release do_and_check_cmd yum install -y epel-release
CENTOS_DEPS="git autoconf pkg-config pcre-devel automake libtool gcc-c++ make gd-devel openssl-devel wget brotli-devel gnupg patch readline-devel" CENTOS_DEPS="git autoconf pkg-config pcre-devel automake libtool gcc-c++ make gd-devel openssl-devel wget brotli-devel gnupg patch readline-devel ca-certificates"
do_and_check_cmd yum install -y $CENTOS_DEPS do_and_check_cmd yum install -y $CENTOS_DEPS
elif [ "$OS" = "fedora" ] ; then elif [ "$OS" = "fedora" ] ; then
FEDORA_DEPS="git autoconf pkg-config pcre-devel automake libtool gcc-c++ make gd-devel openssl-devel wget brotli-devel gnupg libxslt-devel perl-ExtUtils-Embed gperftools-devel patch readline-devel" FEDORA_DEPS="git autoconf pkg-config pcre-devel automake libtool gcc-c++ make gd-devel openssl-devel wget brotli-devel gnupg libxslt-devel perl-ExtUtils-Embed gperftools-devel patch readline-devel"
do_and_check_cmd dnf install -y $FEDORA_DEPS do_and_check_cmd dnf install -y $FEDORA_DEPS
elif [ "$OS" = "archlinux" ] ; then
ARCHLINUX_DEPS="git autoconf pkgconf pcre2 automake libtool gcc make gd openssl wget brotli gnupg libxslt patch readline"
do_and_check_cmd pacman -S --noconfirm $ARCHLINUX_DEPS
elif [ "$OS" = "alpine" ] ; then elif [ "$OS" = "alpine" ] ; then
ALPINE_DEPS="git build autoconf libtool automake git geoip-dev yajl-dev g++ gcc curl-dev libxml2-dev pcre-dev make linux-headers musl-dev gd-dev gnupg brotli-dev openssl-dev patch readline-dev" ALPINE_DEPS="git build autoconf libtool automake git geoip-dev yajl-dev g++ gcc curl-dev libxml2-dev pcre-dev make linux-headers musl-dev gd-dev gnupg brotli-dev openssl-dev patch readline-dev"
do_and_check_cmd apk add --no-cache --virtual build $ALPINE_DEPS do_and_check_cmd apk add --no-cache --virtual build $ALPINE_DEPS
@@ -459,7 +471,10 @@ CHANGE_DIR="/tmp/bunkerized-nginx/libmaxminddb-1.6.0" do_and_check_cmd make inst
# Download, compile and install ModSecurity # Download, compile and install ModSecurity
echo "[*] Clone SpiderLabs/ModSecurity" echo "[*] Clone SpiderLabs/ModSecurity"
git_secure_clone https://github.com/SpiderLabs/ModSecurity.git bf881a4eda343d37629e39ede5e28b70dc4067c0 # TODO : looks like memory leak is happening with ModSecurity 3.0.5
# so we keep 3.0.4 until a fixed version is available
#git_secure_clone https://github.com/SpiderLabs/ModSecurity.git bf881a4eda343d37629e39ede5e28b70dc4067c0
git_secure_clone https://github.com/SpiderLabs/ModSecurity.git 753145fbd1d6751a6b14fdd700921eb3cc3a1d35
echo "[*] Compile and install ModSecurity" echo "[*] Compile and install ModSecurity"
# temp fix : Debian run it twice # temp fix : Debian run it twice
cd /tmp/bunkerized-nginx/ModSecurity && ./build.sh > /dev/null 2>&1 cd /tmp/bunkerized-nginx/ModSecurity && ./build.sh > /dev/null 2>&1
@@ -607,6 +622,12 @@ git_secure_clone https://github.com/openresty/lua-resty-redis.git 91585affcd9a8d
echo "[*] Install lua-resty-redis" echo "[*] Install lua-resty-redis"
CHANGE_DIR="/tmp/bunkerized-nginx/lua-resty-redis" do_and_check_cmd make PREFIX=/opt/bunkerized-nginx/deps LUA_LIB_DIR=/opt/bunkerized-nginx/deps/lib/lua install CHANGE_DIR="/tmp/bunkerized-nginx/lua-resty-redis" do_and_check_cmd make PREFIX=/opt/bunkerized-nginx/deps LUA_LIB_DIR=/opt/bunkerized-nginx/deps/lib/lua install
# Download and install lua-resty-upload
echo "[*] Clone openresty/lua-resty-upload"
git_secure_clone https://github.com/openresty/lua-resty-upload.git 7baca92c7e741979ae5857989bbf6cc0402d6126
echo "[*] Install lua-resty-upload"
CHANGE_DIR="/tmp/bunkerized-nginx/lua-resty-upload" do_and_check_cmd make PREFIX=/opt/bunkerized-nginx/deps LUA_LIB_DIR=/opt/bunkerized-nginx/deps/lib/lua install
# Download nginx and decompress sources # Download nginx and decompress sources
echo "[*] Download nginx-${NGINX_VERSION}.tar.gz" echo "[*] Download nginx-${NGINX_VERSION}.tar.gz"
do_and_check_cmd wget -O "/tmp/bunkerized-nginx/nginx-${NGINX_VERSION}.tar.gz" "https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz" do_and_check_cmd wget -O "/tmp/bunkerized-nginx/nginx-${NGINX_VERSION}.tar.gz" "https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz"
@@ -663,6 +684,9 @@ elif [ "$OS" = "fedora" ] ; then
do_and_check_cmd dnf install -y $FEDORA_DEPS do_and_check_cmd dnf install -y $FEDORA_DEPS
# Temp fix # Temp fix
do_and_check_cmd cp /usr/lib64/nginx/modules/ngx_stream_module.so /usr/lib/nginx/modules/ngx_stream_module.so do_and_check_cmd cp /usr/lib64/nginx/modules/ngx_stream_module.so /usr/lib/nginx/modules/ngx_stream_module.so
elif [ "$OS" = "archlinux" ] ; then
ARCHLINUX_DEPS="certbot git cronie curl python python-pip procps sudo"
do_and_check_cmd pacman -S --noconfirm $ARCHLINUX_DEPS
elif [ "$OS" = "alpine" ] ; then elif [ "$OS" = "alpine" ] ; then
ALPINE_DEPS="certbot bash libgcc yajl libstdc++ openssl py3-pip git" ALPINE_DEPS="certbot bash libgcc yajl libstdc++ openssl py3-pip git"
do_and_check_cmd apk add --no-cache $ALPINE_DEPS do_and_check_cmd apk add --no-cache $ALPINE_DEPS
@@ -671,7 +695,7 @@ fi
# Clone the repo # Clone the repo
if [ "$OS" != "alpine" ] && [ ! -d "/tmp/bunkerized-nginx-test" ] ; then if [ "$OS" != "alpine" ] && [ ! -d "/tmp/bunkerized-nginx-test" ] ; then
echo "[*] Clone bunkerity/bunkerized-nginx" echo "[*] Clone bunkerity/bunkerized-nginx"
CHANGE_DIR="/tmp" do_and_check_cmd git_secure_clone https://github.com/bunkerity/bunkerized-nginx.git 8808f161c5f92911ae485623d96f6ef24fe42ffe CHANGE_DIR="/tmp" do_and_check_cmd git_secure_clone https://github.com/bunkerity/bunkerized-nginx.git 3d2f5e2389e5f75131ae22f822a673b92cb12cca
# TODO : dev only # TODO : dev only
#CHANGE_DIR="/tmp" do_and_check_cmd git clone https://github.com/bunkerity/bunkerized-nginx.git #CHANGE_DIR="/tmp" do_and_check_cmd git clone https://github.com/bunkerity/bunkerized-nginx.git
#CHANGE_DIR="/tmp/bunkerized-nginx" do_and_check_cmd git checkout dev #CHANGE_DIR="/tmp/bunkerized-nginx" do_and_check_cmd git checkout dev
@@ -740,6 +764,10 @@ fi
echo "[*] Copy bunkerized-nginx" echo "[*] Copy bunkerized-nginx"
do_and_check_cmd cp /tmp/bunkerized-nginx/helpers/bunkerized-nginx /usr/local/bin do_and_check_cmd cp /tmp/bunkerized-nginx/helpers/bunkerized-nginx /usr/local/bin
# Copy VERSION
echo "[*] Copy VERSION"
do_and_check_cmd cp /tmp/bunkerized-nginx/VERSION /opt/bunkerized-nginx
# Replace old nginx.service file # Replace old nginx.service file
if [ "$OS" != "alpine" ] ; then if [ "$OS" != "alpine" ] ; then
do_and_check_cmd mv /lib/systemd/system/nginx.service /lib/systemd/system/nginx.service.bak do_and_check_cmd mv /lib/systemd/system/nginx.service /lib/systemd/system/nginx.service.bak
@@ -797,7 +825,7 @@ fi
# Create acme-challenge folder # Create acme-challenge folder
if [ ! -d "/opt/bunkerized-nginx/acme-challenge" ] ; then if [ ! -d "/opt/bunkerized-nginx/acme-challenge" ] ; then
echo "[*] Create /opt/bunkerized-nginx/acme-challenge folder" echo "[*] Create /opt/bunkerized-nginx/acme-challenge folder"
do_and_check_cmd mkdir /opt/bunkerized-nginx/acme-challenge do_and_check_cmd mkdir -p /opt/bunkerized-nginx/acme-challenge/.well-known/acme-challenge
fi fi
# Create plugins folder # Create plugins folder
@@ -812,11 +840,12 @@ do_and_check_cmd chown -R root:nginx /opt/bunkerized-nginx
do_and_check_cmd find /opt/bunkerized-nginx -type f -exec chmod 0740 {} \; do_and_check_cmd find /opt/bunkerized-nginx -type f -exec chmod 0740 {} \;
do_and_check_cmd find /opt/bunkerized-nginx -type d -exec chmod 0750 {} \; do_and_check_cmd find /opt/bunkerized-nginx -type d -exec chmod 0750 {} \;
do_and_check_cmd chmod 770 /opt/bunkerized-nginx/cache do_and_check_cmd chmod 770 /opt/bunkerized-nginx/cache
do_and_check_cmd chmod 770 /opt/bunkerized-nginx/acme-challenge do_and_check_cmd chmod -R 770 /opt/bunkerized-nginx/acme-challenge
do_and_check_cmd chmod 750 /opt/bunkerized-nginx/entrypoint/* do_and_check_cmd chmod 750 /opt/bunkerized-nginx/entrypoint/*
do_and_check_cmd chmod 750 /opt/bunkerized-nginx/gen/main.py do_and_check_cmd chmod 750 /opt/bunkerized-nginx/gen/main.py
do_and_check_cmd chmod 750 /opt/bunkerized-nginx/jobs/main.py do_and_check_cmd chmod 750 /opt/bunkerized-nginx/jobs/main.py
do_and_check_cmd chmod 750 /opt/bunkerized-nginx/jobs/reload.py do_and_check_cmd chmod 750 /opt/bunkerized-nginx/jobs/reload.py
do_and_check_cmd chmod 750 /opt/bunkerized-nginx/jobs/certbot-*.py
# Set permissions for /usr/local/bin/bunkerized-nginx # Set permissions for /usr/local/bin/bunkerized-nginx
do_and_check_cmd chown root:root /usr/local/bin/bunkerized-nginx do_and_check_cmd chown root:root /usr/local/bin/bunkerized-nginx
do_and_check_cmd chmod 750 /usr/local/bin/bunkerized-nginx do_and_check_cmd chmod 750 /usr/local/bin/bunkerized-nginx

View File

@@ -27,7 +27,7 @@ spec:
- name: KUBERNETES_MODE - name: KUBERNETES_MODE
value: "yes" value: "yes"
- name: DNS_RESOLVERS - name: DNS_RESOLVERS
value: "kube-dns.kube-system.svc.cluster.local" value: "coredns.kube-system.svc.cluster.local"
- name: USE_API - name: USE_API
value: "yes" value: "yes"
- name: API_URI - name: API_URI
@@ -36,36 +36,6 @@ spec:
value: "" value: ""
- name: MULTISITE - name: MULTISITE
value: "yes" value: "yes"
volumeMounts:
- name: confs
mountPath: /etc/nginx
readOnly: true
- name: letsencrypt
mountPath: /etc/letsencrypt
readOnly: true
- name: acme-challenge
mountPath: /acme-challenge
readOnly: true
- name: www
mountPath: /www
readOnly: true
volumes:
- name: confs
hostPath:
path: /shared/confs
type: Directory
- name: letsencrypt
hostPath:
path: /shared/letsencrypt
type: Directory
- name: acme-challenge
hostPath:
path: /shared/acme-challenge
type: Directory
- name: www
hostPath:
path: /shared/www
type: Directory
--- ---
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
@@ -82,6 +52,17 @@ spec:
selector: selector:
name: bunkerized-nginx name: bunkerized-nginx
--- ---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-nginx
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
---
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
@@ -99,6 +80,30 @@ spec:
app: bunkerized-nginx-autoconf app: bunkerized-nginx-autoconf
spec: spec:
serviceAccountName: bunkerized-nginx-ingress-controller serviceAccountName: bunkerized-nginx-ingress-controller
volumes:
- name: vol-nginx
persistentVolumeClaim:
claimName: pvc-nginx
initContainers:
- name: change-data-dir-permissions
command:
- chown
- -R
- 101:101
- /etc/letsencrypt
- /cache
image: busybox
volumeMounts:
- name: vol-nginx
mountPath: /etc/letsencrypt
subPath: letsencrypt
- name: vol-nginx
mountPath: /cache
subPath: cache
securityContext:
runAsNonRoot: false
runAsUser: 0
runAsGroup: 0
containers: containers:
- name: bunkerized-nginx-autoconf - name: bunkerized-nginx-autoconf
image: bunkerity/bunkerized-nginx-autoconf image: bunkerity/bunkerized-nginx-autoconf
@@ -108,22 +113,9 @@ spec:
- name: API_URI - name: API_URI
value: "/ChangeMeToSomethingHardToGuess" value: "/ChangeMeToSomethingHardToGuess"
volumeMounts: volumeMounts:
- name: confs - name: vol-nginx
mountPath: /etc/nginx
- name: letsencrypt
mountPath: /etc/letsencrypt mountPath: /etc/letsencrypt
- name: acme-challenge subPath: letsencrypt
mountPath: /acme-challenge - name: vol-nginx
volumes: mountPath: /cache
- name: confs subPath: cache
hostPath:
path: /shared/confs
type: Directory
- name: letsencrypt
hostPath:
path: /shared/letsencrypt
type: Directory
- name: acme-challenge
hostPath:
path: /shared/acme-challenge
type: Directory

View File

@@ -13,11 +13,6 @@ services:
target: 8443 target: 8443
mode: host mode: host
protocol: tcp protocol: tcp
volumes:
- /shared/confs:/etc/nginx:ro
- /shared/www:/www:ro
- /shared/letsencrypt:/etc/letsencrypt:ro
- /shared/acme-challenge:/acme-challenge:ro
environment: environment:
- SWARM_MODE=yes - SWARM_MODE=yes
- USE_API=yes - USE_API=yes
@@ -41,9 +36,8 @@ services:
image: bunkerity/bunkerized-nginx-autoconf image: bunkerity/bunkerized-nginx-autoconf
volumes: volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro - /var/run/docker.sock:/var/run/docker.sock:ro
- /shared/confs:/etc/nginx - cache-vol:/cache
- /shared/letsencrypt:/etc/letsencrypt - certs-vol:/etc/letsencrypt
- /shared/acme-challenge:/acme-challenge
environment: environment:
- SWARM_MODE=yes - SWARM_MODE=yes
- API_URI=/ChangeMeToSomethingHardToGuess # must match API_URI from nginx - API_URI=/ChangeMeToSomethingHardToGuess # must match API_URI from nginx
@@ -65,3 +59,7 @@ networks:
driver: overlay driver: overlay
attachable: true attachable: true
name: services-net name: services-net
# And the volumes too
volumes:
cache-vol:
certs-vol:

View File

@@ -6,7 +6,7 @@ class CertbotNew(Job) :
def __init__(self, redis_host=None, copy_cache=False, domain="", email="", staging=False) : def __init__(self, redis_host=None, copy_cache=False, domain="", email="", staging=False) :
name = "certbot-new" name = "certbot-new"
data = ["certbot", "certonly", "--webroot", "-w", "/opt/bunkerized-nginx/acme-challenge", "-n", "-d", domain, "--email", email, "--agree-tos"] data = ["certbot", "certonly", "--manual", "--preferred-challenges=http", "--manual-auth-hook", "/opt/bunkerized-nginx/jobs/certbot-auth.py", "--manual-cleanup-hook", "/opt/bunkerized-nginx/jobs/certbot-cleanup.py", "-n", "-d", domain, "--email", email, "--agree-tos"]
if staging : if staging :
data.append("--staging") data.append("--staging")
type = "exec" type = "exec"

View File

@@ -15,10 +15,13 @@ class ReloadRet(enum.Enum) :
class JobManagement() : class JobManagement() :
def __init__(self) : def __init__(self) :
self.__docker_nginx = False
self.__local_nginx = False self.__local_nginx = False
if os.path.isfile("/usr/sbin/nginx") and os.path.isfile("/tmp/nginx.pid") :
self.__local_nginx = True
self.__autoconf_socket = None self.__autoconf_socket = None
if os.path.isfile("/usr/sbin/nginx") and os.path.isfile("/tmp/nginx.pid") and not os.path.isfile("/opt/bunkerized-nginx/ui/linux.sh") :
self.__docker_nginx = True
if os.path.isfile("/usr/sbin/nginx") and os.path.isfile("/tmp/nginx.pid") and os.path.isfile("/opt/bunkerized-nginx/ui/linux.sh") :
self.__local_nginx = True
if os.path.exists("/tmp/autoconf.sock") and stat.S_ISSOCK(os.stat("/tmp/autoconf.sock").st_mode) : if os.path.exists("/tmp/autoconf.sock") and stat.S_ISSOCK(os.stat("/tmp/autoconf.sock").st_mode) :
self.__autoconf_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.__autoconf_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.__autoconf_socket.connect("/tmp/autoconf.sock") self.__autoconf_socket.connect("/tmp/autoconf.sock")
@@ -43,10 +46,22 @@ class JobManagement() :
return True return True
def reload(self) : def reload(self) :
if self.__autoconf_socket != None : if self.__docker_nginx :
proc = subprocess.run(["/usr/sbin/nginx", "-s", "reload"], capture_output=True)
if proc.returncode != 0 :
log("reload", "ERROR", "can't reload nginx (status code = " + str(proc.returncode) + ")")
if len(proc.stdout.decode("ascii")) > 1 :
log("reload", "ERROR", proc.stdout.decode("ascii"))
if len(proc.stderr.decode("ascii")) > 1 :
log("reload", "ERROR", proc.stderr.decode("ascii"))
return ReloadRet.KO
return ReloadRet.OK
elif self.__autoconf_socket != None :
if self.__autoconf_order(b"reload") : if self.__autoconf_order(b"reload") :
return ReloadRet.OK return ReloadRet.OK
return ReloadRet.KO return ReloadRet.KO
elif self.__local_nginx : elif self.__local_nginx :
proc = subprocess.run(["sudo", "/opt/bunkerized-nginx/ui/linux.sh", "reload"], capture_output=True) proc = subprocess.run(["sudo", "/opt/bunkerized-nginx/ui/linux.sh", "reload"], capture_output=True)
if proc.returncode != 0 : if proc.returncode != 0 :
@@ -57,11 +72,12 @@ class JobManagement() :
log("reload", "ERROR", proc.stderr.decode("ascii")) log("reload", "ERROR", proc.stderr.decode("ascii"))
return ReloadRet.KO return ReloadRet.KO
return ReloadRet.OK return ReloadRet.OK
return ReloadRet.NO return ReloadRet.NO
class Job(abc.ABC) : class Job(abc.ABC) :
def __init__(self, name, data, filename=None, redis_host=None, redis_ex=86400, type="line", regex=r"^.+$", copy_cache=False) : def __init__(self, name, data, filename=None, redis_host=None, redis_ex=86400, type="line", regex=r"^.+$", copy_cache=False, json_data=None, method="GET") :
self._name = name self._name = name
self._data = data self._data = data
self._filename = filename self._filename = filename
@@ -76,11 +92,13 @@ class Job(abc.ABC) :
self._type = type self._type = type
self._regex = regex self._regex = regex
self._copy_cache = copy_cache self._copy_cache = copy_cache
self._json_data = json_data
self._method = method
def run(self) : def run(self) :
ret = JobRet.KO ret = JobRet.KO
try : try :
if self._type == "line" or self._type == "file" : if self._type in ["line", "file", "json"] :
if self._copy_cache : if self._copy_cache :
ret = self.__from_cache() ret = self.__from_cache()
if ret != JobRet.KO : if ret != JobRet.KO :
@@ -98,7 +116,11 @@ class Job(abc.ABC) :
if self._redis == None : if self._redis == None :
if os.path.isfile("/tmp/" + self._filename) : if os.path.isfile("/tmp/" + self._filename) :
os.remove("/tmp/" + self._filename) os.remove("/tmp/" + self._filename)
file = open("/tmp/" + self._filename, "ab") # mode = "a"
# if self._type == "file" :
# mode = "ab"
# file = open("/tmp/" + self._filename, mode)
file = open("/tmp/" + self._filename, "wb")
elif self._redis != None : elif self._redis != None :
pipe = self._redis.pipeline() pipe = self._redis.pipeline()
@@ -107,27 +129,31 @@ class Job(abc.ABC) :
for url in self._data : for url in self._data :
data = self.__download_data(url) data = self.__download_data(url)
for chunk in data : for chunk in data :
if self._type == "line" : if isinstance(chunk, bytes) and self._type in ["line", "json"] :
if not re.match(self._regex, chunk.decode("utf-8")) : chunk = chunk.decode("utf-8")
if self._type in ["line", "json"] :
if not re.match(self._regex, chunk) :
#log(self._name, "WARN", chunk + " doesn't match regex " + self._regex)
continue continue
chunks = self._edit(chunk)
if self._redis == None : if self._redis == None :
if self._type == "line" : if self._type in ["line", "json"] :
for chunk in chunks : chunks = self._edit(chunk)
file.write(chunk + b"\n") for more_chunk in chunks :
file.write(more_chunk.encode("utf-8") + b"\n")
else : else :
file.write(chunk) file.write(chunk)
else : else :
if self._type == "line" : if self._type in ["line", "json"] :
for chunk in chunks : chunks = self._edit(chunk)
pipe.set(self._name + "_" + chunk, "1", ex=self._redis_ex) for more_chunk in chunks :
pipe.set(self._name + "_" + more_chunk, "1", ex=self._redis_ex)
else : else :
pipe.set(self._name + "_" + chunk, "1", ex=self._redis_ex) pipe.set(self._name + "_" + chunk, "1", ex=self._redis_ex)
count += 1 count += 1
if self._redis == None : if self._redis == None :
file.close() file.close()
if count > 0 : #if count > 0 :
shutil.copyfile("/tmp/" + self._filename, "/etc/nginx/" + self._filename) shutil.copyfile("/tmp/" + self._filename, "/etc/nginx/" + self._filename)
os.remove("/tmp/" + self._filename) os.remove("/tmp/" + self._filename)
return JobRet.OK_RELOAD return JobRet.OK_RELOAD
@@ -139,11 +165,16 @@ class Job(abc.ABC) :
return JobRet.KO return JobRet.KO
def __download_data(self, url) : def __download_data(self, url) :
r = requests.get(url, stream=True) r = requests.request(self._method, url, stream=True, json=self._json_data)
if not r or r.status_code != 200 : if not r or r.status_code != 200 :
raise Exception("can't download data at " + url) raise Exception("can't download data at " + url)
if self._type == "line" : if self._type == "line" :
return r.iter_lines() return r.iter_lines(decode_unicode=True)
if self._type == "json" :
try :
return self._json(r.json())
except :
raise Exception("can't decode json from " + url)
return r.iter_content(chunk_size=8192) return r.iter_content(chunk_size=8192)
def __exec(self) : def __exec(self) :
@@ -161,6 +192,9 @@ class Job(abc.ABC) :
self._callback(True) self._callback(True)
return JobRet.OK_RELOAD return JobRet.OK_RELOAD
def _json(self, data) :
return data
def _edit(self, chunk) : def _edit(self, chunk) :
return [chunk] return [chunk]
@@ -177,7 +211,7 @@ class Job(abc.ABC) :
return JobRet.OK_RELOAD return JobRet.OK_RELOAD
return JobRet.OK_NO_RELOAD return JobRet.OK_NO_RELOAD
if self._redis != None and self._type == "line" : if self._redis != None and self._type in ["line", "json"] :
with open("/opt/bunkerized-nginx/cache/" + self._filename) as f : with open("/opt/bunkerized-nginx/cache/" + self._filename) as f :
pipe = self._redis.pipeline() pipe = self._redis.pipeline()
while True : while True :
@@ -194,7 +228,7 @@ class Job(abc.ABC) :
def __to_cache(self) : def __to_cache(self) :
if self._redis == None or self._type == "file" : if self._redis == None or self._type == "file" :
shutil.copyfile("/etc/nginx/" + self._filename, "/opt/bunkerized-nginx/cache/" + self._filename) shutil.copyfile("/etc/nginx/" + self._filename, "/opt/bunkerized-nginx/cache/" + self._filename)
elif self._redis != None and self._type == "line" : elif self._redis != None and self._type in ["line", "json"] :
if os.path.isfile("/opt/bunkerized-nginx/cache/" + self._filename) : if os.path.isfile("/opt/bunkerized-nginx/cache/" + self._filename) :
os.remove("/opt/bunkerized-nginx/cache/" + self._filename) os.remove("/opt/bunkerized-nginx/cache/" + self._filename)
with open("/opt/bunkerized-nginx/cache/" + self._filename, "a") as f : with open("/opt/bunkerized-nginx/cache/" + self._filename, "a") as f :

View File

@@ -12,4 +12,4 @@ class Referrers(Job) :
super().__init__(name, data, filename, redis_host=redis_host, redis_ex=redis_ex, type=type, regex=regex, copy_cache=copy_cache) super().__init__(name, data, filename, redis_host=redis_host, redis_ex=redis_ex, type=type, regex=regex, copy_cache=copy_cache)
def _edit(self, chunk) : def _edit(self, chunk) :
return [chunk.replace(b".", b"%.").replace(b"-", b"%-")] return [chunk.replace(".", "%.").replace("-", "%-")]

16
jobs/RemoteApiDatabase.py Normal file
View File

@@ -0,0 +1,16 @@
from Job import Job
class RemoteApiDatabase(Job) :

	# Job that fetches the distributed blacklist from the remote API's /db
	# endpoint and keeps every entry matching a dotted-quad IPv4 pattern.
	# The machine "id" and bunkerized-nginx "version" are sent as the JSON
	# request payload so the server can identify the caller.
	def __init__(self, server="", version="", id="", redis_host=None, copy_cache=False) :
		payload = {"version": version, "id": id}
		super().__init__(
			"remote-api-database",
			[server + "/db"],
			"remote-api.db",
			type="json",
			redis_host=redis_host,
			redis_ex=3600,
			regex=r"^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$",
			copy_cache=copy_cache,
			json_data=payload
		)

	# The API wraps its result list in a {"data": [...]} envelope :
	# unwrap it so the base Job iterates over the IP entries directly.
	def _json(self, data) :
		return data["data"]

16
jobs/RemoteApiRegister.py Normal file
View File

@@ -0,0 +1,16 @@
from Job import Job
class RemoteApiRegister(Job) :

	# Job that registers this machine against the remote API's /register
	# endpoint (HTTP POST) and stores the returned machine identifier
	# (a 256-char lowercase hex string) into machine.id.
	def __init__(self, server="", version="") :
		payload = {"version": version}
		super().__init__(
			"remote-api-register",
			[server + "/register"],
			"machine.id",
			type="json",
			regex=r"^[0-9a-f]{256}$",
			copy_cache=True,
			json_data=payload,
			method="POST"
		)

	# The API returns a single id in a {"data": ...} envelope ; wrap it in
	# a list because the base Job expects an iterable of entries.
	def _json(self, data) :
		return [data["data"]]

View File

@@ -12,4 +12,4 @@ class UserAgents(Job) :
super().__init__(name, data, filename, redis_host=redis_host, redis_ex=redis_ex, type=type, regex=regex, copy_cache=copy_cache) super().__init__(name, data, filename, redis_host=redis_host, redis_ex=redis_ex, type=type, regex=regex, copy_cache=copy_cache)
def _edit(self, chunk) : def _edit(self, chunk) :
return [chunk.replace(b"\\ ", b" ").replace(b"\\.", b"%.").replace(b"\\\\", b"\\").replace(b"-", b"%-")] return [chunk.replace("\\ ", " ").replace("\\.", "%.").replace("\\\\", "\\").replace("-", "%-")]

28
jobs/certbot-auth.py Normal file
View File

@@ -0,0 +1,28 @@
#!/usr/bin/python3

# Certbot --manual-auth-hook : publishes the HTTP-01 challenge file and,
# when the autoconf daemon is listening, asks it to propagate the challenge
# to the nginx instances.
# Exit codes : 0 = success, 1 = missing certbot env vars,
# 2 = can't write the challenge file, 3 = autoconf notification failed.

import os, socket, sys, stat

# certbot exports the challenge data through environment variables
VALIDATION = os.getenv("CERTBOT_VALIDATION", None)
TOKEN = os.getenv("CERTBOT_TOKEN", None)
if VALIDATION == None or TOKEN == None :
	sys.exit(1)

# write the validation payload where nginx serves /.well-known/acme-challenge
try :
	with open("/opt/bunkerized-nginx/acme-challenge/.well-known/acme-challenge/" + TOKEN, "w") as f :
		f.write(VALIDATION)
except :
	sys.exit(2)

# notify the autoconf daemon (if present) so it pushes the challenge ;
# when the socket doesn't exist this is a no-op and we exit 0
try :
	if os.path.exists("/tmp/autoconf.sock") and stat.S_ISSOCK(os.stat("/tmp/autoconf.sock").st_mode) :
		# context manager guarantees the socket is closed ; the original
		# code only sent b"close" and leaked the file descriptor
		with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock :
			sock.connect("/tmp/autoconf.sock")
			sock.sendall(b"acme")
			data = sock.recv(512)
			if data != b"ok" :
				raise Exception("can't acme")
			sock.sendall(b"close")
except :
	sys.exit(3)
sys.exit(0)

14
jobs/certbot-cleanup.py Normal file
View File

@@ -0,0 +1,14 @@
#!/usr/bin/python3

# Certbot --manual-cleanup-hook : removes the HTTP-01 challenge file that
# the auth hook published.
# Exit codes : 0 = success, 1 = CERTBOT_TOKEN not set, 2 = removal failed.

import os, sys

challenge_token = os.getenv("CERTBOT_TOKEN", None)
if challenge_token is None :
	sys.exit(1)

try :
	os.remove("/opt/bunkerized-nginx/acme-challenge/.well-known/acme-challenge/" + challenge_token)
except :
	# best-effort : report failure through the exit code only
	sys.exit(2)
sys.exit(0)

View File

@@ -4,7 +4,7 @@ import argparse, sys, re
sys.path.append("/opt/bunkerized-nginx/jobs") sys.path.append("/opt/bunkerized-nginx/jobs")
import Abusers, CertbotNew, CertbotRenew, ExitNodes, GeoIP, Proxies, Referrers, SelfSignedCert, UserAgents import Abusers, CertbotNew, CertbotRenew, ExitNodes, GeoIP, Proxies, Referrers, SelfSignedCert, UserAgents, RemoteApiDatabase, RemoteApiRegister
from Job import JobRet, JobManagement, ReloadRet from Job import JobRet, JobManagement, ReloadRet
from logger import log from logger import log
@@ -17,8 +17,11 @@ JOBS = {
"geoip": GeoIP.GeoIP, "geoip": GeoIP.GeoIP,
"proxies": Proxies.Proxies, "proxies": Proxies.Proxies,
"referrers": Referrers.Referrers, "referrers": Referrers.Referrers,
"remote-api-database": RemoteApiDatabase.RemoteApiDatabase,
"remote-api-register": RemoteApiRegister.RemoteApiRegister,
"self-signed-cert": SelfSignedCert.SelfSignedCert, "self-signed-cert": SelfSignedCert.SelfSignedCert,
"user-agents": UserAgents.UserAgents "user-agents": UserAgents.UserAgents
} }
if __name__ == "__main__" : if __name__ == "__main__" :
@@ -36,6 +39,9 @@ if __name__ == "__main__" :
parser.add_argument("--dst_key", default="", type=str, help="key path for self-signed-cert job (e.g. : /etc/nginx/default-key.pem)") parser.add_argument("--dst_key", default="", type=str, help="key path for self-signed-cert job (e.g. : /etc/nginx/default-key.pem)")
parser.add_argument("--expiry", default="", type=str, help="number of validity days for self-signed-cert job (e.g. : 365)") parser.add_argument("--expiry", default="", type=str, help="number of validity days for self-signed-cert job (e.g. : 365)")
parser.add_argument("--subj", default="", type=str, help="certificate subject for self-signed-cert job (e.g. : OU=X/CN=Y...)") parser.add_argument("--subj", default="", type=str, help="certificate subject for self-signed-cert job (e.g. : OU=X/CN=Y...)")
parser.add_argument("--server", default="", type=str, help="address of the server for remote-api jobs")
parser.add_argument("--id", default="", type=str, help="machine id for remote-api jobs")
parser.add_argument("--version", default="", type=str, help="bunkerized-nginx version for remote-api jobs")
args = parser.parse_args() args = parser.parse_args()
# Check job name # Check job name
@@ -68,6 +74,10 @@ if __name__ == "__main__" :
instance = JOBS[job](redis_host=redis_host, copy_cache=args.cache, domain=args.domain, email=args.email, staging=args.staging) instance = JOBS[job](redis_host=redis_host, copy_cache=args.cache, domain=args.domain, email=args.email, staging=args.staging)
elif job == "self-signed-cert" : elif job == "self-signed-cert" :
instance = JOBS[job](redis_host=redis_host, copy_cache=args.cache, dst_cert=args.dst_cert, dst_key=args.dst_key, expiry=args.expiry, subj=args.subj) instance = JOBS[job](redis_host=redis_host, copy_cache=args.cache, dst_cert=args.dst_cert, dst_key=args.dst_key, expiry=args.expiry, subj=args.subj)
elif job == "remote-api-database" :
instance = JOBS[job](server=args.server, version=args.version, id=args.id, redis_host=redis_host, copy_cache=args.cache)
elif job == "remote-api-register" :
instance = JOBS[job](server=args.server, version=args.version)
else : else :
instance = JOBS[job](redis_host=redis_host, copy_cache=args.cache) instance = JOBS[job](redis_host=redis_host, copy_cache=args.cache)
ret = instance.run() ret = instance.run()

View File

@@ -1,6 +1,8 @@
local M = {} local M = {}
local api_list = {} local api_list = {}
local iputils = require "resty.iputils" local iputils = require "resty.iputils"
local upload = require "resty.upload"
local logger = require "logger"
api_list["^/ping$"] = function () api_list["^/ping$"] = function ()
return true return true
@@ -26,6 +28,90 @@ api_list["^/stop$"] = function ()
return os.execute("/usr/sbin/nginx -s quit") == 0 return os.execute("/usr/sbin/nginx -s quit") == 0
end end
-- Stop the temporary nginx instance started with the fallback config.
api_list["^/stop%-temp$"] = function ()
	return os.execute("/usr/sbin/nginx -c /tmp/nginx-temp.conf -s stop") == 0
end

-- Each endpoint below receives a tar.gz archive in the request body,
-- saves it under /tmp and extracts it over the matching directory.

-- Deploy generated nginx configuration files.
api_list["^/conf$"] = function ()
	if not M.save_file("/tmp/conf.tar.gz") then
		return false
	end
	return M.extract_file("/tmp/conf.tar.gz", "/etc/nginx/")
end

-- Deploy Let's Encrypt certificates.
api_list["^/letsencrypt$"] = function ()
	if not M.save_file("/tmp/letsencrypt.tar.gz") then
		return false
	end
	return M.extract_file("/tmp/letsencrypt.tar.gz", "/etc/letsencrypt/")
end

-- Deploy ACME HTTP-01 challenge files.
-- NOTE(review): destination has no trailing slash unlike the other
-- endpoints — presumably harmless for tar -C, but worth confirming.
api_list["^/acme$"] = function ()
	if not M.save_file("/tmp/acme.tar.gz") then
		return false
	end
	return M.extract_file("/tmp/acme.tar.gz", "/acme-challenge")
end

-- Deploy custom http-level configuration snippets.
api_list["^/http$"] = function ()
	if not M.save_file("/tmp/http.tar.gz") then
		return false
	end
	return M.extract_file("/tmp/http.tar.gz", "/http-confs/")
end

-- Deploy custom server-level configuration snippets.
api_list["^/server$"] = function ()
	if not M.save_file("/tmp/server.tar.gz") then
		return false
	end
	return M.extract_file("/tmp/server.tar.gz", "/server-confs/")
end

-- Deploy custom ModSecurity rules.
api_list["^/modsec$"] = function ()
	if not M.save_file("/tmp/modsec.tar.gz") then
		return false
	end
	return M.extract_file("/tmp/modsec.tar.gz", "/modsec-confs/")
end

-- Deploy custom ModSecurity Core Rule Set files.
api_list["^/modsec%-crs$"] = function ()
	if not M.save_file("/tmp/modsec-crs.tar.gz") then
		return false
	end
	return M.extract_file("/tmp/modsec-crs.tar.gz", "/modsec-crs-confs/")
end
-- Reads the multipart body of the current request with lua-resty-upload
-- and streams it into the file at "name". Returns true on success,
-- false (after logging) on any failure.
function M.save_file (name)
	local form, err = upload:new(4096)
	if not form then
		logger.log(ngx.ERR, "API", err)
		return false
	end
	form:set_timeout(1000)
	-- open in binary mode : the payloads are .tar.gz archives ;
	-- the original code never checked io.open's result, so a failed
	-- open crashed on file:write instead of returning false
	local file, ferr = io.open(name, "wb")
	if not file then
		logger.log(ngx.ERR, "API", "can't open " .. name .. " : " .. tostring(ferr))
		return false
	end
	while true do
		-- read() yields events : "header", "body", "part_end", "eof"
		local typ, res, rerr = form:read()
		if not typ then
			file:close()
			logger.log(ngx.ERR, "API", "not typ")
			return false
		end
		if typ == "eof" then
			break
		end
		if typ == "body" then
			file:write(res)
		end
	end
	file:flush()
	file:close()
	return true
end
-- Extracts a gzipped tarball into the destination directory with the
-- system tar. Returns true when tar exited with status 0 (LuaJIT /
-- Lua 5.1 os.execute returns the numeric exit status).
function M.extract_file(archive, destination)
	local cmd = string.format("tar xzf %s -C %s", archive, destination)
	return os.execute(cmd) == 0
end
function M.is_api_call (api_uri, api_whitelist_ip) function M.is_api_call (api_uri, api_whitelist_ip)
local whitelist = iputils.parse_cidrs(api_whitelist_ip) local whitelist = iputils.parse_cidrs(api_whitelist_ip)
if iputils.ip_in_cidrs(ngx.var.remote_addr, whitelist) and ngx.var.request_uri:sub(1, #api_uri) .. "/" == api_uri .. "/" then if iputils.ip_in_cidrs(ngx.var.remote_addr, whitelist) and ngx.var.request_uri:sub(1, #api_uri) .. "/" == api_uri .. "/" then

View File

@@ -16,17 +16,18 @@ function M.count (status_codes, threshold, count_time, ban_time)
local ok, err = ngx.shared.behavior_count:set(ngx.var.remote_addr, count, count_time) local ok, err = ngx.shared.behavior_count:set(ngx.var.remote_addr, count, count_time)
if not ok then if not ok then
logger.log(ngx.ERR, "BEHAVIOR", "not enough memory allocated to behavior_ip_count") logger.log(ngx.ERR, "BEHAVIOR", "not enough memory allocated to behavior_ip_count")
return return false
end end
if count >= threshold then if count >= threshold then
logger.log(ngx.WARN, "BEHAVIOR", "threshold reached for " .. ngx.var.remote_addr .. " (" .. count .. " / " .. threshold .. ") : IP is banned for " .. ban_time .. " seconds") logger.log(ngx.WARN, "BEHAVIOR", "threshold reached for " .. ngx.var.remote_addr .. " (" .. count .. " / " .. threshold .. ") : IP is banned for " .. ban_time .. " seconds")
local ok, err = ngx.shared.behavior_ban:safe_set(ngx.var.remote_addr, true, ban_time) local ok, err = ngx.shared.behavior_ban:safe_set(ngx.var.remote_addr, true, ban_time)
if not ok then if not ok then
logger.log(ngx.ERR, "BEHAVIOR", "not enough memory allocated to behavior_ip_ban") logger.log(ngx.ERR, "BEHAVIOR", "not enough memory allocated to behavior_ip_ban")
return return false
end end
return true
end end
break return false
end end
end end
end end

View File

@@ -4,19 +4,19 @@ local base64 = require "misc.base64"
function M.get_challenge () function M.get_challenge ()
local cap = captcha.new() local cap = captcha.new()
cap:font("/usr/local/lib/lua/misc/Vera.ttf") cap:font("/opt/bunkerized-nginx/lua/misc/Vera.ttf")
cap:generate() cap:generate()
return cap:jpegStr(70), cap:getStr() return cap:jpegStr(70), cap:getStr()
end end
function M.get_code (img, antibot_uri) function M.get_code (img, antibot_uri)
-- get template -- get template
local f = io.open("/antibot/captcha.html", "r") local f = io.open("/opt/bunkerized-nginx/antibot/captcha.html", "r")
local template = f:read("*all") local template = f:read("*all")
f:close() f:close()
-- get captcha code -- get captcha code
f = io.open("/antibot/captcha.data", "r") f = io.open("/opt/bunkerized-nginx/antibot/captcha.data", "r")
local captcha_data = f:read("*all") local captcha_data = f:read("*all")
f:close() f:close()

View File

@@ -15,12 +15,12 @@ end
function M.get_code (challenge, antibot_uri, original_uri) function M.get_code (challenge, antibot_uri, original_uri)
-- get template -- get template
local f = io.open("/antibot/javascript.html", "r") local f = io.open("/opt/bunkerized-nginx/antibot/javascript.html", "r")
local template = f:read("*all") local template = f:read("*all")
f:close() f:close()
-- get JS code -- get JS code
f = io.open("/antibot/javascript.data", "r") f = io.open("/opt/bunkerized-nginx/antibot/javascript.data", "r")
local javascript = f:read("*all") local javascript = f:read("*all")
f:close() f:close()
@@ -32,14 +32,11 @@ function M.get_code (challenge, antibot_uri, original_uri)
end end
function M.check (challenge, user) function M.check (challenge, user)
ngx.log(ngx.ERR, "debug challenge = " .. challenge)
ngx.log(ngx.ERR, "debug user = " .. user)
local resty_sha256 = require "resty.sha256" local resty_sha256 = require "resty.sha256"
local str = require "resty.string" local str = require "resty.string"
local sha256 = resty_sha256:new() local sha256 = resty_sha256:new()
sha256:update(challenge .. user) sha256:update(challenge .. user)
local digest = sha256:final() local digest = sha256:final()
ngx.log(ngx.ERR, "debug digest = " .. str.to_hex(digest))
return str.to_hex(digest):find("^0000") ~= nil return str.to_hex(digest):find("^0000") ~= nil
end end

72
lua/limitreq.lua Normal file
View File

@@ -0,0 +1,72 @@
local M = {}
local logger = require "logger"

-- Request-rate limiting helpers backed by the ngx.shared.limit_req dict.
-- Each request increments a per-client/URI counter and schedules an
-- asynchronous decrement once the time window has elapsed.

-- Schedule an asynchronous decrement of the counter stored at key after
-- delay seconds. Returns true when the timer was created, false otherwise.
function M.decr (key, delay)
	local function callback (premature, key)
		-- premature = worker is shutting down : drop the counter entirely
		if premature then
			ngx.shared.limit_req:delete(key)
			return
		end
		local value, flags = ngx.shared.limit_req:get(key)
		if value ~= nil then
			-- <= 0 (not == 0) so a counter that somehow went negative
			-- is cleaned up instead of drifting further down
			if value - 1 <= 0 then
				ngx.shared.limit_req:delete(key)
				return
			end
			ngx.shared.limit_req:set(key, value - 1, 0)
		end
	end
	local ok, err = ngx.timer.at(delay, callback, key)
	if not ok then
		logger.log(ngx.ERR, "REQ LIMIT", "can't setup decrement timer : " .. err)
		return false
	end
	return true
end

-- Atomically increment the counter stored at key (created at 0 on first use).
-- Returns the new counter value (a truthy number, so existing
-- `if M.incr(key) then` callers keep working) or false on failure.
function M.incr (key)
	local newval, err, forcible = ngx.shared.limit_req:incr(key, 1, 0, 0)
	if not newval then
		logger.log(ngx.ERR, "REQ LIMIT", "can't increment counter : " .. err)
		return false
	end
	return newval
end

-- Check whether the current client/URI pair exceeded the allowed rate.
-- rate  : string of the form "<max>r/<unit>" with unit in s, m, h or d
-- burst : extra requests tolerated above max before rejecting
-- sleep : seconds to sleep when above max but within the burst allowance
-- Returns true when the request must be rejected, false otherwise.
function M.check (rate, burst, sleep)
	local key = ngx.var.remote_addr .. ngx.var.uri
	-- split e.g. "10r/s" into { "10", "s" } (separators are 'r' and '/')
	local rate_split = {}
	for str in rate:gmatch("([^r/]+)") do
		table.insert(rate_split, str)
	end
	local max = tonumber(rate_split[1])
	local unit = rate_split[2]
	-- window length in seconds for each supported unit
	local units = { ["s"] = 1, ["m"] = 60, ["h"] = 3600, ["d"] = 86400 }
	local delay = units[unit] or 0
	-- use the value returned by the atomic incr() instead of re-reading
	-- the dict : a separate get() could observe increments performed by
	-- concurrent requests and over-count this client
	local current = M.incr(key)
	if current then
		if M.decr(key, delay) then
			if current > max + burst then
				logger.log(ngx.WARN, "REQ LIMIT", "ip " .. ngx.var.remote_addr .. " has reached the limit for uri " .. ngx.var.uri .. " : " .. current .. "r/" .. unit .. " (max = " .. rate .. ")")
				return true
			elseif current > max then
				if sleep > 0 then
					ngx.sleep(sleep)
				end
			end
		else
			-- no decrement timer could be created : atomically undo our
			-- increment so the counter doesn't grow without bound
			ngx.shared.limit_req:incr(key, -1)
		end
	end
	return false
end

return M

3314
lua/misc/root-ca.pem Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -4,15 +4,15 @@ local cjson = require "cjson"
function M.get_code (antibot_uri, recaptcha_sitekey) function M.get_code (antibot_uri, recaptcha_sitekey)
-- get template -- get template
local f = io.open("/antibot/recaptcha.html", "r") local f = io.open("/opt/bunkerized-nginx/antibot/recaptcha.html", "r")
local template = f:read("*all") local template = f:read("*all")
f:close() f:close()
-- get recaptcha code -- get recaptcha code
f = io.open("/antibot/recaptcha-head.data", "r") f = io.open("/opt/bunkerized-nginx/antibot/recaptcha-head.data", "r")
local recaptcha_head = f:read("*all") local recaptcha_head = f:read("*all")
f:close() f:close()
f = io.open("/antibot/recaptcha-body.data", "r") f = io.open("/opt/bunkerized-nginx/antibot/recaptcha-body.data", "r")
local recaptcha_body = f:read("*all") local recaptcha_body = f:read("*all")
f:close() f:close()
@@ -27,7 +27,6 @@ end
function M.check (token, recaptcha_secret) function M.check (token, recaptcha_secret)
local httpc = http.new() local httpc = http.new()
local res, err = httpc:request_uri("https://www.google.com/recaptcha/api/siteverify", { local res, err = httpc:request_uri("https://www.google.com/recaptcha/api/siteverify", {
ssl_verify = false,
method = "POST", method = "POST",
body = "secret=" .. recaptcha_secret .. "&response=" .. token .. "&remoteip=" .. ngx.var.remote_addr, body = "secret=" .. recaptcha_secret .. "&response=" .. token .. "&remoteip=" .. ngx.var.remote_addr,
headers = { ["Content-Type"] = "application/x-www-form-urlencoded" } headers = { ["Content-Type"] = "application/x-www-form-urlencoded" }

104
lua/remoteapi.lua Normal file
View File

@@ -0,0 +1,104 @@
-- Client helpers for the bunkerized-nginx remote API.
-- Configuration (server URL, machine id, version) is read from the
-- ngx.shared.remote_api dictionary, populated elsewhere.
local M = {}
local http = require "resty.http"
local cjson = require "cjson"
local logger = require "logger"
-- Send a JSON request to the remote API.
-- method : HTTP method ("GET", "POST", ...)
-- url    : path appended to the configured server URL (e.g. "/ping")
-- data   : table encoded as the JSON body ; must contain a "version" key
--          (used to build the User-Agent header)
-- Returns ok (boolean), status (number or nil), data (the "data" field of
-- the decoded JSON response, or nil on transport failure).
function M.send(method, url, data)
local httpc, err = http.new()
if not httpc then
logger.log(ngx.ERR, "REMOTE API", "Can't instantiate HTTP object : " .. err)
return false, nil, nil
end
local res, err = httpc:request_uri(ngx.shared.remote_api:get("server") .. url, {
method = method,
body = cjson.encode(data),
headers = {
["Content-Type"] = "application/json",
["User-Agent"] = "bunkerized-nginx/" .. data["version"]
}
})
if not res then
logger.log(ngx.ERR, "REMOTE API", "Can't send HTTP request : " .. err)
return false, nil, nil
end
-- non-200 answers are logged but still returned to the caller
if res.status ~= 200 then
logger.log(ngx.WARN, "REMOTE API", "Received status " .. res.status .. " from API : " .. res.body)
end
-- NOTE(review): cjson.decode raises on a non-JSON body ; presumably the
-- API always answers JSON — confirm, or consider wrapping in pcall
return true, res.status, cjson.decode(res.body)["data"]
end
-- Build the request payload : copies data and adds the shared "version"
-- value (plus the machine "id" when use_id is true).
function M.gen_data(use_id, data)
local all_data = {}
if use_id then
all_data["id"] = ngx.shared.remote_api:get("id")
end
all_data["version"] = ngx.shared.remote_api:get("version")
for k, v in pairs(data) do
all_data[k] = v
end
return all_data
end
-- Ping the API using luasec's blocking ssl.https instead of resty.http.
-- NOTE(review): blocking sockets would stall the event loop inside a
-- request — presumably this variant exists for init/timer phases where
-- cosockets are unavailable ; confirm against callers.
function M.ping2()
local https = require "ssl.https"
local ltn12 = require "ltn12"
local request_body = cjson.encode(M.gen_data(true, {}))
local response_body = {}
local res, code, headers, status = https.request {
url = ngx.shared.remote_api:get("server") .. "/ping",
method = "GET",
headers = {
["Content-Type"] = "application/json",
["User-Agent"] = "bunkerized-nginx/" .. ngx.shared.remote_api:get("version"),
["Content-Length"] = request_body:len()
},
source = ltn12.source.string(request_body),
sink = ltn12.sink.table(response_body)
}
-- "% " escapes the space in the Lua pattern : accept any HTTP status
-- line containing " 200 "
if res and status:match("^.*% 200% .*$") then
response_body = cjson.decode(response_body[1])
return response_body["data"] == "pong"
end
return false
end
-- Register this instance with the API (no machine id sent yet).
-- Returns ok, data — presumably data carries the assigned id ; confirm.
function M.register()
local request = {}
local res, status, data = M.send("POST", "/register", M.gen_data(false, request))
if res and status == 200 then
return true, data
end
return false, data
end
-- Ping the API with the registered machine id ; returns ok, data.
function M.ping()
local request = {}
local res, status, data = M.send("GET", "/ping", M.gen_data(true, request))
if res and status == 200 then
return true, data
end
return false, data
end
-- Report an IP address (with a human-readable reason) to the API.
-- Returns ok, data.
function M.ip(ip, reason)
local request = {
["ip"] = ip,
["reason"] = reason
}
local res, status, data = M.send("POST", "/ip", M.gen_data(true, request))
if res and status == 200 then
return true, data
end
return false, data
end
-- Fetch the distributed database from the API ; returns ok, data.
function M.db()
local request = {}
local res, status, data = M.send("GET", "/db", M.gen_data(true, request))
if res and status == 200 then
return true, data
end
return false, data
end
return M

View File

@@ -1,7 +1,8 @@
15 0 * * * /opt/bunkerized-nginx/jobs/main.py --reload --name certbot-renew >> /var/log/nginx/jobs.log 2>&1 15 0 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value AUTO_LETS_ENCRYPT yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name certbot-renew >> /var/log/nginx/jobs.log 2>&1
30 0 * * * /opt/bunkerized-nginx/jobs/main.py --reload --name user-agents >> /var/log/nginx/jobs.log 2>&1 30 0 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_USER_AGENT yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name user-agents >> /var/log/nginx/jobs.log 2>&1
45 0 * * * /opt/bunkerized-nginx/jobs/main.py --reload --name referrers >> /var/log/nginx/jobs.log 2>&1 45 0 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_REFERRER yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name referrers >> /var/log/nginx/jobs.log 2>&1
0 1 * * * /opt/bunkerized-nginx/jobs/main.py --reload --name abusers >> /var/log/nginx/jobs.log 2>&1 0 1 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_ABUSERS yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name abusers >> /var/log/nginx/jobs.log 2>&1
0 2 * * * /opt/bunkerized-nginx/jobs/main.py --reload --name proxies >> /var/log/nginx/jobs.log 2>&1 0 2 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_PROXIES yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name proxies >> /var/log/nginx/jobs.log 2>&1
0 */1 * * * /opt/bunkerized-nginx/jobs/main.py --reload --name exit-nodes >> /var/log/nginx/jobs.log 2>&1 30 */1 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value USE_REMOTE_API yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name remote-api-database --server "$(grep '^REMOTE_API_SERVER=' /etc/nginx/global.env | cut -d '=' -f 2)" --version "$(cat /opt/bunkerized-nginx/VERSION)" --id "$(cat /opt/bunkerized-nginx/cache/machine.id)" >> /var/log/nginx/jobs.log 2>&1
0 3 2 * * /opt/bunkerized-nginx/jobs/main.py --reload --name geoip >> /var/log/nginx/jobs.log 2>&1 0 */1 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_TOR_EXIT_NODE yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name exit-nodes >> /var/log/nginx/jobs.log 2>&1
0 3 2 * * . /opt/bunkerized-nginx/entrypoint/utils.sh && { [ "$(has_value BLACKLIST_COUNTRY ".\+")" != "" ] || [ "$(has_value WHITELIST_COUNTRY ".\+")" != "" ]; } && /opt/bunkerized-nginx/jobs/main.py --reload --name geoip >> /var/log/nginx/jobs.log 2>&1

View File

@@ -1,7 +1,8 @@
15 0 * * * /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name certbot-renew" nginx >> /var/log/nginx/jobs.log 2>&1 15 0 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value AUTO_LETS_ENCRYPT yes)" != "" ] && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name certbot-renew" nginx >> /var/log/nginx/jobs.log 2>&1
30 0 * * * /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name user-agents" nginx >> /var/log/nginx/jobs.log 2>&1 30 0 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_USER_AGENT yes)" != "" ] && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name user-agents" nginx >> /var/log/nginx/jobs.log 2>&1
45 0 * * * /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name referrers" nginx >> /var/log/nginx/jobs.log 2>&1 45 0 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_REFERRER yes)" != "" ] && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name referrers" nginx >> /var/log/nginx/jobs.log 2>&1
0 1 * * * /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name abusers" nginx >> /var/log/nginx/jobs.log 2>&1 0 1 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_ABUSERS yes)" != "" ] && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name abusers" nginx >> /var/log/nginx/jobs.log 2>&1
0 2 * * * /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name proxies" nginx >> /var/log/nginx/jobs.log 2>&1 0 2 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_PROXIES yes)" != "" ] && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name proxies" nginx >> /var/log/nginx/jobs.log 2>&1
0 */1 * * * /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name exit-nodes" nginx >> /var/log/nginx/jobs.log 2>&1 30 */1 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value USE_REMOTE_API yes)" != "" ] && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name remote-api-database --server $(grep '^REMOTE_API_SERVER=' /etc/nginx/global.env | cut -d '=' -f 2) --version $(cat /opt/bunkerized-nginx/VERSION) --id $(cat /opt/bunkerized-nginx/cache/machine.id)" nginx >> /var/log/nginx/jobs.log 2>&1
0 3 2 * * /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name geoip" nginx >> /var/log/nginx/jobs.log 2>&1 0 */1 * * * . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_TOR_EXIT_NODE yes)" != "" ] && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name exit-nodes" nginx >> /var/log/nginx/jobs.log 2>&1
0 3 2 * * . /opt/bunkerized-nginx/entrypoint/utils.sh && { [ "$(has_value BLACKLIST_COUNTRY ".\+")" != "" ] || [ "$(has_value WHITELIST_COUNTRY ".\+")" != "" ]; } && /bin/su -c "/opt/bunkerized-nginx/jobs/main.py --reload --lock --name geoip" nginx >> /var/log/nginx/jobs.log 2>&1

View File

@@ -1,7 +1,8 @@
15 0 * * * nginx /opt/bunkerized-nginx/jobs/main.py --reload --name certbot-renew >> /var/log/nginx/jobs.log 2>&1 15 0 * * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value AUTO_LETS_ENCRYPT yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name certbot-renew >> /var/log/nginx/jobs.log 2>&1
30 0 * * * nginx /opt/bunkerized-nginx/jobs/main.py --reload --name user-agents >> /var/log/nginx/jobs.log 2>&1 30 0 * * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_USER_AGENT yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name user-agents >> /var/log/nginx/jobs.log 2>&1
45 0 * * * nginx /opt/bunkerized-nginx/jobs/main.py --reload --name referrers >> /var/log/nginx/jobs.log 2>&1 45 0 * * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_REFERRER yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name referrers >> /var/log/nginx/jobs.log 2>&1
0 1 * * * nginx /opt/bunkerized-nginx/jobs/main.py --reload --name abusers >> /var/log/nginx/jobs.log 2>&1 0 1 * * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_ABUSERS yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name abusers >> /var/log/nginx/jobs.log 2>&1
0 2 * * * nginx /opt/bunkerized-nginx/jobs/main.py --reload --name proxies >> /var/log/nginx/jobs.log 2>&1 0 2 * * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_PROXIES yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name proxies >> /var/log/nginx/jobs.log 2>&1
0 */1 * * * nginx /opt/bunkerized-nginx/jobs/main.py --reload --name exit-nodes >> /var/log/nginx/jobs.log 2>&1 30 */1 * * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value USE_REMOTE_API yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name remote-api-database --server "$(grep '^REMOTE_API_SERVER=' /etc/nginx/global.env | cut -d '=' -f 2)" --version "$(cat /opt/bunkerized-nginx/VERSION)" --id "$(cat /opt/bunkerized-nginx/cache/machine.id)" >> /var/log/nginx/jobs.log 2>&1
0 3 2 * * nginx /opt/bunkerized-nginx/jobs/main.py --reload --name geoip >> /var/log/nginx/jobs.log 2>&1 0 */1 * * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && [ "$(has_value BLOCK_TOR_EXIT_NODE yes)" != "" ] && /opt/bunkerized-nginx/jobs/main.py --reload --name exit-nodes >> /var/log/nginx/jobs.log 2>&1
0 3 2 * * nginx . /opt/bunkerized-nginx/entrypoint/utils.sh && { [ "$(has_value BLACKLIST_COUNTRY ".\+")" != "" ] || [ "$(has_value WHITELIST_COUNTRY ".\+")" != "" ]; } && /opt/bunkerized-nginx/jobs/main.py --reload --name geoip >> /var/log/nginx/jobs.log 2>&1

View File

@@ -99,6 +99,56 @@
} }
] ]
}, },
"Bad behavior": {
"id": "bad-behavior",
"params": [
{
"context": "multisite",
"default": "yes",
"env": "USE_BAD_BEHAVIOR",
"id": "use-bad-behavior",
"label": "Use bad behavior",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "multisite",
"default": "86400",
"env": "BAD_BEHAVIOR_BAN_TIME",
"id": "bad-behavior-ban-time",
"label": "Ban duration time",
"regex": "^[0-9]+$",
"type": "text"
},
{
"context": "multisite",
"default": "60",
"env": "BAD_BEHAVIOR_COUNT_TIME",
"id": "bad-behavior-count-time",
"label": "Count time",
"regex": "^[0-9]+$",
"type": "text"
},
{
"context": "multisite",
"default": "400 401 403 404 405 429 444",
"env": "BAD_BEHAVIOR_STATUS_CODES",
"id": "bad-behavior-status-codes",
"label": "Status codes",
"regex": "^([0-9]{3} ?)*$",
"type": "text"
},
{
"context": "multisite",
"default": "10",
"env": "BAD_BEHAVIOR_THRESHOLD",
"id": "bad-behavior-threshold",
"label": "Threshold",
"regex": "^[0-9]+$",
"type": "text"
}
]
},
"Basic auth": { "Basic auth": {
"id": "auth-basic", "id": "auth-basic",
"params": [ "params": [
@@ -724,7 +774,7 @@
}, },
{ {
"context": "multisite", "context": "multisite",
"default": "accelerometer 'none'; ambient-light-sensor 'none'; autoplay 'none'; camera 'none'; display-capture 'none'; document-domain 'none'; encrypted-media 'none'; fullscreen 'none'; geolocation 'none'; gyroscope 'none'; magnetometer 'none'; microphone 'none'; midi 'none'; payment 'none'; picture-in-picture 'none'; speaker 'none'; sync-xhr 'none'; usb 'none'; vibrate 'none'; vr 'none'", "default": "accelerometer 'none'; ambient-light-sensor 'none'; autoplay 'none'; battery 'none'; camera 'none'; display-capture 'none'; document-domain 'none'; encrypted-media 'none'; fullscreen 'none'; geolocation 'none'; gyroscope 'none'; magnetometer 'none'; microphone 'none'; midi 'none'; payment 'none'; picture-in-picture 'none'; publickey-credentials-get 'none'; sync-xhr 'none'; usb 'none'; wake-lock 'none'; web-share 'none'; xr-spatial-tracking 'none'",
"env": "FEATURE_POLICY", "env": "FEATURE_POLICY",
"id": "feature-policy", "id": "feature-policy",
"label": "Feature policy", "label": "Feature policy",
@@ -733,7 +783,7 @@
}, },
{ {
"context": "multisite", "context": "multisite",
"default": "accelerometer=(), ambient-light-sensor=(), autoplay=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), fullscreen=(), geolocation=(), gyroscope=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), speaker=(), sync-xhr=(), usb=(), vibrate=(), vr=()", "default": "accelerometer=(), ambient-light-sensor=(), autoplay=(), battery=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), fullscreen=(), geolocation=(), gyroscope=(), interest-cohort=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), publickey-credentials-get=(), screen-wake-lock=(), sync-xhr=(), usb=(), web-share=(), xr-spatial-tracking=()",
"env": "PERMISSIONS_POLICY", "env": "PERMISSIONS_POLICY",
"id": "permissions-policy", "id": "permissions-policy",
"label": "Permissions policy", "label": "Permissions policy",
@@ -775,6 +825,23 @@
"label": "Content security policy", "label": "Content security policy",
"regex": "^([\\S ]*)$", "regex": "^([\\S ]*)$",
"type": "text" "type": "text"
},
{
"id": "custom-headers",
"label": "Custom headers",
"params": [
{
"context": "multisite",
"default": "",
"env": "CUSTOM_HEADER",
"id": "custom-headers",
"label": "Custom header",
"multiple": "Custom headers",
"regex": "^([\\S ]*)$",
"type": "text"
}
],
"type": "multiple"
} }
] ]
}, },
@@ -810,6 +877,92 @@
} }
] ]
}, },
"Internal": {
"id": "internal",
"params": [
{
"context": "global",
"default": "no",
"env": "USE_API",
"id": "use-api",
"label": "Enable API",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "192.168.0.0/16 172.16.0.0/12 10.0.0.0/8",
"env": "API_WHITELIST_IP",
"id": "api-whitelist-ip",
"label": "API whitelist IP",
"regex": "^(\\d+.\\d+.\\d+.\\d+(/\\d+)? ?)*$",
"type": "text"
},
{
"context": "global",
"default": "random",
"env": "API_URI",
"id": "api-uri",
"label": "API URI",
"regex": "^(random|\\/[A-Za-z0-9\\-\\/]+)$",
"type": "text"
},
{
"context": "global",
"default": "no",
"env": "SWARM_MODE",
"id": "swarm-mode",
"label": "Swarm mode",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "no",
"env": "KUBERNETES_MODE",
"id": "kubernetes-mode",
"label": "Kubernetes mode",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "no",
"env": "USE_REDIS",
"id": "use-redis",
"label": "Use external redis when coupled with autoconf",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "",
"env": "REDIS_HOST",
"id": "redis-host",
"label": "Hostname/IP of the Redis service",
"regex": "^([A-Za-z0-9\\-\\.\\_]+|.{0})$",
"type": "text"
},
{
"context": "multisite",
"default": "yes",
"env": "USE_REMOTE_API",
"id": "use-remote-api",
"label": "Use a remote service for enhanced security",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "https://api.bunkerity.com/bunkerized",
"env": "REMOTE_API_SERVER",
"id": "remote-api-server",
"label": "The URL of the remote service",
"regex": "^.*$",
"type": "text"
}
]
},
"Limit conn": { "Limit conn": {
"id": "limit-conn", "id": "limit-conn",
"params": [ "params": [
@@ -854,24 +1007,53 @@
"regex": "^(yes|no)$", "regex": "^(yes|no)$",
"type": "checkbox" "type": "checkbox"
}, },
{
"id": "limit-req-params",
"label": "Limit request",
"params": [
{
"context": "multisite",
"default": "",
"env": "LIMIT_REQ_URL",
"id": "limit-req-url",
"label": "Limit req url",
"multiple": "Limit request",
"regex": "^.*$",
"type": "text"
},
{ {
"context": "multisite", "context": "multisite",
"default": "1r/s", "default": "1r/s",
"env": "LIMIT_REQ_RATE", "env": "LIMIT_REQ_RATE",
"id": "limit-req-rate", "id": "limit-req-rate",
"label": "Limit req rate", "label": "Limit req rate",
"regex": "^\\d+r/(ms|s|m|h|d)$", "multiple": "Limit request",
"regex": "^\\d+r/(s|m|h|d)$",
"type": "text" "type": "text"
}, },
{ {
"context": "multisite", "context": "multisite",
"default": "2", "default": "5",
"env": "LIMIT_REQ_BURST", "env": "LIMIT_REQ_BURST",
"id": "limit-req-burst", "id": "limit-req-burst",
"label": "Limit req burst", "label": "Limit req burst",
"multiple": "Limit request",
"regex": "^\\d+$", "regex": "^\\d+$",
"type": "text" "type": "text"
}, },
{
"context": "multisite",
"default": "1",
"env": "LIMIT_REQ_DELAY",
"id": "limit-req-delay",
"label": "Limit req delay",
"multiple": "Limit request",
"regex": "^\\d+(\\.\\d+)?$",
"type": "text"
}
],
"type": "multiple"
},
{ {
"context": "global", "context": "global",
"default": "10m", "default": "10m",
@@ -1070,6 +1252,26 @@
"regex": "^(yes|no)$", "regex": "^(yes|no)$",
"type": "checkbox" "type": "checkbox"
}, },
{
"context": "multisite",
"default": "yes",
"env": "REVERSE_PROXY_BUFFERING",
"id": "reverse-proxy-buffering",
"label": "Reverse proxy buffering",
"multiple": "Reverse proxy",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "multisite",
"default": "yes",
"env": "REVERSE_PROXY_KEEPALIVE",
"id": "reverse-proxy-keepalive",
"label": "Reverse proxy keepalive",
"multiple": "Reverse proxy",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{ {
"context": "multisite", "context": "multisite",
"default": "", "default": "",
@@ -1121,120 +1323,70 @@
} }
] ]
}, },
"Bad behavior": { "Whitelist": {
"id": "bad-behavior", "id": "whitelist",
"params": [ "params": [
{ {
"context": "multisite", "context": "multisite",
"default": "yes", "default": "yes",
"env": "USE_BAD_BEHAVIOR", "env": "USE_WHITELIST_IP",
"id": "use-bad-behavior", "id": "use-whitelist-ip",
"label": "Use bad behavior", "label": "Use whitelist ip",
"regex": "^(yes|no)$", "regex": "^(yes|no)$",
"type": "checkbox" "type": "checkbox"
}, },
{ {
"context": "multisite", "context": "multisite",
"default": "86400", "default": "23.21.227.69 40.88.21.235 50.16.241.113 50.16.241.114 50.16.241.117 50.16.247.234 52.204.97.54 52.5.190.19 54.197.234.188 54.208.100.253 54.208.102.37 107.21.1.8",
"env": "BAD_BEHAVIOR_BAN_TIME", "env": "WHITELIST_IP_LIST",
"id": "bad-behavior-ban-time", "id": "whitelist-ip-list",
"label": "Ban duration time", "label": "Whitelist ip list",
"regex": "^[0-9]+$",
"type": "text"
},
{
"context": "multisite",
"default": "60",
"env": "BAD_BEHAVIOR_COUNT_TIME",
"id": "bad-behavior-count-time",
"label": "Count time",
"regex": "^[0-9]+$",
"type": "text"
},
{
"context": "multisite",
"default": "400 401 403 404 405 429 444",
"env": "BAD_BEHAVIOR_STATUS_CODES",
"id": "bad-behavior-status-codes",
"label": "Status codes",
"regex": "^([0-9]{3} ?)*$",
"type": "text"
},
{
"context": "multisite",
"default": "10",
"env": "BAD_BEHAVIOR_THRESHOLD",
"id": "bad-behavior-threshold",
"label": "Threshold",
"regex": "^[0-9]+$",
"type": "text"
}
]
},
"Internal": {
"id": "internal",
"params": [
{
"context": "global",
"default": "no",
"env": "USE_API",
"id": "use-api",
"label": "Enable API",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "192.168.0.0/16 172.16.0.0/12 10.0.0.0/8",
"env": "API_WHITELIST_IP",
"id": "api-whitelist-ip",
"label": "API whitelist IP",
"regex": "^(\\d+.\\d+.\\d+.\\d+(/\\d+)? ?)*$", "regex": "^(\\d+.\\d+.\\d+.\\d+(/\\d+)? ?)*$",
"type": "text" "type": "text"
}, },
{ {
"context": "global", "context": "multisite",
"default": "random", "default": "yes",
"env": "API_URI", "env": "USE_WHITELIST_REVERSE",
"id": "api-uri", "id": "use-whitelist-reverse",
"label": "API URI", "label": "Use whitelist reverse",
"regex": "^(random|\\/[A-Za-z0-9\\-\\/]+)$", "regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "multisite",
"default": ".googlebot.com .google.com .search.msn.com .crawl.yahoo.net .crawl.baidu.jp .crawl.baidu.com .yandex.com .yandex.ru .yandex.net",
"env": "WHITELIST_REVERSE_LIST",
"id": "whitelist-reverse-list",
"label": "Whitelist reverse list",
"regex": "^([a-z\\-0-9\\.]+ ?)*$",
"type": "text" "type": "text"
}, },
{ {
"context": "global", "context": "multisite",
"default": "no",
"env": "SWARM_MODE",
"id": "swarm-mode",
"label": "Swarm mode",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "no",
"env": "KUBERNETES_MODE",
"id": "kubernetes-mode",
"label": "Kubernetes mode",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "no",
"env": "USE_REDIS",
"id": "use-redis",
"label": "Use external redis when coupled with autoconf",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "global",
"default": "", "default": "",
"env": "REDIS_HOST", "env": "WHITELIST_COUNTRY",
"id": "redis-host", "id": "whitelist-country",
"label": "Hostname/IP of the Redis service", "label": "Whitelist country",
"regex": "^([A-Za-z0-9\\-\\.\\_]+|.{0})$", "regex": "^([A-Z]{2} ?)*$",
"type": "text"
},
{
"context": "multisite",
"default": "",
"env": "WHITELIST_USER_AGENT",
"id": "whitelist-user-agent",
"label": "Whitelist user agent",
"regex": ".*",
"type": "text"
},
{
"context": "multisite",
"default": "",
"env": "WHITELIST_URI",
"id": "whitelist-uri",
"label": "Whitelist URI",
"regex": "^(\\S ?)*$",
"type": "text" "type": "text"
} }
] ]
@@ -1308,6 +1460,7 @@
{ {
"context": "global", "context": "global",
"default": "8080", "default": "8080",
"env": "HTTP_PORT", "env": "HTTP_PORT",
"id": "http-port", "id": "http-port",
"label": "HTTP port", "label": "HTTP port",
@@ -1351,73 +1504,5 @@
"type": "text" "type": "text"
} }
] ]
},
"Whitelist": {
"id": "whitelist",
"params": [
{
"context": "multisite",
"default": "yes",
"env": "USE_WHITELIST_IP",
"id": "use-whitelist-ip",
"label": "Use whitelist ip",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "multisite",
"default": "23.21.227.69 40.88.21.235 50.16.241.113 50.16.241.114 50.16.241.117 50.16.247.234 52.204.97.54 52.5.190.19 54.197.234.188 54.208.100.253 54.208.102.37 107.21.1.8",
"env": "WHITELIST_IP_LIST",
"id": "whitelist-ip-list",
"label": "Whitelist ip list",
"regex": "^(\\d+.\\d+.\\d+.\\d+(/\\d+)? ?)*$",
"type": "text"
},
{
"context": "multisite",
"default": "yes",
"env": "USE_WHITELIST_REVERSE",
"id": "use-whitelist-reverse",
"label": "Use whitelist reverse",
"regex": "^(yes|no)$",
"type": "checkbox"
},
{
"context": "multisite",
"default": ".googlebot.com .google.com .search.msn.com .crawl.yahoo.net .crawl.baidu.jp .crawl.baidu.com .yandex.com .yandex.ru .yandex.net",
"env": "WHITELIST_REVERSE_LIST",
"id": "whitelist-reverse-list",
"label": "Whitelist reverse list",
"regex": "^([a-z\\-0-9\\.]+ ?)*$",
"type": "text"
},
{
"context": "multisite",
"default": "",
"env": "WHITELIST_COUNTRY",
"id": "whitelist-country",
"label": "Whitelist country",
"regex": "^([A-Z]{2} ?)*$",
"type": "text"
},
{
"context": "multisite",
"default": "",
"env": "WHITELIST_USER_AGENT",
"id": "whitelist-user-agent",
"label": "Whitelist user agent",
"regex": ".*",
"type": "text"
},
{
"context": "multisite",
"default": "",
"env": "WHITELIST_URI",
"id": "whitelist-uri",
"label": "Whitelist URI",
"regex": "^(\\S ?)*$",
"type": "text"
}
]
} }
} }

View File

@@ -0,0 +1,12 @@
FROM archlinux:base

# Strip systemd units that are useless (or broken) inside a container so the
# image can boot systemd as PID 1 for integration tests.
# `cd … &&` (not `;`) so a failed cd can't make the loop delete files from
# the wrong directory ; "$i" quoted and POSIX `=` used in the test.
RUN (cd /lib/systemd/system/sysinit.target.wants/ && for i in *; do [ "$i" = systemd-tmpfiles-setup.service ] || rm -f "$i"; done); \
    rm -f /lib/systemd/system/multi-user.target.wants/*; \
    rm -f /etc/systemd/system/*.wants/*; \
    rm -f /lib/systemd/system/local-fs.target.wants/*; \
    rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
    rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
    rm -f /lib/systemd/system/basic.target.wants/*; \
    rm -f /lib/systemd/system/anaconda.target.wants/*

# Upgrade all packages, then drop the pacman package cache in the same layer
# so the downloaded archives don't bloat the image.
RUN pacman -Syu --noconfirm && \
    rm -rf /var/cache/pacman/pkg/*

View File

@@ -1,6 +1,6 @@
FROM debian:buster-slim FROM debian:bullseye-slim
RUN apt update && apt install -y systemd RUN apt update && apt install -y systemd init
RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \ RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
rm -f /lib/systemd/system/multi-user.target.wants/*;\ rm -f /lib/systemd/system/multi-user.target.wants/*;\

View File

@@ -9,15 +9,13 @@ COPY confs/site/ /opt/bunkerized-nginx/confs/site
COPY confs/global/ /opt/bunkerized-nginx/confs/global COPY confs/global/ /opt/bunkerized-nginx/confs/global
COPY ui/ /opt/bunkerized-nginx/ui COPY ui/ /opt/bunkerized-nginx/ui
COPY settings.json /opt/bunkerized-nginx COPY settings.json /opt/bunkerized-nginx
COPY VERSION /opt/bunkerized-nginx
COPY ui/prepare.sh /tmp COPY ui/prepare.sh /tmp
RUN chmod +x /tmp/prepare.sh && \ RUN chmod +x /tmp/prepare.sh && \
/tmp/prepare.sh && \ /tmp/prepare.sh && \
rm -f /tmp/prepare.sh rm -f /tmp/prepare.sh
# Fix CVE-2021-36159
RUN apk add "apk-tools>=2.12.6-r0"
EXPOSE 5000 EXPOSE 5000
WORKDIR /opt/bunkerized-nginx/ui WORKDIR /opt/bunkerized-nginx/ui