177 Commits

Author SHA1 Message Date
florian
0772a9ba8e docs - edit badge version 2021-08-23 16:50:08 +02:00
florian
33e0ffd5b1 Merge branch 'master' into dev 2021-08-23 16:44:50 +02:00
bunkerity
4cb3e089e3 linux - git SHA1 commit in install.sh 2021-08-20 17:06:58 +02:00
bunkerity
8808f161c5 docs - dev to master links and VERSION upgrade 2021-08-20 16:58:37 +02:00
bunkerity
1c60ec9804 tests - fix volume wait with linux tests 2021-08-20 15:13:16 +02:00
bunkerity
b13ff34569 add REDIRECT_TO_REQUEST_URI variable and edit environment variables docs 2021-08-20 14:59:16 +02:00
bunkerity
58f2926e95 docs - various examples fixes 2021-08-20 09:38:18 +02:00
bunkerity
a824e15684 linux - rename cron 2021-08-18 17:46:28 +02:00
bunkerity
fd52bb7c8d linux - fix cron jobs 2021-08-18 16:15:26 +02:00
bunkerity
0938b20eb8 UI - use sudo for Linux integration 2021-08-18 14:38:31 +02:00
bunkerity
b948e08bd5 UI - use systemctl on Linux 2021-08-18 14:09:42 +02:00
bunkerity
fde14d1621 linux - fix unknown scheme error and do nginx reload as root in UI 2021-08-18 13:38:09 +02:00
bunkerity
8a4eb3f2a3 remove .site files (gen), uninstall remove folder at the end (linux) and run jobs when reloading local instances (UI) 2021-08-18 12:07:11 +02:00
bunkerity
2a0b84074a ui - fix bug when Docker is used but Swarm is disabled, add jobs from API /reload and fix docker-compose doc 2021-08-18 11:36:19 +02:00
bunkerity
aec22d1a81 ui - edit docs and fix CSRF 2021-08-17 17:34:05 +02:00
bunkerity
028fc61b4f docs - add dns_resolvers and permissions to Linux 2021-08-17 14:02:15 +02:00
bunkerity
a903960b4c docs - fix missing subfolder in Linux quickstart guide 2021-08-17 10:55:15 +02:00
bunkerity
a28f06f08f linux - run temp nginx to solve let's encrypt challenges 2021-08-17 10:24:40 +02:00
bunkerity
6c8bc6b349 tests - fix Linux systemd bug when writing to /tmp folder 2021-08-17 09:35:50 +02:00
bunkerity
2b3b4a5c3f linux - systemd support 2021-08-16 15:21:44 +02:00
florian
57e4247eab linux - systemd unit file 2021-08-15 22:48:41 +02:00
florian
f9d4e90894 docs - edit k8s php service port and append suffix to hosts 2021-08-15 00:26:37 +02:00
florian
4f024ec566 docs - add DNS_RESOLVERS for k8s integration 2021-08-15 00:17:42 +02:00
florian
bc46fc3d4c append suffix to ingress hosts 2021-08-14 23:52:03 +02:00
florian
0be1da18a6 remove old conf before generation, dynamic DNS for PHP and reverse proxy and swarm fixes in quickstart guide 2021-08-14 20:52:17 +02:00
florian
3cedc0ae13 quickstart guide fixes 2021-08-13 21:29:58 +02:00
bunkerity
f1d5c07cc1 autoconf - various kubernetes fixes 2021-08-13 16:42:31 +02:00
bunkerity
c9a6b6c27d autoconf - fixed infinite lock 2021-08-13 10:02:14 +02:00
florian
b199464a73 various bug fixes related to Swarm integration 2021-08-12 22:53:07 +02:00
bunkerity
4a9d64d9d9 add favicon to web UI and fix some tech docs 2021-08-12 17:28:13 +02:00
bunkerity
31536a3fe2 linux - reload as root 2021-08-12 15:44:10 +02:00
bunkerity
7b47c7304f examples - minor fixes in architecture images 2021-08-12 14:51:29 +02:00
bunkerity
83e7ce9cde examples - polishing before next release 2021-08-12 10:19:43 +02:00
bunkerity
0ad5159a33 docs - add changelog for next version 2021-08-11 16:54:23 +02:00
bunkerity
6240d8e28d ui - read variables.env when Linux is used 2021-08-11 16:37:01 +02:00
bunkerity
2f80f64dd5 docs - last polish 2021-08-11 16:26:35 +02:00
bunkerity
e98da9b637 docs polishing and fix install.sh gpg --verify 2021-08-11 15:13:44 +02:00
bunkerity
d9f7706969 docs - web UI 2021-08-11 11:51:13 +02:00
bunkerity
75f299978c docs - special folders 2021-08-11 10:16:34 +02:00
bunkerity
ef34b2cec1 docs quickstart / multisite 2021-08-11 09:43:31 +02:00
florian
9b9110214a docs - quickstart guide / php 2021-08-11 00:34:57 +02:00
florian
9e2a8070e4 docs - quickstart guide / reverse proxy 2021-08-10 23:39:28 +02:00
florian
733136ac1a docs - init quickstart 2021-08-10 22:41:45 +02:00
florian
fa172ce5a9 docs - linux integration 2021-08-10 21:13:17 +02:00
bunkerity
f6a9184ae9 docs - k8s integration 2021-08-10 17:04:17 +02:00
bunkerity
d37dc2b629 docs - swarm integration 2021-08-10 15:01:03 +02:00
bunkerity
f7c115edff docs - add autoconf doc to Docker section 2021-08-10 12:03:59 +02:00
bunkerity
dfbb091361 docs - init integrations/Docker 2021-08-10 10:41:34 +02:00
florian
8e4a65feca fix global.env generation and add web UI gif to README 2021-08-09 20:56:52 +02:00
bunkerity
0573ba7b5a ui - centering things without breaking sticky navbar and menu 2021-08-09 17:10:26 +02:00
florian
bcd421de09 ui - various bug fixes more or less related to UI 2021-08-09 13:19:10 +02:00
florian
2ec28c79cb docs - fix README toc 2021-08-08 00:30:54 +02:00
florian
fec60a4b14 ui - minor styling fixes 2021-08-08 00:04:56 +02:00
florian
dd7d1a2c78 ui - fix example, subpath behind reverse proxy and add socket proxy rights for swarm 2021-08-07 21:56:08 +02:00
florian
0c1883472d docs - edit kubernetes overview image and add configuration section on the readme 2021-08-07 20:02:47 +02:00
florian
4e6eab794d docs - fix wrong swarm image 2021-08-06 23:19:50 +02:00
florian
b23135b663 docs - add docker and kubernetes images 2021-08-06 23:15:24 +02:00
bunkerity
ace9be3979 docs - add autoconf and swarm images 2021-08-06 16:51:46 +02:00
bunkerity
8958e5107c docs - add overview image 2021-08-06 11:55:24 +02:00
florian
b2cfc15c2a security - add security policy 2021-08-05 23:25:50 +02:00
bunkerity
94bef079a8 examples - add architecture images 2021-08-05 17:05:31 +02:00
bunkerity
50266c2285 examples - add the last missing README.md stubs 2021-08-05 09:48:27 +02:00
florian
22e2fe869f examples improvement - added some README.md stubs 2021-08-05 00:10:21 +02:00
bunkerity
55186bbef5 examples improvement - hardened, joomla, kubernetes, load-balancer and moodle 2021-08-04 16:54:59 +02:00
bunkerity
d8286ced7c examples improvement - certbot cloudflare and wildcard, clamav, crowdsec, ghost and gogs 2021-08-04 15:02:55 +02:00
bunkerity
44de2253d2 examples improvement - traefik alternative, autoconf reverse proxy and basic website 2021-08-04 12:01:07 +02:00
bunkerity
6d73fbdedb examples - update authelia and autoconf-php 2021-08-04 10:47:38 +02:00
florian
b6809266af autoconf - let's encrypt support for ingress controller 2021-08-03 22:38:00 +02:00
bunkerity
4e178b474c autoconf - basic ingress controller support for kubernetes 2021-08-03 16:39:39 +02:00
bunkerity
021147f9d9 autoconf - fix wait and redis 2021-08-02 16:37:50 +02:00
bunkerity
5a26d06c87 autoconf - fix infinite lock and honor DOCKER_HOST env var 2021-08-02 11:52:00 +02:00
bunkerity
bc01427def ignore CVE-2021-36159 and redirect job logs as root when using autoconf 2021-08-02 10:41:46 +02:00
florian
652614f41b autoconf - use DNS for Swarm instances discovery 2021-08-01 23:10:29 +02:00
bunkerity
24d9cce82f autoconf - various bug fixes in Swarm mode 2021-07-30 17:08:54 +02:00
bunkerity
f866ef6325 autoconf - minor fixes, prepare Swarm testing 2021-07-29 17:32:33 +02:00
bunkerity
1a32e7c02c autoconf - various bug fixes with DockerController 2021-07-29 15:43:51 +02:00
bunkerity
7180378d0c autoconf - init Config refactoring 2021-07-29 10:19:34 +02:00
florian
6e66571fb9 various cleaning 2021-07-28 23:25:05 +02:00
florian
f44e41cede jobs - lock and reload management 2021-07-28 23:04:56 +02:00
bunkerity
26db144df4 autoconf refactoring and fix CVE-2021-36159 2021-07-28 17:27:39 +02:00
bunkerity
a68ad53c3f autoconf - controller classes 2021-07-28 15:52:35 +02:00
bunkerity
01bba1d3f6 autoconf - init refactoring before k8s integration 2021-07-28 11:56:45 +02:00
bunkerity
0597074438 k8s - init work on parsing ingress rules, helpers to setup on k8s, basic examples 2021-07-27 17:15:39 +02:00
bunkerity
bc3c17a2f0 examples - init k8s example 2021-07-27 12:03:31 +02:00
bunkerity
556836b499 autoconf - init annotations parser for k8s 2021-07-27 09:56:58 +02:00
bunkerity
22612f1757 minor edit on Linux tests and init work on k8s API 2021-07-26 17:15:24 +02:00
bunkerity
50c279617b jobs - improved log and reload management 2021-07-23 17:28:05 +02:00
bunkerity
ef8969e2cf certbot - add USE_LETS_ENCRYPT_STAGING=yes/no env var for using staging or production servers of let's encrypt 2021-07-23 11:51:50 +02:00
bunkerity
0dc2a5ec25 edit visibility of Job members and integration of a generic checker for nginx 2021-07-22 23:07:35 +02:00
bunkerity
9a207dfdc5 fix missing import in generator, expand networks to ips in jobs and init work on a generic checker with shared dict and redis support 2021-07-22 17:11:15 +02:00
bunkerity
a60fbbb5b3 hotfix - fix CVE-2021-33560 2021-07-22 14:50:09 +02:00
bunkerity
a1b9010d9e pull v1.2.8 fixes when applicable 2021-07-22 14:47:37 +02:00
bunkerity
3178545c2f v1.2.8 release 2021-07-22 14:36:07 +02:00
bunkerity
36b8760d4d resolve bugs on the stable version 2021-07-22 12:12:55 +02:00
bunkerity
8bb6676f58 settings - fix PHP_* again 2021-07-22 09:58:17 +02:00
bunkerity
4234f82c01 settings - edit EMAIL_LETS_ENCRYPT regex 2021-07-22 09:47:34 +02:00
bunkerity
b99fb27df5 fix missing parameter when calling reload in autoconf and edit REMOTE_PHP_PATH regex 2021-07-22 09:29:15 +02:00
bunkerity
876fcd1814 conf - add WORKER_PROCESSES 2021-07-21 22:18:02 +02:00
bunkerity
26dc796155 jobs - fix line edit 2021-07-21 22:14:05 +02:00
bunkerity
280d189864 jobs - avoid reload when not necessary 2021-07-21 17:09:42 +02:00
bunkerity
5f845680ff jobs - edit referrers and user-agents data and init work on autoconf integration 2021-07-21 14:42:55 +02:00
bunkerity
d12369c900 jobs - various bugs fixed and old files removed 2021-07-21 11:55:14 +02:00
bunkerity
366e39f591 jobs - SelfSignedCert, runner and reloader 2021-07-20 22:52:01 +02:00
bunkerity
71741b2d34 jobs - cache management 2021-07-20 14:34:39 +02:00
bunkerity
2fca4cd014 jobs - logging and error management 2021-07-20 12:14:50 +02:00
bunkerity
fccf14627f jobs - python stubs 2021-07-20 11:41:31 +02:00
bunkerity
b3684efaf6 jobs - init work on refactoring 2021-07-20 09:58:09 +02:00
bunkerity
82548378ae crowdsec - move as external plugin 2021-07-16 10:05:53 +02:00
bunkerity
b926b0db62 examples - use example.com instead of website.com 2021-07-16 09:40:02 +02:00
bunkerity
6713f56ec1 linux - fix centos install 2021-07-15 17:20:15 +02:00
bunkerity
2b923c05c1 compile and install LUA 5.1.5 to /opt/bunkerized-nginx/deps and introduced REDIRECT_TO feature 2021-07-15 16:36:26 +02:00
bunkerity
71cf3cf5c1 use local sources when building Docker image, add LOCAL_PHP and LOCAL_PHP_REMOTE to settings.json and fix pip bug related to removed working directory 2021-07-13 11:00:18 +02:00
bunkerity
8e3dbf1c70 fixed some fedora bugs, support LOCAL_PHP and LOCAL_PHP_PATH and sample variables.env 2021-07-12 22:21:13 +02:00
bunkerity
49ada6a8c5 linux - init work on fedora support 2021-07-12 16:57:57 +02:00
bunkerity
947e86f7c3 linux - uninstall script 2021-07-12 15:56:01 +02:00
bunkerity
a12561a85b remove useless nginx-keys folder and add lua_package_cpath to http conf 2021-07-12 15:08:37 +02:00
bunkerity
6b19bd0264 deps - add cjson LUA files to deps folder 2021-07-11 23:53:54 +02:00
bunkerity
6738b28b99 deps - move dependencies to dedicated /opt/bunkerized-nginx/deps folder to avoid messing with the system 2021-07-11 23:41:33 +02:00
bunkerity
010c0fd6d4 rename gen/requirements.py to requirements.txt, add git/bash to Docker deps and fix typos in README 2021-07-11 17:58:35 +02:00
bunkerity
ecf30a71f7 deps - init work on single install script 2021-07-11 16:45:15 +02:00
bunkerity
ffc4fc950e deps - manual compile/install of libmaxmind and upgrade lua-resty-core 2021-07-11 15:33:40 +02:00
Florian Pitance
b9955699b7 Merge pull request #152 from thelittlefireman/patch-11
Upgrade deps
2021-07-11 13:57:39 +02:00
thelittlefireman
860fd1ace5 Upgrade deps
luajit2 to v2.1-20210510
upgrade lua-resty-core to 0.1.22
lua-resty-dns to 0.22
lua-resty-lrucache to 0.11
lua-resty-session to 3.8
lua-resty-string to 0.14
lua-resty-http to 0.16.1
Upgrade from https://github.com/Neopallium/lualogging to https://github.com/lunarmodules/lualogging.git
luasec to 1.0.1
2021-07-10 22:24:16 +02:00
thelittlefireman
eb5d13fb8d Upgrade lua-nginx module to 0.10.20 2021-07-10 22:05:44 +02:00
thelittlefireman
ca41987cd6 Upgrade corerules to 3.3.0 & modsecurity to 3.0.5 2021-07-10 21:53:54 +02:00
bunkerity
3af1b397fa UI - digging bugs from services, still some work to do 2021-07-09 16:58:55 +02:00
bunkerity
72a09eac6d UI - add CSRF protection 2021-07-09 16:01:51 +02:00
bunkerity
0d3f7d3925 UI - admin authentication and bootstrap update 2021-07-09 14:17:50 +02:00
bunkerity
6be082e0a9 UI - init work on admin account 2021-07-09 12:18:42 +02:00
bunkerity
4947796c99 UI - fix instances bugs 2021-07-09 11:26:23 +02:00
bunkerity
ba197dfa43 UI - bind gunicorn to 127.0.0.1/0.0.0.0:5000 2021-07-09 10:55:09 +02:00
bunkerity
4dd1ff8479 UI - copy from helpers, systemd service and instances page update 2021-07-09 10:27:38 +02:00
bunkerity
f771ec43f1 ui - init Instances class to support Linux and API for Docker/Swarm 2021-07-08 23:45:58 +02:00
bunkerity
e241b0c939 logs - move everything from /var/log to /var/log/nginx 2021-07-07 14:36:00 +02:00
bunkerity
d03a1a6e3b linux - add jobs.log 2021-07-07 13:47:37 +02:00
bunkerity
2c9c9fb62c linux - run master process as root 2021-07-07 13:38:13 +02:00
bunkerity
deb28c5991 autoconf - fix folders 2021-07-05 17:40:26 +02:00
bunkerity
2ea7331dad jobs - disable post-jobs when SWARM_MODE=yes on SIGHUP 2021-07-05 16:49:27 +02:00
bunkerity
92ee40819e whitelist - fix /.well-known/acme-challenge whitelist for let's encrypt 2021-07-05 14:15:25 +02:00
bunkerity
2ccfb26e81 docker - fix CVE-2021-33560 2021-07-02 11:16:19 +02:00
bunkerity
70f9f8417e templates - add missing new line when necessary 2021-07-02 09:48:57 +02:00
bunkerity
c4aef1d606 authelia - choose portal or auth basic mode 2021-06-29 20:32:15 +02:00
bunkerity
a385183d88 authelia - various fixes 2021-06-29 17:14:57 +02:00
bunkerity
cec47f3a75 body injection feature and add authelia to documentation 2021-06-29 16:36:24 +02:00
bunkerity
c894c8370e authelia - add variables to settings.json 2021-06-29 16:00:47 +02:00
bunkerity
f73b088f79 authelia - initial work 2021-06-29 15:52:56 +02:00
Florian Pitance
130c6752dd Merge pull request #148 from aFresquetIntech/dev
zammad example
2021-06-28 17:13:37 +02:00
alexis
f97ea67855 Create .env 2021-06-28 16:22:01 +02:00
alexis
8504299861 Fix
Renamed file

Fixed lines 126 to 130
2021-06-28 15:22:57 +02:00
alexis
4a8da40cf2 reverse-proxy-zammad 2021-06-28 09:42:52 +02:00
bunkerity
0114c7b09f examples - edit basic PHP 2021-06-24 21:32:49 +02:00
bunkerity
bebe89afb0 linux - edit path for default errors, ignore comments in variables.env, install/prepare certbot 2021-06-24 15:22:57 +02:00
bunkerity
b2cceb608c linux - fix centos 2021-06-23 16:58:24 +02:00
bunkerity
37f5e4ed71 linux - fixed debian/ubuntu but still some work needed on centos 2021-06-23 15:46:04 +02:00
bunkerity
98568a57c9 linux - fix /var/log and typo in daemon directive 2021-06-23 14:26:33 +02:00
bunkerity
4991922878 linux - fix daemon directive and rights on /etc/nginx 2021-06-23 14:06:39 +02:00
bunkerity
bcb8acc364 linux - add RX permissions to /opt 2021-06-23 12:16:31 +02:00
bunkerity
a9279053a4 linux - add executable right to gen/main.py 2021-06-22 22:02:42 +02:00
bunkerity
60057a17e3 linux - fix tests docker cp and pass single -c argument to su 2021-06-22 21:56:10 +02:00
bunkerity
d0366fcc0d linux - started work on bunkerized-nginx command 2021-06-22 21:19:12 +02:00
bunkerity
b448d91ca8 actions - fix centos test and docker image name when pushing 2021-06-22 13:59:42 +02:00
bunkerity
e309ce6fd5 docker - fix permissions on /opt 2021-06-22 10:58:07 +02:00
bunkerity
37090dc66e actions - fix manifest error with buildx and load 2021-06-22 10:35:29 +02:00
bunkerity
6bb6facd88 add load: true when autobuilding images and move from /bin/sh to /bin/bash 2021-06-22 10:16:20 +02:00
bunkerity
a1fcbd4b83 fix actions and configure 2021-06-21 18:27:12 +02:00
bunkerity
09a2a4f9e5 github actions refactoring 2021-06-21 15:32:13 +02:00
bunkerity
1e02368e8a linux/docker - common /opt/bunkerized-nginx folder 2021-06-21 14:56:48 +02:00
bunkerity
bbb5134a39 fix configure arguments and CRS include 2021-06-20 21:51:57 +02:00
bunkerity
b0f93fb840 fix Dockerfile again 2021-06-19 22:15:42 +02:00
bunkerity
c892f037db fix Dockerfile 2021-06-19 22:06:00 +02:00
bunkerity
731c0f61df linux - init work on installer 2021-06-19 22:03:14 +02:00
bunkerity
93543d3962 Linux - use the same dependencies script for Docker 2021-06-18 17:04:22 +02:00
bunkerity
5ec9e6ab49 linux - CentOS 7 install 2021-06-18 14:58:41 +02:00
bunkerity
cc0d0af8d2 linux - ubuntu installer 2021-06-18 12:22:19 +02:00
bunkerity
43d2097d14 linux - nginx install on Debian 2021-06-17 21:33:16 +02:00
bunkerity
f880e5e2aa linux - continued work on install helpers for Debian 2021-06-16 21:26:55 +02:00
bunkerity
9636013f5e linux - started work on installer 2021-06-16 17:46:20 +02:00
339 changed files with 8158 additions and 3307 deletions


@@ -1,26 +0,0 @@
name: Automatic test on autoconf
on:
  push:
    branches: [dev, master]
  pull_request:
    branches: [dev, master]
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout source code
        uses: actions/checkout@v2
      - name: Build the image
        run: docker build -t autotest-autoconf -f autoconf/Dockerfile .
      - name: Run Trivy security scanner
        uses: aquasecurity/trivy-action@master
        with:
          image-ref: 'autotest-autoconf'
          format: 'table'
          exit-code: '1'
          ignore-unfixed: true
          severity: 'UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL'


@@ -1,26 +0,0 @@
name: Automatic test on ui
on:
  push:
    branches: [dev, master]
  pull_request:
    branches: [dev, master]
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout source code
        uses: actions/checkout@v2
      - name: Build the image
        run: docker build -t autotest-ui -f ui/Dockerfile .
      - name: Run Trivy security scanner
        uses: aquasecurity/trivy-action@master
        with:
          image-ref: 'autotest-ui'
          format: 'table'
          exit-code: '1'
          ignore-unfixed: true
          severity: 'UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL'


@@ -1,26 +0,0 @@
name: Automatic test
on:
  push:
    branches: [dev, master]
  pull_request:
    branches: [dev, master]
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout source code
        uses: actions/checkout@v2
      - name: Build the image
        run: docker build -t autotest .
      - name: Run autotest
        run: docker run autotest test
      - name: Run Trivy security scanner
        uses: aquasecurity/trivy-action@master
        with:
          image-ref: 'autotest'
          format: 'table'
          exit-code: '1'
          ignore-unfixed: true
          severity: 'UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL'


@@ -12,6 +12,20 @@ jobs:
      - name: Checkout source code
        uses: actions/checkout@v2
      # temp fix : can't use buildx + load because of manifest error
      # so we need to build the image the traditional way
      - name: Temp build to check security issues
        run: docker build -t bunkerized-nginx-autoconf -f autoconf/Dockerfile .
      - name: Run Trivy security scanner
        uses: aquasecurity/trivy-action@master
        with:
          image-ref: 'bunkerized-nginx-autoconf'
          format: 'table'
          exit-code: '1'
          ignore-unfixed: true
          severity: 'UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL'
      - name: Set variables
        run: |
          VER=$(cat VERSION | tr -d '\n')
@@ -23,6 +37,15 @@ jobs:
      - name: Setup Buildx
        uses: docker/setup-buildx-action@v1
      - name: Setup Docker cache
        uses: actions/cache@v2
        if: github.ref == 'refs/heads/dev'
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-
      - name: Login to Docker Hub
        uses: docker/login-action@v1
        with:
@@ -38,6 +61,14 @@ jobs:
          platforms: linux/amd64,linux/386,linux/arm/v7,linux/arm64/v8
          push: true
          tags: bunkerity/bunkerized-nginx-autoconf:dev
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new
      - name: Move Docker cache
        if: github.ref == 'refs/heads/dev'
        run: |
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
      - name: Build and push (master)
        uses: docker/build-push-action@v2
@@ -48,3 +79,4 @@ jobs:
          platforms: linux/amd64,linux/386,linux/arm/v7,linux/arm64/v8
          push: true
          tags: bunkerity/bunkerized-nginx-autoconf:latest,bunkerity/bunkerized-nginx-autoconf:${{ env.VERSION }}


@@ -12,6 +12,20 @@ jobs:
      - name: Checkout source code
        uses: actions/checkout@v2
      # temp fix : can't use buildx + load because of manifest error
      # so we need to build the image the traditional way
      - name: Temp build to check security issues
        run: docker build -t bunkerized-nginx-ui -f ui/Dockerfile .
      - name: Run Trivy security scanner
        uses: aquasecurity/trivy-action@master
        with:
          image-ref: 'bunkerized-nginx-ui'
          format: 'table'
          exit-code: '1'
          ignore-unfixed: true
          severity: 'UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL'
      - name: Set variables
        run: |
          VER=$(cat VERSION | tr -d '\n')
@@ -23,6 +37,15 @@ jobs:
      - name: Setup Buildx
        uses: docker/setup-buildx-action@v1
      - name: Setup Docker cache
        uses: actions/cache@v2
        if: github.ref == 'refs/heads/dev'
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-
      - name: Login to Docker Hub
        uses: docker/login-action@v1
        with:
@@ -38,6 +61,14 @@ jobs:
          platforms: linux/amd64,linux/386,linux/arm/v7,linux/arm64/v8
          push: true
          tags: bunkerity/bunkerized-nginx-ui:dev
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new
      - name: Move Docker cache
        if: github.ref == 'refs/heads/dev'
        run: |
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
      - name: Build and push (master)
        uses: docker/build-push-action@v2


@@ -12,6 +12,23 @@ jobs:
      - name: Checkout source code
        uses: actions/checkout@v2
      # temp fix : can't use buildx + load because of manifest error
      # so we need to build the image the traditional way
      - name: Temp build to check security issues
        run: docker build -t bunkerized-nginx .
      - name: Run Trivy security scanner
        uses: aquasecurity/trivy-action@master
        with:
          image-ref: 'bunkerized-nginx'
          format: 'table'
          exit-code: '1'
          ignore-unfixed: true
          severity: 'UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL'
      - name: Run autotest
        run: docker run bunkerized-nginx test
      - name: Set variables
        run: |
          VER=$(cat VERSION | tr -d '\n')


@@ -0,0 +1,30 @@
name: Automatic test for Linux
on:
push:
branches: [dev, master]
pull_request:
branches: [dev, master]
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout source code
uses: actions/checkout@v2
- name: Build Debian with systemd
run: docker build -t debian-systemd -f tests/Dockerfile-debian .
- name: Build Ubuntu with systemd
run: docker build -t ubuntu-systemd -f tests/Dockerfile-ubuntu .
- name: Build CentOS with systemd
run: docker build -t centos-systemd -f tests/Dockerfile-centos .
- name: Build Fedora with systemd
run: docker build -t fedora-systemd -f tests/Dockerfile-fedora .
- name: Debian test
run: ./tests/linux-run.sh debian-systemd test-debian
- name: Ubuntu test
run: ./tests/linux-run.sh ubuntu-systemd test-ubuntu
- name: CentOS test
run: ./tests/linux-run.sh centos-systemd test-centos
- name: Fedora test
run: ./tests/linux-run.sh fedora-systemd test-fedora

.gitmodules (new file)

@@ -0,0 +1,7 @@
[submodule "bunkerized-nginx-crowdsec"]
	path = examples/crowdsec/bunkerized-nginx-crowdsec
	url = https://github.com/bunkerity/bunkerized-nginx-crowdsec
[submodule "examples/clamav/bunkerized-nginx-clamav"]
	path = examples/clamav/bunkerized-nginx-clamav
	url = https://github.com/bunkerity/bunkerized-nginx-clamav.git


@@ -1,5 +1,33 @@
# Changelog
## v1.3.0
- Kubernetes integration in beta
- Linux integration in beta
- autoconf refactoring
- jobs refactoring
- UI refactoring
- UI security : login/password authentication and CSRF protection
- various dependencies updates
- move CrowdSec as an external plugin
- Authelia support
- improve various regexes
- add INJECT_BODY variable
- add WORKER_PROCESSES variable
- add USE_LETS_ENCRYPT_STAGING variable
- add LOCAL_PHP and LOCAL_PHP_PATH variables
- add REDIRECT_TO variable
## v1.2.8 - 2021/07/22
- Fix broken links in README
- Fix regex for EMAIL_LETS_ENCRYPT
- Fix regex for REMOTE_PHP and REMOTE_PHP_PATH
- Fix regex for SELF_SIGNED_*
- Fix various bugs related to web UI
- Fix bug in autoconf (missing instances parameter to reload function)
- Remove old .env files when generating a new configuration
## v1.2.7 - 2021/06/14
- Add custom robots.txt and sitemap to RTD


@@ -1,33 +1,19 @@
FROM nginx:1.20.1-alpine
COPY nginx-keys/ /tmp/nginx-keys
COPY compile.sh /tmp/compile.sh
RUN chmod +x /tmp/compile.sh && \
/tmp/compile.sh && \
rm -rf /tmp/*
COPY . /tmp/bunkerized-nginx-docker
COPY helpers/install.sh /tmp/install.sh
RUN apk --no-cache add bash && \
chmod +x /tmp/install.sh && \
/tmp/install.sh && \
rm -f /tmp/install.sh
COPY dependencies.sh /tmp/dependencies.sh
RUN chmod +x /tmp/dependencies.sh && \
/tmp/dependencies.sh && \
rm -rf /tmp/dependencies.sh
COPY helpers/docker.sh /tmp/docker.sh
RUN chmod +x /tmp/docker.sh && \
/tmp/docker.sh && \
rm -f /tmp/docker.sh
COPY gen/ /opt/gen
COPY entrypoint/ /opt/entrypoint
COPY confs/ /opt/confs
COPY scripts/ /opt/scripts
COPY lua/ /usr/local/lib/lua
COPY antibot/ /antibot
COPY defaults/ /defaults
COPY settings.json /opt
COPY misc/cron /etc/crontabs/nginx
COPY prepare.sh /tmp/prepare.sh
RUN chmod +x /tmp/prepare.sh && \
/tmp/prepare.sh && \
rm -f /tmp/prepare.sh
# Fix CVE-2021-22901, CVE-2021-22898 and CVE-2021-22897
RUN apk add "curl>=7.77.0-r0"
# Fix CVE-2021-22901, CVE-2021-22898, CVE-2021-22897, CVE-2021-33560 and CVE-2021-36159
RUN apk add "curl>=7.77.0-r0" "libgcrypt>=1.8.8-r0" "apk-tools>=2.12.6-r0"
VOLUME /www /http-confs /server-confs /modsec-confs /modsec-crs-confs /cache /pre-server-confs /acme-challenge /plugins
@@ -35,6 +21,6 @@ EXPOSE 8080/tcp 8443/tcp
USER nginx:nginx
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 CMD [ -f /tmp/nginx.pid ] || exit 1
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 CMD [ -f /tmp/nginx.pid ] || [ -f /tmp/nginx-temp.pid ] || exit 1
ENTRYPOINT ["/opt/entrypoint/entrypoint.sh"]
ENTRYPOINT ["/opt/bunkerized-nginx/entrypoint/entrypoint.sh"]

README.md

@@ -1,9 +1,9 @@
<p align="center">
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/logo.png?raw=true" width="425" />
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/logo.png?raw=true" width="425" />
</p>
<p align="center">
<img src="https://img.shields.io/badge/bunkerized--nginx-1.2.7-blue" />
<img src="https://img.shields.io/badge/bunkerized--nginx-1.3.0-blue" />
<img src="https://img.shields.io/badge/nginx-1.20.1-blue" />
<img src="https://img.shields.io/github/last-commit/bunkerity/bunkerized-nginx" />
<img src="https://img.shields.io/github/workflow/status/bunkerity/bunkerized-nginx/Automatic%20test?label=automatic%20test" />
@@ -14,20 +14,22 @@
<p align="center">
<strong>
<a href="https://bunkerized-nginx.readthedocs.io">Documentation</a>
<span> | </span>
&#124;
<a href="https://github.com/bunkerity/bunkerized-nginx/tree/master/examples">Examples</a>
<span> | </span>
&#124;
<a href="https://www.bunkerity.com/category/bunkerized-nginx/">Blog posts</a>
<span> | </span>
&#124;
<a href="https://coso.me/bunkerity-chat">Community chat</a>
<span> | </span>
&#124;
<a href="https://coso.me/bunkerity">Follow us</a>
</strong>
</p>
nginx Docker image secure by default.
> Make security by default great again !
Avoid the hassle of following security best practices "by hand" each time you need a web server or reverse proxy. Bunkerized-nginx provides generic security configs, settings and tools so you don't need to do it yourself.
bunkerized-nginx is a web server based on the well-known nginx and focused on security. It integrates into existing environments (Linux, Docker, Swarm, Kubernetes, ...) to make your web services "secure by default" without any hassle. Security best practices are applied automatically while you keep control of every setting to meet your own use case.
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/overview.png?raw=true" />
Non-exhaustive list of features :
- HTTPS support with transparent Let's Encrypt automation
@@ -36,389 +38,166 @@ Non-exhaustive list of features :
- Automatic ban of strange behaviors
- Antibot challenge through cookie, javascript, captcha or recaptcha v3
- Block TOR, proxies, bad user-agents, countries, ...
- Block known bad IP with DNSBL and CrowdSec
- Block known bad IP with DNSBL
- Prevent bruteforce attacks with rate limiting
- Plugins system for external security checks (e.g. : ClamAV)
- Plugins system for external security checks (ClamAV, CrowdSec, ...)
- Easy to configure with environment variables or web UI
- Automatic configuration with container labels
- Docker Swarm support
- Seamless integration into existing environments : Linux, Docker, Swarm, Kubernetes, ...
Fooling automated tools/scanners :
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/demo.gif?raw=true" />
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/demo.gif?raw=true" />
You can find a live demo at https://demo-nginx.bunkerity.com, feel free to do some security tests.
You can find a live demo at [https://demo-nginx.bunkerity.com](https://demo-nginx.bunkerity.com), feel free to do some security tests.
# Table of contents
<details>
<summary>Click to show</summary>
- [Table of contents](#table-of-contents)
- [Quickstart guide](#quickstart-guide)
* [Run HTTP server with default settings](#run-http-server-with-default-settings)
* [In combination with PHP](#in-combination-with-php)
* [Run HTTPS server with automated Let's Encrypt](#run-https-server-with-automated-lets-encrypt)
* [As a reverse proxy](#as-a-reverse-proxy)
* [Behind a reverse proxy](#behind-a-reverse-proxy)
- [Integrations](#integrations)
* [Docker](#docker)
* [Docker autoconf](#docker-autoconf)
* [Swarm](#swarm)
* [Kubernetes](#kubernetes)
* [Linux](#linux)
- [Configuration](#configuration)
* [Singlesite](#singlesite)
* [Multisite](#multisite)
* [Automatic configuration](#automatic-configuration)
* [Swarm mode](#swarm-mode)
* [Web UI](#web-ui)
* [Special folders](#special-folders)
- [Web UI](#web-ui)
- [Security tuning](#security-tuning)
- [Going further](#going-further)
- [License](#license)
- [Contributing](#contributing)
- [Security policy](#security-policy)
</details>
# Quickstart guide
# Integrations
## Run HTTP server with default settings
## Docker
You can get official prebuilt Docker images of bunkerized-nginx for x86, x64, armv7 and aarch64/arm64 architectures on Docker Hub :
```shell
docker run -p 80:8080 -v /path/to/web/files:/www:ro bunkerity/bunkerized-nginx
$ docker pull bunkerity/bunkerized-nginx
```
Web files are stored in the /www directory and the container will serve files from there. Please note that *bunkerized-nginx* doesn't run as root but as an unprivileged user with UID/GID 101, therefore you should set the rights of */path/to/web/files* accordingly.
## In combination with PHP
Or you can build it from source if you wish :
```shell
docker network create mynet
$ git clone https://github.com/bunkerity/bunkerized-nginx.git
$ cd bunkerized-nginx
$ docker build -t bunkerized-nginx .
```
```shell
docker run --network mynet \
-p 80:8080 \
-v /path/to/web/files:/www:ro \
-e REMOTE_PHP=myphp \
-e REMOTE_PHP_PATH=/app \
bunkerity/bunkerized-nginx
To use bunkerized-nginx as a Docker container you have to pass specific environment variables, mount volumes and redirect ports to make it accessible from the outside.
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/docker.png?raw=true" />
You will find more information about Docker integration in the [documentation](https://bunkerized-nginx.readthedocs.io/en/latest/integrations.html#docker).
## Docker autoconf
The downside of using environment variables is that the container needs to be recreated each time there is an update, which is not very convenient. To counter that issue, you can use another image called bunkerized-nginx-autoconf which will listen for Docker events and automatically configure the bunkerized-nginx instance in real time without recreating the container. Instead of defining environment variables for the bunkerized-nginx container, you simply add labels to your web services and bunkerized-nginx-autoconf will "automagically" take care of the rest.
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/autoconf-docker.png?raw=true" />
You will find more information about Docker autoconf feature in the [documentation](https://bunkerized-nginx.readthedocs.io/en/latest/integrations.html#docker-autoconf).
## Swarm
Using bunkerized-nginx in a Docker Swarm cluster requires a shared folder accessible from both managers and workers (anything like NFS, GlusterFS, CephFS or even SSHFS will work). The deployment and configuration are very similar to the "Docker autoconf" one but with services instead of containers. A service based on the bunkerized-nginx-autoconf image needs to be scheduled on a manager node (don't worry, it doesn't expose any network port for obvious security reasons). This service will listen for Docker Swarm events like service creation or deletion and generate the configuration according to the labels of each service. Once configuration generation is done, the bunkerized-nginx-autoconf service will send a reload order to all the bunkerized-nginx tasks so they can load the new configuration.
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/swarm.png?raw=true" />
You will find more information about Docker Swarm integration in the [documentation](https://bunkerized-nginx.readthedocs.io/en/latest/integrations.html#docker-swarm).
## Kubernetes
**This integration is still in beta, please file an issue if you find a bug or have an idea on how to improve it.**
Using bunkerized-nginx in a Kubernetes cluster requires a shared folder accessible from the nodes (anything like NFS, GlusterFS, CephFS or even SSHFS will work). The bunkerized-nginx-autoconf acts as an Ingress Controller and connects to the k8s API to get cluster events and generate a new configuration when it's needed. Once the configuration is generated, the Ingress Controller sends a reload order to the bunkerized-nginx instances running in the cluster.
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/kubernetes.png?raw=true" />
You will find more information about Kubernetes integration in the [documentation](https://bunkerized-nginx.readthedocs.io/en/latest/integrations.html#kubernetes).
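Since the controller parses standard ingress rules, exposing a service boils down to creating a regular Ingress resource. Here is a minimal, hypothetical sketch (the names, host and the exact `apiVersion` supported by your cluster are assumptions, not taken from the project's docs) :
```shell
# Apply a plain Ingress resource that the bunkerized-nginx Ingress Controller
# described above can pick up ; every name below is a placeholder.
kubectl apply -f - << "EOF"
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myapp
spec:
  rules:
  - host: app.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: myapp
            port:
              number: 8080
EOF
```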
## Linux
**This integration is still in beta, please file an issue if you find a bug or have an idea on how to improve it.**
List of supported Linux distributions :
- Debian buster (10)
- Ubuntu focal (20.04)
- CentOS 7
- Fedora 34
Unlike containers, Linux integration can be tedious because bunkerized-nginx has a bunch of dependencies that need to be installed before we can use it. Fortunately, we provide a [helper script](https://github.com/bunkerity/bunkerized-nginx/blob/master/helpers/install.sh) to make the process easier and automatic. Once installed, configuration is really simple : all you have to do is edit the `/opt/bunkerized-nginx/variables.env` configuration file and run the `bunkerized-nginx` command to apply it.
You will find more information about Linux integration in the [documentation](https://bunkerized-nginx.readthedocs.io/en/latest/integrations.html#linux).
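As a rough sketch, the whole flow looks like this (the raw URL is inferred from the helper script location above, so verify it and review the script before running anything as root) :
```shell
# Download and run the helper script (review it first !)
curl -fsSL https://raw.githubusercontent.com/bunkerity/bunkerized-nginx/master/helpers/install.sh -o /tmp/install.sh
sudo bash /tmp/install.sh
# Edit the configuration then apply it (root may be required)
sudo nano /opt/bunkerized-nginx/variables.env
sudo bunkerized-nginx
```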
# Configuration
The configuration is made through what we call "environment variables", passed as key/value pairs. You will find the [quickstart guide](https://bunkerized-nginx.readthedocs.io/en/latest/quickstart_guide.html) and the complete [list of environment variables](https://bunkerized-nginx.readthedocs.io/en/latest/environment_variables.html) in the documentation.
## Singlesite
By default, bunkerized-nginx will only create one server block in the nginx configuration. This covers the simplest use case where you want to protect one service easily and quickly.
Here is a dummy configuration as an example :
```conf
SERVER_NAME=example.com www.example.com
AUTO_LETS_ENCRYPT=yes
DISABLE_DEFAULT_SERVER=yes
USE_REVERSE_PROXY=yes
REVERSE_PROXY_URL=/
REVERSE_PROXY_HOST=http://internal-service.example.local:8080
# Uncomment the HTTP_PORT and HTTPS_PORT variables when using Linux configuration
#HTTP_PORT=80
#HTTPS_PORT=443
```
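How this file is consumed depends on the integration : on Linux it is the `/opt/bunkerized-nginx/variables.env` file mentioned above, and with Docker one option (a sketch, assuming you saved the configuration as `variables.env`) is the `--env-file` flag :
```shell
# Pass all the key/value pairs at once ; Docker ignores the # comment lines
docker run -p 80:8080 \
           -p 443:8443 \
           --env-file variables.env \
           bunkerity/bunkerized-nginx
```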
```shell
docker run --network mynet \
--name myphp \
-v /path/to/web/files:/app \
php:fpm
```
The `REMOTE_PHP` environment variable lets you define the address of a remote PHP-FPM instance that will execute the .php files. `REMOTE_PHP_PATH` must be set to the directory where the PHP container will find the files.
## Run HTTPS server with automated Let's Encrypt
```shell
docker run -p 80:8080 \
-p 443:8443 \
-v /path/to/web/files:/www:ro \
-v /where/to/save/certificates:/etc/letsencrypt \
-e SERVER_NAME=www.yourdomain.com \
-e AUTO_LETS_ENCRYPT=yes \
-e REDIRECT_HTTP_TO_HTTPS=yes \
bunkerity/bunkerized-nginx
```
Certificates are stored in the /etc/letsencrypt directory, you should save it on your local drive. Please note that *bunkerized-nginx* doesn't run as root but as an unprivileged user with UID/GID 101 therefore you should set the rights of */where/to/save/certificates* accordingly.
If you don't want your web server to listen on HTTP, add the environment variable `LISTEN_HTTP` with a *no* value (e.g. HTTPS only), as sketched after the list below. But Let's Encrypt needs port 80 to be open, so keeping the port redirection is mandatory.
Here you have three environment variables :
- `SERVER_NAME` : define the FQDN of your webserver, this is mandatory for Let's Encrypt (www.yourdomain.com should point to your IP address)
- `AUTO_LETS_ENCRYPT` : enable automatic Let's Encrypt creation and renewal of certificates
- `REDIRECT_HTTP_TO_HTTPS` : enable HTTP to HTTPS redirection
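Here is a hedged sketch of the HTTPS-only variant mentioned above (paths and domain are placeholders) : `LISTEN_HTTP` is set to *no* but port 80 stays published so the Let's Encrypt challenges can still be solved :
```shell
docker run -p 80:8080 \
           -p 443:8443 \
           -v /path/to/web/files:/www:ro \
           -v /where/to/save/certificates:/etc/letsencrypt \
           -e SERVER_NAME=www.yourdomain.com \
           -e AUTO_LETS_ENCRYPT=yes \
           -e LISTEN_HTTP=no \
           bunkerity/bunkerized-nginx
```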
## As a reverse proxy
```shell
docker run -p 80:8080 \
-e USE_REVERSE_PROXY=yes \
-e REVERSE_PROXY_URL=/ \
-e REVERSE_PROXY_HOST=http://myserver:8080 \
bunkerity/bunkerized-nginx
```
This is a simple reverse proxy to a unique application. If you have more than one application you can add more REVERSE_PROXY_URL/REVERSE_PROXY_HOST by appending a suffix number like this :
```shell
docker run -p 80:8080 \
-e USE_REVERSE_PROXY=yes \
-e REVERSE_PROXY_URL_1=/app1/ \
-e REVERSE_PROXY_HOST_1=http://myapp1:3000/ \
-e REVERSE_PROXY_URL_2=/app2/ \
-e REVERSE_PROXY_HOST_2=http://myapp2:3000/ \
bunkerity/bunkerized-nginx
```
## Behind a reverse proxy
```shell
docker run -p 80:8080 \
-v /path/to/web/files:/www \
-e PROXY_REAL_IP=yes \
bunkerity/bunkerized-nginx
```
The `PROXY_REAL_IP` environment variable, when set to *yes*, activates the [ngx_http_realip_module](https://nginx.org/en/docs/http/ngx_http_realip_module.html) to get the real client IP from the reverse proxy.
See [this section](https://bunkerized-nginx.readthedocs.io/en/latest/environment_variables.html#reverse-proxy) if you need to tweak some values (trusted ip/network, header, ...).
## Multisite
By default, bunkerized-nginx will only create one server block. When setting the `MULTISITE` environment variable to *yes*, one server block will be created for each host defined in the `SERVER_NAME` environment variable.
You can set/override values for a specific server by prefixing the environment variable with one of the server name previously defined.
If you have multiple services to protect, the easiest way to do it is by enabling the "multisite" mode. When using multisite, bunkerized-nginx will create one server block per server defined in the `SERVER_NAME` environment variable. You can configure each server independently by adding the server name as a prefix.
```shell
docker run -p 80:8080 \
-p 443:8443 \
-v /where/to/save/certificates:/etc/letsencrypt \
-e SERVER_NAME=app1.domain.com app2.domain.com \
-e MULTISITE=yes \
-e AUTO_LETS_ENCRYPT=yes \
-e REDIRECT_HTTP_TO_HTTPS=yes \
-e USE_REVERSE_PROXY=yes \
-e app1.domain.com_REVERSE_PROXY_URL=/ \
-e app1.domain.com_REVERSE_PROXY_HOST=http://myapp1:8000 \
-e app2.domain.com_REVERSE_PROXY_URL=/ \
-e app2.domain.com_REVERSE_PROXY_HOST=http://myapp2:8000 \
bunkerity/bunkerized-nginx
Here is a dummy configuration as an example :
```conf
SERVER_NAME=app1.example.com app2.example.com
# Without a prefix the variables are applied globally but can still be overridden
AUTO_LETS_ENCRYPT=yes
DISABLE_DEFAULT_SERVER=yes
# Specific configurations for first service
app1.example.com_USE_REVERSE_PROXY=yes
app1.example.com_REVERSE_PROXY_URL=/
app1.example.com_REVERSE_PROXY_HOST=http://internal-service.example.local:8080
# Specific configuration for second service
app2.example.com_REMOTE_PHP=my-fpm
app2.example.com_REMOTE_PHP_PATH=/var/www/html
# Uncomment the HTTP_PORT and HTTPS_PORT variables when using Linux configuration
#HTTP_PORT=80
#HTTPS_PORT=443
```
`USE_REVERSE_PROXY` is a *global* variable that will be applied to each server block, whereas the `app1.domain.com_*` and `app2.domain.com_*` variables will only be applied to the app1.domain.com and app2.domain.com server blocks respectively.
## Special folders
When serving files, the web root directory should contain subdirectories named after the servers defined in the `SERVER_NAME` environment variable. Here is an example :
| Name | Location | Purpose | Multisite |
|:----------------:|:--------------------------------------------------------------------------------:|:-----------------------------------------------------------------------:|:---------:|
| www | /www (container)<br> /opt/bunkerized-nginx/www (Linux) | Static files that need to be delivered by bunkerized-nginx. | Yes |
| http-confs | /http-confs (container)<br> /opt/bunkerized-nginx/http-confs (Linux) | Custom nginx configuration files loaded at http context. | No |
| server-confs | /server-confs (container)<br> /opt/bunkerized-nginx/server-confs (Linux) | Custom nginx configuration files loaded at server context. | Yes |
| modsec-confs | /modsec-confs (container)<br> /opt/bunkerized-nginx/modsec-confs (Linux) | Custom ModSecurity configuration files loaded before the Core Rule Set. | Yes |
| modsec-crs-confs | /modsec-crs-confs (container)<br> /opt/bunkerized-nginx/modsec-crs-confs (Linux) | Custom ModSecurity configuration files loaded after the Core Rule Set. | Yes |
| plugins | /plugins (container)<br> /opt/bunkerized-nginx/plugins (Linux) | Location of bunkerized-nginx plugins. | No |
| cache | /cache (container)<br> /opt/bunkerized-nginx/cache (Linux) | Placeholder for caching data like external blacklists. | No |
| acme-challenge | /acme-challenge (container)<br> /opt/bunkerized-nginx/acme-challenge (Linux) | Placeholder for Let's Encrypt challenges. | No |
You will find more information about the special folders in the [documentation](https://bunkerized-nginx.readthedocs.io/en/latest/special_folders.html).

```shell
docker run -p 80:8080 \
-p 443:8443 \
-v /where/to/save/certificates:/etc/letsencrypt \
-v /where/are/web/files:/www:ro \
-e SERVER_NAME=app1.domain.com app2.domain.com \
-e MULTISITE=yes \
-e AUTO_LETS_ENCRYPT=yes \
-e REDIRECT_HTTP_TO_HTTPS=yes \
-e app1.domain.com_REMOTE_PHP=php1 \
-e app1.domain.com_REMOTE_PHP_PATH=/app \
-e app2.domain.com_REMOTE_PHP=php2 \
-e app2.domain.com_REMOTE_PHP_PATH=/app \
bunkerity/bunkerized-nginx
```
# Web UI
The */where/are/web/files* directory should have a structure like this :
```shell
/where/are/web/files
├── app1.domain.com
│ └── index.php
│ └── ...
└── app2.domain.com
└── index.php
└── ...
```
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/web-ui.gif?raw=true" />
## Automatic configuration
The downside of using environment variables is that you need to recreate the container each time you want to add or remove a web service. An alternative is to use the *bunkerized-nginx-autoconf* image which listens for Docker events and "automagically" generates the configuration.
First we need a volume that will store the configurations :
```shell
docker volume create nginx_conf
```
Then we run bunkerized-nginx with the `bunkerized-nginx.AUTOCONF` label, mount the created volume at /etc/nginx and set some default configurations for our services (e.g. : automatic Let's Encrypt and HTTP to HTTPS redirect) :
```shell
docker network create mynet
docker run -p 80:8080 \
-p 443:8443 \
--network mynet \
-v /where/to/save/certificates:/etc/letsencrypt \
-v /where/are/web/files:/www:ro \
-v nginx_conf:/etc/nginx \
-e SERVER_NAME= \
-e MULTISITE=yes \
-e AUTO_LETS_ENCRYPT=yes \
-e REDIRECT_HTTP_TO_HTTPS=yes \
-l bunkerized-nginx.AUTOCONF \
bunkerity/bunkerized-nginx
```
When setting `SERVER_NAME` to nothing, bunkerized-nginx won't create any server block (in case we only want automatic configuration).
Once bunkerized-nginx is created, let's setup the autoconf container :
```shell
docker run -v /var/run/docker.sock:/var/run/docker.sock:ro \
-v nginx_conf:/etc/nginx \
bunkerity/bunkerized-nginx-autoconf
```
We can now create a new container and use labels to dynamically configure bunkerized-nginx. Labels for automatic configuration are the same as environment variables but with the "bunkerized-nginx." prefix.
Here is a PHP example :
```shell
docker run --network mynet \
--name myapp \
-v /where/are/web/files/app.domain.com:/app \
-l bunkerized-nginx.SERVER_NAME=app.domain.com \
-l bunkerized-nginx.REMOTE_PHP=myapp \
-l bunkerized-nginx.REMOTE_PHP_PATH=/app \
php:fpm
```
And a reverse proxy example :
```shell
docker run --network mynet \
--name anotherapp \
-l bunkerized-nginx.SERVER_NAME=app2.domain.com \
-l bunkerized-nginx.USE_REVERSE_PROXY=yes \
-l bunkerized-nginx.REVERSE_PROXY_URL=/ \
-l bunkerized-nginx.REVERSE_PROXY_HOST=http://anotherapp \
tutum/hello-world
```
## Swarm mode
Automatic configuration through labels is also supported in swarm mode. The *bunkerized-nginx-autoconf* image is used to listen for Swarm events (e.g. service create/rm), "automagically" edit configuration files and reload nginx.
As a use case we will assume the following :
- Some managers are also workers (they will only run the *autoconf* container for obvious security reasons)
- The bunkerized-nginx service will be deployed on all workers (global mode) so clients can connect to each of them (e.g. load balancing, CDN, edge proxy, ...)
- There is a shared folder mounted on managers and workers (e.g. NFS, GlusterFS, CephFS, ...)
Let's start by creating the network to allow communications between our services :
```shell
docker network create -d overlay mynet
```
We can now create the *autoconf* service that will listen to swarm events :
```shell
docker service create --name autoconf \
--network mynet \
--mount type=bind,source=/var/run/docker.sock,destination=/var/run/docker.sock,ro \
--mount type=bind,source=/shared/confs,destination=/etc/nginx \
--mount type=bind,source=/shared/letsencrypt,destination=/etc/letsencrypt \
--mount type=bind,source=/shared/acme-challenge,destination=/acme-challenge \
-e SWARM_MODE=yes \
-e API_URI=/ChangeMeToSomethingHardToGuess \
--replicas 1 \
--constraint node.role==manager \
bunkerity/bunkerized-nginx-autoconf
```
**You need to change `API_URI` to something hard to guess since there is no other security mechanism to protect the API at the moment.**
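One possible way to generate such a value (our suggestion, not an official recommendation of the project) is to derive it from random bytes :
```shell
# Generate a random, hard-to-guess URI to use as API_URI
echo "/$(openssl rand -hex 16)"
```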
When *autoconf* is created, it's time for the *bunkerized-nginx* service to be up :
```shell
docker service create --name nginx \
--network mynet \
-p published=80,target=8080,mode=host \
-p published=443,target=8443,mode=host \
--mount type=bind,source=/shared/confs,destination=/etc/nginx \
--mount type=bind,source=/shared/letsencrypt,destination=/etc/letsencrypt,ro \
--mount type=bind,source=/shared/acme-challenge,destination=/acme-challenge,ro \
--mount type=bind,source=/shared/www,destination=/www,ro \
-e SWARM_MODE=yes \
-e USE_API=yes \
-e API_URI=/ChangeMeToSomethingHardToGuess \
-e MULTISITE=yes \
-e SERVER_NAME= \
-e AUTO_LETS_ENCRYPT=yes \
-e REDIRECT_HTTP_TO_HTTPS=yes \
-l bunkerized-nginx.AUTOCONF \
--mode global \
--constraint node.role==worker \
bunkerity/bunkerized-nginx
```
The `API_URI` value must be the same as the one specified for the *autoconf* service.
We can now create a new service and use labels to dynamically configure bunkerized-nginx. Labels for automatic configuration are the same as environment variables but with the "bunkerized-nginx." prefix.
Here is a PHP example :
```shell
docker service create --name myapp \
--network mynet \
--mount type=bind,source=/shared/www/app.domain.com,destination=/app \
-l bunkerized-nginx.SERVER_NAME=app.domain.com \
-l bunkerized-nginx.REMOTE_PHP=myapp \
-l bunkerized-nginx.REMOTE_PHP_PATH=/app \
--constraint node.role==worker \
php:fpm
```
And a reverse proxy example :
```shell
docker service create --name anotherapp \
--network mynet \
-l bunkerized-nginx.SERVER_NAME=app2.domain.com \
-l bunkerized-nginx.USE_REVERSE_PROXY=yes \
-l bunkerized-nginx.REVERSE_PROXY_URL=/ \
-l bunkerized-nginx.REVERSE_PROXY_HOST=http://anotherapp \
--constraint node.role==worker \
tutum/hello-world
```
## Web UI
A dedicated image, *bunkerized-nginx-ui*, lets you manage bunkerized-nginx instances and service configurations through a web user interface. This feature is still in beta, feel free to open a new issue if you find a bug and/or have an idea to improve it.
First we need a volume that will store the configurations and a network because bunkerized-nginx will be used as a reverse proxy for the web UI :
```shell
docker volume create nginx_conf
docker network create mynet
```
Let's create the bunkerized-nginx-ui container that will host the web UI behind bunkerized-nginx :
```shell
docker run --network mynet \
--name myui \
-v /var/run/docker.sock:/var/run/docker.sock:ro \
-v nginx_conf:/etc/nginx \
-e ABSOLUTE_URI=https://admin.domain.com/webui/ \
bunkerity/bunkerized-nginx-ui
```
You will need to edit the `ABSOLUTE_URI` environment variable to reflect the actual URI of your web UI.
We can now setup the bunkerized-nginx instance with the `bunkerized-nginx.UI` label and a reverse proxy configuration for our web UI :
```shell
docker network create mynet
docker run -p 80:8080 \
-p 443:8443 \
--network mynet \
-v nginx_conf:/etc/nginx \
-v /where/are/web/files:/www:ro \
-v /where/to/save/certificates:/etc/letsencrypt \
-e SERVER_NAME=admin.domain.com \
-e MULTISITE=yes \
-e AUTO_LETS_ENCRYPT=yes \
-e REDIRECT_HTTP_TO_HTTPS=yes \
-e DISABLE_DEFAULT_SERVER=yes \
-e admin.domain.com_USE_MODSECURITY=no \
-e admin.domain.com_SERVE_FILES=no \
-e admin.domain.com_USE_AUTH_BASIC=yes \
-e admin.domain.com_AUTH_BASIC_USER=admin \
-e admin.domain.com_AUTH_BASIC_PASSWORD=password \
-e admin.domain.com_USE_REVERSE_PROXY=yes \
-e admin.domain.com_REVERSE_PROXY_URL=/webui/ \
-e admin.domain.com_REVERSE_PROXY_HOST=http://myui:5000/ \
-l bunkerized-nginx.UI \
bunkerity/bunkerized-nginx
```
The `AUTH_BASIC` environment variables let you define a login/password that must be provided before accessing the web UI. At the moment, there is no authentication mechanism integrated into bunkerized-nginx-ui so **using auth basic with a strong password coupled with a "hard to guess" URI is strongly recommended**.
The web UI should now be accessible at https://admin.domain.com/webui/.
You will find more information about the web UI in the [documentation](https://bunkerized-nginx.readthedocs.io/en/latest/web_ui.html).
# Security tuning
@@ -432,8 +211,12 @@ bunkerized-nginx comes with a set of predefined security settings that you can (
# License
This project is licensed under the terms of the [GNU Affero General Public License (AGPL) version 3](https://github.com/bunkerity/bunkerized-nginx/LICENSE.md).
This project is licensed under the terms of the [GNU Affero General Public License (AGPL) version 3](https://github.com/bunkerity/bunkerized-nginx/blob/master/LICENSE.md).
# Contributing
If you would like to contribute to the project you can read the [contributing guidelines](https://github.com/bunkerity/bunkerized-nginx/CONTRIBUTING.md) to get started.
If you would like to contribute to the project you can read the [contributing guidelines](https://github.com/bunkerity/bunkerized-nginx/blob/master/CONTRIBUTING.md) to get started.
# Security policy
We take security bugs as serious issues and encourage responsible disclosure, see our [security policy](https://github.com/bunkerity/bunkerized-nginx/blob/master/SECURITY.md) for more information.

SECURITY.md (new file)

@@ -0,0 +1,17 @@
# Security policy
Even though this project is focused on security, it is still prone to possible vulnerabilities. We consider every security bug as a serious issue and will try our best to address it.
## Responsible disclosure
If you have found a security bug, please send us an email at security \[@\] bunkerity.com with technical details so we can resolve it as soon as possible.
Here is a non-exhaustive list of issues we consider as high risk :
- Vulnerability in the core
- Bypass of a security feature
- Vulnerability in a third-party dependency
- Risk in the supply chain
## Bounty
To encourage responsible disclosure, we may reward you with a bounty at the sole discretion of the maintainers.


@@ -1 +1 @@
1.2.7
1.3.0


@@ -1,195 +0,0 @@
from Config import Config
import utils
import os, time
class AutoConf :
def __init__(self, swarm, api) :
self.__swarm = swarm
self.__servers = {}
self.__instances = {}
self.__env = {}
self.__config = Config(self.__swarm, api)
def get_server(self, id) :
if id in self.__servers :
return self.__servers[id]
return False
def reload(self) :
return self.__config.reload(self.__instances)
def __gen_env(self) :
self.__env.clear()
# TODO : check actual state (e.g. : running, stopped ?)
for id, instance in self.__instances.items() :
env = []
if self.__swarm :
env = instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"]
else :
env = instance.attrs["Config"]["Env"]
for entry in env :
self.__env[entry.split("=")[0]] = entry.replace(entry.split("=")[0] + "=", "", 1)
blacklist = ["NGINX_VERSION", "NJS_VERSION", "PATH", "PKG_RELEASE"]
for entry in blacklist :
if entry in self.__env :
del self.__env[entry]
if not "SERVER_NAME" in self.__env or self.__env["SERVER_NAME"] == "" :
self.__env["SERVER_NAME"] = []
else :
self.__env["SERVER_NAME"] = self.__env["SERVER_NAME"].split(" ")
for server in self.__servers :
(id, name, labels) = self.__get_infos(self.__servers[server])
first_server = labels["bunkerized-nginx.SERVER_NAME"].split(" ")[0]
for label in labels :
if label.startswith("bunkerized-nginx.") :
self.__env[first_server + "_" + label.replace("bunkerized-nginx.", "", 1)] = labels[label]
for server_name in labels["bunkerized-nginx.SERVER_NAME"].split(" ") :
if not server_name in self.__env["SERVER_NAME"] :
self.__env["SERVER_NAME"].append(server_name)
self.__env["SERVER_NAME"] = " ".join(self.__env["SERVER_NAME"])
def pre_process(self, objs) :
for instance in objs :
(id, name, labels) = self.__get_infos(instance)
if "bunkerized-nginx.AUTOCONF" in labels :
if self.__swarm :
self.__process_instance(instance, "create", id, name, labels)
else :
if instance.status in ("restarting", "running", "created", "exited") :
self.__process_instance(instance, "create", id, name, labels)
if instance.status == "running" :
self.__process_instance(instance, "start", id, name, labels)
for server in objs :
(id, name, labels) = self.__get_infos(server)
if "bunkerized-nginx.SERVER_NAME" in labels :
if self.__swarm :
self.__process_server(server, "create", id, name, labels)
else :
if server.status in ("restarting", "running", "created", "exited") :
self.__process_server(server, "create", id, name, labels)
if server.status == "running" :
self.__process_server(server, "start", id, name, labels)
def process(self, obj, event) :
(id, name, labels) = self.__get_infos(obj)
if "bunkerized-nginx.AUTOCONF" in labels :
self.__process_instance(obj, event, id, name, labels)
elif "bunkerized-nginx.SERVER_NAME" in labels :
self.__process_server(obj, event, id, name, labels)
def __get_infos(self, obj) :
if self.__swarm :
id = obj.id
name = obj.name
labels = obj.attrs["Spec"]["Labels"]
else :
id = obj.id
name = obj.name
labels = obj.labels
return (id, name, labels)
def __process_instance(self, instance, event, id, name, labels) :
if event == "create" :
self.__instances[id] = instance
self.__gen_env()
utils.log("[*] bunkerized-nginx instance created : " + name + " / " + id)
if self.__swarm and len(self.__instances) == 1 :
if self.__config.generate(self.__env) :
utils.log("[*] Initial config succeeded")
if not self.__config.swarm_wait(self.__instances) :
utils.log("[!] Removing bunkerized-nginx instances from list (API not available)")
del self.__instances[id]
else :
utils.log("[!] Initial config failed")
elif not self.__swarm and len(self.__instances) == 1 :
utils.log("[*] Wait until bunkerized-nginx is healthy (timeout = 120s) ...")
i = 0
healthy = False
while i < 120 :
self.__instances[id].reload()
if self.__instances[id].attrs["State"]["Health"]["Status"] == "healthy" :
healthy = True
break
time.sleep(1)
i = i + 1
if not healthy :
utils.log("[!] Removing bunkerized-nginx instances from list (unhealthy)")
del self.__instances[id]
elif event == "start" :
self.__instances[id].reload()
self.__gen_env()
utils.log("[*] bunkerized-nginx instance started : " + name + " / " + id)
elif event == "die" :
self.__instances[id].reload()
self.__gen_env()
utils.log("[*] bunkerized-nginx instance stopped : " + name + " / " + id)
elif event == "destroy" or event == "remove" :
del self.__instances[id]
self.__gen_env()
utils.log("[*] bunkerized-nginx instance removed : " + name + " / " + id)
def __process_server(self, instance, event, id, name, labels) :
vars = { k.replace("bunkerized-nginx.", "", 1) : v for k, v in labels.items() if k.startswith("bunkerized-nginx.")}
if event == "create" :
utils.log("[*] Generating config for " + vars["SERVER_NAME"] + " ...")
self.__servers[id] = instance
self.__gen_env()
if self.__config.generate(self.__env) :
utils.log("[*] Generated config for " + vars["SERVER_NAME"])
if self.__swarm :
utils.log("[*] Activating config for " + vars["SERVER_NAME"] + " ...")
if self.__config.reload(self.__instances) :
utils.log("[*] Activated config for " + vars["SERVER_NAME"])
else :
utils.log("[!] Can't activate config for " + vars["SERVER_NAME"])
else :
utils.log("[!] Can't generate config for " + vars["SERVER_NAME"])
del self.__servers[id]
self.__gen_env()
self.__config.generate(self.__env)
elif event == "start" :
if id in self.__servers :
self.__servers[id].reload()
utils.log("[*] Activating config for " + vars["SERVER_NAME"] + " ...")
self.__gen_env()
if self.__config.reload(self.__instances) :
utils.log("[*] Activated config for " + vars["SERVER_NAME"])
else :
utils.log("[!] Can't activate config for " + vars["SERVER_NAME"])
elif event == "die" :
if id in self.__servers :
self.__servers[id].reload()
utils.log("[*] Deactivating config for " + vars["SERVER_NAME"])
self.__gen_env()
if self.__config.reload(self.__instances) :
utils.log("[*] Deactivated config for " + vars["SERVER_NAME"])
else :
utils.log("[!] Can't deactivate config for " + vars["SERVER_NAME"])
elif event == "destroy" or event == "remove" :
if id in self.__servers :
utils.log("[*] Removing config for " + vars["SERVER_NAME"])
del self.__servers[id]
self.__gen_env()
if self.__config.generate(self.__env) :
utils.log("[*] Removed config for " + vars["SERVER_NAME"])
else :
utils.log("[!] Can't remove config for " + vars["SERVER_NAME"])
utils.log("[*] Deactivating config for " + vars["SERVER_NAME"])
if self.__config.reload(self.__instances) :
utils.log("[*] Deactivated config for " + vars["SERVER_NAME"])
else :
utils.log("[!] Can't deactivate config for " + vars["SERVER_NAME"])

View File

@@ -1,125 +0,0 @@
#!/usr/bin/python3
import utils
import subprocess, shutil, os, traceback, requests, time, docker
class Config :
def __init__(self, swarm, api) :
self.__swarm = swarm
self.__api = api
def __jobs(self, type) :
utils.log("[*] Starting jobs (type = " + type + ") ...")
proc = subprocess.run(["/bin/su", "-c", "/opt/entrypoint/" + type + "-jobs.sh", "nginx"], capture_output=True)
stdout = proc.stdout.decode("ascii")
stderr = proc.stderr.decode("ascii")
if len(stdout) > 1 :
utils.log("[*] Jobs stdout :")
utils.log(stdout)
if stderr != "" :
utils.log("[!] Jobs stderr :")
utils.log(stderr)
if proc.returncode != 0 :
utils.log("[!] Jobs error : return code != 0")
return False
return True
def swarm_wait(self, instances) :
try :
with open("/etc/nginx/autoconf", "w") as f :
f.write("ok")
utils.log("[*] Waiting for bunkerized-nginx tasks ...")
i = 1
started = False
while i <= 10 :
time.sleep(i)
if self.__ping(instances) :
started = True
break
i = i + 1
utils.log("[!] Waiting " + str(i) + " seconds before retrying to contact bunkerized-nginx tasks")
if started :
utils.log("[*] bunkerized-nginx tasks started")
return True
else :
utils.log("[!] bunkerized-nginx tasks are not started")
except Exception as e :
utils.log("[!] Error while waiting for Swarm tasks : " + str(e))
return False
def generate(self, env) :
try :
# Write environment variables to a file
with open("/tmp/variables.env", "w") as f :
for k, v in env.items() :
f.write(k + "=" + v + "\n")
# Call the generator
proc = subprocess.run(["/bin/su", "-c", "/opt/gen/main.py --settings /opt/settings.json --templates /opt/confs --output /etc/nginx --variables /tmp/variables.env", "nginx"], capture_output=True)
# Print stdout/stderr
stdout = proc.stdout.decode("ascii")
stderr = proc.stderr.decode("ascii")
if len(stdout) > 1 :
utils.log("[*] Generator output :")
utils.log(stdout)
if stderr != "" :
utils.log("[*] Generator error :")
utils.log(error)
# We're done
if proc.returncode == 0 :
if self.__swarm :
return self.__jobs("pre")
return True
utils.log("[!] Error while generating site config for " + env["SERVER_NAME"] + " : return code = " + str(proc.returncode))
except Exception as e :
utils.log("[!] Exception while generating site config : " + str(e))
return False
def reload(self, instances) :
if self.__api_call(instances, "/reload") :
if self.__swarm :
return self.__jobs("post")
return True
return False
def __ping(self, instances) :
return self.__api_call(instances, "/ping")
def __api_call(self, instances, path) :
ret = True
for instance_id, instance in instances.items() :
# Reload the instance object just in case
instance.reload()
# Reload via API
if self.__swarm :
# Send a POST request to http://serviceName.NodeID.TaskID:8080/action
name = instance.name
for task in instance.tasks() :
if task["Status"]["State"] != "running" :
continue
nodeID = task["NodeID"]
taskID = task["ID"]
fqdn = name + "." + nodeID + "." + taskID
req = False
try :
req = requests.post("http://" + fqdn + ":8080" + self.__api + path)
except :
pass
if req and req.status_code == 200 and req.text == "ok" :
utils.log("[*] Sent API order " + path + " to instance " + fqdn + " (service.node.task)")
else :
utils.log("[!] Can't send API order " + path + " to instance " + fqdn + " (service.node.task)")
ret = False
# Send SIGHUP to running instance
elif instance.status == "running" :
try :
instance.kill("SIGHUP")
utils.log("[*] Sent SIGHUP signal to bunkerized-nginx instance " + instance.name + " / " + instance.id)
except docker.errors.APIError as e :
utils.log("[!] Docker error while sending SIGHUP signal : " + str(e))
ret = False
return ret
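For reference, a minimal sketch of what generate() ends up writing to /tmp/variables.env; the hostname and setting below are hypothetical, the prefixing convention comes from __gen_env in AutoConf :
# Hypothetical env dict as built by AutoConf.__gen_env :
env = {"SERVER_NAME" : "app.example.com", "app.example.com_USE_REVERSE_PROXY" : "yes"}
# generate(env) would then write to /tmp/variables.env :
# SERVER_NAME=app.example.com
# app.example.com_USE_REVERSE_PROXY=yes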

View File

@@ -1,22 +1,27 @@
FROM alpine
COPY autoconf/dependencies.sh /tmp
RUN chmod +x /tmp/dependencies.sh && \
/tmp/dependencies.sh && \
rm -f /tmp/dependencies.sh
COPY gen/ /opt/bunkerized-nginx/gen
COPY entrypoint/ /opt/bunkerized-nginx/entrypoint
COPY confs/global/ /opt/bunkerized-nginx/confs/global
COPY confs/site/ /opt/bunkerized-nginx/confs/site
COPY jobs/ /opt/bunkerized-nginx/jobs
COPY settings.json /opt/bunkerized-nginx/
COPY misc/cron-autoconf /etc/crontabs/root
COPY autoconf/entrypoint.sh /opt/bunkerized-nginx/entrypoint/
COPY autoconf/requirements.txt /opt/bunkerized-nginx/entrypoint/
COPY autoconf/src/* /opt/bunkerized-nginx/entrypoint/
COPY gen/ /opt/gen
COPY entrypoint/ /opt/entrypoint
COPY confs/global/ /opt/confs/global
COPY confs/site/ /opt/confs/site
COPY scripts/ /opt/scripts
COPY settings.json /opt
COPY misc/cron /etc/crontabs/nginx
COPY autoconf/* /opt/entrypoint/
RUN apk add --no-cache py3-pip bash certbot curl openssl && \
pip3 install -r /opt/bunkerized-nginx/gen/requirements.txt && \
pip3 install -r /opt/bunkerized-nginx/entrypoint/requirements.txt && \
pip3 install -r /opt/bunkerized-nginx/jobs/requirements.txt
COPY autoconf/prepare.sh /tmp
RUN chmod +x /tmp/prepare.sh && \
/tmp/prepare.sh && \
rm -f /tmp/prepare.sh
ENTRYPOINT ["/opt/entrypoint/entrypoint.sh"]
# Fix CVE-2021-36159
RUN apk add "apk-tools>=2.12.6-r0"
ENTRYPOINT ["/opt/bunkerized-nginx/entrypoint/entrypoint.sh"]

View File

@@ -1,28 +0,0 @@
import socketserver, threading, utils, os, stat
class ReloadServerHandler(socketserver.StreamRequestHandler):
def handle(self) :
try :
data = self.request.recv(512)
if not data :
return
with self.server.lock :
ret = self.server.autoconf.reload()
if ret :
self.request.sendall("ok".encode("utf-8"))
else :
self.request.sendall("ko".encode("utf-8"))
except Exception as e :
utils.log("Exception " + str(e))
def run_reload_server(autoconf, lock) :
server = socketserver.UnixStreamServer("/tmp/autoconf.sock", ReloadServerHandler)
os.chown("/tmp/autoconf.sock", 0, 101)
os.chmod("/tmp/autoconf.sock", 0o770)
server.autoconf = autoconf
server.lock = lock
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return (server, thread)

View File

@@ -1,71 +0,0 @@
#!/usr/bin/python3
from AutoConf import AutoConf
from ReloadServer import run_reload_server
import utils
import docker, os, stat, sys, select, threading
# Connect to the endpoint
endpoint = "/var/run/docker.sock"
if not os.path.exists(endpoint) or not stat.S_ISSOCK(os.stat(endpoint).st_mode) :
utils.log("[!] /var/run/docker.sock not found (is it mounted ?)")
sys.exit(1)
try :
client = docker.DockerClient(base_url='unix:///var/run/docker.sock')
except Exception as e :
utils.log("[!] Can't instantiate DockerClient : " + str(e))
sys.exit(2)
# Check if we are in Swarm mode
swarm = os.getenv("SWARM_MODE") == "yes"
# Our object to process events
api = ""
if swarm :
api = os.getenv("API_URI")
autoconf = AutoConf(swarm, api)
lock = threading.Lock()
if swarm :
(server, thread) = run_reload_server(autoconf, lock)
# Get all bunkerized-nginx instances and web services created before
try :
if swarm :
before = client.services.list(filters={"label" : "bunkerized-nginx.AUTOCONF"}) + client.services.list(filters={"label" : "bunkerized-nginx.SERVER_NAME"})
else :
before = client.containers.list(all=True, filters={"label" : "bunkerized-nginx.AUTOCONF"}) + client.containers.list(filters={"label" : "bunkerized-nginx.SERVER_NAME"})
except docker.errors.APIError as e :
utils.log("[!] Docker API error " + str(e))
sys.exit(3)
# Process them before events
autoconf.pre_process(before)
# Process events received from Docker
try :
utils.log("[*] Listening for Docker events ...")
for event in client.events(decode=True) :
# Process only container/service events
if (swarm and event["Type"] != "service") or (not swarm and event["Type"] != "container") :
continue
# Get Container/Service object
try :
if swarm :
id = event["Actor"]["ID"]
server = client.services.get(service_id=id)
else :
id = event["id"]
server = client.containers.get(id)
except docker.errors.NotFound as e :
server = autoconf.get_server(id)
if not server :
continue
# Process the event
autoconf.process(server, event["Action"])
except docker.errors.APIError as e :
utils.log("[!] Docker API error " + str(e))
sys.exit(4)

View File

@@ -1,5 +0,0 @@
#!/bin/sh
# install dependencies
apk add py3-pip bash certbot curl openssl
pip3 install docker requests jinja2

View File

@@ -3,16 +3,11 @@
echo "[*] Starting autoconf ..."
# check permissions
su -s "/opt/entrypoint/permissions.sh" nginx
su -s "/opt/bunkerized-nginx/entrypoint/permissions.sh" nginx
if [ "$?" -ne 0 ] ; then
exit 1
fi
if [ "$SWARM_MODE" = "yes" ] ; then
chown -R root:nginx /etc/nginx
chmod -R 770 /etc/nginx
fi
# trap SIGTERM and SIGINT
function trap_exit() {
echo "[*] Catched stop operation"
@@ -27,7 +22,7 @@ trap "trap_exit" TERM INT QUIT
crond
# run autoconf app
/opt/entrypoint/app.py &
/opt/bunkerized-nginx/entrypoint/app.py &
pid="$!"
# wait while app is up

View File

@@ -1,12 +0,0 @@
#!/bin/bash
curl -Lo manifest-tool https://github.com/estesp/manifest-tool/releases/download/v1.0.3/manifest-tool-linux-amd64
chmod +x manifest-tool
VERSION=$(cat VERSION | tr -d '\n')
if [ "$SOURCE_BRANCH" = "dev" ] ; then
./manifest-tool push from-args --ignore-missing --platforms linux/amd64,linux/386,linux/arm/v7,linux/arm64/v8 --template bunkerity/bunkerized-nginx-autoconf:dev-ARCHVARIANT --target bunkerity/bunkerized-nginx-autoconf:dev
elif [ "$SOURCE_BRANCH" = "master" ] ; then
./manifest-tool push from-args --ignore-missing --platforms linux/amd64,linux/386,linux/arm/v7,linux/arm64/v8 --template bunkerity/bunkerized-nginx-autoconf:ARCHVARIANT --target bunkerity/bunkerized-nginx-autoconf:${VERSION}
./manifest-tool push from-args --ignore-missing --platforms linux/amd64,linux/386,linux/arm/v7,linux/arm64/v8 --template bunkerity/bunkerized-nginx-autoconf:ARCHVARIANT --target bunkerity/bunkerized-nginx-autoconf:latest
fi

View File

@@ -1,5 +0,0 @@
#!/bin/bash
# Register qemu-*-static for all supported processors except the
# current one, but also remove all registered binfmt_misc before
docker run --rm --privileged multiarch/qemu-user-static:register --reset

View File

@@ -5,16 +5,25 @@ addgroup -g 101 nginx
adduser -h /var/cache/nginx -g nginx -s /bin/sh -G nginx -D -H -u 101 nginx
# prepare /opt
chown -R root:nginx /opt
find /opt -type f -exec chmod 0740 {} \;
find /opt -type d -exec chmod 0750 {} \;
chmod ugo+x /opt/entrypoint/* /opt/scripts/*
chmod ugo+x /opt/gen/main.py
chmod 770 /opt
chmod 440 /opt/settings.json
chown root:nginx /opt
chmod 750 /opt
# prepare /opt/bunkerized-nginx
chown -R root:nginx /opt/bunkerized-nginx
find /opt/bunkerized-nginx -type f -exec chmod 0740 {} \;
find /opt/bunkerized-nginx -type d -exec chmod 0750 {} \;
chmod ugo+x /opt/bunkerized-nginx/entrypoint/* /opt/bunkerized-nginx/scripts/*
chmod ugo+x /opt/bunkerized-nginx/gen/main.py
chmod ugo+x /opt/bunkerized-nginx/jobs/main.py
chmod ugo+x /opt/bunkerized-nginx/jobs/reload.py
chmod 770 /opt/bunkerized-nginx
chmod 440 /opt/bunkerized-nginx/settings.json
# prepare /var/log
ln -s /proc/1/fd/1 /var/log/jobs.log
mkdir /var/log/nginx
chown root:nginx /var/log/nginx
chmod 770 /var/log/nginx
ln -s /proc/1/fd/1 /var/log/nginx/jobs.log
mkdir /var/log/letsencrypt
chown nginx:nginx /var/log/letsencrypt
chmod 770 /var/log/letsencrypt
@@ -29,16 +38,26 @@ mkdir /var/lib/letsencrypt
chown root:nginx /var/lib/letsencrypt
chmod 770 /var/lib/letsencrypt
# prepare /cache
# prepare /opt/bunkerized-nginx/cache
ln -s /cache /opt/bunkerized-nginx/cache
mkdir /cache
chown root:nginx /cache
chmod 770 /cache
# prepare /acme-challenge
ln -s /acme-challenge /opt/bunkerized-nginx/acme-challenge
mkdir /acme-challenge
chown root:nginx /acme-challenge
chmod 770 /acme-challenge
# prepare /etc/crontabs/nginx
chown root:nginx /etc/crontabs/nginx
chmod 440 /etc/crontabs/nginx
# prepare /modsec-confs
ln -s /modsec-confs /opt/bunkerized-nginx/modsec-confs
mkdir /modsec-confs
chown root:nginx /modsec-confs
chmod 770 /modsec-confs
# prepare /modsec-crs-confs
ln -s /modsec-crs-confs /opt/bunkerized-nginx/modsec-crs-confs
mkdir /modsec-crs-confs
chown root:nginx /modsec-crs-confs
chmod 770 /modsec-crs-confs

View File

@@ -1,19 +0,0 @@
#!/usr/bin/python3
import sys, socket, os
if not os.path.exists("/tmp/autoconf.sock") :
sys.exit(1)
try :
client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client.connect("/tmp/autoconf.sock")
client.send("reload".encode("utf-8"))
data = client.recv(512)
client.close()
if not data or data.decode("utf-8") != "ok" :
sys.exit(3)
except Exception as e :
sys.exit(2)
sys.exit(0)

View File

@@ -0,0 +1,5 @@
docker
requests
jinja2
kubernetes
dnspython

autoconf/src/Config.py Normal file
View File

@@ -0,0 +1,158 @@
#!/usr/bin/python3
import subprocess, shutil, os, traceback, requests, time, dns.resolver
import Controller
from logger import log
class Config :
def __init__(self, type, api_uri, http_port="8080") :
self.__type = type
self.__api_uri = api_uri
self.__http_port = http_port
def __jobs(self) :
log("config", "INFO", "starting jobs ...")
proc = subprocess.run(["/bin/su", "-c", "/opt/bunkerized-nginx/entrypoint/jobs.sh", "nginx"], capture_output=True)
stdout = proc.stdout.decode("ascii")
stderr = proc.stderr.decode("ascii")
if len(stdout) > 1 :
log("config", "INFO", "jobs stdout : " + stdout)
if stderr != "" :
log("config", "ERROR", "jobs stderr : " + stderr)
if proc.returncode != 0 :
log("config", "ERROR", "jobs error (return code = " + str(proc.returncode) + ")")
return False
return True
def gen(self, env) :
try :
# Write environment variables to a file
with open("/tmp/variables.env", "w") as f :
for k, v in env.items() :
f.write(k + "=" + v + "\n")
# Call the generator
proc = subprocess.run(["/bin/su", "-c", "/opt/bunkerized-nginx/gen/main.py --settings /opt/bunkerized-nginx/settings.json --templates /opt/bunkerized-nginx/confs --output /etc/nginx --variables /tmp/variables.env", "nginx"], capture_output=True)
# Print stdout/stderr
stdout = proc.stdout.decode("ascii")
stderr = proc.stderr.decode("ascii")
if len(stdout) > 1 :
log("config", "INFO", "generator output : " + stdout)
if stderr != "" :
log("config", "ERROR", "generator error : " + stderr)
# We're done
if proc.returncode == 0 :
if self.__type == Controller.Type.SWARM or self.__type == Controller.Type.KUBERNETES :
return self.__jobs()
return True
log("config", "ERROR", "error while generating config (return code = " + str(proc.returncode) + ")")
except Exception as e :
log("config", "ERROR", "exception while generating site config : " + traceback.format_exc())
return False
def reload(self, instances) :
ret = True
if self.__type == Controller.Type.DOCKER :
for instance in instances :
try :
instance.kill("SIGHUP")
except :
ret = False
elif self.__type == Controller.Type.SWARM :
ret = self.__api_call(instances, "/reload")
elif self.__type == Controller.Type.KUBERNETES :
ret = self.__api_call(instances, "/reload")
return ret
def __ping(self, instances) :
return self.__api_call(instances, "/ping")
def wait(self, instances) :
ret = True
if self.__type == Controller.Type.DOCKER :
ret = self.__wait_docker(instances)
elif self.__type == Controller.Type.SWARM or self.__type == Controller.Type.KUBERNETES :
ret = self.__wait_api(instances)
return ret
def __wait_docker(self, instances) :
all_healthy = False
i = 0
while i < 120 :
one_not_healthy = False
for instance in instances :
instance.reload()
if instance.attrs["State"]["Health"]["Status"] != "healthy" :
one_not_healthy = True
break
if not one_not_healthy :
all_healthy = True
break
time.sleep(1)
i += 1
return all_healthy
def __wait_api(self, instances) :
try :
with open("/etc/nginx/autoconf", "w") as f :
f.write("ok")
i = 1
started = False
while i <= 10 :
time.sleep(i)
if self.__ping(instances) :
started = True
break
i = i + 1
log("config", "INFO", "waiting " + str(i) + " seconds before retrying to contact bunkerized-nginx instances")
if started :
log("config", "INFO", "bunkerized-nginx instances started")
return True
else :
log("config", "ERROR", "bunkerized-nginx instances are not started")
except Exception as e :
log("config", "ERROR", "exception while waiting for bunkerized-nginx instances : " + traceback.format_exc())
return False
def __api_call(self, instances, path) :
ret = True
nb = 0
urls = []
if self.__type == Controller.Type.SWARM :
for instance in instances :
name = instance.name
try :
dns_result = dns.resolver.query("tasks." + name)
for ip in dns_result :
urls.append("http://" + ip.to_text() + ":" + self.__http_port + self.__api_uri + path)
except :
ret = False
elif self.__type == Controller.Type.KUBERNETES :
for instance in instances :
name = instance.metadata.name
try :
dns_result = dns.resolver.query(name + "." + instance.metadata.namespace + ".svc.cluster.local")
for ip in dns_result :
urls.append("http://" + ip.to_text() + ":" + self.__http_port + self.__api_uri + path)
except :
ret = False
for url in urls :
req = None
try :
req = requests.post(url)
except :
pass
if req and req.status_code == 200 and req.text == "ok" :
log("config", "INFO", "successfully sent API order to " + url)
nb += 1
else :
log("config", "INFO", "failed API order to " + url)
ret = False
return ret and nb > 0
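As a side note, the discovery above can be reproduced by hand from any container attached to the same network; a quick sketch, where the service name mybunkerized is hypothetical and the tasks. prefix, default port 8080 and /ping path are taken from the code above (API_URI left empty) :
#!/usr/bin/python3
# Print the URLs that __api_call would contact for a Swarm service :
import dns.resolver
for ip in dns.resolver.query("tasks.mybunkerized") :
    print("http://" + ip.to_text() + ":8080/ping")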

View File

@@ -0,0 +1,53 @@
from abc import ABC, abstractmethod
from enum import Enum
from Config import Config
class Type(Enum) :
DOCKER = 1
SWARM = 2
KUBERNETES = 3
class Controller(ABC) :
def __init__(self, type, api_uri=None, lock=None, http_port="8080") :
self._config = Config(type, api_uri, http_port=http_port)
self.lock = lock
@abstractmethod
def get_env(self) :
pass
def _fix_env(self, env) :
fixed_env = env.copy()
blacklist = ["NGINX_VERSION", "NJS_VERSION", "PATH", "PKG_RELEASE"]
for key in blacklist :
if key in fixed_env :
del fixed_env[key]
return fixed_env
def gen_conf(self, env) :
try :
ret = self._config.gen(env)
except :
ret = False
return ret
@abstractmethod
def wait(self) :
pass
@abstractmethod
def process_events(self, current_env) :
pass
@abstractmethod
def reload(self) :
pass
def _reload(self, instances) :
try :
ret = self._config.reload(instances)
except :
ret = False
return ret

View File

@@ -0,0 +1,78 @@
import docker, time
import Controller
from logger import log
class DockerController(Controller.Controller) :
def __init__(self, docker_host) :
super().__init__(Controller.Type.DOCKER)
self.__client = docker.DockerClient(base_url=docker_host)
def __get_instances(self) :
return self.__client.containers.list(filters={"label" : "bunkerized-nginx.AUTOCONF"})
def __get_containers(self) :
return self.__client.containers.list(filters={"label" : "bunkerized-nginx.SERVER_NAME"})
def get_env(self) :
env = {}
for instance in self.__get_instances() :
for variable in instance.attrs["Config"]["Env"] :
env[variable.split("=")[0]] = variable.replace(variable.split("=")[0] + "=", "", 1)
first_servers = []
if "SERVER_NAME" in env and env["SERVER_NAME"] != "" :
first_servers = env["SERVER_NAME"].split(" ")
for container in self.__get_containers() :
first_server = container.labels["bunkerized-nginx.SERVER_NAME"].split(" ")[0]
first_servers.append(first_server)
for variable, value in container.labels.items() :
if variable.startswith("bunkerized-nginx.") and variable != "bunkerized-nginx.AUTOCONF" :
env[first_server + "_" + variable.replace("bunkerized-nginx.", "", 1)] = value
if len(first_servers) == 0 :
env["SERVER_NAME"] = ""
else :
env["SERVER_NAME"] = " ".join(first_servers)
return self._fix_env(env)
def process_events(self, current_env) :
old_env = current_env
# TODO : check why filter isn't working as expected
#for event in self.__client.events(decode=True, filters={"type": "container", "label": ["bunkerized-nginx.AUTOCONF", "bunkerized-nginx.SERVER_NAME"]}) :
for event in self.__client.events(decode=True, filters={"type": "container"}) :
new_env = self.get_env()
if new_env != old_env :
try :
log("controller", "INFO", "generating new configuration")
if self.gen_conf(new_env) :
old_env = new_env.copy()
log("controller", "INFO", "successfully generated new configuration")
if self.reload() :
log("controller", "INFO", "successful reload")
else :
log("controller", "ERROR", "failed reload")
else :
log("controller", "ERROR", "can't generate new configuration")
except :
log("controller", "ERROR", "exception while receiving event")
def reload(self) :
return self._reload(self.__get_instances())
def wait(self) :
try :
# Wait for a container
instances = self.__get_instances()
while len(instances) == 0 :
time.sleep(1)
instances = self.__get_instances()
# Generate first config
env = self.get_env()
if not self.gen_conf(env) :
return False, env
# Wait for nginx
return self._config.wait(instances), env
except :
pass
return False, {}
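As a worked example of the label convention handled by get_env above (the hostname is hypothetical) :
# Hypothetical labels set on a web service container :
labels = {
    "bunkerized-nginx.SERVER_NAME" : "app.example.com",
    "bunkerized-nginx.USE_REVERSE_PROXY" : "yes"
}
# get_env() would then hand the following variables to the generator :
# SERVER_NAME=app.example.com
# app.example.com_SERVER_NAME=app.example.com
# app.example.com_USE_REVERSE_PROXY=yes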

View File

@@ -0,0 +1,193 @@
from kubernetes import client, config, watch
from threading import Thread, Lock
import time
import Controller
from logger import log
class IngressController(Controller.Controller) :
def __init__(self, api_uri, http_port) :
super().__init__(Controller.Type.KUBERNETES, api_uri=api_uri, lock=Lock(), http_port=http_port)
config.load_incluster_config()
self.__api = client.CoreV1Api()
self.__extensions_api = client.ExtensionsV1beta1Api()
self.__old_env = {}
def __get_pods(self) :
return self.__api.list_pod_for_all_namespaces(watch=False, label_selector="bunkerized-nginx").items
def __get_ingresses(self) :
return self.__extensions_api.list_ingress_for_all_namespaces(watch=False, label_selector="bunkerized-nginx").items
def __get_services(self, autoconf=False) :
services = self.__api.list_service_for_all_namespaces(watch=False, label_selector="bunkerized-nginx").items
if not autoconf :
return services
services_autoconf = []
for service in services :
if service.metadata.annotations is not None and "bunkerized-nginx.AUTOCONF" in service.metadata.annotations :
services_autoconf.append(service)
return services_autoconf
def __pod_to_env(self, pod_env) :
env = {}
for env_var in pod_env :
env[env_var.name] = env_var.value
if env_var.value is None :
env[env_var.name] = ""
return env
def __annotations_to_env(self, annotations) :
env = {}
prefix = annotations["bunkerized-nginx.SERVER_NAME"].split(" ")[0] + "_"
for annotation in annotations :
if annotation.startswith("bunkerized-nginx.") and annotation.replace("bunkerized-nginx.", "", 1) != "" and annotation.replace("bunkerized-nginx.", "", 1) != "AUTOCONF" :
env[prefix + annotation.replace("bunkerized-nginx.", "", 1)] = annotations[annotation]
return env
def __rules_to_env(self, rules, namespace="default") :
env = {}
first_servers = []
for rule in rules :
rule = rule.to_dict()
prefix = ""
if "host" in rule :
prefix = rule["host"] + "_"
first_servers.append(rule["host"])
if not "http" in rule or not "paths" in rule["http"] :
continue
for path in rule["http"]["paths"] :
env[prefix + "USE_REVERSE_PROXY"] = "yes"
env[prefix + "REVERSE_PROXY_URL"] = path["path"]
env[prefix + "REVERSE_PROXY_HOST"] = "http://" + path["backend"]["service_name"] + "." + namespace + ".svc.cluster.local:" + str(path["backend"]["service_port"])
env["SERVER_NAME"] = " ".join(first_servers)
return env
def get_env(self) :
pods = self.__get_pods()
ingresses = self.__get_ingresses()
services = self.__get_services()
env = {}
first_servers = []
for pod in pods :
env.update(self.__pod_to_env(pod.spec.containers[0].env))
if "SERVER_NAME" in env and env["SERVER_NAME"] != "" :
first_servers.extend(env["SERVER_NAME"].split(" "))
for ingress in ingresses :
env.update(self.__rules_to_env(ingress.spec.rules, namespace=ingress.metadata.namespace))
if ingress.spec.tls :
for tls_entry in ingress.spec.tls :
for host in tls_entry.hosts :
env[host + "_AUTO_LETS_ENCRYPT"] = "yes"
if "SERVER_NAME" in env and env["SERVER_NAME"] != "" :
first_servers.extend(env["SERVER_NAME"].split(" "))
for service in services :
if service.metadata.annotations is not None and "bunkerized-nginx.SERVER_NAME" in service.metadata.annotations :
env.update(self.__annotations_to_env(service.metadata.annotations))
first_servers.append(service.metadata.annotations["bunkerized-nginx.SERVER_NAME"])
first_servers = list(dict.fromkeys(first_servers))
if len(first_servers) == 0 :
env["SERVER_NAME"] = ""
else :
env["SERVER_NAME"] = " ".join(first_servers)
return self._fix_env(env)
def process_events(self, current_env) :
self.__old_env = current_env
t_pod = Thread(target=self.__watch_pod)
t_ingress = Thread(target=self.__watch_ingress)
t_service = Thread(target=self.__watch_service)
t_pod.start()
t_ingress.start()
t_service.start()
t_pod.join()
t_ingress.join()
t_service.join()
def __watch_pod(self) :
w = watch.Watch()
for event in w.stream(self.__api.list_pod_for_all_namespaces, label_selector="bunkerized-nginx") :
self.lock.acquire()
new_env = self.get_env()
if new_env != self.__old_env :
try :
if self.gen_conf(new_env) :
self.__old_env = new_env.copy()
log("CONTROLLER", "INFO", "successfully generated new configuration")
if self.reload() :
log("controller", "INFO", "successful reload")
else :
log("controller", "ERROR", "failed reload")
except :
log("controller", "ERROR", "exception while receiving event")
self.lock.release()
def __watch_ingress(self) :
w = watch.Watch()
for event in w.stream(self.__extensions_api.list_ingress_for_all_namespaces, label_selector="bunkerized-nginx") :
self.lock.acquire()
new_env = self.get_env()
if new_env != self.__old_env :
try :
if self.gen_conf(new_env) :
self.__old_env = new_env.copy()
log("CONTROLLER", "INFO", "successfully generated new configuration")
if self.reload() :
log("controller", "INFO", "successful reload")
else :
log("controller", "ERROR", "failed reload")
except :
log("controller", "ERROR", "exception while receiving event")
self.lock.release()
def __watch_service(self) :
w = watch.Watch()
for event in w.stream(self.__api.list_service_for_all_namespaces, label_selector="bunkerized-nginx") :
self.lock.acquire()
new_env = self.get_env()
if new_env != self.__old_env :
try :
if self.gen_conf(new_env) :
self.__old_env = new_env.copy()
log("CONTROLLER", "INFO", "successfully generated new configuration")
if self.reload() :
log("controller", "INFO", "successful reload")
else :
log("controller", "ERROR", "failed reload")
except :
log("controller", "ERROR", "exception while receiving event")
self.lock.release()
def reload(self) :
return self._reload(self.__get_services(autoconf=True))
def wait(self) :
self.lock.acquire()
try :
# Wait for at least one bunkerized-nginx pod
pods = self.__get_pods()
while len(pods) == 0 :
time.sleep(1)
pods = self.__get_pods()
# Wait for at least one bunkerized-nginx service
services = self.__get_services(autoconf=True)
while len(services) == 0 :
time.sleep(1)
services = self.__get_services(autoconf=True)
# Generate first config
env = self.get_env()
if not self.gen_conf(env) :
self.lock.release()
return False, env
# Wait for bunkerized-nginx
self.lock.release()
return self._config.wait(services), env
except :
pass
self.lock.release()
return False, {}

View File

@@ -0,0 +1,42 @@
import socketserver, threading, os, stat
from logger import log
class ReloadServerHandler(socketserver.StreamRequestHandler):
def handle(self) :
locked = False
try :
while True :
data = self.request.recv(512)
if not data or data not in [b"lock", b"reload", b"unlock"] :
break
if data == b"lock" :
self.server.controller.lock.acquire()
locked = True
self.request.sendall(b"ok")
elif data == b"unlock" :
self.server.controller.lock.release()
locked = False
self.request.sendall(b"ok")
elif data == b"reload" :
ret = self.server.controller.reload()
if ret :
self.request.sendall(b"ok")
else :
self.request.sendall(b"ko")
except Exception as e :
log("RELOADSERVER", "ERROR", "exception : " + str(e))
if locked :
self.server.controller.lock.release()
def run_reload_server(controller) :
server = socketserver.UnixStreamServer("/tmp/autoconf.sock", ReloadServerHandler)
os.chown("/tmp/autoconf.sock", 0, 101)
os.chmod("/tmp/autoconf.sock", 0o770)
server.controller = controller
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return (server, thread)
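A minimal client sketch for this server, assuming the /tmp/autoconf.sock path and the lock/reload/unlock protocol implemented above; since the handler releases the lock when the connection closes, all three orders must be sent over a single connection :
#!/usr/bin/python3
import socket, sys
client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client.connect("/tmp/autoconf.sock")
# Take the lock, ask for a reload, then release the lock :
for order in [b"lock", b"reload", b"unlock"] :
    client.send(order)
    if client.recv(512) != b"ok" :
        client.close()
        sys.exit(1)
client.close()
sys.exit(0)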

View File

@@ -0,0 +1,85 @@
import docker, time
from threading import Lock
from logger import log
import Controller
class SwarmController(Controller.Controller) :
def __init__(self, docker_host, api_uri, http_port) :
super().__init__(Controller.Type.SWARM, api_uri=api_uri, lock=Lock(), http_port=http_port)
self.__client = docker.DockerClient(base_url=docker_host)
def __get_instances(self) :
return self.__client.services.list(filters={"label" : "bunkerized-nginx.AUTOCONF"})
def __get_services(self) :
return self.__client.services.list(filters={"label" : "bunkerized-nginx.SERVER_NAME"})
def get_env(self) :
env = {}
for instance in self.__get_instances() :
for variable in instance.attrs["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"] :
env[variable.split("=")[0]] = variable.replace(variable.split("=")[0] + "=", "", 1)
first_servers = []
if "SERVER_NAME" in env and env["SERVER_NAME"] != "" :
first_servers = env["SERVER_NAME"].split(" ")
for service in self.__get_services() :
first_server = service.attrs["Spec"]["Labels"]["bunkerized-nginx.SERVER_NAME"].split(" ")[0]
first_servers.append(first_server)
for variable, value in service.attrs["Spec"]["Labels"].items() :
if variable.startswith("bunkerized-nginx.") and variable != "bunkerized-nginx.AUTOCONF" :
env[first_server + "_" + variable.replace("bunkerized-nginx.", "", 1)] = value
if len(first_servers) == 0 :
env["SERVER_NAME"] = ""
else :
env["SERVER_NAME"] = " ".join(first_servers)
return self._fix_env(env)
def process_events(self, current_env) :
old_env = current_env
# TODO : check why filter isn't working as expected
#for event in self.__client.events(decode=True, filters={"type": "service", "label": ["bunkerized-nginx.AUTOCONF", "bunkerized-nginx.SERVER_NAME"]}) :
for event in self.__client.events(decode=True, filters={"type": "service"}) :
new_env = self.get_env()
if new_env != old_env :
self.lock.acquire()
try :
log("controller", "INFO", "generating new configuration")
if self.gen_conf(new_env) :
old_env = new_env.copy()
log("controller", "INFO", "successfully generated new configuration")
if self.reload() :
log("controller", "INFO", "successful reload")
else :
log("controller", "ERROR", "failed reload")
else :
log("controller", "ERROR", "can't generate new configuration")
except :
log("controller", "ERROR", "exception while receiving event")
self.lock.release()
def reload(self) :
return self._reload(self.__get_instances())
def wait(self) :
self.lock.acquire()
try :
# Wait for a service
instances = self.__get_instances()
while len(instances) == 0 :
time.sleep(1)
instances = self.__get_instances()
# Generate first config
env = self.get_env()
if not self.gen_conf(env) :
self.lock.release()
return False, env
# Wait for nginx
self.lock.release()
return self._config.wait(instances), env
except :
pass
self.lock.release()
return False, {}

autoconf/src/app.py Normal file
View File

@@ -0,0 +1,46 @@
#!/usr/bin/python3
from ReloadServer import run_reload_server
import docker, os, stat, sys, select, threading
from DockerController import DockerController
from SwarmController import SwarmController
from IngressController import IngressController
from logger import log
# Get variables
swarm = os.getenv("SWARM_MODE", "no") == "yes"
kubernetes = os.getenv("KUBERNETES_MODE", "no") == "yes"
api_uri = os.getenv("API_URI", "")
docker_host = os.getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
http_port = os.getenv("HTTP_PORT", "8080")
# Instantiate the controller
if swarm :
log("autoconf", "INFO", "swarm mode detected")
controller = SwarmController(docker_host, api_uri, http_port)
elif kubernetes :
log("autoconf", "INFO", "kubernetes mode detected")
controller = IngressController(api_uri, http_port)
else :
log("autoconf", "INFO", "docker mode detected")
controller = DockerController(docker_host)
# Run the reload server in background if needed
if swarm or kubernetes :
log("autoconf", "INFO", "start reload server in background")
(server, thread) = run_reload_server(controller)
# Wait for instances
log("autoconf", "INFO", "wait until a bunkerized-nginx instance is started ...")
ret, env = controller.wait()
if ret :
log("autoconf", "INFO", "bunkerized-nginx instances started")
else :
log("autoconf", "ERROR", "bunkerized-nginx instances not started")
# Process events
log("autoconf", "INFO", "waiting for events ...")
controller.process_events(env)

autoconf/src/logger.py Normal file
View File

@@ -0,0 +1,6 @@
import datetime
def log(title, severity, msg) :
when = datetime.datetime.today().strftime("[%Y-%m-%d %H:%M:%S]")
what = title + " - " + severity + " - " + msg
print(when + " " + what, flush=True)
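A quick usage sketch (the timestamp shown is illustrative) :
# log("autoconf", "INFO", "swarm mode detected") prints something like :
# [2021-08-23 12:00:00] autoconf - INFO - swarm mode detected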

View File

@@ -1,24 +0,0 @@
#!/usr/bin/python3
import datetime
def log(event) :
print("[" + str(datetime.datetime.now().replace(microsecond=0)) + "] " + event, flush=True)
def replace_in_file(file, old_str, new_str) :
with open(file) as f :
data = f.read()
data = data[::-1].replace(old_str[::-1], new_str[::-1], 1)[::-1]
with open(file, "w") as f :
f.write(data)
def install_cron(service, vars, crons) :
for var in vars :
if var in crons :
with open("/etc/crontabs/root", "a+") as f :
f.write(vars[var] + " /opt/cron/" + crons[var] + ".py " + service["Actor"]["ID"])
def uninstall_cron(service, vars, crons) :
for var in vars :
if var in crons :
replace_in_file("/etc/crontabs/root", vars[var] + " /opt/cron/" + crons[var] + ".py " + service["Actor"]["ID"] + "\n", "")

View File

@@ -1,163 +0,0 @@
#!/bin/sh
function git_secure_checkout() {
path="$1"
commit="$2"
ret=$(pwd)
cd $path
git checkout "${commit}^{commit}"
if [ $? -ne 0 ] ; then
echo "[!] Commit hash $commit is absent from submodules $path !"
exit 3
fi
cd $ret
}
function git_secure_clone() {
repo="$1"
commit="$2"
folder=$(echo "$repo" | sed -E "s@https://github.com/.*/(.*)\.git@\1@")
git clone "$repo"
cd "$folder"
git checkout "${commit}^{commit}"
if [ $? -ne 0 ] ; then
echo "[!] Commit hash $commit is absent from repository $repo !"
exit 2
fi
cd ..
}
NTASK=$(nproc)
# install build dependencies
apk add --no-cache --virtual build autoconf libtool automake git geoip-dev yajl-dev g++ gcc curl-dev libxml2-dev pcre-dev make linux-headers libmaxminddb-dev musl-dev lua-dev gd-dev gnupg brotli-dev openssl-dev
# compile and install ModSecurity library
cd /tmp
git_secure_clone https://github.com/SpiderLabs/ModSecurity.git 753145fbd1d6751a6b14fdd700921eb3cc3a1d35
cd ModSecurity
./build.sh
git submodule init
git submodule update
git_secure_checkout bindings/python 47a6925df187f96e4593afab18dc92d5f22bd4d5
git_secure_checkout others/libinjection bf234eb2f385b969c4f803b35fda53cffdd93922
git_secure_checkout test/test-cases/secrules-language-tests d03f4c1e930440df46c1faa37d820a919704d9da
./configure --enable-static=no --disable-doxygen-doc --disable-dependency-tracking
make -j $NTASK
make install-strip
# download and install CRS rules
cd /tmp
git_secure_clone https://github.com/coreruleset/coreruleset.git 7776fe23f127fd2315bad0e400bdceb2cabb97dc
cd coreruleset
mkdir /opt/owasp
cp -r rules /opt/owasp/crs
cp crs-setup.conf.example /opt/owasp/crs.conf
# get nginx modules
cd /tmp
# ModSecurity connector for nginx
git_secure_clone https://github.com/SpiderLabs/ModSecurity-nginx.git 2497e6ac654d0b117b9534aa735b757c6b11c84f
# headers more
git_secure_clone https://github.com/openresty/headers-more-nginx-module.git d6d7ebab3c0c5b32ab421ba186783d3e5d2c6a17
# geoip
git_secure_clone https://github.com/leev/ngx_http_geoip2_module.git 1cabd8a1f68ea3998f94e9f3504431970f848fbf
# cookie
git_secure_clone https://github.com/AirisX/nginx_cookie_flag_module.git c4ff449318474fbbb4ba5f40cb67ccd54dc595d4
# brotli
git_secure_clone https://github.com/google/ngx_brotli.git 9aec15e2aa6feea2113119ba06460af70ab3ea62
# LUA requirements
git_secure_clone https://github.com/openresty/luajit2.git fe32831adcb3f5fe9259a9ce404fc54e1399bba3
cd luajit2
make -j $NTASK
make install
cd /tmp
git_secure_clone https://github.com/openresty/lua-resty-core.git b7d0a681bb41e6e3f29e8ddc438ef26fd819bb19
cd lua-resty-core
make install
cd /tmp
git_secure_clone https://github.com/openresty/lua-resty-lrucache.git b2035269ac353444ac65af3969692bcae4fc1605
cd lua-resty-lrucache
make install
cd /tmp
git_secure_clone https://github.com/openresty/lua-resty-dns.git 24c9a69808aedfaf029ae57707cdef75d83e2d19
cd lua-resty-dns
make install
cd /tmp
git_secure_clone https://github.com/bungle/lua-resty-session.git f300870ce4eee3f4903e0565c589f1faf0c1c5aa
cd lua-resty-session
cp -r lib/resty/* /usr/local/lib/lua/resty
cd /tmp
git_secure_clone https://github.com/bungle/lua-resty-random.git 17b604f7f7dd217557ca548fc1a9a0d373386480
cd lua-resty-random
make install
cd /tmp
git_secure_clone https://github.com/openresty/lua-resty-string.git 9a543f8531241745f8814e8e02475351042774ec
cd lua-resty-string
make install
cd /tmp
git_secure_clone https://github.com/openresty/lua-cjson.git 0df488874f52a881d14b5876babaa780bb6200ee
cd lua-cjson
make -j $NTASK
make install
make install-extra
cd /tmp
git_secure_clone https://github.com/ittner/lua-gd.git 2ce8e478a8591afd71e607506bc8c64b161bbd30
cd lua-gd
make -j $NTASK
make INSTALL_PATH=/usr/local/lib/lua/5.1 install
cd /tmp
git_secure_clone https://github.com/ledgetech/lua-resty-http.git 984fdc26054376384e3df238fb0f7dfde01cacf1
cd lua-resty-http
make install
cd /tmp
git_secure_clone https://github.com/Neopallium/lualogging.git cadc4e8fd652be07a65b121a3e024838db330c15
cd lualogging
cp -r src/* /usr/local/lib/lua
cd /tmp
git_secure_clone https://github.com/diegonehab/luasocket.git 5b18e475f38fcf28429b1cc4b17baee3b9793a62
cd luasocket
make -j $NTASK
make CDIR_linux=lib/lua/5.1 LDIR_linux=lib/lua install
cd /tmp
git_secure_clone https://github.com/brunoos/luasec.git c6704919bdc85f3324340bdb35c2795a02f7d625
cd luasec
make linux -j $NTASK
make LUACPATH=/usr/local/lib/lua/5.1 LUAPATH=/usr/local/lib/lua install
cd /tmp
git_secure_clone https://github.com/crowdsecurity/lua-cs-bouncer.git 3c235c813fc453dcf51a391bc9e9a36ca77958b0
cd lua-cs-bouncer
mkdir /usr/local/lib/lua/crowdsec
cp lib/*.lua /usr/local/lib/lua/crowdsec
sed -i 's/require "lrucache"/require "resty.lrucache"/' /usr/local/lib/lua/crowdsec/CrowdSec.lua
sed -i 's/require "config"/require "crowdsec.config"/' /usr/local/lib/lua/crowdsec/CrowdSec.lua
cd /tmp
git_secure_clone https://github.com/hamishforbes/lua-resty-iputils.git 3151d6485e830421266eee5c0f386c32c835dba4
cd lua-resty-iputils
make LUA_LIB_DIR=/usr/local/lib/lua install
cd /tmp
git_secure_clone https://github.com/openresty/lua-nginx-module.git 2d23bc4f0a29ed79aaaa754c11bffb1080aa44ba
export LUAJIT_LIB=/usr/local/lib
export LUAJIT_INC=/usr/local/include/luajit-2.1
# compile and install dynamic modules
cd /tmp
wget https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz
wget https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz.asc
gpg --import /tmp/nginx-keys/*.key
check=$(gpg --verify /tmp/nginx-${NGINX_VERSION}.tar.gz.asc /tmp/nginx-${NGINX_VERSION}.tar.gz 2>&1 | grep "^gpg: Good signature from ")
if [ "$check" = "" ] ; then
echo "[!] Wrong signature from nginx source !"
exit 1
fi
tar -xvzf nginx-${NGINX_VERSION}.tar.gz
cd nginx-$NGINX_VERSION
CONFARGS=$(nginx -V 2>&1 | sed -n -e 's/^.*arguments: //p')
CONFARGS=${CONFARGS/-Os -fomit-frame-pointer -g/-Os}
./configure $CONFARGS --add-dynamic-module=/tmp/ModSecurity-nginx --add-dynamic-module=/tmp/headers-more-nginx-module --add-dynamic-module=/tmp/ngx_http_geoip2_module --add-dynamic-module=/tmp/nginx_cookie_flag_module --add-dynamic-module=/tmp/lua-nginx-module --add-dynamic-module=/tmp/ngx_brotli
make -j $NTASK modules
cp ./objs/*.so /usr/lib/nginx/modules
# remove build dependencies
apk del build

View File

@@ -2,7 +2,7 @@
rewrite_by_lua_block {
local api = require "api"
local api_whitelist_ip = {% raw %}{{% endraw %}{% if API_WHITELIST_IP != ""%}{% set elements = API_WHITELIST_IP.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw %}
local api_whitelist_ip = {% raw %}{{% endraw %}{% if API_WHITELIST_IP != ""%}{% set elements = API_WHITELIST_IP.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw +%}
local api_uri = "{{ API_URI }}"
local logger = require "logger"

View File

@@ -5,14 +5,14 @@ geoip2 /etc/nginx/geoip.mmdb {
}
map $geoip2_data_country_code $allowed_country {
default {% if WHITELIST_COUNTRY != "" %}no{% else %}yes{% endif %};
default {% if WHITELIST_COUNTRY != "" %}no{% else %}yes{% endif +%};
{% if WHITELIST_COUNTRY != "" %}
{% for country in WHITELIST_COUNTRY.split(" ") %}
{% for country in WHITELIST_COUNTRY.split(" ") +%}
{{ country }} yes;
{% endfor %}
{% endif %}
{% if BLACKLIST_COUNTRY != "" %}
{% for country in BLACKLIST_COUNTRY.split(" ") %}
{% for country in BLACKLIST_COUNTRY.split(" ") +%}
{{ country }} no;
{% endfor %}
{% endif %}

View File

@@ -1,49 +1,42 @@
init_by_lua_block {
local dataloader = require "dataloader"
local logger = require "logger"
local cjson = require "cjson"
local dataloader = require "dataloader"
local logger = require "logger"
local cjson = require "cjson"
local use_proxies = {% if has_value("BLOCK_PROXIES", "yes") %}true{% else %}false{% endif %}
local use_abusers = {% if has_value("BLOCK_ABUSERS", "yes") %}true{% else %}false{% endif %}
local use_tor_exit_nodes = {% if has_value("BLOCK_TOR_EXIT_NODE", "yes") %}true{% else %}false{% endif %}
local use_user_agents = {% if has_value("BLOCK_USER_AGENT", "yes") %}true{% else %}false{% endif %}
local use_referrers = {% if has_value("BLOCK_REFERRER", "yes") %}true{% else %}false{% endif %}
local use_crowdsec = {% if has_value("USE_CROWDSEC", "yes") %}true{% else %}false{% endif %}
local use_redis = {% if USE_REDIS == "yes" %}true{% else %}false{% endif +%}
if use_proxies then
dataloader.load_ip("/etc/nginx/proxies.list", ngx.shared.proxies_data)
end
local use_proxies = {% if has_value("BLOCK_PROXIES", "yes") %}true{% else %}false{% endif +%}
local use_abusers = {% if has_value("BLOCK_ABUSERS", "yes") %}true{% else %}false{% endif +%}
local use_tor_exit_nodes = {% if has_value("BLOCK_TOR_EXIT_NODE", "yes") %}true{% else %}false{% endif +%}
local use_user_agents = {% if has_value("BLOCK_USER_AGENT", "yes") %}true{% else %}false{% endif +%}
local use_referrers = {% if has_value("BLOCK_REFERRER", "yes") %}true{% else %}false{% endif +%}
if use_abusers then
dataloader.load_ip("/etc/nginx/abusers.list", ngx.shared.abusers_data)
end
if use_tor_exit_nodes then
dataloader.load_ip("/etc/nginx/tor-exit-nodes.list", ngx.shared.tor_exit_nodes_data)
end
if use_user_agents then
dataloader.load_raw("/etc/nginx/user-agents.list", ngx.shared.user_agents_data)
end
if use_referrers then
dataloader.load_raw("/etc/nginx/referrers.list", ngx.shared.referrers_data)
end
if use_crowdsec then
local cs = require "crowdsec.CrowdSec"
local ok, err = cs.init("/etc/nginx/crowdsec.conf")
if ok == nil then
logger.log(ngx.ERR, "CROWDSEC", err)
error()
if not use_redis then
if use_proxies then
dataloader.load_ip("/etc/nginx/proxies.list", ngx.shared.proxies_data)
end
if use_abusers then
dataloader.load_ip("/etc/nginx/abusers.list", ngx.shared.abusers_data)
end
if use_tor_exit_nodes then
dataloader.load_ip("/etc/nginx/tor-exit-nodes.list", ngx.shared.tor_exit_nodes_data)
end
if use_user_agents then
dataloader.load_raw("/etc/nginx/user-agents.list", ngx.shared.user_agents_data)
end
if use_referrers then
dataloader.load_raw("/etc/nginx/referrers.list", ngx.shared.referrers_data)
end
logger.log(ngx.ERR, "CROWDSEC", "*NOT AN ERROR* initialisation done")
end
-- Load plugins
ngx.shared.plugins_data:safe_set("plugins", nil, 0)
local p = io.popen("find /plugins -maxdepth 1 -type d ! -path /plugins")
local p = io.popen("find /opt/bunkerized-nginx/plugins -maxdepth 1 -type d ! -path /opt/bunkerized-nginx/plugins")
for dir in p:lines() do
-- read JSON
local file = io.open(dir .. "/plugin.json")
@@ -54,14 +47,24 @@ for dir in p:lines() do
ngx.shared.plugins_data:safe_set(data.id .. "_" .. k, v, 0)
end
file:close()
-- store plugin
local plugins, flags = ngx.shared.plugins_data:get("plugins")
if plugins == nil then
ngx.shared.plugins_data:safe_set("plugins", data.id, 0)
else
ngx.shared.plugins_data:safe_set("plugins", plugins .. " " .. data.id, 0)
-- call init
local plugin = require(data.id .. "/" .. data.id)
local init = true
if plugin["init"] ~= nil then
init = plugin.init()
end
-- store plugin
if init then
local plugins, flags = ngx.shared.plugins_data:get("plugins")
if plugins == nil then
ngx.shared.plugins_data:safe_set("plugins", data.id, 0)
else
ngx.shared.plugins_data:safe_set("plugins", plugins .. " " .. data.id, 0)
end
logger.log(ngx.ERR, "PLUGINS", "*NOT AN ERROR* plugin " .. data.name .. "/" .. data.version .. " has been loaded")
else
logger.log(ngx.ERR, "PLUGINS", "init failed for plugin " .. data.name .. "/" .. data.version)
end
logger.log(ngx.ERR, "PLUGINS", "*NOT AN ERROR* plugin " .. data.name .. "/" .. data.version .. " has been loaded")
else
logger.log(ngx.ERR, "PLUGINS", "Can't load " .. dir .. "/plugin.json")
end

View File

@@ -6,8 +6,7 @@ ssl_prefer_server_ciphers off;
ssl_session_tickets off;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m;
{% if "TLSv1.2" in HTTPS_PROTOCOLS %}
{% if "TLSv1.2" in HTTPS_PROTOCOLS +%}
ssl_dhparam /etc/nginx/dhparam;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
{% endif %}
include /etc/nginx/multisite-default-server-lets-encrypt-webroot.conf;
{% endif +%}

View File

@@ -1,3 +1,3 @@
location ~ ^/.well-known/acme-challenge/ {
root /acme-challenge;
root /opt/bunkerized-nginx/acme-challenge;
}

View File

@@ -1,11 +1,12 @@
server {
{% if LISTEN_HTTP == "yes" %}listen 0.0.0.0:{{ HTTP_PORT }} default_server{% endif %};
{% if LISTEN_HTTP == "yes" %}listen 0.0.0.0:{{ HTTP_PORT }} default_server{% endif +%};
server_name _;
{% if has_value("AUTO_LETS_ENCRYPT", "yes") %}include /etc/nginx/multisite-default-server-https.conf;{% endif %}
{% if has_value("AUTO_LETS_ENCRYPT", "yes") %}include /etc/nginx/multisite-default-server-https.conf;{% endif +%}
include /etc/nginx/multisite-default-server-lets-encrypt-webroot.conf;
{% if USE_API == "yes" %}
location ^~ {{ API_URI }} {
include /etc/nginx/api.conf;
}
{% endif %}
{% if DISABLE_DEFAULT_SERVER == "yes" %}include /etc/nginx/multisite-disable-default-server.conf;{% endif %}
{% if DISABLE_DEFAULT_SERVER == "yes" %}include /etc/nginx/multisite-disable-default-server.conf;{% endif +%}
}

View File

@@ -15,12 +15,13 @@ http {
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
lua_package_path "/usr/local/lib/lua/?.lua;;";
lua_package_path "/opt/bunkerized-nginx/lua/?.lua;/opt/bunkerized-nginx/plugins/?.lua;/opt/bunkerized-nginx/deps/lib/lua/?.lua;;";
lua_package_cpath "/opt/bunkerized-nginx/deps/lib/?.so;/opt/bunkerized-nginx/deps/lib/lua/?.so;;";
server {
listen 0.0.0.0:%HTTP_PORT% default_server;
server_name _;
location ~ ^/.well-known/acme-challenge/ {
root /acme-challenge;
root /opt/bunkerized-nginx/acme-challenge;
}
%USE_API%
location / {

View File

@@ -6,18 +6,14 @@ load_module /usr/lib/nginx/modules/ngx_http_geoip2_module.so;
load_module /usr/lib/nginx/modules/ngx_http_headers_more_filter_module.so;
load_module /usr/lib/nginx/modules/ngx_http_lua_module.so;
load_module /usr/lib/nginx/modules/ngx_http_modsecurity_module.so;
load_module /usr/lib/nginx/modules/ngx_stream_geoip2_module.so;
load_module /usr/lib/nginx/modules/ngx_http_brotli_filter_module.so;
load_module /usr/lib/nginx/modules/ngx_http_brotli_static_module.so;
# run in foreground
daemon off;
# PID file
pid /tmp/nginx.pid;
# worker number = CPU core(s)
worker_processes auto;
# worker number (default = auto)
worker_processes {{ WORKER_PROCESSES }};
# faster regexp
pcre_jit on;
@@ -47,13 +43,13 @@ http {
tcp_nodelay on;
# load mime types and set default one
include /etc/nginx/mime.types;
include /etc/nginx/mime-types.conf;
default_type application/octet-stream;
# write logs to local syslog
log_format logf '{{ LOG_FORMAT }}';
access_log /var/log/access.log logf;
error_log /var/log/error.log {{ LOG_LEVEL }};
access_log /var/log/nginx/access.log logf;
error_log /var/log/nginx/error.log {{ LOG_LEVEL }};
# temp paths
proxy_temp_path /tmp/proxy_temp;
@@ -78,43 +74,44 @@ http {
port_in_redirect off;
# lua path and dicts
lua_package_path "/usr/local/lib/lua/?.lua;/plugins/?.lua;;";
{% if has_value("USE_WHITELIST_IP", "yes") %}lua_shared_dict whitelist_ip_cache 10m;{% endif %}
{% if has_value("USE_WHITELIST_REVERSE", "yes") %}lua_shared_dict whitelist_reverse_cache 10m;{% endif %}
{% if has_value("USE_BLACKLIST_IP", "yes") %}lua_shared_dict blacklist_ip_cache 10m;{% endif %}
{% if has_value("USE_BLACKLIST_REVERSE", "yes") %}lua_shared_dict blacklist_reverse_cache 10m;{% endif %}
{% if has_value("USE_DNSBL", "yes") %}lua_shared_dict dnsbl_cache 10m;{% endif %}
{% if has_value("BLOCK_PROXIES", "yes") %}lua_shared_dict proxies_data 250m;{% endif %}
{% if has_value("BLOCK_ABUSERS", "yes") %}lua_shared_dict abusers_data 50m;{% endif %}
{% if has_value("BLOCK_TOR_EXIT_NODE", "yes") %}lua_shared_dict tor_exit_nodes_data 1m;{% endif %}
{% if has_value("BLOCK_USER_AGENT", "yes") %}lua_shared_dict user_agents_data 1m;{% endif %}
{% if has_value("BLOCK_USER_AGENT", "yes") %}lua_shared_dict user_agents_cache 10m;{% endif %}
{% if has_value("BLOCK_REFERRER", "yes") %}lua_shared_dict referrers_data 1m;{% endif %}
{% if has_value("BLOCK_REFERRER", "yes") %}lua_shared_dict referrers_cache 10m;{% endif %}
{% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_ban 10m;{% endif %}
{% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_count 10m;{% endif %}
lua_package_path "/opt/bunkerized-nginx/lua/?.lua;/opt/bunkerized-nginx/plugins/?.lua;/opt/bunkerized-nginx/deps/lib/lua/?.lua;;";
lua_package_cpath "/opt/bunkerized-nginx/deps/lib/?.so;/opt/bunkerized-nginx/deps/lib/lua/?.so;;";
{% if has_value("USE_WHITELIST_IP", "yes") %}lua_shared_dict whitelist_ip_cache 10m;{% endif +%}
{% if has_value("USE_WHITELIST_REVERSE", "yes") %}lua_shared_dict whitelist_reverse_cache 10m;{% endif +%}
{% if has_value("USE_BLACKLIST_IP", "yes") %}lua_shared_dict blacklist_ip_cache 10m;{% endif +%}
{% if has_value("USE_BLACKLIST_REVERSE", "yes") %}lua_shared_dict blacklist_reverse_cache 10m;{% endif +%}
{% if has_value("USE_DNSBL", "yes") %}lua_shared_dict dnsbl_cache 10m;{% endif +%}
{% if has_value("BLOCK_PROXIES", "yes") %}lua_shared_dict proxies_data 250m;{% endif +%}
{% if has_value("BLOCK_ABUSERS", "yes") %}lua_shared_dict abusers_data 50m;{% endif +%}
{% if has_value("BLOCK_TOR_EXIT_NODE", "yes") %}lua_shared_dict tor_exit_nodes_data 1m;{% endif +%}
{% if has_value("BLOCK_USER_AGENT", "yes") %}lua_shared_dict user_agents_data 1m;{% endif +%}
{% if has_value("BLOCK_USER_AGENT", "yes") %}lua_shared_dict user_agents_cache 10m;{% endif +%}
{% if has_value("BLOCK_REFERRER", "yes") %}lua_shared_dict referrers_data 1m;{% endif +%}
{% if has_value("BLOCK_REFERRER", "yes") %}lua_shared_dict referrers_cache 10m;{% endif +%}
{% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_ban 10m;{% endif +%}
{% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_count 10m;{% endif +%}
lua_shared_dict plugins_data 10m;
# shared memory zone for limit_req
{% if has_value("USE_LIMIT_REQ", "yes") %}limit_req_zone $binary_remote_addr$uri zone=limit:{{ LIMIT_REQ_CACHE }} rate={{ LIMIT_REQ_RATE }};{% endif %}
{% if has_value("USE_LIMIT_REQ", "yes") %}limit_req_zone $binary_remote_addr$uri zone=limit:{{ LIMIT_REQ_CACHE }} rate={{ LIMIT_REQ_RATE }};{% endif +%}
# shared memory zone for limit_conn
{% if has_value("USE_LIMIT_CONN", "yes") %}limit_conn_zone $binary_remote_addr zone=ddos:{{ LIMIT_CONN_CACHE }};{% endif %}
{% if has_value("USE_LIMIT_CONN", "yes") %}limit_conn_zone $binary_remote_addr zone=ddos:{{ LIMIT_CONN_CACHE }};{% endif +%}
# whitelist or blacklist country
{% if BLACKLIST_COUNTRY != "" or WHITELIST_COUNTRY != "" %}include /etc/nginx/geoip.conf;{% endif %}
{% if BLACKLIST_COUNTRY != "" or WHITELIST_COUNTRY != "" %}include /etc/nginx/geoip.conf;{% endif +%}
# zone for proxy_cache
{% if has_value("USE_PROXY_CACHE", "yes") %}proxy_cache_path /tmp/proxy_cache keys_zone=proxycache:{{ PROXY_CACHE_PATH_ZONE_SIZE }} {{ PROXY_CACHE_PATH_PARAMS }};{% endif %}
{% if has_value("USE_PROXY_CACHE", "yes") %}proxy_cache_path /tmp/proxy_cache keys_zone=proxycache:{{ PROXY_CACHE_PATH_ZONE_SIZE }} {{ PROXY_CACHE_PATH_PARAMS }};{% endif +%}
# custom http confs
include /http-confs/*.conf;
include /opt/bunkerized-nginx/http-confs/*.conf;
# LUA init block
include /etc/nginx/init-lua.conf;
# default server when MULTISITE=yes
{% if MULTISITE == "yes" %}include /etc/nginx/multisite-default-server.conf;{% endif %}
{% if MULTISITE == "yes" %}include /etc/nginx/multisite-default-server.conf;{% endif +%}
# server config(s)
{% if MULTISITE == "yes" and SERVER_NAME != "" %}
@@ -137,13 +134,13 @@ http {
{% endif %}
{% endif %}
{% endfor %}
{% for first_server in map_servers %}
{% for first_server in map_servers +%}
include /etc/nginx/{{ first_server }}/server.conf;
{% endfor %}
{% elif MULTISITE == "no" %}
{% elif MULTISITE == "no" +%}
include /etc/nginx/server.conf;
{% endif %}
# API
{% if USE_API == "yes" %}include /etc/nginx/api.conf;{% endif %}
{% if USE_API == "yes" %}include /etc/nginx/api.conf;{% endif +%}
}

View File

@@ -0,0 +1,26 @@
# Basic Authelia Config
# Send a subsequent request to Authelia to verify if the user is authenticated
# and has the right permissions to access the resource.
auth_request /authelia;
# Set the `target_url` variable based on the request. It will be used to build the portal
# URL with the correct redirection parameter.
auth_request_set $target_url $scheme://$http_host$request_uri;
# Set the X-Forwarded-User and X-Forwarded-Groups with the headers
# returned by Authelia for the backends which can consume them.
# This is not safe on its own, as the backend must make sure that these
# headers really come from the proxy. In the future, it will be safer to just use OAuth.
auth_request_set $user $upstream_http_remote_user;
auth_request_set $groups $upstream_http_remote_groups;
auth_request_set $name $upstream_http_remote_name;
auth_request_set $email $upstream_http_remote_email;
fastcgi_param REMOTE_USER $user;
fastcgi_param REMOTE_GROUPS $groups;
fastcgi_param REMOTE_NAME $name;
fastcgi_param REMOTE_EMAIL $email;
proxy_set_header Remote-User $user;
proxy_set_header Remote-Groups $groups;
proxy_set_header Remote-Name $name;
proxy_set_header Remote-Email $email;
{% if AUTHELIA_MODE == "portal" +%}
error_page 401 =302 {{ AUTHELIA_BACKEND }}/?rd=$target_url;
{% endif %}

View File

@@ -0,0 +1,40 @@
set $upstream_authelia {{ AUTHELIA_UPSTREAM }}/api/verify;
# Virtual endpoint created by nginx to forward auth requests.
location /authelia {
internal;
proxy_pass_request_body off;
proxy_pass $upstream_authelia;
proxy_set_header Content-Length "";
# Timeout if the real server is dead
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503;
# [REQUIRED] Needed by Authelia to check authorizations of the resource.
# Provide either X-Original-URL and X-Forwarded-Proto or
# X-Forwarded-Proto, X-Forwarded-Host and X-Forwarded-Uri or both.
# Those headers will be used by Authelia to deduce the target url of the user.
# Basic Proxy Config
client_body_buffer_size 128k;
proxy_set_header Host $host;
proxy_set_header X-Original-URL $scheme://$http_host$request_uri;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Method $request_method;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Uri $request_uri;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Ssl on;
proxy_redirect http:// $scheme://;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_cache_bypass $cookie_session;
proxy_no_cache $cookie_session;
proxy_buffers 4 32k;
# Advanced Proxy Config
send_timeout 5m;
proxy_read_timeout 240;
proxy_send_timeout 240;
proxy_connect_timeout 240;
}

View File

@@ -1 +1 @@
set_cookie_flag {{ COOKIE_FLAGS }}{% if COOKIE_AUTO_SECURE_FLAG == "yes" %} Secure{% endif %};
set_cookie_flag {{ COOKIE_FLAGS }}{% if COOKIE_AUTO_SECURE_FLAG == "yes" and (AUTO_LETS_ENCRYPT == "yes" or USE_CUSTOM_HTTPS == "yes" or GENERATE_SELF_SIGNED_SSL == "yes") %} Secure{% endif %};

View File

@@ -4,6 +4,6 @@ ssl_certificate_key {{ HTTPS_CUSTOM_KEY }};
ssl_protocols TLSv1.3;
ssl_prefer_server_ciphers off;
ssl_session_tickets off;
{% if STRICT_TRANSPORT_SECURITY != "" %}
{% if STRICT_TRANSPORT_SECURITY != "" +%}
more_set_headers 'Strict-Transport-Security: {{ STRICT_TRANSPORT_SECURITY }}';
{% endif %}

View File

@@ -14,11 +14,11 @@ location = {{ page }} {
{% set default_errors = ["400", "401", "403", "404", "429", "500", "501", "502", "503", "504"] %}
{% for default_error in default_errors %}
{% if not default_error + "=" in ERRORS %}
{% if not default_error + "=" in ERRORS +%}
error_page {{ default_error }} /errors/{{ default_error }}.html;
location = /errors/{{ default_error }}.html {
root /defaults;
root /opt/bunkerized-nginx/defaults;
modsecurity off;
internal;
}

View File

@@ -1,4 +1,10 @@
{% if REMOTE_PHP != "" +%}
fastcgi_param SCRIPT_FILENAME {{ REMOTE_PHP_PATH }}/$fastcgi_script_name;
{% elif LOCAL_PHP != "" +%}
fastcgi_param SCRIPT_FILENAME {{ LOCAL_PHP_PATH }}/$fastcgi_script_name;
{% else +%}
fastcgi_param SCRIPT_FILENAME $fastcgi_script_name;
{% endif %}
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;

View File

@@ -22,13 +22,13 @@ ssl_prefer_server_ciphers on;
ssl_session_tickets off;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m;
{% if STRICT_TRANSPORT_SECURITY != "" %}
{% if STRICT_TRANSPORT_SECURITY != "" +%}
more_set_headers 'Strict-Transport-Security: {{ STRICT_TRANSPORT_SECURITY }}';
{% endif %}
{% if "TLSv1.2" in HTTPS_PROTOCOLS %}
{% if "TLSv1.2" in HTTPS_PROTOCOLS +%}
ssl_dhparam /etc/nginx/dhparam;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
{% endif %}
{% if AUTO_LETS_ENCRYPT %}
{% if AUTO_LETS_ENCRYPT +%}
include {{ NGINX_PREFIX }}lets-encrypt-webroot.conf;
{% endif %}

View File

@@ -0,0 +1 @@
sub_filter '</body>' '{{ INJECT_BODY }}</body>';

View File

@@ -1,3 +1,3 @@
location ~ ^/.well-known/acme-challenge/ {
root /acme-challenge;
root /opt/bunkerized-nginx/acme-challenge;
}

View File

@@ -1,9 +1,9 @@
log_by_lua_block {
-- bad behavior
local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif %}
local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif +%}
local behavior = require "behavior"
local bad_behavior_status_codes = {% raw %}{{% endraw %}{% if BAD_BEHAVIOR_STATUS_CODES != "" %}{% set elements = BAD_BEHAVIOR_STATUS_CODES.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw %}
local bad_behavior_status_codes = {% raw %}{{% endraw %}{% if BAD_BEHAVIOR_STATUS_CODES != "" %}{% set elements = BAD_BEHAVIOR_STATUS_CODES.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw +%}
local bad_behavior_threshold = {{ BAD_BEHAVIOR_THRESHOLD }}
local bad_behavior_count_time = {{ BAD_BEHAVIOR_COUNT_TIME }}
local bad_behavior_ban_time = {{ BAD_BEHAVIOR_BAN_TIME }}

View File

@@ -1,6 +1,6 @@
{% if ANTIBOT_SESSION_SECRET == "random" %}
set $session_secret {{ random(32) }} ;
{% else %}
{% if ANTIBOT_SESSION_SECRET == "random" +%}
set $session_secret {{ random(32) }};
{% else +%}
set $session_secret {{ ANTIBOT_SESSION_SECRET }};
{% endif %}
set $session_check_addr on;
@@ -13,48 +13,49 @@ if ngx.req.is_internal() then
end
-- let's encrypt
local use_lets_encrypt = {% if AUTO_LETS_ENCRYPT == "yes" %}true{% else %}false{% endif %}
local use_lets_encrypt = {% if AUTO_LETS_ENCRYPT == "yes" %}true{% else %}false{% endif +%}
-- redis
local use_redis = {% if USE_REDIS == "yes" %}true{% else %}false{% endif +%}
local redis_host = "{{ REDIS_HOST }}"
-- external blacklists
local use_user_agents = {% if BLOCK_USER_AGENT == "yes" %}true{% else %}false{% endif %}
local use_proxies = {% if BLOCK_PROXIES == "yes" %}true{% else %}false{% endif %}
local use_abusers = {% if BLOCK_ABUSERS == "yes" %}true{% else %}false{% endif %}
local use_tor_exit_nodes = {% if BLOCK_TOR_EXIT_NODE == "yes" %}true{% else %}false{% endif %}
local use_referrers = {% if BLOCK_REFERRER == "yes" %}true{% else %}false{% endif %}
local use_user_agents = {% if BLOCK_USER_AGENT == "yes" %}true{% else %}false{% endif +%}
local use_proxies = {% if BLOCK_PROXIES == "yes" %}true{% else %}false{% endif +%}
local use_abusers = {% if BLOCK_ABUSERS == "yes" %}true{% else %}false{% endif +%}
local use_tor_exit_nodes = {% if BLOCK_TOR_EXIT_NODE == "yes" %}true{% else %}false{% endif +%}
local use_referrers = {% if BLOCK_REFERRER == "yes" %}true{% else %}false{% endif +%}
-- countries
local use_country = {% if WHITELIST_COUNTRY != "" or BLACKLIST_COUNTRY != "" %}true{% else %}false{% endif %}
-- crowdsec
local use_crowdsec = {% if USE_CROWDSEC == "yes" %}true{% else %}false{% endif %}
local use_country = {% if WHITELIST_COUNTRY != "" or BLACKLIST_COUNTRY != "" %}true{% else %}false{% endif +%}
-- antibot
local use_antibot_cookie = {% if USE_ANTIBOT == "cookie" %}true{% else %}false{% endif %}
local use_antibot_javascript = {% if USE_ANTIBOT == "javascript" %}true{% else %}false{% endif %}
local use_antibot_captcha = {% if USE_ANTIBOT == "captcha" %}true{% else %}false{% endif %}
local use_antibot_recaptcha = {% if USE_ANTIBOT == "recaptcha" %}true{% else %}false{% endif %}
local use_antibot_cookie = {% if USE_ANTIBOT == "cookie" %}true{% else %}false{% endif +%}
local use_antibot_javascript = {% if USE_ANTIBOT == "javascript" %}true{% else %}false{% endif +%}
local use_antibot_captcha = {% if USE_ANTIBOT == "captcha" %}true{% else %}false{% endif +%}
local use_antibot_recaptcha = {% if USE_ANTIBOT == "recaptcha" %}true{% else %}false{% endif +%}
-- resolvers
local dns_resolvers = {% raw %}{{% endraw %}{% if DNS_RESOLVERS != "" %}{% set elements = DNS_RESOLVERS.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw %}
local dns_resolvers = {% raw %}{{% endraw %}{% if DNS_RESOLVERS != "" %}{% set elements = DNS_RESOLVERS.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw +%}
-- whitelist
local use_whitelist_ip = {% if USE_WHITELIST_IP == "yes" %}true{% else %}false{% endif %}
local use_whitelist_reverse = {% if USE_WHITELIST_REVERSE == "yes" %}true{% else %}false{% endif %}
local whitelist_ip_list = {% raw %}{{% endraw %}{% if WHITELIST_IP_LIST != "" %}{% set elements = WHITELIST_IP_LIST.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw %}
local whitelist_reverse_list = {% raw %}{{% endraw %}{% if WHITELIST_REVERSE_LIST != "" %}{% set elements = WHITELIST_REVERSE_LIST.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw %}
local use_whitelist_ip = {% if USE_WHITELIST_IP == "yes" %}true{% else %}false{% endif +%}
local use_whitelist_reverse = {% if USE_WHITELIST_REVERSE == "yes" %}true{% else %}false{% endif +%}
local whitelist_ip_list = {% raw %}{{% endraw %}{% if WHITELIST_IP_LIST != "" %}{% set elements = WHITELIST_IP_LIST.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw +%}
local whitelist_reverse_list = {% raw %}{{% endraw %}{% if WHITELIST_REVERSE_LIST != "" %}{% set elements = WHITELIST_REVERSE_LIST.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw +%}
-- blacklist
local use_blacklist_ip = {% if USE_BLACKLIST_IP == "yes" %}true{% else %}false{% endif %}
local use_blacklist_reverse = {% if USE_BLACKLIST_REVERSE == "yes" %}true{% else %}false{% endif %}
local blacklist_ip_list = {% raw %}{{% endraw %}{% if BLACKLIST_IP_LIST != "" %}{% set elements = BLACKLIST_IP_LIST.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw %}
local blacklist_reverse_list = {% raw %}{{% endraw %}{% if BLACKLIST_REVERSE_LIST != "" %}{% set elements = BLACKLIST_REVERSE_LIST.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw %}
local use_blacklist_ip = {% if USE_BLACKLIST_IP == "yes" %}true{% else %}false{% endif +%}
local use_blacklist_reverse = {% if USE_BLACKLIST_REVERSE == "yes" %}true{% else %}false{% endif +%}
local blacklist_ip_list = {% raw %}{{% endraw %}{% if BLACKLIST_IP_LIST != "" %}{% set elements = BLACKLIST_IP_LIST.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw +%}
local blacklist_reverse_list = {% raw %}{{% endraw %}{% if BLACKLIST_REVERSE_LIST != "" %}{% set elements = BLACKLIST_REVERSE_LIST.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw +%}
-- dnsbl
local use_dnsbl = {% if USE_DNSBL == "yes" %}true{% else %}false{% endif %}
local dnsbl_list = {% raw %}{{% endraw %}{% if DNSBL_LIST != "" %}{% set elements = DNSBL_LIST.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw %}
local use_dnsbl = {% if USE_DNSBL == "yes" %}true{% else %}false{% endif +%}
local dnsbl_list = {% raw %}{{% endraw %}{% if DNSBL_LIST != "" %}{% set elements = DNSBL_LIST.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw +%}
-- bad behavior
local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif %}
local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif +%}
-- include LUA code
local whitelist = require "whitelist"
@@ -67,11 +68,13 @@ local recaptcha = require "recaptcha"
local iputils = require "resty.iputils"
local behavior = require "behavior"
local logger = require "logger"
local redis = require "resty.redis"
local checker = require "checker"
-- user variables
local antibot_uri = "{{ ANTIBOT_URI }}"
local whitelist_user_agent = {% raw %}{{% endraw %}{% if WHITELIST_USER_AGENT != "" %}{% set elements = WHITELIST_USER_AGENT.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw %}
local whitelist_uri = {% raw %}{{% endraw %}{% if WHITELIST_URI != "" %}{% set elements = WHITELIST_URI.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw %}
local whitelist_user_agent = {% raw %}{{% endraw %}{% if WHITELIST_USER_AGENT != "" %}{% set elements = WHITELIST_USER_AGENT.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw +%}
local whitelist_uri = {% raw %}{{% endraw %}{% if WHITELIST_URI != "" %}{% set elements = WHITELIST_URI.split(" ") %}{% for i in range(0, elements|length) %}"{{ elements[i] }}"{% if i < elements|length-1 %},{% endif %}{% endfor %}{% endif %}{% raw %}}{% endraw +%}
-- check if already in whitelist cache
if use_whitelist_ip and whitelist.ip_cached_ok() then
@@ -117,7 +120,7 @@ for k, v in pairs(whitelist_uri) do
end
-- check if it's certbot
if use_lets_encrypt and string.match(ngx.var.request_uri, "^/.well-known/acme-challenge/") then
if use_lets_encrypt and string.match(ngx.var.request_uri, "^/%.well%-known/acme%-challenge/[A-Za-z0-9%-%_]+$") then
logger.log(ngx.INFO, "LETSENCRYPT", "got a visit from Let's Encrypt")
ngx.exit(ngx.OK)
end
@@ -142,10 +145,21 @@ if use_bad_behavior and behavior.is_banned() then
ngx.exit(ngx.HTTP_FORBIDDEN)
end
-- our redis client
local redis_client = nil
if use_redis then
redis_client = redis:new()
local ok, err = redis_client:connect(redis_host, 6379)
if not ok then
redis_client = nil
logger.log(ngx.ERR, "REDIS", "Can't connect to the Redis service " .. redis_host)
end
end
-- check if IP is in proxies list
if use_proxies then
local value, flags = ngx.shared.proxies_data:get(iputils.ip2bin(ngx.var.remote_addr))
if value ~= nil then
local checker = checker:new("proxies", ngx.shared.proxies_data, redis_client, "simple")
if checker:check(iputils.ip2bin(ngx.var.remote_addr)) then
logger.log(ngx.WARN, "PROXIES", "IP " .. ngx.var.remote_addr .. " is in proxies list")
ngx.exit(ngx.HTTP_FORBIDDEN)
end
@@ -153,8 +167,8 @@ end
-- check if IP is in abusers list
if use_abusers then
local value, flags = ngx.shared.abusers_data:get(iputils.ip2bin(ngx.var.remote_addr))
if value ~= nil then
local checker = checker:new("abusers", ngx.shared.abusers_data, redis_client, "simple")
if checker:check(iputils.ip2bin(ngx.var.remote_addr)) then
logger.log(ngx.WARN, "ABUSERS", "IP " .. ngx.var.remote_addr .. " is in abusers list")
ngx.exit(ngx.HTTP_FORBIDDEN)
end
@@ -162,8 +176,8 @@ end
-- check if IP is in TOR exit nodes list
if use_tor_exit_nodes then
local value, flags = ngx.shared.tor_exit_nodes_data:get(iputils.ip2bin(ngx.var.remote_addr))
if value ~= nil then
local checker = checker:new("exit-nodes", ngx.shared.tor_exit_nodes_data, redis_client, "simple")
if checker:check(iputils.ip2bin(ngx.var.remote_addr)) then
logger.log(ngx.WARN, "TOR", "IP " .. ngx.var.remote_addr .. " is in TOR exit nodes list")
ngx.exit(ngx.HTTP_FORBIDDEN)
end
@@ -180,23 +194,9 @@ if use_user_agents and ngx.var.http_user_agent ~= nil then
end
end
if not whitelisted then
local value, flags = ngx.shared.user_agents_cache:get(ngx.var.http_user_agent)
if value == nil then
local patterns = ngx.shared.user_agents_data:get_keys(0)
for i, pattern in ipairs(patterns) do
if string.match(ngx.var.http_user_agent, pattern) then
value = "ko"
ngx.shared.user_agents_cache:set(ngx.var.http_user_agent, "ko", 86400)
break
end
end
if value == nil then
value = "ok"
ngx.shared.user_agents_cache:set(ngx.var.http_user_agent, "ok", 86400)
end
end
if value == "ko" then
logger.log(ngx.WARN, "USER-AGENT", "User-Agent " .. ngx.var.http_user_agent .. " is blacklisted")
local checker = checker:new("user-agents", ngx.shared.user_agents_data, redis_client, "match")
if checker:check(ngx.var.http_user_agent) then
logger.log(ngx.WARN, "USER-AGENTS", "User-Agent " .. ngx.var.http_user_agent .. " is blacklisted")
ngx.exit(ngx.HTTP_FORBIDDEN)
end
end
@@ -204,23 +204,9 @@ end
-- check if referrer is allowed
if use_referrer and ngx.var.http_referer ~= nil then
local value, flags = ngx.shared.referrers_cache:get(ngx.var.http_referer)
if value == nil then
local patterns = ngx.shared.referrers_data:get_keys(0)
for i, pattern in ipairs(patterns) do
if string.match(ngx.var.http_referer, pattern) then
value = "ko"
ngx.shared.referrers_cache:set(ngx.var.http_referer, "ko", 86400)
break
end
end
if value == nil then
value = "ok"
ngx.shared.referrers_cache:set(ngx.var.http_referer, "ok", 86400)
end
end
if value == "ko" then
logger.log(ngx.WARN, "REFERRER", "Referrer " .. ngx.var.http_referer .. " is blacklisted")
local checker = checker:new("referrers", ngx.shared.referrers_data, redis_client, "match")
if checker:check(ngx.var.http_referer) then
logger.log(ngx.WARN, "REFERRERS", "Referrer " .. ngx.var.http_referer .. " is blacklisted")
ngx.exit(ngx.HTTP_FORBIDDEN)
end
end
@@ -238,18 +224,6 @@ if use_dnsbl and not dnsbl.cached() then
end
end
-- check if IP is in CrowdSec DB
if use_crowdsec then
local ok, err = require "crowdsec.CrowdSec".allowIp(ngx.var.remote_addr)
if ok == nil then
logger.log(ngx.ERR, "CROWDSEC", err)
end
if not ok then
logger.log(ngx.WARN, "CROWDSEC", "denied " .. ngx.var.remote_addr)
ngx.exit(ngx.HTTP_FORBIDDEN)
end
end
-- cookie check
if use_antibot_cookie and ngx.var.uri ~= "/favicon.ico" then
if not cookie.is_set("uri") then
@@ -309,10 +283,10 @@ ngx.exit(ngx.OK)
}
{% if USE_ANTIBOT == "javascript" %}
{% if USE_ANTIBOT == "javascript" +%}
include {{ NGINX_PREFIX }}antibot-javascript.conf;
{% elif USE_ANTIBOT == "captcha" %}
{% elif USE_ANTIBOT == "captcha" +%}
include {{ NGINX_PREFIX }}antibot-captcha.conf;
{% elif USE_ANTIBOT == "recaptcha" %}
{% elif USE_ANTIBOT == "recaptcha" +%}
include {{ NGINX_PREFIX }}antibot-recaptcha.conf;
{% endif %}

View File

@@ -55,24 +55,24 @@ SecAuditLog /var/log/nginx/modsec_audit.log
# include OWASP CRS configuration
{% if USE_MODSECURITY_CRS == "yes" %}
include /opt/owasp/crs.conf
include /opt/bunkerized-nginx/crs-setup.conf
# custom CRS configurations before loading rules (exclusions)
{% if is_custom_conf("/modsec-crs-confs") %}
include /modsec-crs-confs/*.conf
{% if is_custom_conf("/opt/bunkerized-nginx/modsec-crs-confs") +%}
include /opt/bunkerized-nginx/modsec-crs-confs/*.conf
{% endif %}
{% if MULTISITE == "yes" and is_custom_conf("/modsec-crs-confs/" + FIRST_SERVER) %}
include /modsec-crs-confs/{{ FIRST_SERVER }}/*.conf
{% if MULTISITE == "yes" and is_custom_conf("/opt/bunkerized-nginx/modsec-crs-confs/" + FIRST_SERVER) +%}
include /opt/bunkerized-nginx/modsec-crs-confs/{{ FIRST_SERVER }}/*.conf
{% endif %}
# include OWASP CRS rules
include /opt/owasp/crs/*.conf
include /opt/bunkerized-nginx/crs/*.conf
{% endif %}
# custom rules after loading the CRS
{% if is_custom_conf("/modsec-confs") %}
include /modsec-confs/*.conf
{% if is_custom_conf("/opt/bunkerized-nginx/modsec-confs") +%}
include /opt/bunkerized-nginx/modsec-confs/*.conf
{% endif %}
{% if MULTISITE == "yes" and is_custom_conf("/modsec-confs/" + FIRST_SERVER) %}
include /modsec-confs/{{ FIRST_SERVER }}/*.conf
{% if MULTISITE == "yes" and is_custom_conf("/opt/bunkerized-nginx/modsec-confs/" + FIRST_SERVER) +%}
include /opt/bunkerized-nginx/modsec-confs/{{ FIRST_SERVER }}/*.conf
{% endif %}

View File

@@ -1,4 +1,9 @@
location ~ \.php$ {
fastcgi_pass {{ REMOTE_PHP }}:9000;
fastcgi_index index.php;
{% if REMOTE_PHP != "" +%}
set $backend "{{ REMOTE_PHP }}:9000";
fastcgi_pass $backend;
{% elif LOCAL_PHP != "" +%}
fastcgi_pass unix:{{ LOCAL_PHP }};
{% endif %}
fastcgi_index index.php;
}

View File

@@ -5,7 +5,7 @@ proxy_cache_key {{ PROXY_CACHE_KEY }};
proxy_no_cache {{ PROXY_NO_CACHE }};
proxy_cache_bypass {{ PROXY_CACHE_BYPASS }};
{% if PROXY_CACHE_VALID != "" %}
{% for element in PROXY_CACHE_VALID.split(" ") %}
{% for element in PROXY_CACHE_VALID.split(" ") +%}
proxy_cache_valid {{ element.split("=")[0] }} {{ element.split("=")[1] }};
{% endfor %}
{% endif %}

View File

@@ -1,5 +1,5 @@
{% if PROXY_REAL_IP_FROM != "" %}
{% for element in PROXY_REAL_IP_FROM.split(" ") %}
{% for element in PROXY_REAL_IP_FROM.split(" ") +%}
set_real_ip_from {{ element }};
{% endfor %}
{% endif %}

View File

@@ -0,0 +1,5 @@
{% if REDIRECT_TO_REQUEST_URI == "yes" %}
return 301 {{ REDIRECT_TO }}$request_uri;
{% else %}
return 301 {{ REDIRECT_TO }};
{% endif %}

View File

@@ -1,21 +1,25 @@
{% if USE_REVERSE_PROXY == "yes" %}
{% for k, v in all.items() %}
{% if k.startswith("REVERSE_PROXY_URL") and v != "" %}
{% if k.startswith("REVERSE_PROXY_URL") and v != "" +%}
{% set url = v %}
{% set host = all[k.replace("URL", "HOST")] if k.replace("URL", "HOST") in all else "" %}
{% set ws = all[k.replace("URL", "WS")] if k.replace("URL", "WS") in all else "" %}
{% set headers = all[k.replace("URL", "HEADERS")] if k.replace("URL", "HEADERS") in all else "" %}
location {{ url }} {% raw %}{{% endraw %}
location {{ url }} {% raw %}{{% endraw +%}
etag off;
proxy_pass {{ host }};
set $backend "{{ host }}";
proxy_pass $backend;
{% if USE_AUTHELIA == "yes" +%}
include {{ NGINX_PREFIX }}authelia-auth-request.conf;
{% endif %}
include {{ NGINX_PREFIX }}reverse-proxy-headers.conf;
{% if ws == "yes" %}
{% if ws == "yes" +%}
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
{% endif %}
{% if headers != "" %}
{% for header in headers.split(";") %}
{% for header in headers.split(";") +%}
proxy_set_header {{ header }};
{% endfor %}
{% endif %}

View File

@@ -1,24 +1,24 @@
# custom config before server block
include /pre-server-confs/*.conf;
{% if MULTISITE == "yes" %}
include /pre-server-confs/{{ FIRST_SERVER }}/*.conf;
include /opt/bunkerized-nginx/pre-server-confs/*.conf;
{% if MULTISITE == "yes" +%}
include /opt/bunkerized-nginx/pre-server-confs/{{ FIRST_SERVER }}/*.conf;
{% endif %}
server {
# FastCGI variables
{% if REMOTE_PHP != "" %}
{% if REMOTE_PHP != "" or LOCAL_PHP != "" +%}
include {{ NGINX_PREFIX }}fastcgi.conf;
{% endif %}
# custom config
include /server-confs/*.conf;
{% if MULTISITE == "yes" %}
include /server-confs/{{ FIRST_SERVER }}/*.conf;
include /opt/bunkerized-nginx/server-confs/*.conf;
{% if MULTISITE == "yes" +%}
include /opt/bunkerized-nginx/server-confs/{{ FIRST_SERVER }}/*.conf;
{% endif %}
# proxy real IP
{% if PROXY_REAL_IP == "yes" %}
{% if PROXY_REAL_IP == "yes" +%}
include {{ NGINX_PREFIX }}proxy-real-ip.conf;
{% endif %}
@@ -27,22 +27,22 @@ server {
include {{ NGINX_PREFIX }}log-lua.conf;
# ModSecurity
{% if USE_MODSECURITY == "yes" %}
{% if USE_MODSECURITY == "yes" +%}
include {{ NGINX_PREFIX }}modsecurity.conf;
{% endif %}
# HTTP listen
{% if LISTEN_HTTP == "yes" %}
{% if LISTEN_HTTP == "yes" +%}
listen 0.0.0.0:{{ HTTP_PORT }};
{% endif %}
# HTTPS listen + config
{% if AUTO_LETS_ENCRYPT == "yes" or USE_CUSTOM_HTTPS == "yes" or GENERATE_SELF_SIGNED_SSL == "yes" %}
{% if AUTO_LETS_ENCRYPT == "yes" or USE_CUSTOM_HTTPS == "yes" or GENERATE_SELF_SIGNED_SSL == "yes" +%}
include {{ NGINX_PREFIX }}https.conf;
{% endif %}
# HTTP to HTTPS
{% if REDIRECT_HTTP_TO_HTTPS == "yes" %}
{% if REDIRECT_HTTP_TO_HTTPS == "yes" +%}
include {{ NGINX_PREFIX }}redirect-http-to-https.conf;
{% endif %}
@@ -50,12 +50,12 @@ server {
server_name {{ SERVER_NAME }};
# disable default server
{% if DISABLE_DEFAULT_SERVER == "yes" and MULTISITE != "yes" %}
{% if DISABLE_DEFAULT_SERVER == "yes" and MULTISITE != "yes" +%}
include {{ NGINX_PREFIX }}disable-default-server.conf;
{% endif %}
# serve local files
{% if SERVE_FILES == "yes" %}
{% if SERVE_FILES == "yes" +%}
include {{ NGINX_PREFIX }}serve-files.conf;
{% endif %}
@@ -65,17 +65,17 @@ server {
}
# requests limiting
{% if USE_LIMIT_REQ == "yes" %}
{% if USE_LIMIT_REQ == "yes" +%}
include {{ NGINX_PREFIX }}limit-req.conf;
{% endif %}
# connections limiting
{% if USE_LIMIT_CONN == "yes" %}
{% if USE_LIMIT_CONN == "yes" +%}
include {{ NGINX_PREFIX }}limit-conn.conf;
{% endif %}
# auth basic
{% if USE_AUTH_BASIC == "yes" %}
{% if USE_AUTH_BASIC == "yes" +%}
{% if AUTH_BASIC_LOCATION == "sitewide" %}
include {{ NGINX_PREFIX }}auth-basic-sitewide.conf;
{% else %}
@@ -85,48 +85,48 @@ server {
# remove headers
{% if REMOVE_HEADERS != "" %}
{% for header in REMOVE_HEADERS.split(" ") %}
{% for header in REMOVE_HEADERS.split(" ") +%}
more_clear_headers '{{ header }}';
{% endfor %}
{% endif %}
# X-Frame-Option header
{% if X_FRAME_OPTIONS != "" %}
{% if X_FRAME_OPTIONS != "" +%}
include {{ NGINX_PREFIX }}x-frame-options.conf;
{% endif %}
# X-XSS-Protection header
{% if X_XSS_PROTECTION != "" %}
{% if X_XSS_PROTECTION != "" +%}
include {{ NGINX_PREFIX }}x-xss-protection.conf;
{% endif %}
# X-Content-Type header
{% if X_CONTENT_TYPE_OPTIONS != "" %}
{% if X_CONTENT_TYPE_OPTIONS != "" +%}
include {{ NGINX_PREFIX }}x-content-type-options.conf;
{% endif %}
# Content-Security-Policy header
{% if CONTENT_SECURITY_POLICY != "" %}
{% if CONTENT_SECURITY_POLICY != "" +%}
include {{ NGINX_PREFIX }}content-security-policy.conf;
{% endif %}
# Referrer-Policy header
{% if REFERRER_POLICY != "" %}
{% if REFERRER_POLICY != "" +%}
include {{ NGINX_PREFIX }}referrer-policy.conf;
{% endif %}
# Feature-Policy header
{% if FEATURE_POLICY != "" %}
{% if FEATURE_POLICY != "" +%}
include {{ NGINX_PREFIX }}feature-policy.conf;
{% endif %}
# Permissions-Policy header
{% if PERMISSIONS_POLICY != "" %}
{% if PERMISSIONS_POLICY != "" +%}
include {{ NGINX_PREFIX }}permissions-policy.conf;
{% endif %}
# cookie flags
{% if COOKIE_FLAGS != "" %}
{% if COOKIE_FLAGS != "" +%}
include {{ NGINX_PREFIX }}cookie-flags.conf;
{% endif %}
@@ -134,17 +134,17 @@ server {
include {{ NGINX_PREFIX }}error.conf;
# client caching
{% if USE_CLIENT_CACHE == "yes" %}
{% if USE_CLIENT_CACHE == "yes" +%}
include {{ NGINX_PREFIX }}client-cache.conf;
{% endif %}
# gzip compression
{% if USE_GZIP == "yes" %}
{% if USE_GZIP == "yes" +%}
include {{ NGINX_PREFIX }}gzip.conf;
{% endif %}
# brotli compression
{% if USE_BROTLI == "yes" %}
{% if USE_BROTLI == "yes" +%}
include {{ NGINX_PREFIX }}brotli.conf;
{% endif %}
@@ -155,22 +155,38 @@ server {
server_tokens {{ SERVER_TOKENS }};
# open file caching
{% if USE_OPEN_FILE_CACHE == "yes" %}
{% if USE_OPEN_FILE_CACHE == "yes" +%}
include {{ NGINX_PREFIX }}open-file-cache.conf;
{% endif %}
# proxy caching
{% if USE_PROXY_CACHE == "yes" %}
{% if USE_PROXY_CACHE == "yes" +%}
include {{ NGINX_PREFIX }}proxy-cache.conf;
{% endif %}
# authelia
{% if USE_AUTHELIA == "yes" +%}
include {{ NGINX_PREFIX }}authelia-upstream.conf;
include {{ NGINX_PREFIX }}authelia-auth-request.conf;
{% endif %}
# inject into body
{% if INJECT_BODY != "" +%}
include {{ NGINX_PREFIX }}inject-body.conf;
{% endif %}
# redirect everything to another host
{% if REDIRECT_TO != "" +%}
include {{ NGINX_PREFIX }}redirect-to.conf;
{% endif %}
# reverse proxy
{% if USE_REVERSE_PROXY == "yes" %}
{% if USE_REVERSE_PROXY == "yes" +%}
include {{ NGINX_PREFIX }}reverse-proxy.conf;
{% endif %}
# remote PHP
{% if REMOTE_PHP != "" %}
{% if REMOTE_PHP != "" or LOCAL_PHP != "" +%}
include {{ NGINX_PREFIX }}php.conf;
{% endif %}

View File

@@ -1,5 +0,0 @@
#!/bin/sh
# install dependencies
apk add certbot bash libmaxminddb libgcc lua yajl libstdc++ openssl py3-pip
pip3 install jinja2

View File

@@ -50,7 +50,7 @@ copyright = '2021, bunkerity'
author = 'bunkerity'
# The full version, including alpha/beta/rc tags
release = 'v1.2.7'
release = 'v1.3.0'
# -- General configuration ---------------------------------------------------
@@ -92,3 +92,8 @@ else :
# custom robots.txt
html_extra_path = ['robots.txt']
# toc depth
html_theme_options = {
"navigation_depth": 2
}

View File

@@ -87,18 +87,42 @@ Default value : *8443*
Context : *global*
The HTTPS port number used by nginx inside the container.
`WORKER_CONNECTIONS`
Values : *\<any positive integer\>*
Default value : 1024
Context : *global*
Sets the value of the [worker_connections](https://nginx.org/en/docs/ngx_core_module.html#worker_connections) directive.
`WORKER_RLIMIT_NOFILE`
Values : *\<any positive integer\>*
Default value : 2048
Context : *global*
Sets the value of the [worker_rlimit_nofile](https://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_nofile) directive.
`WORKER_PROCESSES`
Values : *\<any positive integer or auto\>*
Default value : auto
Context : *global*
Sets the value of the [worker_processes](https://nginx.org/en/docs/ngx_core_module.html#worker_processes) directive.
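Taken together, a variables.env excerpt pinning these worker directives to their documented default values would look like this :
```conf
# nginx worker tuning, shown here with the documented default values
WORKER_PROCESSES=auto
WORKER_CONNECTIONS=1024
WORKER_RLIMIT_NOFILE=2048
```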
`INJECT_BODY`
Values : *\<any HTML code\>*
Default value :
Context : *global*, *multisite*
Use this variable to inject any HTML code you want before the \</body\> tag (e.g. : `\<script src="https://..."\>`)
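As a minimal sketch (the script URL is hypothetical, and the exact quoting may depend on how you load the environment) :
```conf
# inject a hypothetical script right before the closing </body> tag
INJECT_BODY=<script src="https://www.example.com/js/analytics.js"></script>
```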
`REDIRECT_TO`
Values : *\<any valid absolute URI\>*
Default value :
Context : *global*, *multisite*
Use this variable if you want to redirect one server to another (e.g., redirect apex to www : `REDIRECT_TO=https://www.example.com`).
`REDIRECT_TO_REQUEST_URI`
Values : *yes* | *no*
Default value : *no*
Context : *global*, *multisite*
When set to yes and `REDIRECT_TO` is set, the requested path will be appended to the redirection (e.g., https://example.com/something redirects to https://www.example.com/something).
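For instance, a sketch of an apex-to-www redirect that keeps the requested path :
```conf
# requests to https://example.com/foo are redirected to https://www.example.com/foo
REDIRECT_TO=https://www.example.com
REDIRECT_TO_REQUEST_URI=yes
```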
### Information leak
`SERVER_TOKENS`
@@ -391,6 +415,12 @@ Default value : *contact@first-domain-in-server-name*
Context : *global*, *multisite*
Define the contact email address declared in the certificate.
`USE_LETS_ENCRYPT_STAGING`
Values : *yes* | *no*
Default value : *no*
Context : *global*, *multisite*
When set to yes, it tells certbot to use the [staging environment](https://letsencrypt.org/docs/staging-environment/) for Let's Encrypt certificate generation. Useful when you are testing your deployments to avoid being rate limited in the production environment.
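A minimal sketch for a test deployment :
```conf
# use the Let's Encrypt staging environment while testing to avoid rate limits
AUTO_LETS_ENCRYPT=yes
USE_LETS_ENCRYPT_STAGING=yes
```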
### HTTP
`LISTEN_HTTP`
@@ -521,6 +551,8 @@ Sets the value of the [SecAuditEngine directive](https://github.com/SpiderLabs/M
## Security headers
If you want to keep your application headers and tell bunkerized-nginx not to override them, just set the corresponding environment variable to an empty value (e.g., `CONTENT_SECURITY_POLICY=`, `PERMISSIONS_POLICY=`, ...).
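For example, to keep the headers already set by your application for these two directives :
```conf
# empty values tell bunkerized-nginx to leave the application's headers untouched
CONTENT_SECURITY_POLICY=
PERMISSIONS_POLICY=
```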
`X_FRAME_OPTIONS`
Values : *DENY* | *SAMEORIGIN* | *ALLOW-FROM https://www.website.net*
Default value : *DENY*
@@ -845,6 +877,18 @@ Default value : */app*
Context : *global*, *multisite*
The path where the PHP files are located inside the server specified in `REMOTE_PHP`.
`LOCAL_PHP`
Values : *\<any valid absolute path\>*
Default value :
Context : *global*, *multisite*
Set the absolute path of the unix socket file of a local PHP-FPM instance to execute .php files.
`LOCAL_PHP_PATH`
Values : *\<any valid absolute path\>*
Default value : */app*
Context : *global*, *multisite*
The path where the PHP files are located for the local PHP-FPM instance specified in `LOCAL_PHP`.
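A minimal sketch, assuming a hypothetical PHP-FPM socket path (/app is the documented default for the files path) :
```conf
# hypothetical unix socket of a local PHP-FPM instance
LOCAL_PHP=/run/php/php-fpm.sock
LOCAL_PHP_PATH=/app
```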
## Bad behavior
`USE_BAD_BEHAVIOR`
@@ -860,45 +904,83 @@ Context : *global*, *multisite*
List of HTTP status codes considered as "suspicious".
`BAD_BEHAVIOR_THRESHOLD`
Values : *<any positive integer>*
Values : *\<any positive integer\>*
Default value : *10*
Context : *global*, *multisite*
The number of "suspicious" HTTP status code before the corresponding IP is banned.
`BAD_BEHAVIOR_BAN_TIME`
Values : *<any positive integer>*
Values : *\<any positive integer\>*
Default value : *86400*
Context : *global*, *multisite*
The duration (in seconds) of a ban when the corresponding IP has reached the `BAD_BEHAVIOR_THRESHOLD`.
`BAD_BEHAVIOR_COUNT_TIME`
Values : *<any positive integer>*
Values : *\<any positive integer\>*
Default value : *60*
Context : *global*, *multisite*
The duration (in seconds) before the counter of "suspicious" HTTP status codes is reset.
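Putting the documented defaults together, the ban logic reads like this :
```conf
# ban an IP for one day once it triggers 10 "suspicious" status codes within 60 seconds
USE_BAD_BEHAVIOR=yes
BAD_BEHAVIOR_THRESHOLD=10
BAD_BEHAVIOR_COUNT_TIME=60
BAD_BEHAVIOR_BAN_TIME=86400
```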
## Authelia
`USE_AUTHELIA`
Values : *yes* | *no*
Default value : *no*
Context : *global*, *multisite*
Enable or disable [Authelia](https://www.authelia.com/) support. See the [authelia example](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/authelia) for more information on how to set up Authelia with bunkerized-nginx.
`AUTHELIA_BACKEND`
Values : *\<any valid http(s) address\>*
Default value :
Context : *global*, *multisite*
The public Authelia address that users will be redirected to when they are asked to log in (e.g. : `https://auth.example.com`).
`AUTHELIA_UPSTREAM`
Values : *\<any valid http(s) address\>*
Default value :
Context : *global*, *multisite*
The private Authelia address used by nginx when performing requests (e.g. : http://my-authelia.local:9091).
`AUTHELIA_MODE`
Values : *portal* | *auth-basic*
Default value : *portal*
Context : *global*, *multisite*
Choose authentication mode : show a web page (`portal`) or a simple auth basic prompt (`auth-basic`).
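A sketch combining the example values given above :
```conf
# public portal users are redirected to, and private upstream queried by nginx
USE_AUTHELIA=yes
AUTHELIA_BACKEND=https://auth.example.com
AUTHELIA_UPSTREAM=http://my-authelia.local:9091
AUTHELIA_MODE=portal
```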
## Misc
`SWARM_MODE`
Values : *yes* | *no*
Default value : *no*
Context : *global*
Only set to *yes* when you use *bunkerized-nginx* with *autoconf* feature in swarm mode. More info [here](#swarm-mode).
Only set to *yes* when you use *bunkerized-nginx* with Docker Swarm integration.
`KUBERNETES_MODE`
Values : *yes* | *no*
Default value : *no*
Context : *global*
Only set to *yes* when you use bunkerized-nginx with Kubernetes integration.
`USE_API`
Values : *yes* | *no*
Default value : *no*
Context : *global*
Only set to *yes* when you use *bunkerized-nginx* with *autoconf* feature in swarm mode. More info [here](#swarm-mode).
Only set to *yes* when you use bunkerized-nginx with Swarm/Kubernetes integration or with the web UI.
`API_URI`
Values : *random* | *\<any valid URI path\>*
Default value : *random*
Context : *global*
Set it to a random path when you use *bunkerized-nginx* with *autoconf* feature in swarm mode. More info [here](#swarm-mode).
Set it to a random path when you use bunkerized-nginx with Swarm/Kubernetes integration or with the web UI.
`API_WHITELIST_IP`
Values : *\<list of IP/CIDR separated with space\>*
Default value : *192.168.0.0/16 172.16.0.0/12 10.0.0.0/8*
Context : *global*
List of IP/CIDR blocks allowed to send API orders using the `API_URI` URI.
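A sketch matching the Swarm/Kubernetes examples later in the documentation (the whitelist shown is the documented default) :
```conf
# the URI should be hard to guess since it gives access to the API
USE_API=yes
API_URI=/ChangeMeToSomethingHardToGuess
API_WHITELIST_IP=192.168.0.0/16 172.16.0.0/12 10.0.0.0/8
```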
`USE_REDIS`
Undocumented. Reserved for future use.
`REDIS_HOST`
Undocumented. Reserved for future use.

BIN (new image, 51 KiB, filename not shown)
BIN (modified image, 12 MiB)
BIN docs/img/docker.png Normal file (new image, 61 KiB)
BIN docs/img/kubernetes.png Normal file (new image, 128 KiB)
BIN (modified image, 76 KiB)
BIN docs/img/overview.png Normal file (new image, 84 KiB)
BIN docs/img/swarm.png Normal file (new image, 148 KiB)
BIN docs/img/web-ui.gif Normal file (new image, 1.6 MiB)

View File

@@ -1,13 +1,14 @@
# bunkerized-nginx official documentation
```{toctree}
:maxdepth: 2
:caption: Contents
introduction
integrations
quickstart_guide
special_folders
security_tuning
troubleshooting
volumes
web_ui
environment_variables
troubleshooting
plugins
```

docs/integrations.md Normal file
View File

@@ -0,0 +1,774 @@
# Integrations
## Docker
You can get official prebuilt Docker images of bunkerized-nginx for x86, x64, armv7 and aarch64/arm64 architectures on Docker Hub :
```shell
$ docker pull bunkerity/bunkerized-nginx
```
Or you can build it from source if you wish :
```shell
$ git clone https://github.com/bunkerity/bunkerized-nginx.git
$ cd bunkerized-nginx
$ docker build -t bunkerized-nginx .
```
To use bunkerized-nginx as a Docker container you have to pass specific environment variables, mount volumes and redirect ports to make it accessible from the outside.
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/docker.png?raw=true" />
To demonstrate the use of the Docker image, we will create a simple "Hello World" static file that will be served by bunkerized-nginx.
**One important thing to know is that the container runs as an unprivileged user with UID and GID 101. The reason behind this behavior is security : if a vulnerability is exploited, the attacker won't have full privileges inside the container. There is also a downside : because bunkerized-nginx makes heavy use of volumes, you will need to adjust the rights on the host.**
First create the environment on the host :
```shell
$ mkdir bunkerized-hello bunkerized-hello/www bunkerized-hello/certs
$ cd bunkerized-hello
$ chown root:101 www certs
$ chmod 750 www
$ chmod 770 certs
```
The www folder will contain our static files, which will be served by bunkerized-nginx, whereas the certs folder will store the automatically generated Let's Encrypt certificates.
Let's create a dummy static page in the www folder :
```shell
$ echo "Hello bunkerized World !" > www/index.html
$ chown root:101 www/index.html
$ chmod 740 www/index.html
```
It's time to run the container :
```shell
$ docker run \
-p 80:8080 \
-p 443:8443 \
-v "${PWD}/www:/www:ro" \
-v "${PWD}/certs:/etc/letsencrypt" \
-e SERVER_NAME=www.example.com \
-e AUTO_LETS_ENCRYPT=yes \
bunkerity/bunkerized-nginx
```
Or if you prefer docker-compose :
```yaml
version: '3'
services:
mybunkerized:
image: bunkerity/bunkerized-nginx
ports:
- 80:8080
- 443:8443
volumes:
- ./www:/www:ro
- ./certs:/etc/letsencrypt
environment:
- SERVER_NAME=www.example.com
- AUTO_LETS_ENCRYPT=yes
```
Important things to note :
- Replace www.example.com with your own domain (it must point to your server IP address if you want Let's Encrypt to work)
- Automatic Let's Encrypt is enabled thanks to `AUTO_LETS_ENCRYPT=yes` (since the default is `AUTO_LETS_ENCRYPT=no` you can remove the environment variable to disable Let's Encrypt)
- The container is exposing TCP/8080 for HTTP and TCP/8443 for HTTPS
- The /www volume is used to deliver static files and can be mounted as read-only for security reasons
- The /etc/letsencrypt volume is used to store certificates and must be mounted as read/write
Inspect the container logs until bunkerized-nginx is started, then visit http(s)://www.example.com to confirm that everything is working as expected.
This example is really simple but, as you can see in the [list of environment variables](https://bunkerized-nginx.readthedocs.io/en/latest/environment_variables.html), you may end up with a lot of environment variables depending on your use case. To make things cleaner, you can write the environment variables to a file :
```shell
$ cat variables.env
SERVER_NAME=www.example.com
AUTO_LETS_ENCRYPT=yes
```
And load the file when creating the container :
```shell
$ docker run ... --env-file "${PWD}/variables.env" ... bunkerity/bunkerized-nginx
```
Or if you prefer docker-compose :
```yaml
...
services:
mybunkerized:
...
env_file:
- ./variables.env
...
...
```
## Docker autoconf
The downside of using environment variables is that the container needs to be recreated each time there is an update, which is not very convenient. To counter that issue, you can use another image called bunkerized-nginx-autoconf which will listen for Docker events and automatically configure the bunkerized-nginx instance in real time without recreating the container. Instead of defining environment variables for the bunkerized-nginx container, you simply add labels to your web services and bunkerized-nginx-autoconf will "automagically" take care of the rest.
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/autoconf-docker.png?raw=true" />
First of all, you will need a network to allow communication between bunkerized-nginx and your web services :
```shell
$ docker network create services-net
```
We will also make use of a named volume to share the configuration between autoconf and bunkerized-nginx :
```shell
$ docker volume create bunkerized-vol
```
You can now create the bunkerized-nginx container :
```shell
$ docker run \
--name mybunkerized \
-l bunkerized-nginx.AUTOCONF \
--network services-net \
-p 80:8080 \
-p 443:8443 \
-v "${PWD}/www:/www:ro" \
-v "${PWD}/certs:/etc/letsencrypt" \
-v bunkerized-vol:/etc/nginx \
-e MULTISITE=yes \
-e SERVER_NAME= \
-e AUTO_LETS_ENCRYPT=yes \
bunkerity/bunkerized-nginx
```
The autoconf container can now be started :
```shell
$ docker run \
--name myautoconf \
--volumes-from mybunkerized:rw \
-v /var/run/docker.sock:/var/run/docker.sock:ro \
bunkerity/bunkerized-nginx-autoconf
```
Here is the docker-compose equivalent :
```yaml
version: '3'
services:
mybunkerized:
image: bunkerity/bunkerized-nginx
restart: always
ports:
- 80:8080
- 443:8443
volumes:
- ./certs:/etc/letsencrypt
- ./www:/www:ro
- bunkerized-vol:/etc/nginx
environment:
- SERVER_NAME=
- MULTISITE=yes
- AUTO_LETS_ENCRYPT=yes
labels:
- "bunkerized-nginx.AUTOCONF"
networks:
- services-net
myautoconf:
image: bunkerity/bunkerized-nginx-autoconf
restart: always
volumes_from:
- mybunkerized
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
depends_on:
- mybunkerized
volumes:
bunkerized-vol:
networks:
services-net:
name: services-net
```
Important things to note :
- autoconf generates config files and other artefacts for bunkerized-nginx, so they need to share the same volumes
- autoconf must have access to the Docker socket in order to get events, access to labels and send SIGHUP signal (reload order) to bunkerized-nginx
- bunkerized-nginx must have the bunkerized-nginx.AUTOCONF label
- bunkerized-nginx must be started in [multisite mode](https://bunkerized-nginx.readthedocs.io/en/latest/quickstart_guide.html#multisite) with the `MULTISITE=yes` environment variable
- When setting the `SERVER_NAME` environment variable to an empty value, bunkerized-nginx won't generate any web service configuration at startup
- The `AUTO_LETS_ENCRYPT=yes` will be applied to all subsequent web service configurations, unless overridden by the web service labels
Check the logs of both autoconf and bunkerized-nginx to see if everything is working as expected.
You can now create a new web service and add environment variables as labels with the `bunkerized-nginx.` prefix to let the autoconf service "automagically" do the configuration for you :
```shell
$ docker run \
--name myservice \
--network services-net \
-l bunkerized-nginx.SERVER_NAME=www.example.com \
-l bunkerized-nginx.USE_REVERSE_PROXY=yes \
-l bunkerized-nginx.REVERSE_PROXY_URL=/ \
-l bunkerized-nginx.REVERSE_PROXY_HOST=http://myservice \
tutum/hello-world
```
docker-compose equivalent :
```yaml
version: "3"
services:
myservice:
image: tutum/hello-world
networks:
services-net:
aliases:
- myservice
labels:
- "bunkerized-nginx.SERVER_NAME=www.example.com"
- "bunkerized-nginx.USE_REVERSE_PROXY=yes"
- "bunkerized-nginx.REVERSE_PROXY_URL=/"
- "bunkerized-nginx.REVERSE_PROXY_HOST=http://myservice"
networks:
services-net:
external:
name: services-net
```
Please note that if you want to override the `AUTO_LETS_ENCRYPT=yes` previously defined in the bunkerized-nginx container, you simply need to add the `bunkerized-nginx.AUTO_LETS_ENCRYPT=no` label.
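For example, a sketch of the service container with the override label added (other options elided) :
```shell
$ docker run ... \
    -l bunkerized-nginx.SERVER_NAME=www.example.com \
    -l bunkerized-nginx.AUTO_LETS_ENCRYPT=no \
    ... tutum/hello-world
```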
Look at the logs of both autoconf and bunkerized-nginx to check if the configuration has been generated and loaded by bunkerized-nginx. You should now be able to visit http(s)://www.example.com.
When your container is not needed anymore, you can delete it as usual. The autoconf should get the event and generate the configuration again.
## Docker Swarm
Using bunkerized-nginx in a Docker Swarm cluster requires a shared folder accessible from both managers and workers (anything like NFS, GlusterFS, CephFS or even SSHFS will work). The deployment and configuration is very similar to the "Docker autoconf" one, but with services instead of containers. A service based on the bunkerized-nginx-autoconf image needs to be scheduled on a manager node (don't worry, it doesn't expose any network port for obvious security reasons). This service will listen for Docker Swarm events like service creation or deletion and generate the configuration according to the labels of each service. Once configuration generation is done, the bunkerized-nginx-autoconf service will send a reload order to all the bunkerized-nginx tasks so they can load the new configuration.
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/swarm.png?raw=true" />
**We will assume that a shared directory is mounted at the /shared location on both your managers and workers. Keep in mind that bunkerized-nginx and autoconf are running as unprivileged users with UID and GID 101. You must set the rights and permissions of the subfolders in /shared accordingly.**
In this setup we will deploy bunkerized-nginx in global mode on all workers and autoconf as a single replica on a manager.
First of all, you will need to set up the shared folders :
```shell
$ cd /shared
$ mkdir www confs letsencrypt acme-challenge
$ chown root:101 www confs letsencrypt acme-challenge
$ chmod 770 www confs letsencrypt acme-challenge
```
Then you will need to create 2 networks, one for the communication between bunkerized-nginx and autoconf and the other one for the communication between bunkerized-nginx and the web services :
```shell
$ docker network create -d overlay --attachable bunkerized-net
$ docker network create -d overlay --attachable services-net
```
We can now start bunkerized-nginx as a service :
```shell
$ docker service create \
--name mybunkerized \
--mode global \
--constraint node.role==worker \
-l bunkerized-nginx.AUTOCONF \
--network bunkerized-net \
-p published=80,target=8080,mode=host \
-p published=443,target=8443,mode=host \
--mount type=bind,source=/shared/confs,destination=/etc/nginx,ro \
--mount type=bind,source=/shared/www,destination=/www,ro \
--mount type=bind,source=/shared/letsencrypt,destination=/etc/letsencrypt,ro \
--mount type=bind,source=/shared/acme-challenge,destination=/acme-challenge,ro \
-e SWARM_MODE=yes \
-e USE_API=yes \
-e API_URI=/ChangeMeToSomethingHardToGuess \
-e SERVER_NAME= \
-e MULTISITE=yes \
-e AUTO_LETS_ENCRYPT=yes \
bunkerity/bunkerized-nginx
$ docker service update \
--network-add services-net \
mybunkerized
```
Once bunkerized-nginx has been started, you can start autoconf as a service :
```shell
$ docker service create \
--name myautoconf \
--replicas 1 \
--constraint node.role==manager \
--network bunkerized-net \
--mount type=bind,source=/var/run/docker.sock,destination=/var/run/docker.sock,ro \
--mount type=bind,source=/shared/confs,destination=/etc/nginx \
--mount type=bind,source=/shared/letsencrypt,destination=/etc/letsencrypt \
--mount type=bind,source=/shared/acme-challenge,destination=/acme-challenge \
-e SWARM_MODE=yes \
-e API_URI=/ChangeMeToSomethingHardToGuess \
bunkerity/bunkerized-nginx-autoconf
```
Or do the same with docker-compose if you wish :
```yaml
version: '3.8'
services:
nginx:
image: bunkerity/bunkerized-nginx
ports:
- published: 80
target: 8080
mode: host
protocol: tcp
- published: 443
target: 8443
mode: host
protocol: tcp
volumes:
- /shared/confs:/etc/nginx:ro
- /shared/www:/www:ro
- /shared/letsencrypt:/etc/letsencrypt:ro
- /shared/acme-challenge:/acme-challenge:ro
environment:
- SWARM_MODE=yes
- USE_API=yes
- API_URI=/ChangeMeToSomethingHardToGuess # must match API_URI from autoconf
- MULTISITE=yes
- SERVER_NAME=
- AUTO_LETS_ENCRYPT=yes
networks:
- bunkerized-net
- services-net
deploy:
mode: global
placement:
constraints:
- "node.role==worker"
# mandatory label
labels:
- "bunkerized-nginx.AUTOCONF"
autoconf:
image: bunkerity/bunkerized-nginx-autoconf
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- /shared/confs:/etc/nginx
- /shared/letsencrypt:/etc/letsencrypt
- /shared/acme-challenge:/acme-challenge
environment:
- SWARM_MODE=yes
- API_URI=/ChangeMeToSomethingHardToGuess # must match API_URI from nginx
networks:
- bunkerized-net
deploy:
replicas: 1
placement:
constraints:
- "node.role==manager"
# This will create the networks for you
networks:
bunkerized-net:
driver: overlay
attachable: true
name: bunkerized-net
services-net:
driver: overlay
attachable: true
name: services-net
```
Check the logs of both autoconf and bunkerized-nginx services to see if everything is working as expected.
You can now create a new service and add environment variables as labels with the `bunkerized-nginx.` prefix to let the autoconf service "automagically" do the configuration for you :
```shell
$ docker service create \
--name myservice \
--constraint node.role==worker \
--network services-net \
-l bunkerized-nginx.SERVER_NAME=www.example.com \
-l bunkerized-nginx.USE_REVERSE_PROXY=yes \
-l bunkerized-nginx.REVERSE_PROXY_URL=/ \
-l bunkerized-nginx.REVERSE_PROXY_HOST=http://myservice \
tutum/hello-world
```
docker-compose equivalent :
```yaml
version: "3"
services:
myservice:
image: tutum/hello-world
networks:
- services-net
deploy:
placement:
constraints:
- "node.role==worker"
labels:
- "bunkerized-nginx.SERVER_NAME=www.example.com"
- "bunkerized-nginx.USE_REVERSE_PROXY=yes"
- "bunkerized-nginx.REVERSE_PROXY_URL=/"
- "bunkerized-nginx.REVERSE_PROXY_HOST=http://myservice"
networks:
services-net:
external:
name: services-net
```
Please note that if you want to override the `AUTO_LETS_ENCRYPT=yes` previously defined in the bunkerized-nginx service, you simply need to add the `bunkerized-nginx.AUTO_LETS_ENCRYPT=no` label.
Look at the logs of both autoconf and bunkerized-nginx to check if the configuration has been generated and loaded by bunkerized-nginx. You should now be able to visit http(s)://www.example.com.
When your service is not needed anymore, you can delete it as usual. The autoconf should get the event and generate the configuration again.
## Kubernetes
**This integration is still in beta, please file an issue if you find a bug or have an idea on how to improve it.**
Using bunkerized-nginx in a Kubernetes cluster requires a shared folder accessible from the nodes (anything like NFS, GlusterFS, CephFS or even SSHFS will work). The bunkerized-nginx-autoconf acts as an Ingress Controller and connects to the k8s API to get cluster events and generate a new configuration when it's needed. Once the configuration is generated, the Ingress Controller sends a reload order to the bunkerized-nginx instances running in the cluster.
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/kubernetes.png?raw=true" />
**We will assume that a shared directory is mounted at the /shared location on your nodes. Keep in mind that bunkerized-nginx and autoconf are running as unprivileged users with UID and GID 101. You must set the rights and permissions of the subfolders in /shared accordingly.**
First of all, you will need to set up the shared folders :
```shell
$ cd /shared
$ mkdir www confs letsencrypt acme-challenge
$ chown root:nginx www confs letsencrypt acme-challenge
$ chmod 770 www confs letsencrypt acme-challenge
```
The first step is to declare the RBAC authorization that will be used by the Ingress Controller to access the Kubernetes API. A ready-to-use declaration is available here :
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: bunkerized-nginx-ingress-controller
rules:
- apiGroups: [""]
resources: ["services", "pods"]
verbs: ["get", "watch", "list"]
- apiGroups: ["extensions"]
resources: ["ingresses"]
verbs: ["get", "watch", "list"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: bunkerized-nginx-ingress-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: bunkerized-nginx-ingress-controller
subjects:
- kind: ServiceAccount
name: bunkerized-nginx-ingress-controller
namespace: default
apiGroup: ""
roleRef:
kind: ClusterRole
name: bunkerized-nginx-ingress-controller
apiGroup: rbac.authorization.k8s.io
```
Next, you can deploy bunkerized-nginx as a DaemonSet :
```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: bunkerized-nginx
labels:
app: bunkerized-nginx
spec:
selector:
matchLabels:
name: bunkerized-nginx
template:
metadata:
labels:
name: bunkerized-nginx
# this label is mandatory
bunkerized-nginx: "yes"
spec:
containers:
- name: bunkerized-nginx
image: bunkerity/bunkerized-nginx
ports:
- containerPort: 8080
hostPort: 80
- containerPort: 8443
hostPort: 443
env:
- name: KUBERNETES_MODE
value: "yes"
- name: DNS_RESOLVERS
value: "kube-dns.kube-system.svc.cluster.local"
- name: USE_API
value: "yes"
- name: API_URI
value: "/ChangeMeToSomethingHardToGuess"
- name: SERVER_NAME
value: ""
- name: MULTISITE
value: "yes"
volumeMounts:
- name: confs
mountPath: /etc/nginx
readOnly: true
- name: letsencrypt
mountPath: /etc/letsencrypt
readOnly: true
- name: acme-challenge
mountPath: /acme-challenge
readOnly: true
- name: www
mountPath: /www
readOnly: true
volumes:
- name: confs
hostPath:
path: /shared/confs
type: Directory
- name: letsencrypt
hostPath:
path: /shared/letsencrypt
type: Directory
- name: acme-challenge
hostPath:
path: /shared/acme-challenge
type: Directory
- name: www
hostPath:
path: /shared/www
type: Directory
---
apiVersion: v1
kind: Service
metadata:
name: bunkerized-nginx-service
# this label is mandatory
labels:
bunkerized-nginx: "yes"
# this annotation is mandatory
annotations:
bunkerized-nginx.AUTOCONF: "yes"
spec:
clusterIP: None
selector:
name: bunkerized-nginx
```
An important thing to note : the labels and annotations defined above are mandatory for autoconf to work.
You can now deploy the autoconf, which will act as the Ingress Controller :
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: bunkerized-nginx-ingress-controller
labels:
app: bunkerized-nginx-autoconf
spec:
replicas: 1
selector:
matchLabels:
app: bunkerized-nginx-autoconf
template:
metadata:
labels:
app: bunkerized-nginx-autoconf
spec:
serviceAccountName: bunkerized-nginx-ingress-controller
containers:
- name: bunkerized-nginx-autoconf
image: bunkerity/bunkerized-nginx-autoconf
env:
- name: KUBERNETES_MODE
value: "yes"
- name: API_URI
value: "/ChangeMeToSomethingHardToGuess"
volumeMounts:
- name: confs
mountPath: /etc/nginx
- name: letsencrypt
mountPath: /etc/letsencrypt
- name: acme-challenge
mountPath: /acme-challenge
volumes:
- name: confs
hostPath:
path: /shared/confs
type: Directory
- name: letsencrypt
hostPath:
path: /shared/letsencrypt
type: Directory
- name: acme-challenge
hostPath:
path: /shared/acme-challenge
type: Directory
```
Check the logs of both bunkerized-nginx and autoconf deployments to see if everything is working as expected.
You can now deploy your web service and make it accessible from within the cluster :
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: myapp
labels:
app: myapp
spec:
replicas: 1
selector:
matchLabels:
app: myapp
template:
metadata:
labels:
app: myapp
spec:
containers:
- name: myapp
image: containous/whoami
---
apiVersion: v1
kind: Service
metadata:
name: myapp
spec:
type: ClusterIP
selector:
app: myapp
ports:
- protocol: TCP
port: 80
targetPort: 80
```
Last but not least, it's time to define your Ingress resource to make your web service publicly available :
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: bunkerized-nginx-ingress
  # this label is mandatory
  labels:
    bunkerized-nginx: "yes"
  annotations:
    # add any global and default environment variables here as annotations with the "bunkerized-nginx." prefix
    # examples :
    #bunkerized-nginx.AUTO_LETS_ENCRYPT: "yes"
    #bunkerized-nginx.USE_ANTIBOT: "javascript"
    #bunkerized-nginx.REDIRECT_HTTP_TO_HTTPS: "yes"
    #bunkerized-nginx.www.example.com_REVERSE_PROXY_WS: "yes"
    #bunkerized-nginx.www.example.com_USE_MODSECURITY: "no"
spec:
  tls:
  - hosts:
    - www.example.com
  rules:
  - host: "www.example.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: myapp
            port:
              number: 80
```
Check the logs to see if the configuration has been generated and bunkerized-nginx has been reloaded. You should then be able to visit http(s)://www.example.com.
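If the DNS record doesn't point to your cluster yet, you can still test from a machine that can reach one of your nodes by forcing name resolution with curl (the 192.0.2.10 node IP below is a placeholder) :
```shell
$ kubectl logs -l name=bunkerized-nginx --tail=20
$ curl --resolve www.example.com:80:192.0.2.10 http://www.example.com
```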
Note that, as an alternative to editing the Ingress resource, you can add the annotations directly to your services (a common use case is [PHP applications](https://bunkerized-nginx.readthedocs.io/en/latest/quickstart_guide.html#php-applications), since the Ingress resource only covers reverse proxying) :
```yaml
apiVersion: v1
kind: Service
metadata:
  name: myapp
  # this label is mandatory
  labels:
    bunkerized-nginx: "yes"
  annotations:
    bunkerized-nginx.SERVER_NAME: "www.example.com"
    bunkerized-nginx.AUTO_LETS_ENCRYPT: "yes"
    bunkerized-nginx.USE_REVERSE_PROXY: "yes"
    bunkerized-nginx.REVERSE_PROXY_URL: "/"
    bunkerized-nginx.REVERSE_PROXY_HOST: "http://myapp.default.svc.cluster.local"
spec:
  type: ClusterIP
  selector:
    app: myapp
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
```
## Linux
**This integration is still in beta, please file an issue if you find a bug or have an idea on how to improve it.**
List of supported Linux distributions :
- Debian buster (10)
- Ubuntu focal (20.04)
- CentOS 7
- Fedora 34
Unlike containers, the Linux integration can be tedious because bunkerized-nginx has a bunch of dependencies that need to be installed before we can use it. Fortunately, we provide a helper script to make the process easier and automatic. Once installed, the configuration is really simple : all you have to do is edit the `/opt/bunkerized-nginx/variables.env` configuration file and run the `bunkerized-nginx` command to apply it.
First of all, you will need to install bunkerized-nginx. The recommended way is to use the official installer script :
```shell
$ curl -fsSL https://github.com/bunkerity/bunkerized-nginx/releases/download/v1.3.0/linux-install.sh -o /tmp/bunkerized-nginx.sh
```
Before executing it, you should also check the signature :
```shell
$ curl -fsSL https://github.com/bunkerity/bunkerized-nginx/releases/download/v1.3.0/linux-install.sh.asc -o /tmp/bunkerized-nginx.sh.asc
$ gpg --auto-key-locate hkps://keys.openpgp.org --locate-keys contact@bunkerity.com
$ gpg --verify /tmp/bunkerized-nginx.sh.asc /tmp/bunkerized-nginx.sh
```
You can now install bunkerized-nginx (and grab a coffee because it may take a while) :
```shell
$ chmod +x /tmp/bunkerized-nginx.sh
$ /tmp/bunkerized-nginx.sh
```
To demonstrate the configuration on Linux, we will create a simple “Hello World” static file that will be served by bunkerized-nginx.
Static files are stored inside the `/opt/bunkerized-nginx/www` folder and the unprivileged nginx user must have read access to it :
```shell
$ echo "Hello bunkerized World !" > /opt/bunkerized-nginx/www/index.html
$ chown root:nginx /opt/bunkerized-nginx/www/index.html
$ chmod 740 /opt/bunkerized-nginx/www/index.html
```
Here is the example configuration file that needs to be written at `/opt/bunkerized-nginx/variables.env` :
```conf
HTTP_PORT=80
HTTPS_PORT=443
DNS_RESOLVERS=8.8.8.8 8.8.4.4
SERVER_NAME=www.example.com
AUTO_LETS_ENCRYPT=yes
```
Important things to note :
- Replace www.example.com with your own domain (it must point to your server IP address if you want Let's Encrypt to work)
- Automatic Let's Encrypt is enabled thanks to `AUTO_LETS_ENCRYPT=yes` (since the default is `AUTO_LETS_ENCRYPT=no`, you can simply remove the variable to disable Let's Encrypt)
- The default values for `HTTP_PORT` and `HTTPS_PORT` are `8080` and `8443`, hence the explicit declaration with the standard port values
- Replace the `DNS_RESOLVERS` value with your own DNS resolver(s) if you need nginx to resolve internal DNS requests (e.g., reverse proxy to an internal service, as sketched below)
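As an illustration of that last point, here is a hypothetical variant of the file above that reverse proxies to an internal service through an internal resolver (the 10.0.0.2 resolver and app1.internal.local host are placeholders) :
```conf
HTTP_PORT=80
HTTPS_PORT=443
DNS_RESOLVERS=10.0.0.2
SERVER_NAME=www.example.com
AUTO_LETS_ENCRYPT=yes
USE_REVERSE_PROXY=yes
REVERSE_PROXY_URL=/
REVERSE_PROXY_HOST=http://app1.internal.local:8080
```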
You can now apply the configuration by running the **bunkerized-nginx** command :
```shell
$ bunkerized-nginx
```
Visit http(s)://www.example.com to confirm that everything is working as expected.
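A quick sanity check from the server itself, using a custom User-Agent header (mirroring the image's own autotest, since default command-line user-agents may be caught by the bad user-agents blacklist) :
```shell
$ curl -H "User-Agent: legit" http://www.example.com
Hello bunkerized World !
```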

View File

@@ -1,12 +1,14 @@
# Introduction
<p align="center">
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/logo.png?raw=true" width="425" />
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/logo.png?raw=true" width="425" />
</p>
nginx Docker image secure by default.
> Make security by default great again !
Avoid the hassle of following security best practices "by hand" each time you need a web server or reverse proxy. Bunkerized-nginx provides generic security configs, settings and tools so you don't need to do it yourself.
bunkerized-nginx is a web server based on the notorious nginx and focused on security. It integrates into existing environments (Linux, Docker, Swarm, Kubernetes, ...) to make your web services "secured by default" without any hassle. The security best practices are automatically applied for you while keeping control of every settings to meet your own use case.
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/overview.png?raw=true" />
Non-exhaustive list of features :
- HTTPS support with transparent Let's Encrypt automation
@@ -15,15 +17,14 @@ Non-exhaustive list of features :
- Automatic ban of strange behaviors
- Antibot challenge through cookie, javascript, captcha or recaptcha v3
- Block TOR, proxies, bad user-agents, countries, ...
- Block known bad IP with DNSBL and CrowdSec
- Block known bad IP with DNSBL
- Prevent bruteforce attacks with rate limiting
- Plugins system for external security checks (e.g. : ClamAV)
- Plugins system for external security checks (ClamAV, CrowdSec, ...)
- Easy to configure with environment variables or web UI
- Automatic configuration with container labels
- Docker Swarm support
- Seamless integration into existing environments : Linux, Docker, Swarm, Kubernetes, ...
Fooling automated tools/scanners :
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/demo.gif?raw=true" />
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/demo.gif?raw=true" />
You can find a live demo at <a href="https://demo-nginx.bunkerity.com" target="_blank">https://demo-nginx.bunkerity.com</a>, feel free to do some security tests.
You can find a live demo at [https://demo-nginx.bunkerity.com](https://demo-nginx.bunkerity.com), feel free to do some security tests.

View File

@@ -1,10 +1,11 @@
# Plugins
Bunkerized-nginx comes with a plugin system that lets you extend the core with extra security features. To add a plugin you will need to download it, edit its settings and mount it to the `/plugins` volume.
Bunkerized-nginx comes with a plugin system that lets you extend the core with extra security features.
## Official plugins
- [ClamAV](https://github.com/bunkerity/bunkerized-nginx-clamav) : automatically scan uploaded files and deny access if a virus is detected
- [CrowdSec](https://github.com/bunkerity/bunkerized-nginx-crowdsec) : CrowdSec bouncer integration within bunkerized-nginx
## Community plugins
@@ -13,9 +14,10 @@ If you have made a plugin and want it to be listed here, feel free to [create a
## Use a plugin
The generic way of using a plugin consists of :
- Download it to a folder (e.g. : myplugin/)
- Edit the settings inside the plugin.json files (e.g. : myplugin/plugin.json)
- Mount the plugin folder to the /plugins/plugin-id inside the container (e.g. : /where/is/myplugin:/plugins/myplugin)
- Download the plugin into your local drive (e.g., git clone)
- Edit the settings inside the plugin.json files (e.g., myplugin/plugin.json)
- If you are using a container based integration, you need to mount it to the [plugins special folder](https://bunkerized-nginx.readthedocs.io/en/latest/special_folders.html#plugins) inside the container (e.g., /where/is/myplugin:/plugins/myplugin, as sketched below)
- If you are using Linux integration, copy the downloaded plugin folder to the [plugins special folder](https://bunkerized-nginx.readthedocs.io/en/latest/special_folders.html#plugins) (e.g., cp -r myplugin /plugins)
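As a concrete sketch of those steps with the official ClamAV plugin (paths and the myplugin folder name are placeholders) :
```shell
$ git clone https://github.com/bunkerity/bunkerized-nginx-clamav.git myplugin
$ vi myplugin/plugin.json
# container based integration
$ docker run ... -v "${PWD}/myplugin:/plugins/myplugin" ... bunkerity/bunkerized-nginx
# Linux integration equivalent
$ cp -r myplugin /opt/bunkerized-nginx/plugins
```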
To check if the plugin is loaded you should see log entries like that :
@@ -52,6 +54,20 @@ Settings names and default values can be choosen freely. There will be no confli
local M = {}
local logger = require "logger"
-- this function will be called at startup
-- the name MUST be init without any argument
function M.init ()
	-- the logger.log function lets you write into the logs
	-- only ERROR level is available in init()
	logger.log(ngx.ERR, "MyPlugin", "*NOT AN ERROR* init called")
	-- here is how to retrieve a setting
	local my_setting = ngx.shared.plugins_data:get("pluginid_MY_SETTING")
	logger.log(ngx.ERR, "MyPlugin", "*NOT AN ERROR* my_setting = " .. my_setting)
	return true
end
-- this function will be called for each request
-- the name MUST be check without any argument
function M.check ()
@@ -66,7 +82,6 @@ function M.check ()
	if my_setting == "block" then
		ngx.exit(ngx.HTTP_FORBIDDEN)
	end
end
return M

File diff suppressed because it is too large.

View File

@@ -6,7 +6,7 @@ bunkerized-nginx comes with a set of predefined security settings that you can (
Here is a list of miscellaneous environment variables related more or less to security :
- `MAX_CLIENT_SIZE=10m` : maximum size of client body
- `ALLOWED_METHODS=GET|POST|HEAD` : list of HTTP methos that clients are allowed to use
- `ALLOWED_METHODS=GET|POST|HEAD` : list of HTTP methods that clients are allowed to use
- `DISABLE_DEFAULT_SERVER=no` : enable/disable the default server (i.e. : should your server respond to unknown Host header ?)
- `SERVER_TOKENS=off` : enable/disable sending the version number of nginx
@@ -26,11 +26,11 @@ Here is a list of environment variables and the corresponding default value rela
Using Let's Encrypt with the `AUTO_LETS_ENCRYPT=yes` environment variable is the easiest way to add HTTPS supports to your web services if they are connected to internet and you have public DNS A record(s).
You can also set the `EMAIL_LETS_ENCRYPT` environment variable if you want to receive notifications from Let's Encrypt (e.g. : expiration).
You can also set the `EMAIL_LETS_ENCRYPT` environment variable if you want to receive notifications from Let's Encrypt like expiration alerts.
### Custom certificate(s)
If you have security constraints (e.g : local network, custom PKI, ...) you can use custom certificates of your choice and tell bunkerized-nginx to use them with the following environment variables :
If you have security constraints (e.g., local network, custom PKI, ...) you can use custom certificates of your choice and tell bunkerized-nginx to use them with the following environment variables :
- `USE_CUSTOM_HTTPS=yes`
- `CUSTOM_HTTPS_CERT=/path/inside/container/to/cert.pem`
- `CUSTOM_HTTPS_KEY=/path/inside/container/to/key.pem`
@@ -53,12 +53,23 @@ $ docker run -p 80:8080 \
Please note that if you have one or more intermediate certificate(s) in your chain of trust, you will need to provide the bundle to `CUSTOM_HTTPS_CERT` (more info [here](https://nginx.org/en/docs/http/configuring_https_servers.html#chains)).
You can reload the certificate(s) (e.g. : in case of a renewal) by sending the SIGHUP/HUP signal to the container bunkerized-nginx will catch the signal and send a reload order to nginx :
You can reload the certificate(s) (i.e., in case of a renewal) by sending a reload order to bunkerized-nginx.
Docker reload :
```shell
docker kill --signal=SIGHUP my-container
```
Swarm and Kubernetes reload (repeat for each node) :
```shell
$ curl http://node-local-ip:80/reload
```
Linux reload :
```shell
$ /usr/sbin/nginx -s reload
```
### Self-signed certificate
This method is not recommended in production but can be used to quickly deploy HTTPS for testing purposes. Just use the `GENERATE_SELF_SIGNED_SSL=yes` environment variable and bunkerized-nginx will generate a self-signed certificate for you :
@@ -74,17 +85,19 @@ $ docker run -p 80:8080 \
Some important HTTP headers related to client security are sent with a default value. Sometimes it can break a web application or can be tuned to provide even more security. The complete list is available [here](https://bunkerized-nginx.readthedocs.io/en/latest/environment_variables.html#security-headers).
You can also remove headers (e.g. : too verbose ones) by using the `REMOVE_HEADERS` environment variable which takes a list of header name separated with space (default value = `Server X-Powered-By X-AspNet-Version X-AspNetMvc-Version`).
You can also remove headers (e.g., too verbose ones) by using the `REMOVE_HEADERS` environment variable which takes a list of header name separated with space (default value = `Server X-Powered-By X-AspNet-Version X-AspNetMvc-Version`).
If you want to keep your application headers and tell bunkerized-nginx to not override it, just set the corresponding environment variable to an empty value (e.g., `CONTENT_SECURITY_POLICY=`, `PERMISSIONS_POLICY=`, ...).
## ModSecurity
ModSecurity is integrated and enabled by default alongside the OWASP Core Rule Set within bunkerized-nginx. To change this behaviour you can use the `USE_MODSECURITY=no` or `USE_MODSECURITY_CRS=no` environment variables.
We strongly recommend to keep both ModSecurity and the OWASP Core Rule Set enabled. The only downsides are the false positives that may occur. But they can be fixed easily and the CRS team maintains a list of exclusions for common application (e.g : wordpress, nextcloud, drupal, cpanel, ...).
We strongly recommend to keep both ModSecurity and the OWASP Core Rule Set enabled. The only downsides are the false positives that may occur. But they can be fixed easily and the CRS team maintains a list of exclusions for common application (e.g., wordpress, nextcloud, drupal, cpanel, ...).
Tuning the CRS with bunkerized-nginx is pretty simple : you can add configuration before (i.e. : exclusions) and after (i.e. : exceptions/tuning) the rules are loaded. You just need to mount your .conf files into the /modsec-crs-confs (before CRS is loaded) and /modsec-confs (after CRS is loaded).
Tuning the CRS with bunkerized-nginx is pretty simple : you can add configuration before and after the rules are loaded. You just need to mount your .conf files into the `/modsec-crs-confs` (before CRS is loaded) and `/modsec-confs` (after CRS is loaded) volumes. If you are using Linux integration the [special folders](https://bunkerized-nginx.readthedocs.io/en/latest/special_folders.html) are `/opt/bunkerized-nginx/modsec-confs` and `/opt/bunkerized-nginx/modsec-crs-confs`.
Here is an example to illustrate it :
Here is a Docker example to illustrate it :
```shell
$ cat /data/exclusions-crs/wordpress.conf
@@ -122,7 +135,7 @@ That kind of security measure is implemented and enabled by default in bunkerize
## Antibot challenge
Attackers will certainly use automated tools to exploit/find some vulnerabilities on your web service. One countermeasure is to challenge the users to detect if it looks like a bot. It might be effective against script kiddies or "lazy" attackers.
Attackers will certainly use automated tools to exploit/find some vulnerabilities on your web services. One countermeasure is to challenge the users to detect if they look like a bot. It might be effective against script kiddies or "lazy" attackers.
You can use the `USE_ANTIBOT` environment variable to add that kind of checks whenever a new client is connecting. The available challenges are : `cookie`, `javascript`, `captcha` and `recaptcha`. More info [here](https://bunkerized-nginx.readthedocs.io/en/latest/environment_variables.html#antibot).
@@ -132,15 +145,6 @@ You can use the `USE_ANTIBOT` environment variable to add that kind of checks wh
Automatic checks on external DNS BlackLists are enabled by default with the `USE_DNSBL=yes` environment variable. The list of DNSBL zones is also configurable, you just need to edit the `DNSBL_LIST` environment variable which contains the following value by default `bl.blocklist.de problems.dnsbl.sorbs.net sbl.spamhaus.org xbl.spamhaus.org`.
### CrowdSec
CrowdSec is not enabled by default because it's more than an external blacklists and needs some extra work to get it working. But bunkerized-nginx is fully working with CrowdSec, here are the related environment variables :
- `USE_CROWDSEC=no` : enable/disable CrowdSec checks before we authorize a client
- `CROWDSEC_HOST=` : full URL to your CrowdSec instance API
- `CROWDSEC_KEY=` : bouncer key given from **cscli bouncer add MyBouncer**
You will also need to share the logs generated by bunkerized-nginx with your CrowdSec instance. One approach is to send the logs to a syslog server which is writing the logs to the file system and then CrowdSec can easily read the logs. If you want to give it a try, you have a concrete example on how to use CrowdSec with bunkerized-nginx [here](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/crowdsec).
### User-Agents
Sometimes script kiddies or lazy attackers don't put a "legitimate" value inside the **User-Agent** HTTP header so we can block them. This is controlled with the `BLOCK_USER_AGENT=yes` environment variable. The blacklist is composed of two files from [here](https://raw.githubusercontent.com/mitchellkrogza/nginx-ultimate-bad-bot-blocker/master/_generator_lists/bad-user-agents.list) and [here](https://raw.githubusercontent.com/JayBizzle/Crawler-Detect/master/raw/Crawlers.txt).
@@ -169,7 +173,7 @@ This list contains bad referrers domains known for spamming (downloaded from [he
### Requests
To limit bruteforce attacks we decided to use the [rate limiting feature in nginx](https://www.nginx.com/blog/rate-limiting-nginx/) so attackers will be limited to X request(s)/s for the same resource. That kind of protection might be useful against other attacks too (e.g. : blind SQL injection).
To limit bruteforce attacks we decided to use the [rate limiting feature in nginx](https://www.nginx.com/blog/rate-limiting-nginx/) so attackers will be limited to X request(s)/s for the same resource. That kind of protection might be useful against other attacks too (e.g., blind SQL injection).
Here is the list of related environment variables and their default value :
- `USE_LIMIT_REQ=yes` : enable/disable request limiting
@@ -197,6 +201,8 @@ You can quickly protect sensitive resources (e.g. : admin panels) by requiring H
- `AUTH_BASIC_PASSWORD=changeme` : the password required
- `AUTH_BASIC_TEXT=Restricted area` : the text that will be displayed to the user
Please note that bunkerized-nginx also supports [Authelia](https://github.com/authelia/authelia) for authentication (see the corresponding [environment variables](https://bunkerized-nginx.readthedocs.io/en/latest/environment_variables.html#authelia) and a [full example](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/authelia)).
## Whitelisting
Adding extra security can sometimes trigger false positives. Also, it might be not useful to do the security checks for specific clients because we decided to trust them. Bunkerized-nginx supports two types of whitelist : by IP address and by reverse DNS.
@@ -217,47 +223,16 @@ Here is the list of related environment variables and their default value :
- `USE_BLACKLIST_REVERSE=yes` : enable/disable blacklisting by reverse DNS
- `BLACKLIST_REVERSE_LIST=.shodan.io` : the list of reverse DNS suffixes to never trust
## Web UI
Mounting the docker socket in a container which is facing the network, like we do with the [web UI](https://bunkerized-nginx.readthedocs.io/en/latest/quickstart_guide.html#web-ui), is not a good security practice. In case of a vulnerability inside the application, attackers can freely use the Docker socket and the whole host can be compromised.
A possible workaround is to use the [tecnativa/docker-socket-proxy](https://github.com/Tecnativa/docker-socket-proxy) image which acts as a reverse proxy between the application and the Docker socket. It can allow/deny the requests made to the Docker API.
Before starting the web UI, you need to fire up the docker-socket-proxy (we also need a network because of inter-container communication) :
```shell
docker network create mynet
```
```shell
docker run --name mysocketproxy \
--network mynet \
-v /var/run/docker.sock:/var/run/docker.sock:ro \
-e POST=1 \
-e CONTAINERS=1 \
tecnativa/docker-socket-proxy
```
You can now start the web UI container and use the `DOCKER_HOST` environment variable to define the Docker API endpoint :
```shell
docker run --network mynet \
-v autoconf:/etc/nginx \
-e ABSOLUTE_URI=https://my.webapp.com/admin/ \
-e DOCKER_HOST=tcp://mysocketproxy:2375 \
bunkerity/bunkerized-nginx-ui
```
## Plugins
Some security features can be added through the plugins system (e.g. : ClamAV). You will find more info in the [plugins section](https://bunkerized-nginx.readthedocs.io/en/latest/plugins.html).
Some security features can be added through the plugins system (e.g., ClamAV, CrowdSec, ...). You will find more info in the [plugins section](https://bunkerized-nginx.readthedocs.io/en/latest/plugins.html).
## Container hardening
You will find a ready to use docker-compose.yml file focused on container hardening [here](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/hardened).
### Drop capabilities
By default, *bunkerized-nginx* runs as non-root user inside the container and should not use any of the default [capabilities](https://docs.docker.com/engine/security/#linux-kernel-capabilities) allowed by Docker. You can safely remove all capabilities to harden the container :
By default, bunkerized-nginx runs as non-root user inside the container and should not use any of the default [capabilities](https://docs.docker.com/engine/security/#linux-kernel-capabilities) allowed by Docker. You can safely remove all capabilities to harden the container :
```shell
docker run ... --drop-cap=all ... bunkerity/bunkerized-nginx

docs/special_folders.md Normal file
View File

@@ -0,0 +1,98 @@
# Special folders
Please note that bunkerized-nginx runs as an unprivileged user (UID/GID 101 when using the Docker image) and you should set the rights on the files and folders on your host accordingly.
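As a sketch for a container based integration, assuming a read-only special folder mounted from ./server-confs on the host :
```shell
$ chown -R 101:101 ./server-confs
$ find ./server-confs -type d -exec chmod 750 {} \;
$ find ./server-confs -type f -exec chmod 640 {} \;
```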
## Multisite
When a special folder "supports" the multisite mode, you can create subfolders named after the server names used in the configuration. When you do, only the files inside the matching subfolder will be used by the corresponding web service.
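For example, with `MULTISITE=yes` and `SERVER_NAME=app1.example.com app2.example.com` (hypothetical domains), a multisite-aware folder like the web files one would be laid out as :
```shell
$ ls /opt/bunkerized-nginx/www
app1.example.com  app2.example.com
$ ls /opt/bunkerized-nginx/www/app1.example.com
index.html
```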
## Web files
This special folder is used by bunkerized-nginx to deliver static files. The typical use case is when you have a PHP application that also contains static assets like CSS, JS and images.
Location (container) : `/www`
Location (Linux) : `/opt/bunkerized-nginx/www`
Multisite : `yes`
Read-only : `yes`
Examples :
- [Basic website with PHP](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/basic-website-with-php)
- [Multisite basic](https://github.com/bunkerity/bunkerized-nginx/blob/master/examples/multisite-basic)
## http configurations
This special folder contains .conf files that will be loaded by nginx at http context. The typical use case is when you need to add custom directives into the `http { }` block of nginx.
Location (container) : `/http-confs`
Location (Linux) : `/opt/bunkerized-nginx/http-confs`
Multisite : `no`
Read-only : `yes`
Examples :
- [Load balancer](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/load-balancer)
## server configurations
This special folder contains .conf files that will be loaded by nginx at server context. The typical use case is when you need to add custom directives into the `server { }` block of nginx.
Location (container) : `/server-confs`
Location (Linux) : `/opt/bunkerized-nginx/server-confs`
Multisite : `yes`
Read-only : `yes`
Examples :
- [Wordpress](https://github.com/bunkerity/bunkerized-nginx/blob/master/examples/wordpress)
- [Multisite custom confs](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/multisite-custom-confs)
## ModSecurity configurations
This special folder contains .conf files that will be loaded by ModSecurity before the OWASP Core Rule Set is loaded. The typical use case is when you want to specify exclusions for the CRS.
Location (container) : `/modsec-confs`
Location (Linux) : `/opt/bunkerized-nginx/modsec-confs`
Multisite : `yes`
Read-only : `yes`
Examples :
- [Wordpress](https://github.com/bunkerity/bunkerized-nginx/blob/master/examples/wordpress)
- [Multisite custom confs](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/multisite-custom-confs)
## CRS configurations
This special folder contains .conf files that will be loaded by ModSecurity after the OWASP Core Rule Set is loaded. The typical use case is to edit loaded CRS rules to avoid false positives.
Location (container) : `/modsec-crs-confs`
Location (Linux) : `/opt/bunkerized-nginx/modsec-crs-confs`
Multisite : `yes`
Read-only : `yes`
Examples :
- [Wordpress](https://github.com/bunkerity/bunkerized-nginx/blob/master/examples/wordpress)
- [Multisite custom confs](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/multisite-custom-confs)
## Cache
This special folder is used to cache some data like blacklists and avoid downloading them again if it is not necessary. The typical use case is to avoid the overhead when you are testing bunkerized-nginx in a container and you have to recreate it multiple times.
Location (container) : `/cache`
Location (Linux) : `/opt/bunkerized-nginx/cache`
Multisite : `no`
Read-only : `no`
## Plugins
This special folder is the placeholder for the plugins loaded by bunkerized-nginx. See the [plugins section](https://bunkerized-nginx.readthedocs.io/en/latest/plugins.html) for more information.
Location (container) : `/plugins`
Location (Linux) : `/opt/bunkerized-nginx/plugins`
Multisite : `no`
Read-only : `no`
## ACME challenge
This special folder is used as the web root for Let's Encrypt challenges. The typical use case is to share the same folder when you are using bunkerized-nginx in a clustered environment like Docker Swarm or Kubernetes.
Location (container) : `/acme-challenge`
Location (Linux) : `/opt/bunkerized-nginx/acme-challenge`
Multisite : `no`
Read-only : `no`

View File

@@ -2,13 +2,15 @@
## Logs
When troubleshooting, the logs are your best friends. We try our best to provide user-friendly logs to help you understand what happened. Please note that we don't store the logs inside the container, they are all displayed on stdout/stderr so Docker can capture them. They can be displayed using the [docker logs](https://docs.docker.com/engine/reference/commandline/logs/) command.
When troubleshooting, the logs are your best friends. We try our best to provide user-friendly logs to help you understand what happened.
If you are using container based integrations, you can get the logs using your manager/orchestrator (e.g., docker logs, docker service logs, kubectl logs, ...). For Linux integration, everything is stored inside the `/var/log` folder.
You can edit the `LOG_LEVEL` environment variable to increase or decrease the verbosity of logs with the following values : `debug`, `info`, `notice`, `warn`, `error`, `crit`, `alert` or `emerg` (with `debug` being the most verbose level).
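For instance, a minimal sketch with the Docker integration (everything except LOG_LEVEL left at its default) :
```shell
$ docker run -p 80:8080 -e LOG_LEVEL=debug bunkerity/bunkerized-nginx
```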
## Permissions
Don't forget that bunkerized-nginx runs as an unprivileged user with UID/GID 101. Double check the permissions of files and folders for each volumes (see the [volumes list](https://bunkerized-nginx.readthedocs.io/en/latest/volumes.html)).
Don't forget that bunkerized-nginx runs as an unprivileged user with UID/GID 101 when using container based integrations or simply the `nginx` user on Linux. Double check the permissions of files and folders for each special folders (see the [volumes list](https://bunkerized-nginx.readthedocs.io/en/latest/special_folders.html)).
## ModSecurity

View File

@@ -1,93 +0,0 @@
# Volumes list
Please note that bunkerized-nginx run as an unprivileged user inside the container (UID/GID = 101) and you should set the rights on the host accordingly (e.g. : chmod 101:101 ...) to the files and folders on your host.
## Web files
Mountpoint : `/www`
Description :
If `MULTISITE=no`, the web files are directly stored inside the `/www` folder. When `MULTISITE=yes`, you need to create subdirectories named as the servers defined in the `SERVER_NAME` environment variable.
Examples : [basic](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/basic-website-with-php) and [multisite](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/multisite-basic)
Read-only : yes
## Let's Encrypt
Mountpoint : `/etc/letsencrypt`
Description :
When `AUTO_LETS_ENCRYPT=yes`, certbot will save configurations, certificates and keys inside the `/etc/letsencrypt` folder. It's a common practise to save it so you can remount it in case of a container restart and certbot won't generate new certificate(s).
Examples : [here](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/basic-website-with-php)
Read-only : no
## Custom nginx configurations
### http context
Mountpoint : `/http-confs`
Description :
If you need to add custom configurations at http context, you can create **.conf** files and mount them to the `/http-confs` folder.
Examples : [load balancer](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/load-balancer)
Read-only : yes
### server context
Mountpoint : `/server-confs`
Description :
If `MULTISITE=no`, you can create **.conf** files and mount them to the `/server-confs` folder. When `MULTISITE=yes`, you need to create subdirectories named as the servers defined in the `SERVER_NAME` environment variable.
Examples : [nextcloud](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/nextcloud) and [multisite](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/multisite-custom-server-confs)
Read-only : yes
## ModSecurity
### Rules and before CRS
Mountpoint : `/modsec-confs`
Description :
Use this volume if you need to add custom ModSecurity rules and/or OWASP Core Rule Set configurations before the rules are loaded (e.g. : exclusions).
If `MULTISITE=no` you can create **.conf** files and mount them to the `/modsec-confs` folder. When `MULTISITE=yes`, you need to create subdirectories named as the servers defined in the `SERVER_NAME` environment variable. You can also apply global configuration to all servers by putting **.conf** files directly on the root folder.
Examples : [wordpress](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/wordpress) and [multisite](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/multisite-custom-server-confs)
Read-only : yes
### After CRS
Mountpoint : `/modsec-crs-confs`
Description :
Use this volume to tweak OWASP Core Rule Set (e.g. : tweak rules to avoid false positives). Your files are loaded after the rules.
If `MULTISITE=no` you can create **.conf** files and mount them to the `/modsec-crs-confs` folder. When `MULTISITE=yes`, you need to create subdirectories named as the servers defined in the `SERVER_NAME` environment variable. You can also apply global configuration to all servers by putting **.conf** files directly on the root folder.
Examples : [wordpress](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/wordpress) and [multisite](https://github.com/bunkerity/bunkerized-nginx/tree/master/examples/multisite-custom-server-confs)
Read-only : yes
## Cache
Mountpoint : `/cache`
Description :
Depending of the settings you use, bunkerized-nginx may download external content (e.g. : blacklists, GeoIP DB, ...). To avoid downloading it again in case of a container restart, you can save the data on the host.
Read-only : no
## Plugins
Mountpoint : `/plugins`
Description :
This volume is used to extend bunkerized-nginx with [additional plugins](https://bunkerized-nginx.readthedocs.io/en/latest/plugins.html). Please note that you will need to have a subdirectory for each plugin you want to enable.
Read-only : yes

docs/web_ui.md Normal file
View File

@@ -0,0 +1,204 @@
# Web UI
## Overview
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/docs/img/web-ui.gif?raw=true" />
## Usage
The web UI has its own set of environment variables to configure it :
- `ADMIN_USERNAME` and `ADMIN_PASSWORD` : credentials for accessing the web UI
- `ABSOLUTE_URI` : the full public URI that points to the web UI
- `API_URI` : path of the bunkerized-nginx API (must match the corresponding `API_URI` of the bunkerized-nginx instance)
- `DOCKER_HOST` : Docker API endpoint address (default = `unix:///var/run/docker.sock`)
Since the web UI is a web service itself, we can use bunkerized-nginx as a reverse proxy in front of it.
**Using the web UI in a Docker environment exposes a security risk because you need to mount the Docker API socket into the web UI container. It's highly recommended to use a middleware like [tecnativa/docker-socket-proxy](https://github.com/Tecnativa/docker-socket-proxy) to reduce the risk as much as possible.**
**You need to apply the security best practices because the web UI contains code and that code might be vulnerable : complex admin password, hard to guess public URI, network isolation from other services, HTTPS only, ...**
### Docker
First of all, we will need to set up two networks, one for UI communication and the other one for the services :
```shell
$ docker network create ui-net
$ docker network create services-net
```
We also need a volume to share the generated configuration from the web UI to the bunkerized-nginx instances :
```shell
$ docker volume create bunkerized-vol
```
Next we will create the "Docker API proxy" container that will sit in front of the Docker socket and deny access to sensitive endpoints :
```shell
$ docker run -d \
--name my-docker-proxy \
--network ui-net \
-v /var/run/docker.sock:/var/run/docker.sock:ro \
-e CONTAINERS=1 \
-e SWARM=1 \
-e SERVICES=1 \
tecnativa/docker-socket-proxy
```
We can now create the web UI container based on the bunkerized-nginx-ui image :
```shell
$ docker run -d \
--name my-bunkerized-ui \
--network ui-net \
-v bunkerized-vol:/etc/nginx \
-e ABSOLUTE_URI=https://admin.example.com/admin-changeme/ \
-e DOCKER_HOST=tcp://my-docker-proxy:2375 \
-e API_URI=/ChangeMeToSomethingHardToGuess \
-e ADMIN_USERNAME=admin \
-e ADMIN_PASSWORD=changeme \
bunkerity/bunkerized-nginx-ui
```
Last but not least, you need to start bunkerized-nginx and configure it as a reverse proxy for the web UI service :
```shell
$ docker create \
--name my-bunkerized \
--network ui-net \
-p 80:8080 \
-p 443:8443 \
-v bunkerized-vol:/etc/nginx \
-v "${PWD}/certs:/etc/letsencrypt" \
-e SERVER_NAME=admin.example.com \
-e MULTISITE=yes \
-e USE_API=yes \
-e API_URI=/ChangeMeToSomethingHardToGuess \
-e AUTO_LETS_ENCRYPT=yes \
-e REDIRECT_HTTP_TO_HTTPS=yes \
-e admin.example.com_USE_REVERSE_PROXY=yes \
-e admin.example.com_REVERSE_PROXY_URL=/admin-changeme/ \
-e admin.example.com_REVERSE_PROXY_HOST=http://my-bunkerized-ui:5000 \
-e "admin.example.com_REVERSE_PROXY_HEADERS=X-Script-Name /admin-changeme" \
-e admin.example.com_USE_MODSECURITY=no \
-l bunkerized-nginx.UI \
bunkerity/bunkerized-nginx
$ docker network connect services-net my-bunkerized
$ docker start my-bunkerized
```
The web UI should now be accessible at https://admin.example.com/admin-changeme/.
docker-compose equivalent :
```yaml
version: '3'
services:
  my-bunkerized:
    image: bunkerity/bunkerized-nginx
    restart: always
    depends_on:
      - my-bunkerized-ui
    networks:
      - services-net
      - ui-net
    ports:
      - 80:8080
      - 443:8443
    volumes:
      - ./letsencrypt:/etc/letsencrypt
      - bunkerized-vol:/etc/nginx
    environment:
      - SERVER_NAME=admin.example.com # replace with your domain
      - MULTISITE=yes
      - USE_API=yes
      - API_URI=/ChangeMeToSomethingHardToGuess # change it to something hard to guess + must match API_URI from myui service
      - AUTO_LETS_ENCRYPT=yes
      - REDIRECT_HTTP_TO_HTTPS=yes
      - admin.example.com_USE_REVERSE_PROXY=yes
      - admin.example.com_REVERSE_PROXY_URL=/admin-changeme/ # change it to something hard to guess
      - admin.example.com_REVERSE_PROXY_HOST=http://my-bunkerized-ui:5000
      - admin.example.com_REVERSE_PROXY_HEADERS=X-Script-Name /admin-changeme # must match REVERSE_PROXY_URL
      - admin.example.com_USE_MODSECURITY=no
    labels:
      - "bunkerized-nginx.UI"
  my-bunkerized-ui:
    image: bunkerity/bunkerized-nginx-ui
    restart: always
    depends_on:
      - my-docker-proxy
    networks:
      - ui-net
    volumes:
      - bunkerized-vol:/etc/nginx
    environment:
      - ABSOLUTE_URI=https://admin.example.com/admin-changeme/ # change it to your full URI
      - DOCKER_HOST=tcp://my-docker-proxy:2375
      - API_URI=/ChangeMeToSomethingHardToGuess # must match API_URI from bunkerized-nginx
      - ADMIN_USERNAME=admin # change it to something hard to guess
      - ADMIN_PASSWORD=changeme # change it to a good password
  my-docker-proxy:
    image: tecnativa/docker-socket-proxy
    restart: always
    networks:
      - ui-net
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - CONTAINERS=1
      - SWARM=1
      - SERVICES=1
networks:
  ui-net:
  services-net:
    name: services-net
volumes:
  bunkerized-vol:
```
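Assuming the file above is saved as docker-compose.yml, you can then bring the whole stack up with :
```shell
$ docker-compose up -d
```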
### Linux
First of all, you need to edit the web UI configuration file located at `/opt/bunkerized-nginx/ui/variables.env` :
```conf
ABSOLUTE_URI=https://admin.example.com/admin-changeme/
DOCKER_HOST=
ADMIN_USERNAME=admin
ADMIN_PASSWORD=changeme
```
Make sure that the web UI service is automatically started on boot :
```shell
$ systemctl enable bunkerized-nginx-ui
```
Now you can start the web UI service :
```shell
$ systemctl start bunkerized-nginx-ui
```
Edit the bunkerized-nginx configuration file located at `/opt/bunkerized-nginx/variables.env` :
```conf
HTTP_PORT=80
HTTPS_PORT=443
DNS_RESOLVERS=8.8.8.8 8.8.4.4
SERVER_NAME=admin.example.com
MULTISITE=yes
AUTO_LETS_ENCRYPT=yes
REDIRECT_HTTP_TO_HTTPS=yes
admin.example.com_USE_REVERSE_PROXY=yes
admin.example.com_REVERSE_PROXY_URL=/admin-changeme/
# Local bunkerized-nginx-ui
admin.example.com_REVERSE_PROXY_HOST=http://127.0.0.1:5000
# Remote bunkerized-nginx-ui
#REVERSE_PROXY_HOST=http://service.example.local:5000
admin.example.com_REVERSE_PROXY_HEADERS=X-Script-Name /admin-changeme
admin.example.com_USE_MODSECURITY=no
```
And run the `bunkerized-nginx` command to apply changes :
```shell
$ bunkerized-nginx
```
The web UI should now be accessible at https://admin.example.com/admin-changeme/.
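If it isn't, a first reflex is to check the status and logs of the web UI service (assuming systemd, as set up above) :
```shell
$ systemctl status bunkerized-nginx-ui
$ journalctl -u bunkerized-nginx-ui
```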

View File

@@ -1,28 +0,0 @@
#!/bin/sh
# load some functions
. /opt/entrypoint/utils.sh
if [ "$MULTISITE" != "yes" ] && [ "$AUTO_LETS_ENCRYPT" = "yes" ] ; then
first_server_name=$(echo "$SERVER_NAME" | cut -d " " -f 1)
domains_lets_encrypt=$(echo "$SERVER_NAME" | sed "s/ /,/g")
EMAIL_LETS_ENCRYPT="${EMAIL_LETS_ENCRYPT-contact@$first_server_name}"
if [ ! -f /etc/letsencrypt/live/${first_server_name}/fullchain.pem ] ; then
echo "[*] Performing Let's Encrypt challenge for $domains_lets_encrypt ..."
/opt/scripts/certbot-new.sh "$domains_lets_encrypt" "$EMAIL_LETS_ENCRYPT"
fi
elif [ "$MULTISITE" = "yes" ] ; then
servers=$(find /etc/nginx -name "site.env" | cut -d '/' -f 4)
for server in $servers ; do
lets_encrypt=$(grep "^AUTO_LETS_ENCRYPT=yes$" /etc/nginx/${server}/site.env)
if [ "$lets_encrypt" != "" ] && [ ! -f /etc/letsencrypt/live/${server}/fullchain.pem ] ; then
server_name=$(grep "^SERVER_NAME=.*$" /etc/nginx/${server}/site.env | sed "s/SERVER_NAME=//" | sed "s/ /,/g")
echo "[*] Performing Let's Encrypt challenge for $server_name ..."
EMAIL_LETS_ENCRYPT=$(grep "^EMAIL_LETS_ENCRYPT=.*$" /etc/nginx/${server}/site.env | sed "s/EMAIL_LETS_ENCRYPT=//")
if [ "$EMAIL_LETS_ENCRYPT" = "" ] ; then
EMAIL_LETS_ENCRYPT="contact@${server}"
fi
/opt/scripts/certbot-new.sh "$domains" "EMAIL_LETS_ENCRYPT"
fi
done
fi

View File

@@ -1,34 +1,35 @@
#!/bin/bash
echo "[*] Starting bunkerized-nginx ..."
. /opt/bunkerized-nginx/entrypoint/utils.sh
log "entrypoint" "INFO" "starting bunkerized-nginx ..."
# trap SIGTERM and SIGINT
function trap_exit() {
echo "[*] Catched stop operation"
echo "[*] Stopping crond ..."
log "stop" "INFO" "catched stop operation"
log "stop" "INFO" "stopping crond ..."
pkill -TERM crond
echo "[*] Stopping nginx ..."
log "stop" "INFO" "stopping nginx ..."
/usr/sbin/nginx -s stop
}
trap "trap_exit" TERM INT QUIT
# trap SIGHUP
function trap_reload() {
echo "[*] Catched reload operation"
if [ "$SWARM_MODE" != "yes" ] ; then
/opt/entrypoint/pre-jobs.sh
log "reload" "INFO" "catched reload operation"
if [ "$SWARM_MODE" != "yes" ] && [ "$KUBERNETES_MODE" != "yes" ] ; then
/opt/bunkerized-nginx/entrypoint/jobs.sh
fi
if [ -f /tmp/nginx.pid ] ; then
echo "[*] Reloading nginx ..."
log "reload" "INFO" "reloading nginx ..."
nginx -s reload
if [ $? -eq 0 ] ; then
echo "[*] Reload successfull"
/opt/entrypoint/post-jobs.sh
log "reload" "INFO" "reloading successful"
else
echo "[!] Reload failed"
log "reload" "ERROR" "reloading failed"
fi
else
echo "[!] Ignored reload operation because nginx is not running"
log "reload" "INFO" "ignored reload operation because nginx is not running"
fi
}
trap "trap_reload" HUP
@@ -36,42 +37,49 @@ trap "trap_reload" HUP
# do the configuration magic if needed
if [ ! -f "/etc/nginx/global.env" ] ; then
echo "[*] Configuring bunkerized-nginx ..."
log "entrypoint" "INFO" "configuring bunkerized-nginx ..."
# check permissions
if [ "$SWARM_MODE" != "yes" ] ; then
/opt/entrypoint/permissions.sh
if [ "$SWARM_MODE" != "yes" ] && [ "$KUBERNETES_MODE" != "yes" ] ; then
/opt/bunkerized-nginx/entrypoint/permissions.sh
else
/opt/entrypoint/permissions-swarm.sh
/opt/bunkerized-nginx/entrypoint/permissions-cluster.sh
fi
if [ "$?" -ne 0 ] ; then
exit 1
fi
# start temp nginx to solve Let's Encrypt challenges if needed
/opt/entrypoint/nginx-temp.sh
/opt/bunkerized-nginx/entrypoint/nginx-temp.sh
# only do config if we are not in swarm mode
if [ "$SWARM_MODE" != "yes" ] ; then
# only do config if we are not in swarm/kubernetes mode
if [ "$SWARM_MODE" != "yes" ] && [ "$KUBERNETES_MODE" != "yes" ] ; then
# export the variables
env | grep -E -v "^(HOSTNAME|PWD|PKG_RELEASE|NJS_VERSION|SHLVL|PATH|_|NGINX_VERSION|HOME)=" > "/tmp/variables.env"
# call the generator
/opt/gen/main.py --settings /opt/settings.json --templates /opt/confs --output /etc/nginx --variables /tmp/variables.env
gen_ret="$(/opt/bunkerized-nginx/gen/main.py --settings /opt/bunkerized-nginx/settings.json --templates /opt/bunkerized-nginx/confs --output /etc/nginx --variables /tmp/variables.env 2>&1)"
if [ "$?" -ne 0 ] ; then
log "entrypoint" "ERROR" "generator failed : $gen_ret"
exit 1
fi
if [ "$gen_ret" != "" ] ; then
log "entrypoint" "INFO" "generator output : $gen_ret"
fi
# pre-jobs
/opt/entrypoint/pre-jobs.sh
# call jobs
/opt/bunkerized-nginx/entrypoint/jobs.sh
fi
else
echo "[*] Skipping configuration process"
log "entrypoint" "INFO" "skipping configuration process"
fi
# start crond
crond
# wait until config has been generated if we are in swarm mode
if [ "$SWARM_MODE" = "yes" ] ; then
echo "[*] Waiting until config has been generated ..."
if [ "$SWARM_MODE" = "yes" ] || [ "$KUBERNETES_MODE" = "yes" ] ; then
log "entrypoint" "INFO" "waiting until config has been generated ..."
while [ ! -f "/etc/nginx/autoconf" ] ; do
sleep 1
done
@@ -83,14 +91,14 @@ if [ -f "/tmp/nginx-temp.pid" ] ; then
fi
# run nginx
echo "[*] Running nginx ..."
nginx &
log "entrypoint" "INFO" "running nginx ..."
nginx -g 'daemon off;' &
pid="$!"
# autotest
if [ "$1" == "test" ] ; then
sleep 10
echo -n "autotest" > /www/index.html
echo -n "autotest" > /opt/bunkerized-nginx/www/index.html
check=$(curl -H "User-Agent: legit" "http://localhost:8080")
if [ "$check" == "autotest" ] ; then
exit 0
@@ -98,9 +106,6 @@ if [ "$1" == "test" ] ; then
exit 1
fi
# post jobs
/opt/entrypoint/post-jobs.sh
# wait for nginx
wait "$pid"
while [ -f "/tmp/nginx.pid" ] ; do
@@ -108,5 +113,5 @@ while [ -f "/tmp/nginx.pid" ] ; do
done
# sigterm trapped
echo "[*] bunkerized-nginx stopped"
log "entrypoint" "INFO" "bunkerized-nginx stopped"
exit 0

View File

@@ -1,7 +1,7 @@
#!/bin/bash
# load some functions
. /opt/entrypoint/utils.sh
. /opt/bunkerized-nginx/entrypoint/utils.sh
# self signed certs for sites
files=$(has_value GENERATE_SELF_SIGNED_SSL yes)
@@ -19,12 +19,7 @@ if [ "$files" != "" ] ; then
SELF_SIGNED_SSL_ORG="$(sed -nE 's/^SELF_SIGNED_SSL_ORG=(.*)$/\1/p' $file)"
SELF_SIGNED_SSL_OU="$(sed -nE 's/^SELF_SIGNED_SSL_OU=(.*)$/\1/p' $file)"
SELF_SIGNED_SSL_CN="$(sed -nE 's/^SELF_SIGNED_SSL_CN=(.*)$/\1/p' $file)"
openssl_output=$(openssl req -nodes -x509 -newkey rsa:4096 -keyout ${dest}self-key.pem -out ${dest}self-cert.pem -days "$SELF_SIGNED_SSL_EXPIRY" -subj "/C=$SELF_SIGNED_SSL_COUNTRY/ST=$SELF_SIGNED_SSL_STATE/L=$SELF_SIGNED_SSL_CITY/O=$SELF_SIGNED_SSL_ORG/OU=$SELF_SIGNED_SSL_OU/CN=$SELF_SIGNED_SSL_CN" 2>&1)
if [ $? -eq 0 ] ; then
echo "[*] Generated self-signed certificate ${dest}self-cert.pem with key ${dest}self-key.pem"
else
echo "[!] Error while generating self-signed certificate : $openssl_output"
fi
/opt/bunkerized-nginx/jobs/main.py --name self-signed-cert --dst_cert "${dest}self-cert.pem" --dst_key "${dest}self-key.pem" --expiry "$SELF_SIGNED_SSL_EXPIRY" --subj "/C=$SELF_SIGNED_SSL_COUNTRY/ST=$SELF_SIGNED_SSL_STATE/L=$SELF_SIGNED_SSL_CITY/O=$SELF_SIGNED_SSL_ORG/OU=$SELF_SIGNED_SSL_OU/CN=$SELF_SIGNED_SSL_CN"
done
fi
@@ -37,12 +32,7 @@ if [ "$(has_value AUTO_LETS_ENCRYPT yes)" != "" ] || [ "$(has_value GENERATE_SEL
SELF_SIGNED_SSL_ORG="Your Company, Inc."
SELF_SIGNED_SSL_OU="IT"
SELF_SIGNED_SSL_CN="www.yourdomain.com"
openssl_output=$(openssl req -nodes -x509 -newkey rsa:4096 -keyout /etc/nginx/default-key.pem -out /etc/nginx/default-cert.pem -days $SELF_SIGNED_SSL_EXPIRY -subj "/C=$SELF_SIGNED_SSL_COUNTRY/ST=$SELF_SIGNED_SSL_STATE/L=$SELF_SIGNED_SSL_CITY/O=$SELF_SIGNED_SSL_ORG/OU=$SELF_SIGNED_SSL_OU/CN=$SELF_SIGNED_SSL_CN" 2>&1)
if [ $? -eq 0 ] ; then
echo "[*] Generated self-signed certificate for default server"
else
echo "[!] Error while generating self-signed certificate for default server : $openssl_output"
fi
/opt/bunkerized-nginx/jobs/main.py --name self-signed-cert --dst_cert "/etc/nginx/default-cert.pem" --dst_key "/etc/nginx/default-key.pem" --expiry "$SELF_SIGNED_SSL_EXPIRY" --subj "/C=$SELF_SIGNED_SSL_COUNTRY/ST=$SELF_SIGNED_SSL_STATE/L=$SELF_SIGNED_SSL_CITY/O=$SELF_SIGNED_SSL_ORG/OU=$SELF_SIGNED_SSL_OU/CN=$SELF_SIGNED_SSL_CN"
fi
# certbot
@@ -55,14 +45,14 @@ if [ "$files" != "" ] ; then
SERVER_NAME="$(sed -nE 's/^SERVER_NAME=(.*)$/\1/p' $file)"
FIRST_SERVER="$(echo $SERVER_NAME | cut -d ' ' -f 1)"
EMAIL_LETS_ENCRYPT="$(sed -nE 's/^EMAIL_LETS_ENCRYPT=(.*)$/\1/p' $file)"
USE_STAGING="$(grep "^USE_LETS_ENCRYPT_STAGING=yes$" $file)"
if [ "$EMAIL_LETS_ENCRYPT" = "" ] ; then
EMAIL_LETS_ENCRYPT="contact@${FIRST_SERVER}"
fi
certbot_output=$(/opt/scripts/certbot-new.sh "$(echo -n $SERVER_NAME | sed 's/ /,/g')" "$EMAIL_LETS_ENCRYPT" 2>&1)
if [ $? -eq 0 ] ; then
echo "[*] Certbot new successfully executed for domain(s) $(echo -n $SERVER_NAME | sed 's/ /,/g')"
if [ "$USE_STAGING" = "" ] ; then
/opt/bunkerized-nginx/jobs/main.py --name certbot-new --domain "$(echo -n $SERVER_NAME | sed 's/ /,/g')" --email "$EMAIL_LETS_ENCRYPT"
else
echo "[*] Error while executing certbot new : $certbot_output"
/opt/bunkerized-nginx/jobs/main.py --name certbot-new --domain "$(echo -n $SERVER_NAME | sed 's/ /,/g')" --email "$EMAIL_LETS_ENCRYPT" --staging
fi
done
fi
@@ -70,11 +60,30 @@ fi
# GeoIP
if [ "$(has_value BLACKLIST_COUNTRY ".\+")" != "" ] || [ "$(has_value WHITELIST_COUNTRY ".\+")" != "" ] ; then
if [ -f "/cache/geoip.mmdb" ] ; then
echo "[*] Copying cached geoip.mmdb ..."
cp /cache/geoip.mmdb /etc/nginx/geoip.mmdb
elif [ "$(ps aux | grep "geoip\.sh")" = "" ] ; then
echo "[*] Downloading GeoIP database ..."
/opt/scripts/geoip.sh > /dev/null 2>&1
fi
/opt/bunkerized-nginx/jobs/main.py --name geoip --cache
fi
# User-Agents
if [ "$(has_value BLOCK_USER_AGENT yes)" != "" ] ; then
/opt/bunkerized-nginx/jobs/main.py --name user-agents --cache
fi
# Referrers
if [ "$(has_value BLOCK_REFERRER yes)" != "" ] ; then
/opt/bunkerized-nginx/jobs/main.py --name referrers --cache
fi
# exit nodes
if [ "$(has_value BLOCK_TOR_EXIT_NODE yes)" != "" ] ; then
/opt/bunkerized-nginx/jobs/main.py --name exit-nodes --cache
fi
# proxies
if [ "$(has_value BLOCK_PROXIES yes)" != "" ] ; then
/opt/bunkerized-nginx/jobs/main.py --name proxies --cache
fi
# abusers
if [ "$(has_value BLOCK_ABUSERS yes)" != "" ] ; then
/opt/bunkerized-nginx/jobs/main.py --name abusers --cache
fi

View File

@@ -1,12 +1,12 @@
#!/bin/bash
# load some functions
. /opt/entrypoint/utils.sh
. /opt/bunkerized-nginx/entrypoint/utils.sh
# start nginx with temp conf for let's encrypt challenges and API
if [ "$(has_value AUTO_LETS_ENCRYPT yes)" != "" ] || [ "$SWARM_MODE" = "yes" ] || [ "$AUTO_LETS_ENCRYPT" = "yes" ] ; then
cp /opt/confs/global/nginx-temp.conf /tmp/nginx-temp.conf
cp /opt/confs/global/api-temp.conf /tmp/api.conf
if [ "$(has_value AUTO_LETS_ENCRYPT yes)" != "" ] || [ "$SWARM_MODE" = "yes" ] || [ "$AUTO_LETS_ENCRYPT" = "yes" ] || [ "$KUBERNETES_MODE" = "yes" ] ; then
cp /opt/bunkerized-nginx/confs/global/nginx-temp.conf /tmp/nginx-temp.conf
cp /opt/bunkerized-nginx/confs/global/api-temp.conf /tmp/api.conf
if [ "$SWARM_MODE" = "yes" ] ; then
replace_in_file "/tmp/nginx-temp.conf" "%USE_API%" "include /tmp/api.conf;"
replace_in_file "/tmp/api.conf" "%API_URI%" "$API_URI"

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
# /etc/letsencrypt
if [ ! -r "/etc/letsencrypt" ] || [ ! -x "/etc/letsencrypt" ] ; then

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
# /etc/letsencrypt
if [ ! -w "/etc/letsencrypt" ] || [ ! -r "/etc/letsencrypt" ] || [ ! -x "/etc/letsencrypt" ] ; then
@@ -10,38 +10,40 @@ if [ -f "/usr/sbin/nginx" ] ; then
# /www
if [ ! -r "/www" ] || [ ! -x "/www" ] ; then
echo "[!] ERROR - wrong permissions on /www"
exit 2
fi
# /modsec-confs
if [ ! -r "/modsec-confs" ] || [ ! -x "/modsec-confs" ] ; then
echo "[!] ERROR - wrong permissions on /modsec-confs"
exit 3
fi
# /modsec-crs-confs
if [ ! -r "/modsec-crs-confs" ] || [ ! -x "/modsec-crs-confs" ] ; then
echo "[!] ERROR - wrong permissions on /modsec-crs-confs"
exit 4
exit 1
fi
# /server-confs
if [ ! -r "/server-confs" ] || [ ! -x "/server-confs" ] ; then
echo "[!] ERROR - wrong permissions on /server-confs"
exit 5
exit 1
fi
# /http-confs
if [ ! -r "/http-confs" ] || [ ! -x "/http-confs" ] ; then
echo "[!] ERROR - wrong permissions on /http-confs"
exit 6
exit 1
fi
fi
# /modsec-confs
if [ ! -r "/modsec-confs" ] || [ ! -x "/modsec-confs" ] ; then
echo "[!] ERROR - wrong permissions on /modsec-confs"
exit 1
fi
# /modsec-crs-confs
if [ ! -r "/modsec-crs-confs" ] || [ ! -x "/modsec-crs-confs" ] ; then
echo "[!] ERROR - wrong permissions on /modsec-crs-confs"
exit 1
fi
# /acme-challenge
if [ ! -w "/acme-challenge" ] || [ ! -r "/acme-challenge" ] || [ ! -x "/acme-challenge" ] ; then
echo "[!] ERROR - wrong permissions on /acme-challenge"
exit 7
exit 1
fi
# /etc/nginx
if [ ! -w "/etc/nginx" ] || [ ! -r "/etc/nginx" ] || [ ! -x "/etc/nginx" ] ; then
echo "[!] ERROR - wrong permissions on /etc/nginx"
exit 8
exit 1
fi

View File

@@ -1,59 +0,0 @@
#!/bin/bash
# load some functions
. /opt/entrypoint/utils.sh
# User-Agents
if [ "$(has_value BLOCK_USER_AGENT yes)" != "" ] ; then
if [ -f "/cache/user-agents.list" ] && [ "$(wc -l /cache/user-agents.list | cut -d ' ' -f 1)" -gt 1 ] ; then
echo "[*] Copying cached user-agents.list ..."
cp /cache/user-agents.list /etc/nginx/user-agents.list
elif [ "$(ps aux | grep "user-agents\.sh")" = "" ] ; then
echo "[*] Downloading bad user-agent list (in background) ..."
/opt/scripts/user-agents.sh > /dev/null 2>&1 &
fi
fi
# Referrers
if [ "$(has_value BLOCK_REFERRER yes)" != "" ] ; then
if [ -f "/cache/referrers.list" ] && [ "$(wc -l /cache/referrers.list | cut -d ' ' -f 1)" -gt 1 ] ; then
echo "[*] Copying cached referrers.list ..."
cp /cache/referrers.list /etc/nginx/referrers.list
elif [ "$(ps aux | grep "referrers\.sh")" = "" ] ; then
echo "[*] Downloading bad referrer list (in background) ..."
/opt/scripts/referrers.sh > /dev/null 2>&1 &
fi
fi
# exit nodes
if [ "$(has_value BLOCK_TOR_EXIT_NODE yes)" != "" ] ; then
if [ -f "/cache/tor-exit-nodes.list" ] && [ "$(wc -l /cache/tor-exit-nodes.list | cut -d ' ' -f 1)" -gt 1 ] ; then
echo "[*] Copying cached tor-exit-nodes.list ..."
cp /cache/tor-exit-nodes.list /etc/nginx/tor-exit-nodes.list
elif [ "$(ps aux | grep "exit-nodes\.sh")" = "" ] ; then
echo "[*] Downloading tor exit nodes list (in background) ..."
/opt/scripts/exit-nodes.sh > /dev/null 2>&1 &
fi
fi
# proxies
if [ "$(has_value BLOCK_PROXIES yes)" != "" ] ; then
if [ -f "/cache/proxies.list" ] && [ "$(wc -l /cache/proxies.list | cut -d ' ' -f 1)" -gt 1 ] ; then
echo "[*] Copying cached proxies.list ..."
cp /cache/proxies.list /etc/nginx/proxies.list
elif [ "$(ps aux | grep "proxies\.sh")" = "" ] ; then
echo "[*] Downloading proxies list (in background) ..."
/opt/scripts/proxies.sh > /dev/null 2>&1 &
fi
fi
# abusers
if [ "$(has_value BLOCK_ABUSERS yes)" != "" ] ; then
if [ -f "/cache/abusers.list" ] && [ "$(wc -l /cache/abusers.list | cut -d ' ' -f 1)" -gt 1 ] ; then
echo "[*] Copying cached abusers.list ..."
cp /cache/abusers.list /etc/nginx/abusers.list
elif [ "$(ps aux | grep "abusers\.sh")" = "" ] ; then
echo "[*] Downloading abusers list (in background) ..."
/opt/scripts/abusers.sh > /dev/null 2>&1 &
fi
fi

View File

@@ -32,9 +32,12 @@ function has_value() {
done
}
# log to jobs.log
function job_log() {
# log to stdout
function log() {
when="$(date '+[%Y-%m-%d %H:%M:%S]')"
what="$1"
echo "$when $what" >> /var/log/jobs.log
category="$1"
severity="$2"
message="$3"
echo "$when $category - $severity - $message"
}

View File

@@ -0,0 +1,27 @@
# Authelia
Authelia is an open-source authentication and authorization server providing two-factor authentication and single sign-on (SSO) for your applications via a web portal. See [website](https://www.authelia.com/) and [GitHub repo](https://github.com/authelia/authelia) for more information.
## Preamble
We will assume that you already have some basic knowledge about Authelia. If that's not the case, you should read their [documentation](https://www.authelia.com/) first.
## Architecture
<img src="https://github.com/bunkerity/bunkerized-nginx/blob/master/examples/authelia/architecture.png?raw=true" />
## Configuration
First of all, you will need to edit the configuration files inside the authelia folder (e.g. : domains, DB backend, email notifier, ...).
## Docker
See [docker-compose.yml](https://github.com/bunkerity/bunkerized-nginx/blob/master/examples/authelia/docker-compose.yml).
## Docker autoconf
See [docker-compose.autoconf.yml](https://github.com/bunkerity/bunkerized-nginx/blob/master/examples/authelia/docker-compose.autoconf.yml).
## Docker Swarm
See [docker-compose.swarm.yml](https://github.com/bunkerity/bunkerized-nginx/blob/master/examples/authelia/docker-compose.swarm.yml).

Binary file not shown (added image, 21 KiB).

View File

@@ -0,0 +1,72 @@
###############################################################
#                   Authelia configuration                    #
###############################################################
host: 0.0.0.0
port: 9091
# log:
#   level: debug
# This secret can also be set using the env variables AUTHELIA_JWT_SECRET_FILE
jwt_secret: a_very_important_secret
default_redirection_url: https://auth.example.com
totp:
  issuer: authelia.com
# duo_api:
#   hostname: api-123456789.example.com
#   integration_key: ABCDEF
#   # This secret can also be set using the env variables AUTHELIA_DUO_API_SECRET_KEY_FILE
#   secret_key: 1234567890abcdefghifjkl
authentication_backend:
  file:
    path: /config/users_database.yml
access_control:
  default_policy: deny
  rules:
    # Rules applied to everyone
    - domain: auth.example.com
      policy: bypass
    - domain: app1.example.com
      policy: one_factor
    - domain: app2.example.com
      policy: two_factor
session:
  name: authelia_session
  # This secret can also be set using the env variables AUTHELIA_SESSION_SECRET_FILE
  secret: unsecure_session_secret
  expiration: 3600 # 1 hour
  inactivity: 300 # 5 minutes
  domain: example.com # Should match whatever your root protected domain is
  redis:
    host: redis
    port: 6379
    # This secret can also be set using the env variables AUTHELIA_SESSION_REDIS_PASSWORD_FILE
    # password: authelia
regulation:
  max_retries: 3
  find_time: 120
  ban_time: 300
storage:
  local:
    path: /config/db.sqlite3
notifier:
  disable_startup_check: false
  filesystem:
    filename: /config/notification.txt
#notifier:
#  smtp:
#    username: test
#    # This secret can also be set using the env variables AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE
#    password: password
#    host: mail.example.com
#    port: 25
#    sender: admin@example.com
View File

@@ -0,0 +1,16 @@
###############################################################
#                        Users Database                       #
###############################################################
# This file can be used if you do not have an LDAP set up.
# List of users
users:
  authelia:
    displayname: "Authelia User"
    # Password is Authelia
    password: "$6$rounds=50000$BpLnfgDsc2WD8F2q$Zis.ixdg9s/UOJYrs56b5QEZFiZECu0qZVNsIYxBaNJ7ucIL.nlxVCT5tqh8KHG8X4tlwCFm5r6NTOZZ5qRFN/" # yamllint disable-line rule:line-length
    email: authelia@authelia.com
    groups:
      - admins
      - dev

Some files were not shown because too many files have changed in this diff.