From 4c77a14825444f90e47da7dd63614d4c8187c678 Mon Sep 17 00:00:00 2001 From: bunkerity Date: Wed, 13 Oct 2021 17:21:25 +0200 Subject: [PATCH] use annotations as env var in Ingress definition, fix cidr parsing for reserved ips, fix missing empty when job is external, fix ping check for remote api and init work hour/day support for request limit --- autoconf/src/IngressController.py | 6 ++- confs/global/init-lua.conf | 5 ++- confs/global/nginx.conf | 3 +- confs/site/log-lua.conf | 3 +- confs/site/main-lua.conf | 11 +++++ confs/site/server.conf | 6 +-- jobs/Job.py | 6 +-- lua/limitreq.lua | 67 +++++++++++++++++++++++++++++++ lua/remoteapi.lua | 5 ++- 9 files changed, 100 insertions(+), 12 deletions(-) create mode 100644 lua/limitreq.lua diff --git a/autoconf/src/IngressController.py b/autoconf/src/IngressController.py index af453fb..58dd368 100644 --- a/autoconf/src/IngressController.py +++ b/autoconf/src/IngressController.py @@ -41,7 +41,9 @@ class IngressController(Controller.Controller) : def __annotations_to_env(self, annotations) : env = {} - prefix = annotations["bunkerized-nginx.SERVER_NAME"].split(" ")[0] + "_" + prefix = "" + if "bunkerized-nginx.SERVER_NAME" in annotations : + prefix = annotations["bunkerized-nginx.SERVER_NAME"].split(" ")[0] + "_" for annotation in annotations : if annotation.startswith("bunkerized-nginx.") and annotation.replace("bunkerized-nginx.", "", 1) != "" and annotation.replace("bunkerized-nginx.", "", 1) != "AUTOCONF" : env[prefix + annotation.replace("bunkerized-nginx.", "", 1)] = annotations[annotation] @@ -85,6 +87,8 @@ class IngressController(Controller.Controller) : first_servers.extend(env["SERVER_NAME"].split(" ")) for ingress in ingresses : env.update(self.__rules_to_env(ingress.spec.rules, namespace=ingress.metadata.namespace)) + if ingress.metadata.annotations != None : + env.update(self.__annotations_to_env(ingress.metadata.annotations)) if ingress.spec.tls : for tls_entry in ingress.spec.tls : for host in 
tls_entry.hosts : diff --git a/confs/global/init-lua.conf b/confs/global/init-lua.conf index 4a7962d..5195fc2 100644 --- a/confs/global/init-lua.conf +++ b/confs/global/init-lua.conf @@ -37,7 +37,10 @@ local reserved_ips = { "240.0.0.0/4", "255.255.255.255/32" } -ngx.shared.reserved_ips:safe_set("cidrs", iputils.parse_cidrs(reserved_ips), 0) +local success, err, forcible = ngx.shared.reserved_ips:set("data", cjson.encode(iputils.parse_cidrs(reserved_ips)), 0) +if not success then + logger.log(ngx.ERR, "INIT", "Can't load reserved IPs : " .. err) +end -- Load blacklists if not use_redis then diff --git a/confs/global/nginx.conf b/confs/global/nginx.conf index 96fe66b..ffadffd 100644 --- a/confs/global/nginx.conf +++ b/confs/global/nginx.conf @@ -90,13 +90,14 @@ http { {% if has_value("BLOCK_REFERRER", "yes") %}lua_shared_dict referrers_cache 10m;{% endif +%} {% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_ban 10m;{% endif +%} {% if has_value("USE_BAD_BEHAVIOR", "yes") %}lua_shared_dict behavior_count 10m;{% endif +%} + {% if has_value("USE_LIMIT_REQ", "yes") %}lua_shared_dict limit_req {{ LIMIT_REQ_CACHE }};{% endif +%} lua_shared_dict plugins_data 10m; lua_shared_dict reserved_ips 1m; {% if has_value("USE_REMOTE_API", "yes") %}lua_shared_dict remote_api 1m;{% endif +%} {% if has_value("USE_REMOTE_API", "yes") %}lua_shared_dict remote_api_db 10m;{% endif +%} # shared memory zone for limit_req - {% if has_value("USE_LIMIT_REQ", "yes") %}limit_req_zone $binary_remote_addr$uri zone=limit:{{ LIMIT_REQ_CACHE }} rate={{ LIMIT_REQ_RATE }};{% endif +%} + #{% if has_value("USE_LIMIT_REQ", "yes") %}limit_req_zone $binary_remote_addr$uri zone=limit:{{ LIMIT_REQ_CACHE }} rate={{ LIMIT_REQ_RATE }};{% endif +%} # shared memory zone for limit_conn {% if has_value("USE_LIMIT_CONN", "yes") %}limit_conn_zone $binary_remote_addr zone=ddos:{{ LIMIT_CONN_CACHE }};{% endif +%} diff --git a/confs/site/log-lua.conf b/confs/site/log-lua.conf index 3020285..d8758e9 
100644 --- a/confs/site/log-lua.conf +++ b/confs/site/log-lua.conf @@ -1,6 +1,7 @@ log_by_lua_block { local logger = require "logger" +local cjson = require "cjson" -- bad behavior local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif +%} @@ -22,7 +23,7 @@ local use_remote_api = {% if USE_REMOTE_API == "yes" %}true{% else %} local remoteapi = require "remoteapi" local iputils = require "resty.iputils" -if use_remote_api and not iputils.ip_in_cidrs(ngx.var.remote_addr, ngx.shared.reserved_ips:get("data")) and ngx.shared.remote_api:get("id") ~= "empty" and ngx.shared.remote_api:get("ping") ~= "ko" then +if use_remote_api and not iputils.ip_in_cidrs(ngx.var.remote_addr, cjson.decode(ngx.shared.reserved_ips:get("data"))) and ngx.shared.remote_api:get("id") ~= "empty" and ngx.shared.remote_api:get("ping") ~= "ko" then if ngx.status == ngx.HTTP_FORBIDDEN then local reason = "other" if use_bad_behavior and new_bad_behavior_ban then diff --git a/confs/site/main-lua.conf b/confs/site/main-lua.conf index a145415..59d1b2a 100644 --- a/confs/site/main-lua.conf +++ b/confs/site/main-lua.conf @@ -57,6 +57,11 @@ local dnsbl_list = {% raw %}{{% endraw %}{% if DNSBL_LIST != "" %}{% set elemen -- bad behavior local use_bad_behavior = {% if USE_BAD_BEHAVIOR == "yes" %}true{% else %}false{% endif +%} +-- limit req +local use_req_limit = {% if USE_LIMIT_REQ == "yes" %}true{% else %}false{% endif +%} +local limit_req_rate = "{{ LIMIT_REQ_RATE }}" +local limit_req_burst = "{{ LIMIT_REQ_BURST }}" + -- remote API local use_remote_api = {% if USE_REMOTE_API == "yes" %}true{% else %}false{% endif +%} @@ -73,6 +78,7 @@ local behavior = require "behavior" local logger = require "logger" local redis = require "resty.redis" local checker = require "checker" +local limitreq = require "limitreq" -- user variables local antibot_uri = "{{ ANTIBOT_URI }}" @@ -148,6 +154,11 @@ if use_bad_behavior and behavior.is_banned() then ngx.exit(ngx.HTTP_FORBIDDEN) end +-- check 
if IP is banned because of "request limit" +-- if use_req_limit and limitreq.check() then +-- ngx.exit(ngx.HTTP_FORBIDDEN) +-- end + -- our redis client local redis_client = nil if use_redis then diff --git a/confs/site/server.conf b/confs/site/server.conf index 6405404..7354476 100644 --- a/confs/site/server.conf +++ b/confs/site/server.conf @@ -65,9 +65,9 @@ server { } # requests limiting -{% if USE_LIMIT_REQ == "yes" +%} - include {{ NGINX_PREFIX }}limit-req.conf; -{% endif %} +#{% if USE_LIMIT_REQ == "yes" +%} +# include {{ NGINX_PREFIX }}limit-req.conf; +#{% endif %} # connections limiting {% if USE_LIMIT_CONN == "yes" +%} diff --git a/jobs/Job.py b/jobs/Job.py index 198febe..1f58a8a 100644 --- a/jobs/Job.py +++ b/jobs/Job.py @@ -120,7 +120,7 @@ class Job(abc.ABC) : # if self._type == "file" : # mode = "ab" # file = open("/tmp/" + self._filename, mode) - file = open("/tmp/" + self._filename, "ab") + file = open("/tmp/" + self._filename, "wb") elif self._redis != None : pipe = self._redis.pipeline() @@ -153,8 +153,8 @@ class Job(abc.ABC) : if self._redis == None : file.close() - if count > 0 : - shutil.copyfile("/tmp/" + self._filename, "/etc/nginx/" + self._filename) + #if count > 0 : + shutil.copyfile("/tmp/" + self._filename, "/etc/nginx/" + self._filename) os.remove("/tmp/" + self._filename) return JobRet.OK_RELOAD diff --git a/lua/limitreq.lua b/lua/limitreq.lua new file mode 100644 index 0000000..bcdbae8 --- /dev/null +++ b/lua/limitreq.lua @@ -0,0 +1,67 @@ +local M = {} +local logger = require "logger" + +function M.decr (key, delay) + local function callback (premature, key) + if premature then + ngx.shared.limit_req:delete(key) + return + end + local value, flags = ngx.shared.limit_req:get(key) + if value ~= nil then + if value - 1 == 0 then + ngx.shared.limit_req:delete(key) + return + end + ngx.shared.limit_req:set(key, value-1, 0) + end + end + local hdl, err = ngx.timer.at(delay, callback, key) + if not hdl then + logger.log(ngx.ERR, "REQ LIMIT", 
"can't setup decrement timer : " .. err) + return false + end + return true +end + +function M.incr (key) + local newval, err, forcible = ngx.shared.limit_req:incr(key, 1, 0, 0) + if not newval then + logger.log(ngx.ERR, "REQ LIMIT", "can't increment counter : " .. err) + return false + end + return true +end + +function M.check (url, rate) + if url == "/" or url == ngx.var.request_uri then + local key = ngx.var.remote_addr .. url + local rate_split = {} + for part in rate:gmatch("([^/]+)") do rate_split[#rate_split+1] = part end + local max, unit = tonumber(rate_split[1]), rate_split[2] + local delay = 0 + if unit == "s" then + delay = 1 + elseif unit == "m" then + delay = 60 + elseif unit == "h" then + delay = 3600 + elseif unit == "d" then + delay = 86400 + end + if M.incr(key) then + local current, flags = ngx.shared.limit_req:get(key) + if M.decr(key, delay) then + if current > max then + logger.log(ngx.WARN, "REQ LIMIT", "ip " .. ngx.var.remote_addr .. " has reached the limit : " .. current .. "/" .. unit .. " (max = " .. rate .. ")") + return true + end + else + ngx.shared.limit_req:set(key, current-1, 0) + end + end + end + return false +end + +return M diff --git a/lua/remoteapi.lua b/lua/remoteapi.lua index c564086..c1cecc7 100644 --- a/lua/remoteapi.lua +++ b/lua/remoteapi.lua @@ -55,8 +55,9 @@ function M.ping2() source = ltn12.source.string(request_body), sink = ltn12.sink.table(response_body) } - if res and status == 200 and response_body["data"] == "pong" then - return true + if res and status:match("^.*% 200% .*$") then + response_body = cjson.decode(response_body[1]) + return response_body["data"] == "pong" end return false end