From 88d4659dde90e5afe00add58e6cd04a96556d8cf Mon Sep 17 00:00:00 2001 From: Shahbaz Nazir Date: Fri, 29 Dec 2017 16:21:43 +0100 Subject: [PATCH 1/3] Adding k8s deployment for new 3scale apicast gateway Signed-off-by: Shahbaz Nazir --- k8s/apicast/apicast-dep.yaml | 36 ++ k8s/apicast/apicast-svc.yaml | 26 ++ k8s/apicast/container/Dockerfile | 14 + k8s/apicast/container/README.md | 60 +++ .../container/docker_build_and_push.bash | 5 + k8s/apicast/container/nginx.conf.template | 197 +++++++++ k8s/apicast/container/nginx.lua.template | 416 ++++++++++++++++++ .../container/nginx_openresty_entrypoint.bash | 54 +++ 8 files changed, 808 insertions(+) create mode 100644 k8s/apicast/apicast-dep.yaml create mode 100644 k8s/apicast/apicast-svc.yaml create mode 100644 k8s/apicast/container/Dockerfile create mode 100644 k8s/apicast/container/README.md create mode 100755 k8s/apicast/container/docker_build_and_push.bash create mode 100644 k8s/apicast/container/nginx.conf.template create mode 100644 k8s/apicast/container/nginx.lua.template create mode 100755 k8s/apicast/container/nginx_openresty_entrypoint.bash diff --git a/k8s/apicast/apicast-dep.yaml b/k8s/apicast/apicast-dep.yaml new file mode 100644 index 00000000..0251100c --- /dev/null +++ b/k8s/apicast/apicast-dep.yaml @@ -0,0 +1,36 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: openresty-instance-1-dep +spec: + replicas: 1 + template: + metadata: + labels: + app: openresty-instance-1-dep + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: nginx-openresty + image: bigchaindb/nginx_3scale:unstable + imagePullPolicy: IfNotPresent + env: + - name: DNS_SERVER + valueFrom: + configMapKeyRef: + name: vars + key: cluster-dns-server-ip + - name: THREESCALE_PORTAL_ENDPOINT + valueFrom: + secretKeyRef: + name: 3scale-credentials + key: conf-url + ports: + - containerPort: 8080 + protocol: TCP + name: openresty-port + resources: + limits: + cpu: 200m + memory: 768Mi + restartPolicy: Always diff --git a/k8s/apicast/apicast-svc.yaml b/k8s/apicast/apicast-svc.yaml new file mode 100644 index 00000000..af0aaa56 --- /dev/null +++ b/k8s/apicast/apicast-svc.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Service +metadata: + name: openresty-instance-1 + namespace: default + labels: + name: openresty-instance-1 + annotations: + # NOTE: the following annotation is a beta feature and + # only available in GCE/GKE and Azure as of now + # Ref: https://kubernetes.io/docs/tutorials/services/source-ip/ + service.beta.kubernetes.io/external-traffic: OnlyLocal +spec: + selector: + app: openresty-instance-1-dep + ports: + - port: 8080 + targetPort: 8080 + name: apicast-port + protocol: TCP + - port: 80 + targetPort: 80 + protocol: TCP + name: http-port + type: ClusterIP + clusterIP: None \ No newline at end of file diff --git a/k8s/apicast/container/Dockerfile b/k8s/apicast/container/Dockerfile new file mode 100644 index 00000000..394a72f8 --- /dev/null +++ b/k8s/apicast/container/Dockerfile @@ -0,0 +1,14 @@ +FROM registry.access.redhat.com/3scale-amp20/apicast-gateway +LABEL maintainer "dev@bigchaindb.com" + +COPY nginx.conf.template /opt/app-root/src/sites.d/nginx.conf +COPY nginx.lua.template /opt/app-root/src/sites.d/nginx.lua +COPY config.json /opt/app-root/src/conf/config.json +COPY nginx_openresty_entrypoint.bash /opt/app-root/scripts/ + +ENV THREESCALE_CONFIG_FILE "/opt/app-root/src/conf/config.json" +# The following ports are the values we use to run the NGINX+3scale container. 
+# 80 for http, 8080 for the 3scale api, 8888 for health-check, 27017 for
+# MongoDB
+EXPOSE 80 8080 8888 27017
+ENTRYPOINT ["/nginx_openresty_entrypoint.bash"]
diff --git a/k8s/apicast/container/README.md b/k8s/apicast/container/README.md
new file mode 100644
index 00000000..47b37103
--- /dev/null
+++ b/k8s/apicast/container/README.md
@@ -0,0 +1,60 @@
+# nginx_3scale agent
+The nginx_3scale agent is a module responsible for the authentication,
+authorization and metering of BigchainDB API users, by communicating with 3scale.
+We use OpenResty for this, which is NGINX bundled with Lua libraries.
+More information is available on the [OpenResty website](https://openresty.org/en/).
+
+It validates the tokens sent by users in HTTP headers.
+The user tokens map directly to the Application Plan specified in 3scale.
+
+## Build and Push the Latest Container
+Use the `docker_build_and_push.bash` script to build the latest Docker image
+and upload it to Docker Hub.
+Ensure that the image tag is updated to a new version number to properly
+reflect any changes made to the container.
+
+
+## How It Works
+
+* We define a [Lua module](./nginx.lua.template) and
+  custom hooks (Lua functions executed at certain phases of the nginx
+  request processing lifecycle) to authenticate an API request.
+
+* We download the template available from 3scale, which pre-defines all the
+  rules configured in the 3scale UI for monitoring, as well as the basic nginx
+  configuration.
+
+* We heavily modify these templates to add our custom functionality.
+
+* The nginx_3scale image reads the environment variables and accordingly
+  creates the nginx.conf and nginx.lua files from the templates.
+
+* Every request calls the `_M.access()` function. This function extracts the
+  `app_id` and `app_key` from the HTTP request headers and forwards them to
+  3scale to check whether the request is allowed to be forwarded to the
+  BigchainDB backend (an example request is shown below). The request also
+  contains the various parameters that one would like to set access policies
+  on. If the `app_id` and `app_key` check succeeds, the access rules for the
+  parameters passed with the request are checked to see if the request can
+  pass through. For example, we can send a parameter, say
+  `request_body_size`, to the 3scale auth API. If we have defined a rule in
+  the 3scale dashboard to drop `request_body_size` above a certain threshold,
+  the authorization will fail even if the `app_id` and `app_key` are valid.
+
+* A successful response from the auth API causes the request to be proxied to
+  the backend. After a backend response, the `_M.post_action_content` hook is
+  called. We calculate all the metrics we are interested in and
+  form a payload for the 3scale reporting API. This ensures that we update
+  every metric defined in the 3scale UI after every request.
+
+* Note: We do not cache the keys in nginx, so that we can validate every request
+  with 3scale and apply plan rules immediately. We can add auth caching to
+  improve performance, or in case we move to a fully post-paid billing model.
+
+* Refer to the references in the [Lua module](./nginx.lua.template) for
+  more details about how nginx+Lua+3scale works.
+
+* For HTTPS support, we also need to add the signed certificate and the
+  corresponding private key to the folder
+  `/usr/local/openresty/nginx/conf/ssl/`. Name the PEM-encoded certificate
+  `cert.pem` and the private key `cert.key`.
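For illustration, a client request to this gateway might look like the following minimal sketch. The gateway host, frontend port, credentials and transaction payload are placeholders, and the `/api/v1/transactions` path corresponds to the mapping rules in the Lua template.

```bash
# Minimal sketch: GATEWAY_HOST, OPENRESTY_FRONTEND_PORT, the app_id/app_key
# values and tx.json are placeholders; substitute the credentials issued for
# your 3scale application plan.
curl -X POST "http://GATEWAY_HOST:OPENRESTY_FRONTEND_PORT/api/v1/transactions" \
  -H "app_id: YOUR_APP_ID" \
  -H "app_key: YOUR_APP_KEY" \
  -H "Content-Type: application/json" \
  -d @tx.json
```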
diff --git a/k8s/apicast/container/docker_build_and_push.bash b/k8s/apicast/container/docker_build_and_push.bash new file mode 100755 index 00000000..31dbb5e9 --- /dev/null +++ b/k8s/apicast/container/docker_build_and_push.bash @@ -0,0 +1,5 @@ +#!/bin/bash + +docker build -t bigchaindb/nginx_3scale:unstable . + +docker push bigchaindb/nginx_3scale:unstable diff --git a/k8s/apicast/container/nginx.conf.template b/k8s/apicast/container/nginx.conf.template new file mode 100644 index 00000000..9a4e56bc --- /dev/null +++ b/k8s/apicast/container/nginx.conf.template @@ -0,0 +1,197 @@ +worker_processes 2; +daemon off; +user nobody nogroup; +pid /tmp/nginx.pid; +error_log /usr/local/openresty/nginx/logs/error.log; +env THREESCALE_DEPLOYMENT_ENV; + +events { + worker_connections 256; + accept_mutex on; + use epoll; +} + +http { + lua_shared_dict api_keys 10m; + server_names_hash_bucket_size 128; + lua_package_path ";;$prefix/?.lua;$prefix/conf/?.lua"; + init_by_lua 'math.randomseed(ngx.time()) ; cjson = require("cjson")'; + access_log /usr/local/openresty/nginx/logs/access.log combined buffer=16k flush=5s; + + # allow 10 req/sec from the same IP address, and store the counters in a + # `zone` or shared memory location tagged as 'one'. + limit_req_zone $binary_remote_addr zone=one:10m rate=10r/s; + # enable logging when requests are being throttled + limit_req_log_level notice; + + # the http status code to return to the client; 429 is for TooManyRequests, + # ref. RFC 6585 + limit_req_status 429; + + resolver DNS_SERVER valid=30s ipv6=off; + + map $remote_addr $bdb_backend { + default BIGCHAINDB_BACKEND_HOST; + } + + upstream backend_SERVICE_ID { + server localhost:9999 max_fails=5 fail_timeout=30; + } + + # Our frontend API server that accepts requests from the external world and + # takes care of authentication and authorization. If auth is successful, it + # forwards the request to the backend_SERVICE_ID upstream where a consortium + # can run a BDB cluster. + server { + lua_code_cache on; + listen OPENRESTY_FRONTEND_PORT; + keepalive_timeout 60s; + + underscores_in_headers on; + set_by_lua $deployment 'return os.getenv("THREESCALE_DEPLOYMENT_ENV")'; + set $threescale_backend "https://su1.3scale.net"; + #set $threescale_backend "http://su1.3scale.net"; + #set $threescale_backend "https://su1.3scale.net:443"; + #set $threescale_backend "https://echo-api.3scale.net"; + + # `slowloris` attack mitigation settings + client_body_timeout 10s; + client_header_timeout 10s; + + location = /out_of_band_authrep_action { + internal; + proxy_pass_request_headers off; + set $service_token "SERVICE_TOKEN"; + content_by_lua "require('nginx').post_action_content()"; + } + + # 3scale auth api that takes the auth credentials and metrics as input, + # and returns 200 OK if both the credentials match and the user has not + # exceeded the limits in his application plan. + location = /threescale_auth { + internal; + set $service_token "SERVICE_TOKEN"; + proxy_pass $threescale_backend/transactions/authorize.xml?service_token=$service_token&service_id=$service_id&$usage&$credentials&log%5Bcode%5D=$arg_code&log%5Brequest%5D=$arg_req&log%5Bresponse%5D=$arg_resp; + proxy_set_header Host "su1.3scale.net"; + #proxy_set_header Host "echo-api.3scale.net"; + proxy_set_header X-3scale-User-Agent "nginx$deployment"; + proxy_set_header X-3scale-Version "THREESCALE_VERSION_HEADER"; + } + + # 3scale reporting api that takes the metrics data and persists the metrics + # in the 3scale backend. 
+ location = /threescale_report { + internal; + set $service_token "SERVICE_TOKEN"; + proxy_pass $threescale_backend/transactions.xml; + proxy_set_header Host "su1.3scale.net"; + #proxy_set_header Host "echo-api.3scale.net"; + # We have a bug in lua-nginx module that does not set + # Content-Type from lua script + proxy_pass_request_headers off; + proxy_set_header Content-Type "application/x-www-form-urlencoded"; + proxy_set_header X-3scale-User-Agent "nginx$deployment"; + proxy_set_header X-3scale-Version "THREESCALE_VERSION_HEADER"; + } + + location / { + proxy_ignore_client_abort on; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-3scale-proxy-secret-token $secret_token; + + # limit requests from the same client, allow `burst` to 20 r/s, + # `nodelay` or drop connection immediately in case it exceeds this + # threshold. + limit_req zone=one burst=20 nodelay; + + # We do not need the GET handling here as it's done in the other NGINX + # module + #if ($request_method = GET ) { + # proxy_pass http://$bdb_backend:BIGCHAINDB_API_PORT; + #} + + if ($request_method = POST ) { + set $service_token null; + set $cached_key null; + set $credentials null; + set $usage null; + set $service_id SERVICE_ID; + set $proxy_pass null; + set $secret_token null; + set $resp_body null; + set $resp_headers null; + access_by_lua "require('nginx').access()"; + body_filter_by_lua 'ngx.ctx.buffered = (ngx.ctx.buffered or "") .. string.sub(ngx.arg[1], 1, 1000) + if ngx.arg[2] then ngx.var.resp_body = ngx.ctx.buffered end'; + header_filter_by_lua 'ngx.var.resp_headers = cjson.encode(ngx.resp.get_headers())'; + + add_header 'Access-Control-Allow-Origin' '*'; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; + add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; + add_header 'Access-Control-Expose-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; + + proxy_pass $proxy_pass ; + post_action /out_of_band_authrep_action; + } + + if ($request_method = 'OPTIONS') { + add_header 'Access-Control-Allow-Origin' '*'; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; + add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range,app_key,app_id'; + add_header 'Access-Control-Max-Age' 43200; + add_header 'Content-Type' 'text/plain charset=UTF-8'; + add_header 'Content-Length' 0; + return 204; + } + } + } + + # Our backend server block that accepts requests from the nginx proxy and + # forwards it to instances of BDB cluster. We currently run only a single + # instance. 
+ server { + sendfile on; + + listen 9999; + + # max client request body size: avg transaction size + client_max_body_size 15k; + + # keepalive connection settings + keepalive_timeout 60s; + + # `slowloris` attack mitigation settings + client_body_timeout 10s; + client_header_timeout 10s; + + if ( $http_x_3scale_proxy_secret_token != "THREESCALE_RESPONSE_SECRET_TOKEN" ) { + return 403; + } + + location / { + try_files $uri @proxy_to_app; + } + + location @proxy_to_app { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # enable the following line if and only if you use HTTPS + proxy_set_header X-Forwarded-Proto https; + proxy_set_header Host $http_host; + + # we don't want nginx trying to do something clever with + # redirects, we set the Host: header above already. + proxy_redirect off; + proxy_pass http://$bdb_backend:BIGCHAINDB_API_PORT; + + # limit requests from the same client, allow `burst` to 20 r/s on avg, + # `nodelay` or drop connection immediately in case it exceeds this + # threshold. + limit_req zone=one burst=20 nodelay; + } + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/local/openresty/nginx/html/50x.html; + } + } +} diff --git a/k8s/apicast/container/nginx.lua.template b/k8s/apicast/container/nginx.lua.template new file mode 100644 index 00000000..64baeaa1 --- /dev/null +++ b/k8s/apicast/container/nginx.lua.template @@ -0,0 +1,416 @@ +-- -*- mode: lua; -*- +-- Generated on: 2017-04-10 14:41:18 +0000 -- +-- Version: +-- Error Messages per service + +-- Ref: https://github.com/openresty/lua-nginx-module +-- Ref: https://ipdbtestnet-admin.3scale.net/p/admin/api_docs +-- Ref: http://nginx.org/en/docs/debugging_log.html + +local custom_config = false + +local _M = { + ['services'] = { + ['SERVICE_ID'] = { + error_auth_failed = 'Authentication failed', + error_auth_missing = 'Authentication parameters missing', + auth_failed_headers = 'text/plain; charset=us-ascii', + auth_missing_headers = 'text/plain; charset=us-ascii', + error_no_match = 'No Mapping Rule matched', + no_match_headers = 'text/plain; charset=us-ascii', + no_match_status = 404, + auth_failed_status = 403, + auth_missing_status = 403, + secret_token = 'THREESCALE_RESPONSE_SECRET_TOKEN', + get_credentials = function(service, params) + return ( + (params.app_id and params.app_key) + ) or error_no_credentials(service) + end, + extract_usage = function (service, request) + local method, url = unpack(string.split(request," ")) + local path, querystring = unpack(string.split(url, "?")) + local usage_t = {} + local matched_rules = {} + + local args = get_auth_params(nil, method) + + for i,r in ipairs(service.rules) do + check_rule({path=path, method=method, args=args}, r, usage_t, matched_rules) + end + + -- if there was no match, usage is set to nil and it will respond a 404, this behavior can be changed + return usage_t, table.concat(matched_rules, ", ") + end, + rules = { + { + method = 'POST', + pattern = '/api/{version}/transactions$', + parameters = { 'version' }, + querystring_params = function(args) + return true + end, + system_name = 'hits', + delta = 1 + }, + { + method = 'POST', + pattern = '/api/{version}/transactions$', + parameters = { 'version' }, + querystring_params = function(args) + return true + end, + system_name = 'request_body_size', + delta = 1 + }, + { + method = 'POST', + pattern = '/api/{version}/transactions$', + parameters = { 'version' }, + querystring_params = function(args) + return true + end, + system_name = 'response_body_size', + delta = 1 
+ }, + { + method = 'POST', + pattern = '/api/{version}/transactions$', + parameters = { 'version' }, + querystring_params = function(args) + return true + end, + system_name = 'post_transactions', + delta = 1 + }, + { + method = 'POST', + pattern = '/api/{version}/transactions$', + parameters = { 'version' }, + querystring_params = function(args) + return true + end, + system_name = 'total_body_size', + delta = 1 + }, + } +}, + } +} + +-- Error Codes +function error_no_credentials(service) + ngx.status = service.auth_missing_status + ngx.header.content_type = service.auth_missing_headers + ngx.print(service.error_auth_missing) + ngx.exit(ngx.HTTP_OK) +end + +function error_authorization_failed(service) + ngx.status = service.auth_failed_status + ngx.header.content_type = service.auth_failed_headers + ngx.print(service.error_auth_failed) + ngx.exit(ngx.HTTP_OK) +end + +function error_no_match(service) + ngx.status = service.no_match_status + ngx.header.content_type = service.no_match_headers + ngx.print(service.error_no_match) + ngx.exit(ngx.HTTP_OK) +end +-- End Error Codes + +-- Aux function to split a string + +function string:split(delimiter) + local result = { } + local from = 1 + local delim_from, delim_to = string.find( self, delimiter, from ) + if delim_from == nil then return {self} end + while delim_from do + table.insert( result, string.sub( self, from , delim_from-1 ) ) + from = delim_to + 1 + delim_from, delim_to = string.find( self, delimiter, from ) + end + table.insert( result, string.sub( self, from ) ) + return result +end + +function first_values(a) + r = {} + for k,v in pairs(a) do + if type(v) == "table" then + r[k] = v[1] + else + r[k] = v + end + end + return r +end + +function set_or_inc(t, name, delta) + return (t[name] or 0) + delta +end + +function build_querystring_formatter(fmt) + return function (query) + local function kvmap(f, t) + local res = {} + for k, v in pairs(t) do + table.insert(res, f(k, v)) + end + return res + end + + return table.concat(kvmap(function(k,v) return string.format(fmt, k, v) end, query or {}), "&") + end +end + +local build_querystring = build_querystring_formatter("usage[%s]=%s") +local build_query = build_querystring_formatter("%s=%s") + +function regexpify(path) + return path:gsub('?.*', ''):gsub("{.-}", '([\\w_.-]+)'):gsub("%.", "\\.") +end + +function check_rule(req, rule, usage_t, matched_rules) + local param = {} + local p = regexpify(rule.pattern) + local m = ngx.re.match(req.path, + string.format("^%s",p)) + if m and req.method == rule.method then + local args = req.args + if rule.querystring_params(args) then -- may return an empty table + -- when no querystringparams + -- in the rule. it's fine + for i,p in ipairs(rule.parameters) do + param[p] = m[i] + end + + table.insert(matched_rules, rule.pattern) + usage_t[rule.system_name] = set_or_inc(usage_t, rule.system_name, rule.delta) + end + end +end + +--[[ + Authorization logic + NOTE: We do not use any of the authorization logic defined in the template. + We use custom authentication and authorization logic defined in the + custom_app_id_authorize() function. 
+]]-- + +function get_auth_params(where, method) + local params = {} + if where == "headers" then + params = ngx.req.get_headers() + elseif method == "GET" then + params = ngx.req.get_uri_args() + else + ngx.req.read_body() + params = ngx.req.get_post_args() + end + return first_values(params) +end + +function get_debug_value() + local h = ngx.req.get_headers() + if h["X-3scale-debug"] == 'SERVICE_TOKEN' then + return true + else + return false + end +end + +function _M.authorize(auth_strat, params, service) + if auth_strat == 'oauth' then + oauth(params, service) + else + authrep(params, service) + end +end + +function oauth(params, service) + ngx.var.cached_key = ngx.var.cached_key .. ":" .. ngx.var.usage + local access_tokens = ngx.shared.api_keys + local is_known = access_tokens:get(ngx.var.cached_key) + + if is_known ~= 200 then + local res = ngx.location.capture("/threescale_oauth_authrep", { share_all_vars = true }) + + -- IN HERE YOU DEFINE THE ERROR IF CREDENTIALS ARE PASSED, BUT THEY ARE NOT VALID + if res.status ~= 200 then + access_tokens:delete(ngx.var.cached_key) + ngx.status = res.status + ngx.header.content_type = "application/json" + ngx.var.cached_key = nil + error_authorization_failed(service) + else + access_tokens:set(ngx.var.cached_key,200) + end + + ngx.var.cached_key = nil + end +end + +function authrep(params, service) + ngx.var.cached_key = ngx.var.cached_key .. ":" .. ngx.var.usage + local api_keys = ngx.shared.api_keys + local is_known = api_keys:get(ngx.var.cached_key) + + if is_known ~= 200 then + local res = ngx.location.capture("/threescale_authrep", { share_all_vars = true }) + + -- IN HERE YOU DEFINE THE ERROR IF CREDENTIALS ARE PASSED, BUT THEY ARE NOT VALID + if res.status ~= 200 then + -- remove the key, if it's not 200 let's go the slow route, to 3scale's backend + api_keys:delete(ngx.var.cached_key) + ngx.status = res.status + ngx.header.content_type = "application/json" + ngx.var.cached_key = nil + error_authorization_failed(service) + else + api_keys:set(ngx.var.cached_key,200) + end + ngx.var.cached_key = nil + end +end + +function _M.access() + local params = {} + local host = ngx.req.get_headers()["Host"] + local auth_strat = "" + local service = {} + local usage = {} + local matched_patterns = '' + + if ngx.status == 403 then + ngx.say("Throttling due to too many requests") + ngx.exit(403) + end + + if ngx.var.service_id == 'SERVICE_ID' then + local parameters = get_auth_params("headers", string.split(ngx.var.request, " ")[1] ) + service = _M.services['SERVICE_ID'] -- + ngx.var.secret_token = service.secret_token + params.app_id = parameters["app_id"] + params.app_key = parameters["app_key"] -- or "" -- Uncoment the first part if you want to allow not passing app_key + service.get_credentials(service, params) + ngx.var.cached_key = "SERVICE_ID" .. ":" .. params.app_id ..":".. params.app_key + auth_strat = "2" + ngx.var.service_id = "SERVICE_ID" + ngx.var.proxy_pass = "http://backend_SERVICE_ID" + usage, matched_patterns = service:extract_usage(ngx.var.request) + end + + usage['post_transactions'] = 0 + usage['request_body_size'] = 0 + usage['total_body_size'] = 0 + usage['response_body_size'] = 0 + ngx.var.credentials = build_query(params) + ngx.var.usage = build_querystring(usage) + + -- WHAT TO DO IF NO USAGE CAN BE DERIVED FROM THE REQUEST. 
+ if ngx.var.usage == '' then + ngx.header["X-3scale-matched-rules"] = '' + error_no_match(service) + end + + if get_debug_value() then + ngx.header["X-3scale-matched-rules"] = matched_patterns + ngx.header["X-3scale-credentials"] = ngx.var.credentials + ngx.header["X-3scale-usage"] = ngx.var.usage + ngx.header["X-3scale-hostname"] = ngx.var.hostname + end + _M.custom_app_id_authorize(params, service) +end + +function _M.custom_app_id_authorize(params, service) + ngx.var.cached_key = ngx.var.cached_key .. ":" .. ngx.var.usage + local api_keys = ngx.shared.api_keys + local res = ngx.location.capture("/threescale_auth", { share_all_vars = true }) + if res.status ~= 200 then + ngx.status = res.status + ngx.header.content_type = "application/json" + ngx.var.cached_key = nil + error_authorization_failed(service) + end + ngx.var.cached_key = nil +end + +function _M.post_action_content() + local report_data = {} + + -- increment POST count + report_data['post_transactions'] = 1 + + -- NOTE: When we are querying for the length of the request here, we already + -- have the complete request data with us and hence can just use the len() + -- function to get the size of the payload in bytes. + -- However, we might not have a complete response from the backend at this + -- stage (esp. if it's a large response size). So, we decipher the payload + -- size by peeking into the content length header of the response. + -- Otherwise, nginx will have to buffer every response and then calculate + -- response payload size. + + -- req data size + local req_data = ngx.req.get_body_data() + if req_data then + report_data['request_body_size'] = req_data:len() + else + report_data['request_body_size'] = 0 + end + + -- res data size + local all_headers = cjson.decode(ngx.var.resp_headers) + local variable_header = "content-length" --<-- case sensitive + if all_headers[variable_header] then + report_data['response_body_size'] = all_headers[variable_header] + else + report_data['response_body_size'] = 0 + end + + -- total data size + report_data['total_body_size'] = report_data['request_body_size'] + report_data['response_body_size'] + + -- get the app_id + local app_id = "" + local credentials = ngx.var.credentials:split("&") + for i in pairs(credentials) do + if credentials[i]:match('app_id') then + local temp = credentials[i]:split("=") + app_id = temp[2] + end + end + + -- form the payload to report to 3scale + local report = {} + report['service_id'] = ngx.var.service_id + report['service_token'] = ngx.var.service_token + report['transactions[0][app_id]'] = app_id + report['transactions[0][usage][post_transactions]'] = report_data['post_transactions'] + report['transactions[0][usage][request_body_size]'] = report_data['request_body_size'] + report['transactions[0][usage][response_body_size]'] = report_data['response_body_size'] + report['transactions[0][usage][total_body_size]'] = report_data['total_body_size'] + local res1 = ngx.location.capture("/threescale_report", {method = ngx.HTTP_POST, body = ngx.encode_args(report), share_all_vars = true }) + --ngx.log(0, ngx.encode_args(report)) + ngx.log(0, "Status: "..res1.status) + ngx.log(0, "Body: "..res1.body) + --if res1.status ~= 200 then + -- local api_keys = ngx.shared.api_keys + -- api_keys:delete(cached_key) + --end + ngx.exit(ngx.HTTP_OK) +end + +if custom_config then + local ok, c = pcall(function() return require(custom_config) end) + if ok and type(c) == 'table' and type(c.setup) == 'function' then + c.setup(_M) + end +end + +return _M + +-- END OF SCRIPT 
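To make the reporting step concrete: the payload assembled in `_M.post_action_content()` and sent through the internal `/threescale_report` location is roughly equivalent to the standalone call sketched below. The service id, service token, app id and usage numbers are placeholders for the values the Lua code fills in at runtime.

```bash
# Rough, hedged equivalent of the report forwarded to the 3scale backend
# (the nginx location proxies it to $threescale_backend/transactions.xml as
# application/x-www-form-urlencoded). All values are placeholders.
curl -X POST "https://su1.3scale.net/transactions.xml" \
  -H "Content-Type: application/x-www-form-urlencoded" \
  --data-urlencode "service_id=SERVICE_ID" \
  --data-urlencode "service_token=SERVICE_TOKEN" \
  --data-urlencode "transactions[0][app_id]=APP_ID" \
  --data-urlencode "transactions[0][usage][post_transactions]=1" \
  --data-urlencode "transactions[0][usage][request_body_size]=512" \
  --data-urlencode "transactions[0][usage][response_body_size]=1024" \
  --data-urlencode "transactions[0][usage][total_body_size]=1536"
```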
diff --git a/k8s/apicast/container/nginx_openresty_entrypoint.bash b/k8s/apicast/container/nginx_openresty_entrypoint.bash new file mode 100755 index 00000000..d240d491 --- /dev/null +++ b/k8s/apicast/container/nginx_openresty_entrypoint.bash @@ -0,0 +1,54 @@ +#!/bin/bash +set -euo pipefail + +# Openresty vars +dns_server=`printenv DNS_SERVER` +openresty_frontend_port=`printenv OPENRESTY_FRONTEND_PORT` + + +# BigchainDB vars +bdb_backend_host=`printenv BIGCHAINDB_BACKEND_HOST` +bdb_api_port=`printenv BIGCHAINDB_API_PORT` + + +# Read the 3scale credentials from the mountpoint +# Should be mounted at the following directory +THREESCALE_CREDENTIALS_DIR=/usr/local/openresty/nginx/conf/threescale + +threescale_secret_token=`cat ${THREESCALE_CREDENTIALS_DIR}/secret-token` +threescale_service_id=`cat ${THREESCALE_CREDENTIALS_DIR}/service-id` +threescale_version_header=`cat ${THREESCALE_CREDENTIALS_DIR}/version-header` +threescale_service_token=`cat ${THREESCALE_CREDENTIALS_DIR}/service-token` + + +if [[ -z "${dns_server:?DNS_SERVER not specified. Exiting!}" || \ + -z "${openresty_frontend_port:?OPENRESTY_FRONTEND_PORT not specified. Exiting!}" || \ + -z "${bdb_backend_host:?BIGCHAINDB_BACKEND_HOST not specified. Exiting!}" || \ + -z "${bdb_api_port:?BIGCHAINDB_API_PORT not specified. Exiting!}" || \ + -z "${threescale_secret_token:?3scale secret token not specified. Exiting!}" || \ + -z "${threescale_service_id:?3scale service id not specified. Exiting!}" || \ + -z "${threescale_version_header:?3scale version header not specified. Exiting!}" || \ + -z "${threescale_service_token:?3scale service token not specified. Exiting!}" ]]; then + echo "Invalid environment settings detected. Exiting!" + exit 1 +fi + +NGINX_LUA_FILE=/usr/local/openresty/nginx/conf/nginx.lua +NGINX_CONF_FILE=/usr/local/openresty/nginx/conf/nginx.conf + +# configure the nginx.lua file with env variables +sed -i "s|SERVICE_ID|${threescale_service_id}|g" ${NGINX_LUA_FILE} +sed -i "s|THREESCALE_RESPONSE_SECRET_TOKEN|${threescale_secret_token}|g" ${NGINX_LUA_FILE} +sed -i "s|SERVICE_TOKEN|${threescale_service_token}|g" ${NGINX_LUA_FILE} + +# configure the nginx.conf file with env variables +sed -i "s|DNS_SERVER|${dns_server}|g" ${NGINX_CONF_FILE} +sed -i "s|OPENRESTY_FRONTEND_PORT|${openresty_frontend_port}|g" ${NGINX_CONF_FILE} +sed -i "s|BIGCHAINDB_BACKEND_HOST|${bdb_backend_host}|g" ${NGINX_CONF_FILE} +sed -i "s|BIGCHAINDB_API_PORT|${bdb_api_port}|g" ${NGINX_CONF_FILE} +sed -i "s|THREESCALE_RESPONSE_SECRET_TOKEN|${threescale_secret_token}|g" $NGINX_CONF_FILE +sed -i "s|SERVICE_ID|${threescale_service_id}|g" $NGINX_CONF_FILE +sed -i "s|THREESCALE_VERSION_HEADER|${threescale_version_header}|g" $NGINX_CONF_FILE +sed -i "s|SERVICE_TOKEN|${threescale_service_token}|g" $NGINX_CONF_FILE + +exec /opt/app-root/scripts/entrypoint \ No newline at end of file From d577b209e7065ed37134da3cc26aebd039131eb6 Mon Sep 17 00:00:00 2001 From: Shahbaz Nazir Date: Mon, 19 Feb 2018 11:21:05 +0100 Subject: [PATCH 2/3] WIP Signed-off-by: Shahbaz Nazir --- k8s/3scale-apicast/apicast-conf.yaml | 8 + .../apicast-dep.yaml | 20 +- .../apicast-svc.yaml | 11 +- k8s/3scale-apicast/container/Dockerfile | 16 + .../container/README.md | 0 .../container/docker_build_and_push.bash | 2 +- .../container/nginx_openresty_entrypoint.bash | 38 ++ k8s/apicast/container/Dockerfile | 14 - k8s/apicast/container/nginx.conf.template | 197 --------- k8s/apicast/container/nginx.lua.template | 416 ------------------ .../container/nginx_openresty_entrypoint.bash | 54 --- 11 files 
changed, 79 insertions(+), 697 deletions(-) create mode 100644 k8s/3scale-apicast/apicast-conf.yaml rename k8s/{apicast => 3scale-apicast}/apicast-dep.yaml (65%) rename k8s/{apicast => 3scale-apicast}/apicast-svc.yaml (82%) create mode 100644 k8s/3scale-apicast/container/Dockerfile rename k8s/{apicast => 3scale-apicast}/container/README.md (100%) rename k8s/{apicast => 3scale-apicast}/container/docker_build_and_push.bash (58%) create mode 100755 k8s/3scale-apicast/container/nginx_openresty_entrypoint.bash delete mode 100644 k8s/apicast/container/Dockerfile delete mode 100644 k8s/apicast/container/nginx.conf.template delete mode 100644 k8s/apicast/container/nginx.lua.template delete mode 100755 k8s/apicast/container/nginx_openresty_entrypoint.bash diff --git a/k8s/3scale-apicast/apicast-conf.yaml b/k8s/3scale-apicast/apicast-conf.yaml new file mode 100644 index 00000000..02fec5e1 --- /dev/null +++ b/k8s/3scale-apicast/apicast-conf.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: apicast-secret + namespace: default +type: Opaque +data: + portal-endpoint: "aHR0cHM6Ly9jNzQzOTRhM2M1NzQ5OWE5ZWIxNGI4YzYzZWZjNmVkNUBiaWdjaGFpbmRiLWFkbWluLjNzY2FsZS5uZXQ" \ No newline at end of file diff --git a/k8s/apicast/apicast-dep.yaml b/k8s/3scale-apicast/apicast-dep.yaml similarity index 65% rename from k8s/apicast/apicast-dep.yaml rename to k8s/3scale-apicast/apicast-dep.yaml index 0251100c..b1a6d1e9 100644 --- a/k8s/apicast/apicast-dep.yaml +++ b/k8s/3scale-apicast/apicast-dep.yaml @@ -15,7 +15,7 @@ spec: image: bigchaindb/nginx_3scale:unstable imagePullPolicy: IfNotPresent env: - - name: DNS_SERVER + - name: RESOLVER valueFrom: configMapKeyRef: name: vars @@ -23,14 +23,16 @@ spec: - name: THREESCALE_PORTAL_ENDPOINT valueFrom: secretKeyRef: - name: 3scale-credentials - key: conf-url + name: apicast-secret + key: portal-endpoint + - name: APICAST_LOG_LEVEL + value: "debug" + - name: APICAST_CONFIGURATION_LOADER + value: "boot" + - name: APICAST_MANAGEMENT_API + value: "debug" ports: - containerPort: 8080 protocol: TCP - name: openresty-port - resources: - limits: - cpu: 200m - memory: 768Mi - restartPolicy: Always + - containerPort: 8090 + protocol: TCP diff --git a/k8s/apicast/apicast-svc.yaml b/k8s/3scale-apicast/apicast-svc.yaml similarity index 82% rename from k8s/apicast/apicast-svc.yaml rename to k8s/3scale-apicast/apicast-svc.yaml index af0aaa56..835ab37e 100644 --- a/k8s/apicast/apicast-svc.yaml +++ b/k8s/3scale-apicast/apicast-svc.yaml @@ -16,11 +16,10 @@ spec: ports: - port: 8080 targetPort: 8080 - name: apicast-port + name: apicast-proxy + - port: 8090 + targetPort: 8090 + name: apicast-mgmt protocol: TCP - - port: 80 - targetPort: 80 - protocol: TCP - name: http-port type: ClusterIP - clusterIP: None \ No newline at end of file + clusterIP: None diff --git a/k8s/3scale-apicast/container/Dockerfile b/k8s/3scale-apicast/container/Dockerfile new file mode 100644 index 00000000..f06d23fa --- /dev/null +++ b/k8s/3scale-apicast/container/Dockerfile @@ -0,0 +1,16 @@ +FROM openresty/openresty:xenial +LABEL maintainer "dev@bigchaindb.com" +WORKDIR /opt/apicast +RUN apt-get update \ + && apt-get -y upgrade \ + && apt-get autoremove \ + && apt-get clean \ + && apt-get install wget +COPY nginx_openresty_entrypoint.bash / + +# The following ports are the values we use to run the NGINX+3scale container. 
+# 80 for http, 8080 for the 3scale api, 8888 for health-check, 27017 for +# MongoDB +EXPOSE 8080 8090 8888 + +ENTRYPOINT ["/nginx_openresty_entrypoint.bash"] \ No newline at end of file diff --git a/k8s/apicast/container/README.md b/k8s/3scale-apicast/container/README.md similarity index 100% rename from k8s/apicast/container/README.md rename to k8s/3scale-apicast/container/README.md diff --git a/k8s/apicast/container/docker_build_and_push.bash b/k8s/3scale-apicast/container/docker_build_and_push.bash similarity index 58% rename from k8s/apicast/container/docker_build_and_push.bash rename to k8s/3scale-apicast/container/docker_build_and_push.bash index 31dbb5e9..571b9e42 100755 --- a/k8s/apicast/container/docker_build_and_push.bash +++ b/k8s/3scale-apicast/container/docker_build_and_push.bash @@ -2,4 +2,4 @@ docker build -t bigchaindb/nginx_3scale:unstable . -docker push bigchaindb/nginx_3scale:unstable +#docker push bigchaindb/nginx_3scale:unstable diff --git a/k8s/3scale-apicast/container/nginx_openresty_entrypoint.bash b/k8s/3scale-apicast/container/nginx_openresty_entrypoint.bash new file mode 100755 index 00000000..5ad2f181 --- /dev/null +++ b/k8s/3scale-apicast/container/nginx_openresty_entrypoint.bash @@ -0,0 +1,38 @@ +#!/bin/bash +set -euo pipefail + +BASE_DIR=$(pwd) +APICAST_RELEASE="3.1.0" +BASE_GIT_URL="https://github.com/3scale/apicast/archive" + + +# Sanity Check +if [[ -z "${THREESCALE_PORTAL_ENDPOINT:?THREESCALE_PORTAL_ENDPOINT not specified. Exiting!}" || \ + -z "${BDB_SERVICE_ENDPOINT:?bigchaindb backend service endpoint not specified. Exiting!}" ]]; then + exit 1 +fi + +# Download and Install Apicast +wget "${BASE_GIT_URL}/v${APICAST_RELEASE}.tar.gz" +tar -xvzf "v${APICAST_RELEASE}.tar.gz" + +luarocks make apicast-${APICAST_RELEASE}/apicast/*.rockspec --tree /usr/local/openresty/luajit + + + +# Set Default config +export APICAST_CONFIGURATION_LOADER="boot" # Overriding apicast default lazy config loader +export APICAST_MANAGEMENT_API="debug" # Overriding apicast default fo 'status' mode to be + # able to update service endpoint from https://test.bigchaindb.com + # to local service endpoint + +# Print Current Configs +echo "Apicast Release: ${APICAST_RELEASE}" +echo "Apicast Download URL: ${BASE_GIT_URL}" +echo "APICAST_CONFIGURATION_LOADER: ${APICAST_CONFIGURATION_LOADER}" +echo "BDB_SERVICE_ENDPOINT: ${BDB_SERVICE_ENDPOINT}" + + +# Start nginx +echo "INFO: starting nginx..." +exec apicast-${APICAST_RELEASE}/apicast/bin/apicast -b -e production -v -v -v diff --git a/k8s/apicast/container/Dockerfile b/k8s/apicast/container/Dockerfile deleted file mode 100644 index 394a72f8..00000000 --- a/k8s/apicast/container/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM registry.access.redhat.com/3scale-amp20/apicast-gateway -LABEL maintainer "dev@bigchaindb.com" - -COPY nginx.conf.template /opt/app-root/src/sites.d/nginx.conf -COPY nginx.lua.template /opt/app-root/src/sites.d/nginx.lua -COPY config.json /opt/app-root/src/conf/config.json -COPY nginx_openresty_entrypoint.bash /opt/app-root/scripts/ - -ENV THREESCALE_CONFIG_FILE "/opt/app-root/src/conf/config.json" -# The following ports are the values we use to run the NGINX+3scale container. 
-# 80 for http, 8080 for the 3scale api, 8888 for health-check, 27017 for -# MongoDB -EXPOSE 80 8080 8888 27017 -ENTRYPOINT ["/nginx_openresty_entrypoint.bash"] diff --git a/k8s/apicast/container/nginx.conf.template b/k8s/apicast/container/nginx.conf.template deleted file mode 100644 index 9a4e56bc..00000000 --- a/k8s/apicast/container/nginx.conf.template +++ /dev/null @@ -1,197 +0,0 @@ -worker_processes 2; -daemon off; -user nobody nogroup; -pid /tmp/nginx.pid; -error_log /usr/local/openresty/nginx/logs/error.log; -env THREESCALE_DEPLOYMENT_ENV; - -events { - worker_connections 256; - accept_mutex on; - use epoll; -} - -http { - lua_shared_dict api_keys 10m; - server_names_hash_bucket_size 128; - lua_package_path ";;$prefix/?.lua;$prefix/conf/?.lua"; - init_by_lua 'math.randomseed(ngx.time()) ; cjson = require("cjson")'; - access_log /usr/local/openresty/nginx/logs/access.log combined buffer=16k flush=5s; - - # allow 10 req/sec from the same IP address, and store the counters in a - # `zone` or shared memory location tagged as 'one'. - limit_req_zone $binary_remote_addr zone=one:10m rate=10r/s; - # enable logging when requests are being throttled - limit_req_log_level notice; - - # the http status code to return to the client; 429 is for TooManyRequests, - # ref. RFC 6585 - limit_req_status 429; - - resolver DNS_SERVER valid=30s ipv6=off; - - map $remote_addr $bdb_backend { - default BIGCHAINDB_BACKEND_HOST; - } - - upstream backend_SERVICE_ID { - server localhost:9999 max_fails=5 fail_timeout=30; - } - - # Our frontend API server that accepts requests from the external world and - # takes care of authentication and authorization. If auth is successful, it - # forwards the request to the backend_SERVICE_ID upstream where a consortium - # can run a BDB cluster. - server { - lua_code_cache on; - listen OPENRESTY_FRONTEND_PORT; - keepalive_timeout 60s; - - underscores_in_headers on; - set_by_lua $deployment 'return os.getenv("THREESCALE_DEPLOYMENT_ENV")'; - set $threescale_backend "https://su1.3scale.net"; - #set $threescale_backend "http://su1.3scale.net"; - #set $threescale_backend "https://su1.3scale.net:443"; - #set $threescale_backend "https://echo-api.3scale.net"; - - # `slowloris` attack mitigation settings - client_body_timeout 10s; - client_header_timeout 10s; - - location = /out_of_band_authrep_action { - internal; - proxy_pass_request_headers off; - set $service_token "SERVICE_TOKEN"; - content_by_lua "require('nginx').post_action_content()"; - } - - # 3scale auth api that takes the auth credentials and metrics as input, - # and returns 200 OK if both the credentials match and the user has not - # exceeded the limits in his application plan. - location = /threescale_auth { - internal; - set $service_token "SERVICE_TOKEN"; - proxy_pass $threescale_backend/transactions/authorize.xml?service_token=$service_token&service_id=$service_id&$usage&$credentials&log%5Bcode%5D=$arg_code&log%5Brequest%5D=$arg_req&log%5Bresponse%5D=$arg_resp; - proxy_set_header Host "su1.3scale.net"; - #proxy_set_header Host "echo-api.3scale.net"; - proxy_set_header X-3scale-User-Agent "nginx$deployment"; - proxy_set_header X-3scale-Version "THREESCALE_VERSION_HEADER"; - } - - # 3scale reporting api that takes the metrics data and persists the metrics - # in the 3scale backend. 
- location = /threescale_report { - internal; - set $service_token "SERVICE_TOKEN"; - proxy_pass $threescale_backend/transactions.xml; - proxy_set_header Host "su1.3scale.net"; - #proxy_set_header Host "echo-api.3scale.net"; - # We have a bug in lua-nginx module that does not set - # Content-Type from lua script - proxy_pass_request_headers off; - proxy_set_header Content-Type "application/x-www-form-urlencoded"; - proxy_set_header X-3scale-User-Agent "nginx$deployment"; - proxy_set_header X-3scale-Version "THREESCALE_VERSION_HEADER"; - } - - location / { - proxy_ignore_client_abort on; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-3scale-proxy-secret-token $secret_token; - - # limit requests from the same client, allow `burst` to 20 r/s, - # `nodelay` or drop connection immediately in case it exceeds this - # threshold. - limit_req zone=one burst=20 nodelay; - - # We do not need the GET handling here as it's done in the other NGINX - # module - #if ($request_method = GET ) { - # proxy_pass http://$bdb_backend:BIGCHAINDB_API_PORT; - #} - - if ($request_method = POST ) { - set $service_token null; - set $cached_key null; - set $credentials null; - set $usage null; - set $service_id SERVICE_ID; - set $proxy_pass null; - set $secret_token null; - set $resp_body null; - set $resp_headers null; - access_by_lua "require('nginx').access()"; - body_filter_by_lua 'ngx.ctx.buffered = (ngx.ctx.buffered or "") .. string.sub(ngx.arg[1], 1, 1000) - if ngx.arg[2] then ngx.var.resp_body = ngx.ctx.buffered end'; - header_filter_by_lua 'ngx.var.resp_headers = cjson.encode(ngx.resp.get_headers())'; - - add_header 'Access-Control-Allow-Origin' '*'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; - add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; - add_header 'Access-Control-Expose-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range'; - - proxy_pass $proxy_pass ; - post_action /out_of_band_authrep_action; - } - - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' '*'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; - add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range,app_key,app_id'; - add_header 'Access-Control-Max-Age' 43200; - add_header 'Content-Type' 'text/plain charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } - } - } - - # Our backend server block that accepts requests from the nginx proxy and - # forwards it to instances of BDB cluster. We currently run only a single - # instance. 
- server { - sendfile on; - - listen 9999; - - # max client request body size: avg transaction size - client_max_body_size 15k; - - # keepalive connection settings - keepalive_timeout 60s; - - # `slowloris` attack mitigation settings - client_body_timeout 10s; - client_header_timeout 10s; - - if ( $http_x_3scale_proxy_secret_token != "THREESCALE_RESPONSE_SECRET_TOKEN" ) { - return 403; - } - - location / { - try_files $uri @proxy_to_app; - } - - location @proxy_to_app { - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # enable the following line if and only if you use HTTPS - proxy_set_header X-Forwarded-Proto https; - proxy_set_header Host $http_host; - - # we don't want nginx trying to do something clever with - # redirects, we set the Host: header above already. - proxy_redirect off; - proxy_pass http://$bdb_backend:BIGCHAINDB_API_PORT; - - # limit requests from the same client, allow `burst` to 20 r/s on avg, - # `nodelay` or drop connection immediately in case it exceeds this - # threshold. - limit_req zone=one burst=20 nodelay; - } - - error_page 500 502 503 504 /50x.html; - location = /50x.html { - root /usr/local/openresty/nginx/html/50x.html; - } - } -} diff --git a/k8s/apicast/container/nginx.lua.template b/k8s/apicast/container/nginx.lua.template deleted file mode 100644 index 64baeaa1..00000000 --- a/k8s/apicast/container/nginx.lua.template +++ /dev/null @@ -1,416 +0,0 @@ --- -*- mode: lua; -*- --- Generated on: 2017-04-10 14:41:18 +0000 -- --- Version: --- Error Messages per service - --- Ref: https://github.com/openresty/lua-nginx-module --- Ref: https://ipdbtestnet-admin.3scale.net/p/admin/api_docs --- Ref: http://nginx.org/en/docs/debugging_log.html - -local custom_config = false - -local _M = { - ['services'] = { - ['SERVICE_ID'] = { - error_auth_failed = 'Authentication failed', - error_auth_missing = 'Authentication parameters missing', - auth_failed_headers = 'text/plain; charset=us-ascii', - auth_missing_headers = 'text/plain; charset=us-ascii', - error_no_match = 'No Mapping Rule matched', - no_match_headers = 'text/plain; charset=us-ascii', - no_match_status = 404, - auth_failed_status = 403, - auth_missing_status = 403, - secret_token = 'THREESCALE_RESPONSE_SECRET_TOKEN', - get_credentials = function(service, params) - return ( - (params.app_id and params.app_key) - ) or error_no_credentials(service) - end, - extract_usage = function (service, request) - local method, url = unpack(string.split(request," ")) - local path, querystring = unpack(string.split(url, "?")) - local usage_t = {} - local matched_rules = {} - - local args = get_auth_params(nil, method) - - for i,r in ipairs(service.rules) do - check_rule({path=path, method=method, args=args}, r, usage_t, matched_rules) - end - - -- if there was no match, usage is set to nil and it will respond a 404, this behavior can be changed - return usage_t, table.concat(matched_rules, ", ") - end, - rules = { - { - method = 'POST', - pattern = '/api/{version}/transactions$', - parameters = { 'version' }, - querystring_params = function(args) - return true - end, - system_name = 'hits', - delta = 1 - }, - { - method = 'POST', - pattern = '/api/{version}/transactions$', - parameters = { 'version' }, - querystring_params = function(args) - return true - end, - system_name = 'request_body_size', - delta = 1 - }, - { - method = 'POST', - pattern = '/api/{version}/transactions$', - parameters = { 'version' }, - querystring_params = function(args) - return true - end, - system_name = 'response_body_size', - delta 
= 1 - }, - { - method = 'POST', - pattern = '/api/{version}/transactions$', - parameters = { 'version' }, - querystring_params = function(args) - return true - end, - system_name = 'post_transactions', - delta = 1 - }, - { - method = 'POST', - pattern = '/api/{version}/transactions$', - parameters = { 'version' }, - querystring_params = function(args) - return true - end, - system_name = 'total_body_size', - delta = 1 - }, - } -}, - } -} - --- Error Codes -function error_no_credentials(service) - ngx.status = service.auth_missing_status - ngx.header.content_type = service.auth_missing_headers - ngx.print(service.error_auth_missing) - ngx.exit(ngx.HTTP_OK) -end - -function error_authorization_failed(service) - ngx.status = service.auth_failed_status - ngx.header.content_type = service.auth_failed_headers - ngx.print(service.error_auth_failed) - ngx.exit(ngx.HTTP_OK) -end - -function error_no_match(service) - ngx.status = service.no_match_status - ngx.header.content_type = service.no_match_headers - ngx.print(service.error_no_match) - ngx.exit(ngx.HTTP_OK) -end --- End Error Codes - --- Aux function to split a string - -function string:split(delimiter) - local result = { } - local from = 1 - local delim_from, delim_to = string.find( self, delimiter, from ) - if delim_from == nil then return {self} end - while delim_from do - table.insert( result, string.sub( self, from , delim_from-1 ) ) - from = delim_to + 1 - delim_from, delim_to = string.find( self, delimiter, from ) - end - table.insert( result, string.sub( self, from ) ) - return result -end - -function first_values(a) - r = {} - for k,v in pairs(a) do - if type(v) == "table" then - r[k] = v[1] - else - r[k] = v - end - end - return r -end - -function set_or_inc(t, name, delta) - return (t[name] or 0) + delta -end - -function build_querystring_formatter(fmt) - return function (query) - local function kvmap(f, t) - local res = {} - for k, v in pairs(t) do - table.insert(res, f(k, v)) - end - return res - end - - return table.concat(kvmap(function(k,v) return string.format(fmt, k, v) end, query or {}), "&") - end -end - -local build_querystring = build_querystring_formatter("usage[%s]=%s") -local build_query = build_querystring_formatter("%s=%s") - -function regexpify(path) - return path:gsub('?.*', ''):gsub("{.-}", '([\\w_.-]+)'):gsub("%.", "\\.") -end - -function check_rule(req, rule, usage_t, matched_rules) - local param = {} - local p = regexpify(rule.pattern) - local m = ngx.re.match(req.path, - string.format("^%s",p)) - if m and req.method == rule.method then - local args = req.args - if rule.querystring_params(args) then -- may return an empty table - -- when no querystringparams - -- in the rule. it's fine - for i,p in ipairs(rule.parameters) do - param[p] = m[i] - end - - table.insert(matched_rules, rule.pattern) - usage_t[rule.system_name] = set_or_inc(usage_t, rule.system_name, rule.delta) - end - end -end - ---[[ - Authorization logic - NOTE: We do not use any of the authorization logic defined in the template. - We use custom authentication and authorization logic defined in the - custom_app_id_authorize() function. 
-]]-- - -function get_auth_params(where, method) - local params = {} - if where == "headers" then - params = ngx.req.get_headers() - elseif method == "GET" then - params = ngx.req.get_uri_args() - else - ngx.req.read_body() - params = ngx.req.get_post_args() - end - return first_values(params) -end - -function get_debug_value() - local h = ngx.req.get_headers() - if h["X-3scale-debug"] == 'SERVICE_TOKEN' then - return true - else - return false - end -end - -function _M.authorize(auth_strat, params, service) - if auth_strat == 'oauth' then - oauth(params, service) - else - authrep(params, service) - end -end - -function oauth(params, service) - ngx.var.cached_key = ngx.var.cached_key .. ":" .. ngx.var.usage - local access_tokens = ngx.shared.api_keys - local is_known = access_tokens:get(ngx.var.cached_key) - - if is_known ~= 200 then - local res = ngx.location.capture("/threescale_oauth_authrep", { share_all_vars = true }) - - -- IN HERE YOU DEFINE THE ERROR IF CREDENTIALS ARE PASSED, BUT THEY ARE NOT VALID - if res.status ~= 200 then - access_tokens:delete(ngx.var.cached_key) - ngx.status = res.status - ngx.header.content_type = "application/json" - ngx.var.cached_key = nil - error_authorization_failed(service) - else - access_tokens:set(ngx.var.cached_key,200) - end - - ngx.var.cached_key = nil - end -end - -function authrep(params, service) - ngx.var.cached_key = ngx.var.cached_key .. ":" .. ngx.var.usage - local api_keys = ngx.shared.api_keys - local is_known = api_keys:get(ngx.var.cached_key) - - if is_known ~= 200 then - local res = ngx.location.capture("/threescale_authrep", { share_all_vars = true }) - - -- IN HERE YOU DEFINE THE ERROR IF CREDENTIALS ARE PASSED, BUT THEY ARE NOT VALID - if res.status ~= 200 then - -- remove the key, if it's not 200 let's go the slow route, to 3scale's backend - api_keys:delete(ngx.var.cached_key) - ngx.status = res.status - ngx.header.content_type = "application/json" - ngx.var.cached_key = nil - error_authorization_failed(service) - else - api_keys:set(ngx.var.cached_key,200) - end - ngx.var.cached_key = nil - end -end - -function _M.access() - local params = {} - local host = ngx.req.get_headers()["Host"] - local auth_strat = "" - local service = {} - local usage = {} - local matched_patterns = '' - - if ngx.status == 403 then - ngx.say("Throttling due to too many requests") - ngx.exit(403) - end - - if ngx.var.service_id == 'SERVICE_ID' then - local parameters = get_auth_params("headers", string.split(ngx.var.request, " ")[1] ) - service = _M.services['SERVICE_ID'] -- - ngx.var.secret_token = service.secret_token - params.app_id = parameters["app_id"] - params.app_key = parameters["app_key"] -- or "" -- Uncoment the first part if you want to allow not passing app_key - service.get_credentials(service, params) - ngx.var.cached_key = "SERVICE_ID" .. ":" .. params.app_id ..":".. params.app_key - auth_strat = "2" - ngx.var.service_id = "SERVICE_ID" - ngx.var.proxy_pass = "http://backend_SERVICE_ID" - usage, matched_patterns = service:extract_usage(ngx.var.request) - end - - usage['post_transactions'] = 0 - usage['request_body_size'] = 0 - usage['total_body_size'] = 0 - usage['response_body_size'] = 0 - ngx.var.credentials = build_query(params) - ngx.var.usage = build_querystring(usage) - - -- WHAT TO DO IF NO USAGE CAN BE DERIVED FROM THE REQUEST. 
- if ngx.var.usage == '' then - ngx.header["X-3scale-matched-rules"] = '' - error_no_match(service) - end - - if get_debug_value() then - ngx.header["X-3scale-matched-rules"] = matched_patterns - ngx.header["X-3scale-credentials"] = ngx.var.credentials - ngx.header["X-3scale-usage"] = ngx.var.usage - ngx.header["X-3scale-hostname"] = ngx.var.hostname - end - _M.custom_app_id_authorize(params, service) -end - -function _M.custom_app_id_authorize(params, service) - ngx.var.cached_key = ngx.var.cached_key .. ":" .. ngx.var.usage - local api_keys = ngx.shared.api_keys - local res = ngx.location.capture("/threescale_auth", { share_all_vars = true }) - if res.status ~= 200 then - ngx.status = res.status - ngx.header.content_type = "application/json" - ngx.var.cached_key = nil - error_authorization_failed(service) - end - ngx.var.cached_key = nil -end - -function _M.post_action_content() - local report_data = {} - - -- increment POST count - report_data['post_transactions'] = 1 - - -- NOTE: When we are querying for the length of the request here, we already - -- have the complete request data with us and hence can just use the len() - -- function to get the size of the payload in bytes. - -- However, we might not have a complete response from the backend at this - -- stage (esp. if it's a large response size). So, we decipher the payload - -- size by peeking into the content length header of the response. - -- Otherwise, nginx will have to buffer every response and then calculate - -- response payload size. - - -- req data size - local req_data = ngx.req.get_body_data() - if req_data then - report_data['request_body_size'] = req_data:len() - else - report_data['request_body_size'] = 0 - end - - -- res data size - local all_headers = cjson.decode(ngx.var.resp_headers) - local variable_header = "content-length" --<-- case sensitive - if all_headers[variable_header] then - report_data['response_body_size'] = all_headers[variable_header] - else - report_data['response_body_size'] = 0 - end - - -- total data size - report_data['total_body_size'] = report_data['request_body_size'] + report_data['response_body_size'] - - -- get the app_id - local app_id = "" - local credentials = ngx.var.credentials:split("&") - for i in pairs(credentials) do - if credentials[i]:match('app_id') then - local temp = credentials[i]:split("=") - app_id = temp[2] - end - end - - -- form the payload to report to 3scale - local report = {} - report['service_id'] = ngx.var.service_id - report['service_token'] = ngx.var.service_token - report['transactions[0][app_id]'] = app_id - report['transactions[0][usage][post_transactions]'] = report_data['post_transactions'] - report['transactions[0][usage][request_body_size]'] = report_data['request_body_size'] - report['transactions[0][usage][response_body_size]'] = report_data['response_body_size'] - report['transactions[0][usage][total_body_size]'] = report_data['total_body_size'] - local res1 = ngx.location.capture("/threescale_report", {method = ngx.HTTP_POST, body = ngx.encode_args(report), share_all_vars = true }) - --ngx.log(0, ngx.encode_args(report)) - ngx.log(0, "Status: "..res1.status) - ngx.log(0, "Body: "..res1.body) - --if res1.status ~= 200 then - -- local api_keys = ngx.shared.api_keys - -- api_keys:delete(cached_key) - --end - ngx.exit(ngx.HTTP_OK) -end - -if custom_config then - local ok, c = pcall(function() return require(custom_config) end) - if ok and type(c) == 'table' and type(c.setup) == 'function' then - c.setup(_M) - end -end - -return _M - --- END OF SCRIPT 
diff --git a/k8s/apicast/container/nginx_openresty_entrypoint.bash b/k8s/apicast/container/nginx_openresty_entrypoint.bash deleted file mode 100755 index d240d491..00000000 --- a/k8s/apicast/container/nginx_openresty_entrypoint.bash +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -set -euo pipefail - -# Openresty vars -dns_server=`printenv DNS_SERVER` -openresty_frontend_port=`printenv OPENRESTY_FRONTEND_PORT` - - -# BigchainDB vars -bdb_backend_host=`printenv BIGCHAINDB_BACKEND_HOST` -bdb_api_port=`printenv BIGCHAINDB_API_PORT` - - -# Read the 3scale credentials from the mountpoint -# Should be mounted at the following directory -THREESCALE_CREDENTIALS_DIR=/usr/local/openresty/nginx/conf/threescale - -threescale_secret_token=`cat ${THREESCALE_CREDENTIALS_DIR}/secret-token` -threescale_service_id=`cat ${THREESCALE_CREDENTIALS_DIR}/service-id` -threescale_version_header=`cat ${THREESCALE_CREDENTIALS_DIR}/version-header` -threescale_service_token=`cat ${THREESCALE_CREDENTIALS_DIR}/service-token` - - -if [[ -z "${dns_server:?DNS_SERVER not specified. Exiting!}" || \ - -z "${openresty_frontend_port:?OPENRESTY_FRONTEND_PORT not specified. Exiting!}" || \ - -z "${bdb_backend_host:?BIGCHAINDB_BACKEND_HOST not specified. Exiting!}" || \ - -z "${bdb_api_port:?BIGCHAINDB_API_PORT not specified. Exiting!}" || \ - -z "${threescale_secret_token:?3scale secret token not specified. Exiting!}" || \ - -z "${threescale_service_id:?3scale service id not specified. Exiting!}" || \ - -z "${threescale_version_header:?3scale version header not specified. Exiting!}" || \ - -z "${threescale_service_token:?3scale service token not specified. Exiting!}" ]]; then - echo "Invalid environment settings detected. Exiting!" - exit 1 -fi - -NGINX_LUA_FILE=/usr/local/openresty/nginx/conf/nginx.lua -NGINX_CONF_FILE=/usr/local/openresty/nginx/conf/nginx.conf - -# configure the nginx.lua file with env variables -sed -i "s|SERVICE_ID|${threescale_service_id}|g" ${NGINX_LUA_FILE} -sed -i "s|THREESCALE_RESPONSE_SECRET_TOKEN|${threescale_secret_token}|g" ${NGINX_LUA_FILE} -sed -i "s|SERVICE_TOKEN|${threescale_service_token}|g" ${NGINX_LUA_FILE} - -# configure the nginx.conf file with env variables -sed -i "s|DNS_SERVER|${dns_server}|g" ${NGINX_CONF_FILE} -sed -i "s|OPENRESTY_FRONTEND_PORT|${openresty_frontend_port}|g" ${NGINX_CONF_FILE} -sed -i "s|BIGCHAINDB_BACKEND_HOST|${bdb_backend_host}|g" ${NGINX_CONF_FILE} -sed -i "s|BIGCHAINDB_API_PORT|${bdb_api_port}|g" ${NGINX_CONF_FILE} -sed -i "s|THREESCALE_RESPONSE_SECRET_TOKEN|${threescale_secret_token}|g" $NGINX_CONF_FILE -sed -i "s|SERVICE_ID|${threescale_service_id}|g" $NGINX_CONF_FILE -sed -i "s|THREESCALE_VERSION_HEADER|${threescale_version_header}|g" $NGINX_CONF_FILE -sed -i "s|SERVICE_TOKEN|${threescale_service_token}|g" $NGINX_CONF_FILE - -exec /opt/app-root/scripts/entrypoint \ No newline at end of file From 9fa4be55037ccf047e12bee4df73dc5719ee595f Mon Sep 17 00:00:00 2001 From: Shahbaz Nazir Date: Wed, 21 Feb 2018 16:43:28 +0100 Subject: [PATCH 3/3] Added update config before starting nginx Signed-off-by: Shahbaz Nazir --- k8s/3scale-apicast/apicast-conf.yaml | 8 ---- k8s/3scale-apicast/apicast-dep.yaml | 34 ++++++++++++----- k8s/3scale-apicast/apicast-svc.yaml | 2 +- .../container/docker_build_and_push.bash | 2 +- .../container/nginx_openresty_entrypoint.bash | 37 +++++++++---------- 5 files changed, 44 insertions(+), 39 deletions(-) delete mode 100644 k8s/3scale-apicast/apicast-conf.yaml diff --git a/k8s/3scale-apicast/apicast-conf.yaml b/k8s/3scale-apicast/apicast-conf.yaml 

diff --git a/k8s/3scale-apicast/apicast-conf.yaml b/k8s/3scale-apicast/apicast-conf.yaml
deleted file mode 100644
index 02fec5e1..00000000
--- a/k8s/3scale-apicast/apicast-conf.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-apiVersion: v1
-kind: Secret
-metadata:
-  name: apicast-secret
-  namespace: default
-type: Opaque
-data:
-  portal-endpoint: "aHR0cHM6Ly9jNzQzOTRhM2M1NzQ5OWE5ZWIxNGI4YzYzZWZjNmVkNUBiaWdjaGFpbmRiLWFkbWluLjNzY2FsZS5uZXQ"
\ No newline at end of file
diff --git a/k8s/3scale-apicast/apicast-dep.yaml b/k8s/3scale-apicast/apicast-dep.yaml
index b1a6d1e9..9fab4967 100644
--- a/k8s/3scale-apicast/apicast-dep.yaml
+++ b/k8s/3scale-apicast/apicast-dep.yaml
@@ -13,24 +13,38 @@ spec:
       containers:
       - name: nginx-openresty
         image: bigchaindb/nginx_3scale:unstable
-        imagePullPolicy: IfNotPresent
+        imagePullPolicy: Always
         env:
-        - name: RESOLVER
-          valueFrom:
-            configMapKeyRef:
-              name: vars
-              key: cluster-dns-server-ip
         - name: THREESCALE_PORTAL_ENDPOINT
           valueFrom:
             secretKeyRef:
               name: apicast-secret
               key: portal-endpoint
+        - name: RESOLVER
+          valueFrom:
+            configMapKeyRef:
+              name: vars
+              key: cluster-dns-server-ip
         - name: APICAST_LOG_LEVEL
-          value: "debug"
-        - name: APICAST_CONFIGURATION_LOADER
-          value: "boot"
+          valueFrom:
+            configMapKeyRef:
+              name: apicast-config
+              key: api-log-level
         - name: APICAST_MANAGEMENT_API
-          value: "debug"
+          valueFrom:
+            configMapKeyRef:
+              name: apicast-config
+              key: mgmt-api-mode
+        - name: BIGCHAINDB_BACKEND_HOST
+          valueFrom:
+            configMapKeyRef:
+              name: vars
+              key: bdb-instance-name
+        - name: BIGCHAINDB_API_PORT
+          valueFrom:
+            configMapKeyRef:
+              name: vars
+              key: bigchaindb-api-port
         ports:
         - containerPort: 8080
           protocol: TCP
diff --git a/k8s/3scale-apicast/apicast-svc.yaml b/k8s/3scale-apicast/apicast-svc.yaml
index 835ab37e..d8259151 100644
--- a/k8s/3scale-apicast/apicast-svc.yaml
+++ b/k8s/3scale-apicast/apicast-svc.yaml
@@ -16,7 +16,7 @@ spec:
   ports:
   - port: 8080
     targetPort: 8080
-    name: apicast-proxy
+    name: apicast-svc
   - port: 8090
     targetPort: 8090
     name: apicast-mgmt
diff --git a/k8s/3scale-apicast/container/docker_build_and_push.bash b/k8s/3scale-apicast/container/docker_build_and_push.bash
index 571b9e42..31dbb5e9 100755
--- a/k8s/3scale-apicast/container/docker_build_and_push.bash
+++ b/k8s/3scale-apicast/container/docker_build_and_push.bash
@@ -2,4 +2,4 @@
 
 docker build -t bigchaindb/nginx_3scale:unstable .
 
-#docker push bigchaindb/nginx_3scale:unstable
+docker push bigchaindb/nginx_3scale:unstable
diff --git a/k8s/3scale-apicast/container/nginx_openresty_entrypoint.bash b/k8s/3scale-apicast/container/nginx_openresty_entrypoint.bash
index 5ad2f181..1511b63c 100755
--- a/k8s/3scale-apicast/container/nginx_openresty_entrypoint.bash
+++ b/k8s/3scale-apicast/container/nginx_openresty_entrypoint.bash
@@ -5,34 +5,33 @@ BASE_DIR=$(pwd)
 APICAST_RELEASE="3.1.0"
 BASE_GIT_URL="https://github.com/3scale/apicast/archive"
 
-
-# Sanity Check
-if [[ -z "${THREESCALE_PORTAL_ENDPOINT:?THREESCALE_PORTAL_ENDPOINT not specified. Exiting!}" || \
-      -z "${BDB_SERVICE_ENDPOINT:?bigchaindb backend service endpoint not specified. Exiting!}" ]]; then
-    exit 1
-fi
-
-# Download and Install Apicast
-wget "${BASE_GIT_URL}/v${APICAST_RELEASE}.tar.gz"
-tar -xvzf "v${APICAST_RELEASE}.tar.gz"
-
-luarocks make apicast-${APICAST_RELEASE}/apicast/*.rockspec --tree /usr/local/openresty/luajit
-
-
-
 # Set Default config
 export APICAST_CONFIGURATION_LOADER="boot"    # Overriding apicast default lazy config loader
 export APICAST_MANAGEMENT_API="debug"         # Overriding apicast default fo 'status' mode to be
-                                              # able to update service endpoint from https://test.bigchaindb.com
-                                              # to local service endpoint
+                                              # able to update the bigchaindb backend service endpoint
+
+# Sanity Check
+if [[ -z "${THREESCALE_PORTAL_ENDPOINT:?THREESCALE_PORTAL_ENDPOINT not specified. Exiting!}" || \
+      -z "${BIGCHAINDB_BACKEND_HOST:?BIGCHAINDB_BACKEND_HOST not specified. Exiting!}" || \
+      -z "${BIGCHAINDB_API_PORT:?BIGCHAINDB_API_PORT not specified. Exiting!}" ]]; then
+    exit 1
+fi
+
+export THREESCALE_PORTAL_ENDPOINT=`printenv THREESCALE_PORTAL_ENDPOINT`
 
 # Print Current Configs
 echo "Apicast Release: ${APICAST_RELEASE}"
 echo "Apicast Download URL: ${BASE_GIT_URL}"
 echo "APICAST_CONFIGURATION_LOADER: ${APICAST_CONFIGURATION_LOADER}"
-echo "BDB_SERVICE_ENDPOINT: ${BDB_SERVICE_ENDPOINT}"
+echo "BIGCHAINDB_BACKEND_HOST: ${BIGCHAINDB_BACKEND_HOST}"
+echo "BIGCHAINDB_API_PORT: ${BIGCHAINDB_API_PORT}"
 
+# Download and Install Apicast
+wget "${BASE_GIT_URL}/v${APICAST_RELEASE}.tar.gz"
+tar -xvzf "v${APICAST_RELEASE}.tar.gz"
+
+eval luarocks make apicast-${APICAST_RELEASE}/apicast/*.rockspec --tree /usr/local/openresty/luajit
 
 # Start nginx
 echo "INFO: starting nginx..."
-exec apicast-${APICAST_RELEASE}/apicast/bin/apicast -b -e production -v -v -v
+exec apicast-${APICAST_RELEASE}/apicast/bin/apicast -b -e production -v -v -v
\ No newline at end of file
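The reworked Deployment now sources its environment from the `vars` and `apicast-config` ConfigMaps rather than hard-coded values. A hedged sketch of how those objects could be created before applying the manifests, if they do not already exist; the key names mirror the env wiring above, and every value is an illustrative placeholder:

```bash
# Sketch only: ConfigMaps backing the env vars in apicast-dep.yaml.
# Key names follow the manifest; all values below are placeholders.
kubectl create configmap apicast-config \
  --from-literal=api-log-level=debug \
  --from-literal=mgmt-api-mode=debug

kubectl create configmap vars \
  --from-literal=cluster-dns-server-ip=10.0.0.10 \
  --from-literal=bdb-instance-name=bdb-instance-0 \
  --from-literal=bigchaindb-api-port=9984

# Roll out the gateway with the updated manifests.
kubectl apply -f k8s/3scale-apicast/apicast-dep.yaml
kubectl apply -f k8s/3scale-apicast/apicast-svc.yaml
```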