mirror of https://github.com/bigchaindb/bigchaindb.git (synced 2024-10-13 13:34:05 +00:00)

WIP

Signed-off-by: Shahbaz Nazir <shahbaz@bigchaindb.com>

This commit is contained in:
parent 88d4659dde
commit d577b209e7
k8s/3scale-apicast/apicast-conf.yaml (new file, +8)
@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: apicast-secret
  namespace: default
type: Opaque
data:
  portal-endpoint: "aHR0cHM6Ly9jNzQzOTRhM2M1NzQ5OWE5ZWIxNGI4YzYzZWZjNmVkNUBiaWdjaGFpbmRiLWFkbWluLjNzY2FsZS5uZXQ"
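The portal-endpoint value above is the base64-encoded URL of the 3scale admin portal (including its access token). A minimal sketch of how such a value can be produced, using placeholder credentials rather than the real ones behind this commit:

# Encode the portal URL without a trailing newline so it can be pasted into the Secret's data field.
PORTAL_URL="https://<ACCESS_TOKEN>@<TENANT>-admin.3scale.net"
echo -n "${PORTAL_URL}" | base64 | tr -d '\n'

# Alternatively, let kubectl handle the base64 encoding:
kubectl create secret generic apicast-secret --namespace default \
  --from-literal=portal-endpoint="${PORTAL_URL}"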
@@ -15,7 +15,7 @@ spec:
        image: bigchaindb/nginx_3scale:unstable
        imagePullPolicy: IfNotPresent
        env:
        - name: DNS_SERVER
        - name: RESOLVER
          valueFrom:
            configMapKeyRef:
              name: vars
@@ -23,14 +23,16 @@ spec:
        - name: THREESCALE_PORTAL_ENDPOINT
          valueFrom:
            secretKeyRef:
              name: 3scale-credentials
              key: conf-url
              name: apicast-secret
              key: portal-endpoint
        - name: APICAST_LOG_LEVEL
          value: "debug"
        - name: APICAST_CONFIGURATION_LOADER
          value: "boot"
        - name: APICAST_MANAGEMENT_API
          value: "debug"
        ports:
        - containerPort: 8080
          protocol: TCP
          name: openresty-port
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
      restartPolicy: Always
        - containerPort: 8090
          protocol: TCP
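With this change the gateway reads THREESCALE_PORTAL_ENDPOINT from the apicast-secret Secret instead of the old 3scale-credentials one. A quick way to confirm the referenced key exists before rolling out the Deployment (a sketch, assuming kubectl access to the default namespace):

# Print the decoded portal endpoint the container will receive.
kubectl get secret apicast-secret --namespace default \
  -o jsonpath='{.data.portal-endpoint}' | base64 --decode; echo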
@@ -16,11 +16,10 @@ spec:
  ports:
  - port: 8080
    targetPort: 8080
    name: apicast-port
    name: apicast-proxy
  - port: 8090
    targetPort: 8090
    name: apicast-mgmt
    protocol: TCP
  - port: 80
    targetPort: 80
    protocol: TCP
    name: http-port
  type: ClusterIP
  clusterIP: None
k8s/3scale-apicast/container/Dockerfile (new file, +16)
@@ -0,0 +1,16 @@
FROM openresty/openresty:xenial
LABEL maintainer "dev@bigchaindb.com"
WORKDIR /opt/apicast
RUN apt-get update \
    && apt-get -y upgrade \
    && apt-get autoremove \
    && apt-get clean \
    && apt-get install -y wget
COPY nginx_openresty_entrypoint.bash /

# Ports exposed by the apicast container: 8080 for proxied API traffic,
# 8090 for the apicast management API, 8888 for health checks.
EXPOSE 8080 8090 8888

ENTRYPOINT ["/nginx_openresty_entrypoint.bash"]
@@ -2,4 +2,4 @@

docker build -t bigchaindb/nginx_3scale:unstable .

docker push bigchaindb/nginx_3scale:unstable
#docker push bigchaindb/nginx_3scale:unstable
k8s/3scale-apicast/container/nginx_openresty_entrypoint.bash (new executable file, +38)
@@ -0,0 +1,38 @@
#!/bin/bash
set -euo pipefail

BASE_DIR=$(pwd)
APICAST_RELEASE="3.1.0"
BASE_GIT_URL="https://github.com/3scale/apicast/archive"


# Sanity check
if [[ -z "${THREESCALE_PORTAL_ENDPOINT:?THREESCALE_PORTAL_ENDPOINT not specified. Exiting!}" || \
      -z "${BDB_SERVICE_ENDPOINT:?bigchaindb backend service endpoint not specified. Exiting!}" ]]; then
  exit 1
fi

# Download and install apicast
wget "${BASE_GIT_URL}/v${APICAST_RELEASE}.tar.gz"
tar -xvzf "v${APICAST_RELEASE}.tar.gz"

luarocks make apicast-${APICAST_RELEASE}/apicast/*.rockspec --tree /usr/local/openresty/luajit


# Set default config
export APICAST_CONFIGURATION_LOADER="boot"  # Overriding apicast's default lazy config loader
export APICAST_MANAGEMENT_API="debug"       # Overriding apicast's default 'status' mode so the
                                            # service endpoint can be updated from https://test.bigchaindb.com
                                            # to the local service endpoint

# Print current configs
echo "Apicast Release: ${APICAST_RELEASE}"
echo "Apicast Download URL: ${BASE_GIT_URL}"
echo "APICAST_CONFIGURATION_LOADER: ${APICAST_CONFIGURATION_LOADER}"
echo "BDB_SERVICE_ENDPOINT: ${BDB_SERVICE_ENDPOINT}"


# Start nginx (via the apicast launcher)
echo "INFO: starting nginx..."
exec apicast-${APICAST_RELEASE}/apicast/bin/apicast -b -e production -v -v -v
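For a quick test outside Kubernetes, the image built from this directory can be run directly, provided the two environment variables the sanity check expects are supplied. A sketch with placeholder endpoints (not values taken from this commit):

docker build -t bigchaindb/nginx_3scale:unstable .
docker run --rm \
  -e THREESCALE_PORTAL_ENDPOINT="https://<ACCESS_TOKEN>@<TENANT>-admin.3scale.net" \
  -e BDB_SERVICE_ENDPOINT="http://<bigchaindb-host>:<port>" \
  -p 8080:8080 -p 8090:8090 \
  bigchaindb/nginx_3scale:unstable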
@@ -1,14 +0,0 @@
FROM registry.access.redhat.com/3scale-amp20/apicast-gateway
LABEL maintainer "dev@bigchaindb.com"

COPY nginx.conf.template /opt/app-root/src/sites.d/nginx.conf
COPY nginx.lua.template /opt/app-root/src/sites.d/nginx.lua
COPY config.json /opt/app-root/src/conf/config.json
COPY nginx_openresty_entrypoint.bash /opt/app-root/scripts/

ENV THREESCALE_CONFIG_FILE "/opt/app-root/src/conf/config.json"
# The following ports are the values we use to run the NGINX+3scale container.
# 80 for http, 8080 for the 3scale api, 8888 for health-check, 27017 for
# MongoDB
EXPOSE 80 8080 8888 27017
ENTRYPOINT ["/nginx_openresty_entrypoint.bash"]
@@ -1,197 +0,0 @@
worker_processes 2;
daemon off;
user nobody nogroup;
pid /tmp/nginx.pid;
error_log /usr/local/openresty/nginx/logs/error.log;
env THREESCALE_DEPLOYMENT_ENV;

events {
    worker_connections 256;
    accept_mutex on;
    use epoll;
}

http {
    lua_shared_dict api_keys 10m;
    server_names_hash_bucket_size 128;
    lua_package_path ";;$prefix/?.lua;$prefix/conf/?.lua";
    init_by_lua 'math.randomseed(ngx.time()) ; cjson = require("cjson")';
    access_log /usr/local/openresty/nginx/logs/access.log combined buffer=16k flush=5s;

    # allow 10 req/sec from the same IP address, and store the counters in a
    # `zone` or shared memory location tagged as 'one'.
    limit_req_zone $binary_remote_addr zone=one:10m rate=10r/s;
    # enable logging when requests are being throttled
    limit_req_log_level notice;

    # the http status code to return to the client; 429 is for TooManyRequests,
    # ref. RFC 6585
    limit_req_status 429;

    resolver DNS_SERVER valid=30s ipv6=off;

    map $remote_addr $bdb_backend {
        default BIGCHAINDB_BACKEND_HOST;
    }

    upstream backend_SERVICE_ID {
        server localhost:9999 max_fails=5 fail_timeout=30;
    }

    # Our frontend API server that accepts requests from the external world and
    # takes care of authentication and authorization. If auth is successful, it
    # forwards the request to the backend_SERVICE_ID upstream where a consortium
    # can run a BDB cluster.
    server {
        lua_code_cache on;
        listen OPENRESTY_FRONTEND_PORT;
        keepalive_timeout 60s;

        underscores_in_headers on;
        set_by_lua $deployment 'return os.getenv("THREESCALE_DEPLOYMENT_ENV")';
        set $threescale_backend "https://su1.3scale.net";
        #set $threescale_backend "http://su1.3scale.net";
        #set $threescale_backend "https://su1.3scale.net:443";
        #set $threescale_backend "https://echo-api.3scale.net";

        # `slowloris` attack mitigation settings
        client_body_timeout 10s;
        client_header_timeout 10s;

        location = /out_of_band_authrep_action {
            internal;
            proxy_pass_request_headers off;
            set $service_token "SERVICE_TOKEN";
            content_by_lua "require('nginx').post_action_content()";
        }

        # 3scale auth api that takes the auth credentials and metrics as input,
        # and returns 200 OK if both the credentials match and the user has not
        # exceeded the limits in his application plan.
        location = /threescale_auth {
            internal;
            set $service_token "SERVICE_TOKEN";
            proxy_pass $threescale_backend/transactions/authorize.xml?service_token=$service_token&service_id=$service_id&$usage&$credentials&log%5Bcode%5D=$arg_code&log%5Brequest%5D=$arg_req&log%5Bresponse%5D=$arg_resp;
            proxy_set_header Host "su1.3scale.net";
            #proxy_set_header Host "echo-api.3scale.net";
            proxy_set_header X-3scale-User-Agent "nginx$deployment";
            proxy_set_header X-3scale-Version "THREESCALE_VERSION_HEADER";
        }

        # 3scale reporting api that takes the metrics data and persists the metrics
        # in the 3scale backend.
        location = /threescale_report {
            internal;
            set $service_token "SERVICE_TOKEN";
            proxy_pass $threescale_backend/transactions.xml;
            proxy_set_header Host "su1.3scale.net";
            #proxy_set_header Host "echo-api.3scale.net";
            # We have a bug in lua-nginx module that does not set
            # Content-Type from lua script
            proxy_pass_request_headers off;
            proxy_set_header Content-Type "application/x-www-form-urlencoded";
            proxy_set_header X-3scale-User-Agent "nginx$deployment";
            proxy_set_header X-3scale-Version "THREESCALE_VERSION_HEADER";
        }

        location / {
            proxy_ignore_client_abort on;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-3scale-proxy-secret-token $secret_token;

            # limit requests from the same client, allow `burst` to 20 r/s,
            # `nodelay` or drop connection immediately in case it exceeds this
            # threshold.
            limit_req zone=one burst=20 nodelay;

            # We do not need the GET handling here as it's done in the other NGINX
            # module
            #if ($request_method = GET ) {
            #  proxy_pass http://$bdb_backend:BIGCHAINDB_API_PORT;
            #}

            if ($request_method = POST ) {
                set $service_token null;
                set $cached_key null;
                set $credentials null;
                set $usage null;
                set $service_id SERVICE_ID;
                set $proxy_pass null;
                set $secret_token null;
                set $resp_body null;
                set $resp_headers null;
                access_by_lua "require('nginx').access()";
                body_filter_by_lua 'ngx.ctx.buffered = (ngx.ctx.buffered or "") .. string.sub(ngx.arg[1], 1, 1000)
                    if ngx.arg[2] then ngx.var.resp_body = ngx.ctx.buffered end';
                header_filter_by_lua 'ngx.var.resp_headers = cjson.encode(ngx.resp.get_headers())';

                add_header 'Access-Control-Allow-Origin' '*';
                add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
                add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range';
                add_header 'Access-Control-Expose-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range';

                proxy_pass $proxy_pass;
                post_action /out_of_band_authrep_action;
            }

            if ($request_method = 'OPTIONS') {
                add_header 'Access-Control-Allow-Origin' '*';
                add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
                add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range,app_key,app_id';
                add_header 'Access-Control-Max-Age' 43200;
                add_header 'Content-Type' 'text/plain charset=UTF-8';
                add_header 'Content-Length' 0;
                return 204;
            }
        }
    }

    # Our backend server block that accepts requests from the nginx proxy and
    # forwards it to instances of BDB cluster. We currently run only a single
    # instance.
    server {
        sendfile on;

        listen 9999;

        # max client request body size: avg transaction size
        client_max_body_size 15k;

        # keepalive connection settings
        keepalive_timeout 60s;

        # `slowloris` attack mitigation settings
        client_body_timeout 10s;
        client_header_timeout 10s;

        if ( $http_x_3scale_proxy_secret_token != "THREESCALE_RESPONSE_SECRET_TOKEN" ) {
            return 403;
        }

        location / {
            try_files $uri @proxy_to_app;
        }

        location @proxy_to_app {
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            # enable the following line if and only if you use HTTPS
            proxy_set_header X-Forwarded-Proto https;
            proxy_set_header Host $http_host;

            # we don't want nginx trying to do something clever with
            # redirects, we set the Host: header above already.
            proxy_redirect off;
            proxy_pass http://$bdb_backend:BIGCHAINDB_API_PORT;

            # limit requests from the same client, allow `burst` to 20 r/s on avg,
            # `nodelay` or drop connection immediately in case it exceeds this
            # threshold.
            limit_req zone=one burst=20 nodelay;
        }

        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root /usr/local/openresty/nginx/html/50x.html;
        }
    }
}
@@ -1,416 +0,0 @@
-- -*- mode: lua; -*-
-- Generated on: 2017-04-10 14:41:18 +0000 --
-- Version:
-- Error Messages per service

-- Ref: https://github.com/openresty/lua-nginx-module
-- Ref: https://ipdbtestnet-admin.3scale.net/p/admin/api_docs
-- Ref: http://nginx.org/en/docs/debugging_log.html

local custom_config = false

local _M = {
  ['services'] = {
    ['SERVICE_ID'] = {
      error_auth_failed = 'Authentication failed',
      error_auth_missing = 'Authentication parameters missing',
      auth_failed_headers = 'text/plain; charset=us-ascii',
      auth_missing_headers = 'text/plain; charset=us-ascii',
      error_no_match = 'No Mapping Rule matched',
      no_match_headers = 'text/plain; charset=us-ascii',
      no_match_status = 404,
      auth_failed_status = 403,
      auth_missing_status = 403,
      secret_token = 'THREESCALE_RESPONSE_SECRET_TOKEN',
      get_credentials = function(service, params)
        return (
          (params.app_id and params.app_key)
        ) or error_no_credentials(service)
      end,
      extract_usage = function (service, request)
        local method, url = unpack(string.split(request," "))
        local path, querystring = unpack(string.split(url, "?"))
        local usage_t = {}
        local matched_rules = {}

        local args = get_auth_params(nil, method)

        for i,r in ipairs(service.rules) do
          check_rule({path=path, method=method, args=args}, r, usage_t, matched_rules)
        end

        -- if there was no match, usage is set to nil and it will respond a 404, this behavior can be changed
        return usage_t, table.concat(matched_rules, ", ")
      end,
      rules = {
        {
          method = 'POST',
          pattern = '/api/{version}/transactions$',
          parameters = { 'version' },
          querystring_params = function(args)
            return true
          end,
          system_name = 'hits',
          delta = 1
        },
        {
          method = 'POST',
          pattern = '/api/{version}/transactions$',
          parameters = { 'version' },
          querystring_params = function(args)
            return true
          end,
          system_name = 'request_body_size',
          delta = 1
        },
        {
          method = 'POST',
          pattern = '/api/{version}/transactions$',
          parameters = { 'version' },
          querystring_params = function(args)
            return true
          end,
          system_name = 'response_body_size',
          delta = 1
        },
        {
          method = 'POST',
          pattern = '/api/{version}/transactions$',
          parameters = { 'version' },
          querystring_params = function(args)
            return true
          end,
          system_name = 'post_transactions',
          delta = 1
        },
        {
          method = 'POST',
          pattern = '/api/{version}/transactions$',
          parameters = { 'version' },
          querystring_params = function(args)
            return true
          end,
          system_name = 'total_body_size',
          delta = 1
        },
      }
    },
  }
}

-- Error Codes
function error_no_credentials(service)
  ngx.status = service.auth_missing_status
  ngx.header.content_type = service.auth_missing_headers
  ngx.print(service.error_auth_missing)
  ngx.exit(ngx.HTTP_OK)
end

function error_authorization_failed(service)
  ngx.status = service.auth_failed_status
  ngx.header.content_type = service.auth_failed_headers
  ngx.print(service.error_auth_failed)
  ngx.exit(ngx.HTTP_OK)
end

function error_no_match(service)
  ngx.status = service.no_match_status
  ngx.header.content_type = service.no_match_headers
  ngx.print(service.error_no_match)
  ngx.exit(ngx.HTTP_OK)
end
-- End Error Codes

-- Aux function to split a string

function string:split(delimiter)
  local result = { }
  local from = 1
  local delim_from, delim_to = string.find( self, delimiter, from )
  if delim_from == nil then return {self} end
  while delim_from do
    table.insert( result, string.sub( self, from , delim_from-1 ) )
    from = delim_to + 1
    delim_from, delim_to = string.find( self, delimiter, from )
  end
  table.insert( result, string.sub( self, from ) )
  return result
end

function first_values(a)
  r = {}
  for k,v in pairs(a) do
    if type(v) == "table" then
      r[k] = v[1]
    else
      r[k] = v
    end
  end
  return r
end

function set_or_inc(t, name, delta)
  return (t[name] or 0) + delta
end

function build_querystring_formatter(fmt)
  return function (query)
    local function kvmap(f, t)
      local res = {}
      for k, v in pairs(t) do
        table.insert(res, f(k, v))
      end
      return res
    end

    return table.concat(kvmap(function(k,v) return string.format(fmt, k, v) end, query or {}), "&")
  end
end

local build_querystring = build_querystring_formatter("usage[%s]=%s")
local build_query = build_querystring_formatter("%s=%s")

function regexpify(path)
  return path:gsub('?.*', ''):gsub("{.-}", '([\\w_.-]+)'):gsub("%.", "\\.")
end

function check_rule(req, rule, usage_t, matched_rules)
  local param = {}
  local p = regexpify(rule.pattern)
  local m = ngx.re.match(req.path,
                         string.format("^%s",p))
  if m and req.method == rule.method then
    local args = req.args
    if rule.querystring_params(args) then -- may return an empty table
                                          -- when no querystringparams
                                          -- in the rule. it's fine
      for i,p in ipairs(rule.parameters) do
        param[p] = m[i]
      end

      table.insert(matched_rules, rule.pattern)
      usage_t[rule.system_name] = set_or_inc(usage_t, rule.system_name, rule.delta)
    end
  end
end

--[[
  Authorization logic
  NOTE: We do not use any of the authorization logic defined in the template.
  We use custom authentication and authorization logic defined in the
  custom_app_id_authorize() function.
]]--

function get_auth_params(where, method)
  local params = {}
  if where == "headers" then
    params = ngx.req.get_headers()
  elseif method == "GET" then
    params = ngx.req.get_uri_args()
  else
    ngx.req.read_body()
    params = ngx.req.get_post_args()
  end
  return first_values(params)
end

function get_debug_value()
  local h = ngx.req.get_headers()
  if h["X-3scale-debug"] == 'SERVICE_TOKEN' then
    return true
  else
    return false
  end
end

function _M.authorize(auth_strat, params, service)
  if auth_strat == 'oauth' then
    oauth(params, service)
  else
    authrep(params, service)
  end
end

function oauth(params, service)
  ngx.var.cached_key = ngx.var.cached_key .. ":" .. ngx.var.usage
  local access_tokens = ngx.shared.api_keys
  local is_known = access_tokens:get(ngx.var.cached_key)

  if is_known ~= 200 then
    local res = ngx.location.capture("/threescale_oauth_authrep", { share_all_vars = true })

    -- IN HERE YOU DEFINE THE ERROR IF CREDENTIALS ARE PASSED, BUT THEY ARE NOT VALID
    if res.status ~= 200 then
      access_tokens:delete(ngx.var.cached_key)
      ngx.status = res.status
      ngx.header.content_type = "application/json"
      ngx.var.cached_key = nil
      error_authorization_failed(service)
    else
      access_tokens:set(ngx.var.cached_key,200)
    end

    ngx.var.cached_key = nil
  end
end

function authrep(params, service)
  ngx.var.cached_key = ngx.var.cached_key .. ":" .. ngx.var.usage
  local api_keys = ngx.shared.api_keys
  local is_known = api_keys:get(ngx.var.cached_key)

  if is_known ~= 200 then
    local res = ngx.location.capture("/threescale_authrep", { share_all_vars = true })

    -- IN HERE YOU DEFINE THE ERROR IF CREDENTIALS ARE PASSED, BUT THEY ARE NOT VALID
    if res.status ~= 200 then
      -- remove the key, if it's not 200 let's go the slow route, to 3scale's backend
      api_keys:delete(ngx.var.cached_key)
      ngx.status = res.status
      ngx.header.content_type = "application/json"
      ngx.var.cached_key = nil
      error_authorization_failed(service)
    else
      api_keys:set(ngx.var.cached_key,200)
    end
    ngx.var.cached_key = nil
  end
end

function _M.access()
  local params = {}
  local host = ngx.req.get_headers()["Host"]
  local auth_strat = ""
  local service = {}
  local usage = {}
  local matched_patterns = ''

  if ngx.status == 403 then
    ngx.say("Throttling due to too many requests")
    ngx.exit(403)
  end

  if ngx.var.service_id == 'SERVICE_ID' then
    local parameters = get_auth_params("headers", string.split(ngx.var.request, " ")[1] )
    service = _M.services['SERVICE_ID'] --
    ngx.var.secret_token = service.secret_token
    params.app_id = parameters["app_id"]
    params.app_key = parameters["app_key"] -- or "" -- Uncoment the first part if you want to allow not passing app_key
    service.get_credentials(service, params)
    ngx.var.cached_key = "SERVICE_ID" .. ":" .. params.app_id ..":".. params.app_key
    auth_strat = "2"
    ngx.var.service_id = "SERVICE_ID"
    ngx.var.proxy_pass = "http://backend_SERVICE_ID"
    usage, matched_patterns = service:extract_usage(ngx.var.request)
  end

  usage['post_transactions'] = 0
  usage['request_body_size'] = 0
  usage['total_body_size'] = 0
  usage['response_body_size'] = 0
  ngx.var.credentials = build_query(params)
  ngx.var.usage = build_querystring(usage)

  -- WHAT TO DO IF NO USAGE CAN BE DERIVED FROM THE REQUEST.
  if ngx.var.usage == '' then
    ngx.header["X-3scale-matched-rules"] = ''
    error_no_match(service)
  end

  if get_debug_value() then
    ngx.header["X-3scale-matched-rules"] = matched_patterns
    ngx.header["X-3scale-credentials"] = ngx.var.credentials
    ngx.header["X-3scale-usage"] = ngx.var.usage
    ngx.header["X-3scale-hostname"] = ngx.var.hostname
  end
  _M.custom_app_id_authorize(params, service)
end

function _M.custom_app_id_authorize(params, service)
  ngx.var.cached_key = ngx.var.cached_key .. ":" .. ngx.var.usage
  local api_keys = ngx.shared.api_keys
  local res = ngx.location.capture("/threescale_auth", { share_all_vars = true })
  if res.status ~= 200 then
    ngx.status = res.status
    ngx.header.content_type = "application/json"
    ngx.var.cached_key = nil
    error_authorization_failed(service)
  end
  ngx.var.cached_key = nil
end

function _M.post_action_content()
  local report_data = {}

  -- increment POST count
  report_data['post_transactions'] = 1

  -- NOTE: When we are querying for the length of the request here, we already
  -- have the complete request data with us and hence can just use the len()
  -- function to get the size of the payload in bytes.
  -- However, we might not have a complete response from the backend at this
  -- stage (esp. if it's a large response size). So, we decipher the payload
  -- size by peeking into the content length header of the response.
  -- Otherwise, nginx will have to buffer every response and then calculate
  -- response payload size.

  -- req data size
  local req_data = ngx.req.get_body_data()
  if req_data then
    report_data['request_body_size'] = req_data:len()
  else
    report_data['request_body_size'] = 0
  end

  -- res data size
  local all_headers = cjson.decode(ngx.var.resp_headers)
  local variable_header = "content-length" --<-- case sensitive
  if all_headers[variable_header] then
    report_data['response_body_size'] = all_headers[variable_header]
  else
    report_data['response_body_size'] = 0
  end

  -- total data size
  report_data['total_body_size'] = report_data['request_body_size'] + report_data['response_body_size']

  -- get the app_id
  local app_id = ""
  local credentials = ngx.var.credentials:split("&")
  for i in pairs(credentials) do
    if credentials[i]:match('app_id') then
      local temp = credentials[i]:split("=")
      app_id = temp[2]
    end
  end

  -- form the payload to report to 3scale
  local report = {}
  report['service_id'] = ngx.var.service_id
  report['service_token'] = ngx.var.service_token
  report['transactions[0][app_id]'] = app_id
  report['transactions[0][usage][post_transactions]'] = report_data['post_transactions']
  report['transactions[0][usage][request_body_size]'] = report_data['request_body_size']
  report['transactions[0][usage][response_body_size]'] = report_data['response_body_size']
  report['transactions[0][usage][total_body_size]'] = report_data['total_body_size']
  local res1 = ngx.location.capture("/threescale_report", {method = ngx.HTTP_POST, body = ngx.encode_args(report), share_all_vars = true })
  --ngx.log(0, ngx.encode_args(report))
  ngx.log(0, "Status: "..res1.status)
  ngx.log(0, "Body: "..res1.body)
  --if res1.status ~= 200 then
  --  local api_keys = ngx.shared.api_keys
  --  api_keys:delete(cached_key)
  --end
  ngx.exit(ngx.HTTP_OK)
end

if custom_config then
  local ok, c = pcall(function() return require(custom_config) end)
  if ok and type(c) == 'table' and type(c.setup) == 'function' then
    c.setup(_M)
  end
end

return _M

-- END OF SCRIPT
@@ -1,54 +0,0 @@
#!/bin/bash
set -euo pipefail

# Openresty vars
dns_server=`printenv DNS_SERVER`
openresty_frontend_port=`printenv OPENRESTY_FRONTEND_PORT`


# BigchainDB vars
bdb_backend_host=`printenv BIGCHAINDB_BACKEND_HOST`
bdb_api_port=`printenv BIGCHAINDB_API_PORT`


# Read the 3scale credentials from the mountpoint
# Should be mounted at the following directory
THREESCALE_CREDENTIALS_DIR=/usr/local/openresty/nginx/conf/threescale

threescale_secret_token=`cat ${THREESCALE_CREDENTIALS_DIR}/secret-token`
threescale_service_id=`cat ${THREESCALE_CREDENTIALS_DIR}/service-id`
threescale_version_header=`cat ${THREESCALE_CREDENTIALS_DIR}/version-header`
threescale_service_token=`cat ${THREESCALE_CREDENTIALS_DIR}/service-token`


if [[ -z "${dns_server:?DNS_SERVER not specified. Exiting!}" || \
      -z "${openresty_frontend_port:?OPENRESTY_FRONTEND_PORT not specified. Exiting!}" || \
      -z "${bdb_backend_host:?BIGCHAINDB_BACKEND_HOST not specified. Exiting!}" || \
      -z "${bdb_api_port:?BIGCHAINDB_API_PORT not specified. Exiting!}" || \
      -z "${threescale_secret_token:?3scale secret token not specified. Exiting!}" || \
      -z "${threescale_service_id:?3scale service id not specified. Exiting!}" || \
      -z "${threescale_version_header:?3scale version header not specified. Exiting!}" || \
      -z "${threescale_service_token:?3scale service token not specified. Exiting!}" ]]; then
  echo "Invalid environment settings detected. Exiting!"
  exit 1
fi

NGINX_LUA_FILE=/usr/local/openresty/nginx/conf/nginx.lua
NGINX_CONF_FILE=/usr/local/openresty/nginx/conf/nginx.conf

# configure the nginx.lua file with env variables
sed -i "s|SERVICE_ID|${threescale_service_id}|g" ${NGINX_LUA_FILE}
sed -i "s|THREESCALE_RESPONSE_SECRET_TOKEN|${threescale_secret_token}|g" ${NGINX_LUA_FILE}
sed -i "s|SERVICE_TOKEN|${threescale_service_token}|g" ${NGINX_LUA_FILE}

# configure the nginx.conf file with env variables
sed -i "s|DNS_SERVER|${dns_server}|g" ${NGINX_CONF_FILE}
sed -i "s|OPENRESTY_FRONTEND_PORT|${openresty_frontend_port}|g" ${NGINX_CONF_FILE}
sed -i "s|BIGCHAINDB_BACKEND_HOST|${bdb_backend_host}|g" ${NGINX_CONF_FILE}
sed -i "s|BIGCHAINDB_API_PORT|${bdb_api_port}|g" ${NGINX_CONF_FILE}
sed -i "s|THREESCALE_RESPONSE_SECRET_TOKEN|${threescale_secret_token}|g" $NGINX_CONF_FILE
sed -i "s|SERVICE_ID|${threescale_service_id}|g" $NGINX_CONF_FILE
sed -i "s|THREESCALE_VERSION_HEADER|${threescale_version_header}|g" $NGINX_CONF_FILE
sed -i "s|SERVICE_TOKEN|${threescale_service_token}|g" $NGINX_CONF_FILE

exec /opt/app-root/scripts/entrypoint