Merge remote-tracking branch 'remotes/origin/master' into kyber-master
Commit: 5b07cdc8f5
@ -4,7 +4,8 @@ cache: pip
|
||||
python:
|
||||
- 3.4
|
||||
- 3.5
|
||||
|
||||
- 3.6
|
||||
|
||||
env:
|
||||
- TOXENV=flake8
|
||||
- TOXENV=docsroot
|
||||
@ -30,6 +31,12 @@ matrix:
|
||||
env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb
|
||||
- python: 3.5
|
||||
env: BIGCHAINDB_DATABASE_BACKEND=mongodb
|
||||
- python: 3.6
|
||||
addons:
|
||||
rethinkdb: '2.3.5'
|
||||
env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb
|
||||
- python: 3.6
|
||||
env: BIGCHAINDB_DATABASE_BACKEND=mongodb
|
||||
|
||||
before_install: sudo .ci/travis-before-install.sh
|
||||
|
||||
|
Makefile | 10
@ -51,18 +51,14 @@ lint: ## check style with flake8
|
||||
flake8 bigchaindb tests
|
||||
|
||||
test: ## run tests quickly with the default Python
|
||||
py.test
|
||||
|
||||
pytest -v -n auto
|
||||
|
||||
test-all: ## run tests on every Python version with tox
|
||||
tox
|
||||
|
||||
coverage: ## check code coverage quickly with the default Python
|
||||
coverage run --source bigchaindb py.test
|
||||
|
||||
coverage report -m
|
||||
coverage html
|
||||
$(BROWSER) htmlcov/index.html
|
||||
pytest -v -n auto --cov=bigchaindb --cov-report term --cov-report html
|
||||
$(BROWSER) htmlcov/index.html
|
||||
|
||||
docs: ## generate Sphinx HTML documentation, including API docs
|
||||
$(MAKE) -C docs/root clean
|
||||
|
@ -82,6 +82,6 @@ flake8 --max-line-length 119 bigchaindb/
|
||||
|
||||
## Writing and Running (Python) Tests
|
||||
|
||||
The content of this section was moved to [`bigchiandb/tests/README.md`](./tests/README.md).
|
||||
The content of this section was moved to [`bigchaindb/tests/README.md`](./tests/README.md).
|
||||
|
||||
Note: We automatically run all tests on all pull requests (using Travis CI), so you should definitely run all tests locally before you submit a pull request. See the above-linked README file for instructions.
|
||||
Note: We automatically run all tests on all pull requests (using Travis CI), so you should definitely run all tests locally before you submit a pull request. See the above-linked README file for instructions.
|
||||
|
@ -45,10 +45,16 @@ These steps are common between minor and patch releases:
|
||||
1. Make sure your local Git is in the same state as the release: e.g. `git fetch <remote-name>` and `git checkout v0.9.1`
|
||||
1. Make sure you have a `~/.pypirc` file containing credentials for PyPI
|
||||
1. Do a `make release` to build and publish the new `bigchaindb` package on PyPI
|
||||
1. Login to readthedocs.org as a maintainer of the BigchainDB Server docs.
|
||||
Go to Admin --> Versions and under **Choose Active Versions**, make sure that the new version's tag is
|
||||
"Active" and "Public", and make sure the new version's branch
|
||||
(without the 'v' in front) is _not_ active
|
||||
1. Also in readthedocs.org, go to Admin --> Advanced Settings
|
||||
and make sure that "Default branch:" (i.e. what "latest" points to)
|
||||
is set to the new release's tag, e.g. `v0.9.1`. (Don't miss the 'v' in front.)
|
||||
1. [Login to readthedocs.org](https://readthedocs.org/accounts/login/)
|
||||
as a maintainer of the BigchainDB Server docs, and:
|
||||
- Go to Admin --> Advanced Settings
|
||||
and make sure that "Default branch:" (i.e. what "latest" points to)
|
||||
is set to the new release's tag, e.g. `v0.9.1`.
|
||||
(Don't miss the 'v' in front.)
|
||||
- Go to Admin --> Versions
|
||||
and under **Choose Active Versions**, do these things:
|
||||
1. Make sure that the new version's tag is "Active" and "Public"
|
||||
2. Make sure the new version's branch
|
||||
(without the 'v' in front) is _not_ active.
|
||||
3. Make sure the **stable** branch is _not_ active.
|
||||
4. Scroll to the bottom of the page and click the Submit button.
|
||||
|
@ -16,7 +16,7 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def connect(backend=None, host=None, port=None, name=None, max_tries=None,
|
||||
connection_timeout=None, replicaset=None):
|
||||
connection_timeout=None, replicaset=None, ssl=None, login=None, password=None):
|
||||
"""Create a new connection to the database backend.
|
||||
|
||||
All arguments default to the current configuration's values if not
|
||||
@ -50,6 +50,9 @@ def connect(backend=None, host=None, port=None, name=None, max_tries=None,
|
||||
# to handle these additional args. In case of RethinkDBConnection
|
||||
# it just does not do anything with it.
|
||||
replicaset = replicaset or bigchaindb.config['database'].get('replicaset')
|
||||
ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
|
||||
login = login or bigchaindb.config['database'].get('login')
|
||||
password = password or bigchaindb.config['database'].get('password')
|
||||
|
||||
try:
|
||||
module_name, _, class_name = BACKENDS[backend].rpartition('.')
|
||||
@ -63,7 +66,7 @@ def connect(backend=None, host=None, port=None, name=None, max_tries=None,
|
||||
logger.debug('Connection: {}'.format(Class))
|
||||
return Class(host=host, port=port, dbname=dbname,
|
||||
max_tries=max_tries, connection_timeout=connection_timeout,
|
||||
replicaset=replicaset)
|
||||
replicaset=replicaset, ssl=ssl, login=login, password=password)
|
||||
|
||||
|
||||
class Connection:
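For reference, a hedged sketch of how a caller might exercise the extended `connect()` signature (the credential values below are placeholders, and anything left as `None` still falls back to `bigchaindb.config['database']`):

```python
from bigchaindb.backend import connect

# Placeholder values for illustration only.
conn = connect(backend='mongodb',
               host='localhost', port=27017, name='bigchain',
               ssl=True, login='bdb-user', password='secret')
```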
|
||||
|
@ -16,7 +16,7 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
class MongoDBConnection(Connection):
|
||||
|
||||
def __init__(self, replicaset=None, **kwargs):
|
||||
def __init__(self, replicaset=None, ssl=None, login=None, password=None, **kwargs):
|
||||
"""Create a new Connection instance.
|
||||
|
||||
Args:
|
||||
@ -28,6 +28,9 @@ class MongoDBConnection(Connection):
|
||||
|
||||
super().__init__(**kwargs)
|
||||
self.replicaset = replicaset or bigchaindb.config['database']['replicaset']
|
||||
self.ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
|
||||
self.login = login or bigchaindb.config['database'].get('login')
|
||||
self.password = password or bigchaindb.config['database'].get('password')
|
||||
|
||||
@property
|
||||
def db(self):
|
||||
@ -71,14 +74,21 @@ class MongoDBConnection(Connection):
|
||||
# we should only return a connection if the replica set is
|
||||
# initialized. initialize_replica_set will check if the
|
||||
# replica set is initialized else it will initialize it.
|
||||
initialize_replica_set(self.host, self.port, self.connection_timeout)
|
||||
initialize_replica_set(self.host, self.port, self.connection_timeout,
|
||||
self.dbname, self.ssl, self.login, self.password)
|
||||
|
||||
# FYI: this might raise a `ServerSelectionTimeoutError`,
|
||||
# that is a subclass of `ConnectionFailure`.
|
||||
return pymongo.MongoClient(self.host,
|
||||
self.port,
|
||||
replicaset=self.replicaset,
|
||||
serverselectiontimeoutms=self.connection_timeout)
|
||||
client = pymongo.MongoClient(self.host,
|
||||
self.port,
|
||||
replicaset=self.replicaset,
|
||||
serverselectiontimeoutms=self.connection_timeout,
|
||||
ssl=self.ssl)
|
||||
|
||||
if self.login is not None and self.password is not None:
|
||||
client[self.dbname].authenticate(self.login, self.password)
|
||||
|
||||
return client
|
||||
|
||||
# `initialize_replica_set` might raise `ConnectionFailure` or `OperationFailure`.
|
||||
except (pymongo.errors.ConnectionFailure,
|
||||
@ -86,7 +96,7 @@ class MongoDBConnection(Connection):
|
||||
raise ConnectionError() from exc
|
||||
|
||||
|
||||
def initialize_replica_set(host, port, connection_timeout):
|
||||
def initialize_replica_set(host, port, connection_timeout, dbname, ssl, login, password):
|
||||
"""Initialize a replica set. If already initialized skip."""
|
||||
|
||||
# Setup a MongoDB connection
|
||||
@ -95,7 +105,12 @@ def initialize_replica_set(host, port, connection_timeout):
|
||||
# you try to connect to a replica set that is not yet initialized
|
||||
conn = pymongo.MongoClient(host=host,
|
||||
port=port,
|
||||
serverselectiontimeoutms=connection_timeout)
|
||||
serverselectiontimeoutms=connection_timeout,
|
||||
ssl=ssl)
|
||||
|
||||
if login is not None and password is not None:
|
||||
conn[dbname].authenticate(login, password)
|
||||
|
||||
_check_replica_set(conn)
|
||||
host = '{}:{}'.format(bigchaindb.config['database']['host'],
|
||||
bigchaindb.config['database']['port'])
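As a standalone illustration of the authenticated, TLS-enabled connection the patched code now establishes (a sketch assuming pymongo 3.x and placeholder credentials, not BigchainDB's own module):

```python
import pymongo

# Placeholder values; in BigchainDB these come from bigchaindb.config['database'].
host, port, dbname = 'localhost', 27017, 'bigchain'
login, password, use_ssl = 'bdb-user', 'secret', True

# Same call pattern as the patched MongoDBConnection.
client = pymongo.MongoClient(host, port,
                             replicaset='bigchain-rs',
                             serverselectiontimeoutms=5000,
                             ssl=use_ssl)

if login is not None and password is not None:
    # pymongo 3.x database-level authentication, as used above.
    client[dbname].authenticate(login, password)
```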
|
||||
|
@ -100,4 +100,5 @@ def create_votes_secondary_index(conn, dbname):
|
||||
ASCENDING),
|
||||
('node_pubkey',
|
||||
ASCENDING)],
|
||||
name='block_and_voter')
|
||||
name='block_and_voter',
|
||||
unique=True)
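The effect of the added `unique=True` flag can be reproduced with plain pymongo; a rough equivalent is below (the first index key is abbreviated in the hunk above, so the field names here are illustrative):

```python
from pymongo import MongoClient, ASCENDING

conn = MongoClient()   # placeholder connection
dbname = 'bigchain'

# Compound secondary index on the votes collection; unique=True now rejects
# a second vote by the same node for the same block.
conn[dbname]['votes'].create_index(
    [('vote.voting_for_block', ASCENDING), ('node_pubkey', ASCENDING)],
    name='block_and_voter',
    unique=True)
```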
|
||||
|
@ -24,7 +24,8 @@ from bigchaindb.commands.messages import (
|
||||
CANNOT_START_KEYPAIR_NOT_FOUND,
|
||||
RETHINKDB_STARTUP_ERROR,
|
||||
)
|
||||
from bigchaindb.commands.utils import configure_bigchaindb, input_on_stderr
|
||||
from bigchaindb.commands.utils import (
|
||||
configure_bigchaindb, start_logging_process, input_on_stderr)
|
||||
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
@ -169,6 +170,7 @@ def run_drop(args):
|
||||
|
||||
|
||||
@configure_bigchaindb
|
||||
@start_logging_process
|
||||
def run_start(args):
|
||||
"""Start the processes to run the node"""
|
||||
logger.info('BigchainDB Version %s', bigchaindb.__version__)
|
||||
|
@ -21,20 +21,59 @@ from bigchaindb.version import __version__
|
||||
|
||||
|
||||
def configure_bigchaindb(command):
|
||||
"""Decorator to be used by command line functions, such that the
|
||||
configuration of bigchaindb is performed before the execution of
|
||||
the command.
|
||||
|
||||
Args:
|
||||
command: The command to decorate.
|
||||
|
||||
Returns:
|
||||
The command wrapper function.
|
||||
|
||||
"""
|
||||
@functools.wraps(command)
|
||||
def configure(args):
|
||||
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
|
||||
|
||||
logging_config = bigchaindb.config['log'] or {}
|
||||
if 'log_level' in args and args.log_level:
|
||||
logging_config['level_console'] = args.log_level
|
||||
setup_logging(user_log_config=logging_config)
|
||||
|
||||
try:
|
||||
config_from_cmdline = {
|
||||
'log': {'level_console': args.log_level},
|
||||
'server': {'loglevel': args.log_level},
|
||||
}
|
||||
except AttributeError:
|
||||
config_from_cmdline = None
|
||||
bigchaindb.config_utils.autoconfigure(
|
||||
filename=args.config, config=config_from_cmdline, force=True)
|
||||
command(args)
|
||||
|
||||
return configure
|
||||
|
||||
|
||||
def start_logging_process(command):
|
||||
"""Decorator to start the logging subscriber process.
|
||||
|
||||
Args:
|
||||
command: The command to decorate.
|
||||
|
||||
Returns:
|
||||
The command wrapper function.
|
||||
|
||||
.. important::
|
||||
|
||||
Configuration, if needed, should be applied before invoking this
|
||||
decorator, as starting the subscriber process for logging will
|
||||
configure the root logger for the child process based on the
|
||||
state of :obj:`bigchaindb.config` at the moment this decorator
|
||||
is invoked.
|
||||
|
||||
"""
|
||||
@functools.wraps(command)
|
||||
def start_logging(args):
|
||||
from bigchaindb import config
|
||||
setup_logging(user_log_config=config.get('log'))
|
||||
command(args)
|
||||
return start_logging
|
||||
|
||||
|
||||
# We need this because `input` always prints on stdout, while it should print
|
||||
# to stderr. It's a very old bug, check it out here:
|
||||
# - https://bugs.python.org/issue1927
|
||||
@ -160,6 +199,7 @@ base_parser.add_argument('-c', '--config',
|
||||
|
||||
base_parser.add_argument('-l', '--log-level',
|
||||
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
|
||||
default='INFO',
|
||||
help='Log level')
|
||||
|
||||
base_parser.add_argument('-y', '--yes', '--yes-please',
|
||||
|
@ -1,7 +1,12 @@
|
||||
import logging
|
||||
from logging.handlers import DEFAULT_TCP_LOGGING_PORT
|
||||
from os.path import expanduser, join
|
||||
|
||||
|
||||
DEFAULT_SOCKET_LOGGING_HOST = 'localhost'
|
||||
DEFAULT_SOCKET_LOGGING_PORT = DEFAULT_TCP_LOGGING_PORT
|
||||
DEFAULT_SOCKET_LOGGING_ADDR = (DEFAULT_SOCKET_LOGGING_HOST,
|
||||
DEFAULT_SOCKET_LOGGING_PORT)
|
||||
DEFAULT_LOG_DIR = expanduser('~')
|
||||
|
||||
PUBLISHER_LOGGING_CONFIG = {
|
||||
@ -18,9 +23,8 @@ SUBSCRIBER_LOGGING_CONFIG = {
|
||||
'formatters': {
|
||||
'console': {
|
||||
'class': 'logging.Formatter',
|
||||
'format': (
|
||||
'%(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
|
||||
),
|
||||
'format': ('[%(asctime)s] [%(levelname)s] (%(name)s) '
|
||||
'%(message)s (%(processName)-10s - pid: %(process)d)'),
|
||||
'datefmt': '%Y-%m-%d %H:%M:%S',
|
||||
},
|
||||
'file': {
|
||||
|
bigchaindb/log/loggers.py | 32 (new file)
@ -0,0 +1,32 @@
|
||||
import logging.handlers
|
||||
|
||||
from gunicorn.glogging import Logger
|
||||
|
||||
from .configs import DEFAULT_SOCKET_LOGGING_HOST, DEFAULT_SOCKET_LOGGING_PORT
|
||||
|
||||
|
||||
class HttpServerLogger(Logger):
|
||||
"""Custom logger class for ``gunicorn`` logs.
|
||||
|
||||
Meant for internal usage only, to set the ``logger_class``
|
||||
configuration setting on gunicorn.
|
||||
|
||||
"""
|
||||
def setup(self, cfg):
|
||||
"""Setup the gunicorn access and error loggers. This overrides
|
||||
the parent method. Its main goal is to simply pipe all the logs to
|
||||
the TCP socket used throughout BigchainDB.
|
||||
|
||||
Args:
|
||||
cfg (:obj:`gunicorn.config.Config`): Gunicorn configuration
|
||||
object. *Ignored*.
|
||||
|
||||
"""
|
||||
self._set_socklog_handler(self.error_log)
|
||||
self._set_socklog_handler(self.access_log)
|
||||
|
||||
def _set_socklog_handler(self, log):
|
||||
socket_handler = logging.handlers.SocketHandler(
|
||||
DEFAULT_SOCKET_LOGGING_HOST, DEFAULT_SOCKET_LOGGING_PORT)
|
||||
socket_handler._gunicorn = True
|
||||
log.addHandler(socket_handler)
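A rough standalone illustration of the socket pipeline this logger relies on (standard library only; any process can forward its records to the same subscriber):

```python
import logging
import logging.handlers

# The log subscriber listens on this address (see bigchaindb.log.configs).
handler = logging.handlers.SocketHandler(
    'localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)

root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(handler)  # records are pickled and streamed over TCP

logging.getLogger('example').info('forwarded to the log subscriber')
```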
|
@ -9,7 +9,12 @@ import struct
|
||||
import sys
|
||||
from multiprocessing import Process
|
||||
|
||||
from .configs import PUBLISHER_LOGGING_CONFIG, SUBSCRIBER_LOGGING_CONFIG
|
||||
from .configs import (
|
||||
DEFAULT_SOCKET_LOGGING_HOST,
|
||||
DEFAULT_SOCKET_LOGGING_PORT,
|
||||
PUBLISHER_LOGGING_CONFIG,
|
||||
SUBSCRIBER_LOGGING_CONFIG,
|
||||
)
|
||||
from bigchaindb.common.exceptions import ConfigurationError
|
||||
|
||||
|
||||
@ -23,7 +28,7 @@ def _normalize_log_level(level):
|
||||
def setup_pub_logger():
|
||||
dictConfig(PUBLISHER_LOGGING_CONFIG)
|
||||
socket_handler = logging.handlers.SocketHandler(
|
||||
'localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
|
||||
DEFAULT_SOCKET_LOGGING_HOST, DEFAULT_SOCKET_LOGGING_PORT)
|
||||
socket_handler.setLevel(logging.DEBUG)
|
||||
logger = logging.getLogger()
|
||||
logger.addHandler(socket_handler)
|
||||
|
@ -104,6 +104,7 @@ def create_server(settings):
|
||||
if not settings.get('threads'):
|
||||
settings['threads'] = (multiprocessing.cpu_count() * 2) + 1
|
||||
|
||||
settings['logger_class'] = 'bigchaindb.log.loggers.HttpServerLogger'
|
||||
app = create_app(debug=settings.get('debug', False),
|
||||
threads=settings['threads'])
|
||||
standalone = StandaloneApplication(app, settings)
|
||||
|
@ -32,6 +32,7 @@ coverage:
|
||||
- "benchmarking-tests/*"
|
||||
- "speed-tests/*"
|
||||
- "ntools/*"
|
||||
- "k8s/*"
|
||||
|
||||
comment:
|
||||
# @stevepeak (from codecov.io) suggested we change 'suggestions' to 'uncovered'
|
||||
|
docs/server/source/appendices/commands.rst | 18 (new file)
@ -0,0 +1,18 @@
|
||||
######################
|
||||
Command Line Interface
|
||||
######################
|
||||
|
||||
.. automodule:: bigchaindb.commands
|
||||
:special-members: __init__
|
||||
|
||||
|
||||
:mod:`bigchaindb.commands.bigchain`
|
||||
-----------------------------------
|
||||
|
||||
.. automodule:: bigchaindb.commands.bigchain
|
||||
|
||||
|
||||
:mod:`bigchaindb.commands.utils`
|
||||
--------------------------------
|
||||
|
||||
.. automodule:: bigchaindb.commands.utils
|
@ -16,6 +16,7 @@ Appendices
|
||||
consensus
|
||||
pipelines
|
||||
backend
|
||||
commands
|
||||
aws-setup
|
||||
generate-key-pair-for-ssh
|
||||
firewall-notes
|
||||
|
@ -161,3 +161,18 @@ zero downtime during updates.
|
||||
|
||||
You can SSH to an existing BigchainDB instance and run the ``bigchaindb
|
||||
show-config`` command to check that the keyring is updated.
|
||||
|
||||
|
||||
Step 7: Run NGINX as a Deployment
|
||||
---------------------------------
|
||||
|
||||
Please refer to :ref:`this <Step 10: Run NGINX as a Deployment>` to
|
||||
set up NGINX in your new node.
|
||||
|
||||
|
||||
Step 8: Test Your New BigchainDB Node
|
||||
-------------------------------------
|
||||
|
||||
Please refer to the testing steps :ref:`here <Step 11: Verify the BigchainDB
|
||||
Node Setup>` to verify that your new BigchainDB node is working as expected.
|
||||
|
||||
|
@ -16,3 +16,5 @@ If you find the cloud deployment templates for nodes helpful, then you may also
|
||||
template-kubernetes-azure
|
||||
node-on-kubernetes
|
||||
add-node-on-kubernetes
|
||||
upgrade-on-kubernetes
|
||||
|
@ -195,9 +195,9 @@ which can also be obtained using the ``az account list-locations`` command.
|
||||
You can also try to assign a name to an Public IP in Azure before starting
|
||||
the process, or use ``nslookup`` with the name you have in mind to check
|
||||
if it's available for use.
|
||||
In the rare chance that the name in the ``data.fqdn`` field is not available,
|
||||
you must create a ConfigMap with a unique name and restart the
|
||||
MongoDB instance.
|
||||
|
||||
You should ensure that the name specified in the ``data.fqdn`` field is
|
||||
a unique one.
|
||||
|
||||
**Kubernetes on bare-metal or other cloud providers.**
|
||||
You need to provide the name resolution function
|
||||
@ -343,8 +343,8 @@ Get the file ``bigchaindb-dep.yaml`` from GitHub using:
|
||||
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/bigchaindb/bigchaindb-dep.yaml
|
||||
|
||||
Note that we set the ``BIGCHAINDB_DATABASE_HOST`` to ``mdb`` which is the name
|
||||
of the MongoDB service defined earlier.
|
||||
Note that we set the ``BIGCHAINDB_DATABASE_HOST`` to ``mdb-svc`` which is the
|
||||
name of the MongoDB service defined earlier.
|
||||
|
||||
We also hardcode the ``BIGCHAINDB_KEYPAIR_PUBLIC``,
|
||||
``BIGCHAINDB_KEYPAIR_PRIVATE`` and ``BIGCHAINDB_KEYRING`` for now.
|
||||
@ -367,22 +367,55 @@ Create the required Deployment using:
|
||||
You can check its status using the command ``kubectl get deploy -w``
|
||||
|
||||
|
||||
Step 10: Verify the BigchainDB Node Setup
|
||||
Step 10: Run NGINX as a Deployment
|
||||
----------------------------------
|
||||
|
||||
NGINX is used as a proxy to both the BigchainDB and MongoDB instances in the
|
||||
node.
|
||||
It proxies HTTP requests on port 80 to the BigchainDB backend, and TCP
|
||||
connections on port 27017 to the MongoDB backend.
|
||||
|
||||
You can also configure a whitelist in NGINX to allow only connections from
|
||||
other instances in the MongoDB replica set to access the backend MongoDB
|
||||
instance.
|
||||
|
||||
Get the file ``nginx-cm.yaml`` from GitHub using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/nginx/nginx-cm.yaml
|
||||
|
||||
The IP address whitelist can be explicitly configured in the ``nginx-cm.yaml``
|
||||
file. You will need a list of the IP addresses of all the other MongoDB
|
||||
instances in the cluster. If the MongoDB instances specify a hostname, then this
|
||||
needs to be resolved to the corresponding IP addresses. If the IP address of
|
||||
any MongoDB instance changes, we can start a 'rolling upgrade' of NGINX after
|
||||
updating the corresponding ConfigMap without affecting availability.
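If the other MongoDB instances are known only by hostname, one way to assemble
the colon-separated whitelist is sketched below (standard library only; the
hostnames are hypothetical placeholders):

.. code:: python

    import socket

    peers = ['mdb-instance-1.example.com', 'mdb-instance-2.example.com']
    whitelist = ':'.join(socket.gethostbyname(h) for h in peers)
    print(whitelist)   # e.g. 203.0.113.10:203.0.113.11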
|
||||
|
||||
|
||||
Create the ConfigMap for the whitelist using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl apply -f nginx-cm.yaml
|
||||
|
||||
Get the file ``nginx-dep.yaml`` from GitHub using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/nginx/nginx-dep.yaml
|
||||
|
||||
Create the NGINX deployment using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl apply -f nginx-dep.yaml
|
||||
|
||||
|
||||
Step 11: Verify the BigchainDB Node Setup
|
||||
-----------------------------------------
|
||||
|
||||
Step 10.1: Testing Externally
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Try to access the ``<dns/ip of your exposed bigchaindb service endpoint>:9984``
|
||||
on your browser. You must receive a json output that shows the BigchainDB
|
||||
server version among other things.
|
||||
|
||||
Try to access the ``<dns/ip of your exposed mongodb service endpoint>:27017``
|
||||
on your browser. You must receive a message from MongoDB stating that it
|
||||
doesn't allow HTTP connections to the port anymore.
|
||||
|
||||
|
||||
Step 10.2: Testing Internally
|
||||
Step 11.1: Testing Internally
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Run a container that provides utilities like ``nslookup``, ``curl`` and ``dig``
|
||||
@ -392,23 +425,53 @@ on the cluster and query the internal DNS and IP endpoints.
|
||||
|
||||
$ kubectl run -it toolbox -- image <docker image to run> --restart=Never --rm
|
||||
|
||||
It will drop you to the shell prompt.
|
||||
Now you can query for the ``mdb`` and ``bdb`` service details.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ nslookup mdb
|
||||
$ dig +noall +answer _mdb-port._tcp.mdb.default.svc.cluster.local SRV
|
||||
$ curl -X GET http://mdb:27017
|
||||
$ curl -X GET http://bdb:9984
|
||||
|
||||
There is a generic image based on alpine:3.5 with the required utilities
|
||||
hosted at Docker Hub under ``bigchaindb/toolbox``.
|
||||
The corresponding Dockerfile is `here
|
||||
<https://github.com/bigchaindb/bigchaindb/k8s/toolbox/Dockerfile>`_.
|
||||
|
||||
You can use it as below to get started immediately:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl run -it toolbox --image bigchaindb/toolbox --restart=Never --rm
|
||||
|
||||
It will drop you to the shell prompt.
|
||||
Now you can query for the ``mdb`` and ``bdb`` service details.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
# nslookup mdb-svc
|
||||
# nslookup bdb-svc
|
||||
# nslookup ngx-svc
|
||||
# dig +noall +answer _mdb-port._tcp.mdb-svc.default.svc.cluster.local SRV
|
||||
# dig +noall +answer _bdb-port._tcp.bdb-svc.default.svc.cluster.local SRV
|
||||
# dig +noall +answer _ngx-public-mdb-port._tcp.ngx-svc.default.svc.cluster.local SRV
|
||||
# dig +noall +answer _ngx-public-bdb-port._tcp.ngx-svc.default.svc.cluster.local SRV
|
||||
# curl -X GET http://mdb-svc:27017
|
||||
# curl -X GET http://bdb-svc:9984
|
||||
# curl -X GET http://ngx-svc:80
|
||||
# curl -X GET http://ngx-svc:27017
|
||||
|
||||
The ``nslookup`` commands should output the configured IP addresses of the
|
||||
services in the cluster.
|
||||
|
||||
The ``dig`` commands should return the port numbers configured for the
|
||||
various services in the cluster.
|
||||
|
||||
Finally, the ``curl`` commands test the availability of the services
|
||||
themselves.
|
||||
|
||||
Step 11.2: Testing Externally
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Try to access the ``<dns/ip of your exposed bigchaindb service endpoint>:80``
|
||||
on your browser. You must receive a json output that shows the BigchainDB
|
||||
server version among other things.
|
||||
|
||||
Try to access the ``<dns/ip of your exposed mongodb service endpoint>:27017``
|
||||
on your browser. If your IP is in the whitelist, you will receive a message
|
||||
from the MongoDB instance stating that it doesn't allow HTTP connections to
|
||||
the port anymore. If your IP is not in the whitelist, your access will be
|
||||
blocked and you will not see any response from the MongoDB instance.
|
||||
|
||||
|
@ -168,7 +168,7 @@ using something like:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ ssh ssh ubuntu@k8s-agent-4AC80E97-0
|
||||
$ ssh ubuntu@k8s-agent-4AC80E97-0
|
||||
|
||||
where ``k8s-agent-4AC80E97-0`` is the name
|
||||
of a Kubernetes agent node in your Kubernetes cluster.
|
||||
|
@ -0,0 +1,105 @@
|
||||
Kubernetes Template: Upgrade all Software in a BigchainDB Node
|
||||
==============================================================
|
||||
|
||||
This page outlines how to upgrade all the software associated
|
||||
with a BigchainDB node running on Kubernetes,
|
||||
including host operating systems, Docker, Kubernetes,
|
||||
and BigchainDB-related software.
|
||||
|
||||
|
||||
Upgrade Host OS, Docker and Kubernetes
|
||||
--------------------------------------
|
||||
|
||||
Some Kubernetes installation & management systems
|
||||
can do full or partial upgrades of host OSes, Docker,
|
||||
or Kubernetes, e.g.
|
||||
`Tectonic <https://coreos.com/tectonic/>`_,
|
||||
`Rancher <https://docs.rancher.com/rancher/v1.5/en/>`_,
|
||||
and
|
||||
`Kubo <https://pivotal.io/kubo>`_.
|
||||
Consult the documentation for your system.
|
||||
|
||||
**Azure Container Service (ACS).**
|
||||
On Dec. 15, 2016, a Microsoft employee
|
||||
`wrote <https://github.com/colemickens/azure-kubernetes-status/issues/15#issuecomment-267453251>`_:
|
||||
"In the coming months we [the Azure Kubernetes team] will be building managed updates in the ACS service."
|
||||
At the time of writing, managed updates were not yet available,
|
||||
but you should check the latest
|
||||
`ACS documentation <https://docs.microsoft.com/en-us/azure/container-service/>`_
|
||||
to see what's available now.
|
||||
Also at the time of writing, ACS only supported Ubuntu
|
||||
as the host (master and agent) operating system.
|
||||
You can upgrade Ubuntu and Docker on Azure
|
||||
by SSHing into each of the hosts,
|
||||
as documented on
|
||||
:ref:`another page <Optional: SSH to Your New Kubernetes Cluster Nodes>`.
|
||||
|
||||
In general, you can SSH to each host in your Kubernetes Cluster
|
||||
to update the OS and Docker.
|
||||
|
||||
.. note::
|
||||
|
||||
Once you are in an SSH session with a host,
|
||||
the ``docker info`` command is a handy way to determine the
|
||||
host OS (including version) and the Docker version.
|
||||
|
||||
When you want to upgrade the software on a Kubernetes node,
|
||||
you should "drain" the node first,
|
||||
i.e. tell Kubernetes to gracefully terminate all pods
|
||||
on the node and mark it as unschedulable
|
||||
(so no new pods get put on the node during its downtime).
|
||||
|
||||
.. code::
|
||||
|
||||
kubectl drain $NODENAME
|
||||
|
||||
There are `more details in the Kubernetes docs <https://kubernetes.io/docs/admin/cluster-management/#maintenance-on-a-node>`_,
|
||||
including instructions to make the node schedulable again.
|
||||
|
||||
To manually upgrade the host OS,
|
||||
see the docs for that OS.
|
||||
|
||||
To manually upgrade Docker, see
|
||||
`the Docker docs <https://docs.docker.com/>`_.
|
||||
|
||||
To manually upgrade all Kubernetes software in your Kubernetes cluster, see
|
||||
`the Kubernetes docs <https://kubernetes.io/docs/admin/cluster-management/>`_.
|
||||
|
||||
|
||||
Upgrade BigchainDB-Related Software
|
||||
-----------------------------------
|
||||
|
||||
We use Kubernetes "Deployments" for NGINX, BigchainDB,
|
||||
and most other BigchainDB-related software.
|
||||
The only exception is MongoDB; we use a Kubernetes
|
||||
StatefulSet for that.
|
||||
|
||||
The nice thing about Kubernetes Deployments
|
||||
is that Kubernetes can manage most of the upgrade process.
|
||||
A typical upgrade workflow for a single Deployment would be:
|
||||
|
||||
.. code::
|
||||
|
||||
$ KUBE_EDITOR=nano kubectl edit deployment/<name of Deployment>
|
||||
|
||||
The `kubectl edit <https://kubernetes.io/docs/user-guide/kubectl/kubectl_edit/>`_
|
||||
command opens the specified editor (nano in the above example),
|
||||
allowing you to edit the specified Deployment *in the Kubernetes cluster*.
|
||||
You can change the version tag on the Docker image, for example.
|
||||
Don't forget to save your edits before exiting the editor.
|
||||
The Kubernetes docs have more information about
|
||||
`updating a Deployment <https://kubernetes.io/docs/user-guide/deployments/#updating-a-deployment>`_.
|
||||
|
||||
|
||||
The upgrade story for the MongoDB StatefulSet is *different*.
|
||||
(This is because MongoDB has persistent state,
|
||||
which is stored in some storage associated with a PersistentVolumeClaim.)
|
||||
At the time of writing, StatefulSets were still in beta,
|
||||
and they did not support automated image upgrade (Docker image tag upgrade).
|
||||
We expect that to change.
|
||||
Rather than trying to keep these docs up-to-date,
|
||||
we advise you to check out the current
|
||||
`Kubernetes docs about updating containers in StatefulSets
|
||||
<https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-containers>`_.
|
||||
|
||||
|
@ -22,7 +22,12 @@ One can also put different weights on the inputs to a threshold condition, along
|
||||
|
||||
The (single) output of a threshold condition can be used as one of the inputs of other threshold conditions. This means that one can combine threshold conditions to build complex logical expressions, e.g. (x OR y) AND (u OR v).
|
||||
|
||||
When one creates a condition, one can calculate its fulfillment length (e.g. 96). The more complex the condition, the larger its fulfillment length will be. A BigchainDB federation can put an upper limit on the allowed fulfillment length, as a way of capping the complexity of conditions (and the computing time required to validate them).
|
||||
When one creates a condition, one can calculate its fulfillment length (e.g.
|
||||
96). The more complex the condition, the larger its fulfillment length will be.
|
||||
A BigchainDB federation can put an upper limit on the complexity of the
|
||||
conditions, either directly by setting an allowed maximum fulfillment length,
|
||||
or indirectly by setting a maximum allowed transaction size which would limit
|
||||
the overall complexity across all inputs and outputs of a transaction.
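As a sketch of how a federation-chosen cap might be enforced (the limit and the check below are hypothetical, not BigchainDB's actual validation code):

```python
MAX_FULFILLMENT_LENGTH = 1024  # hypothetical cap chosen by a federation

def validate_fulfillment_length(fulfillment_length):
    """Reject conditions whose fulfillments are too long (i.e. too complex)."""
    if fulfillment_length > MAX_FULFILLMENT_LENGTH:
        raise ValueError('condition exceeds the allowed fulfillment length')
    return True
```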
|
||||
|
||||
If someone tries to make a condition where the output of a threshold condition feeds into the input of another “earlier” threshold condition (i.e. in a closed logical circuit), then their computer will take forever to calculate the (infinite) “condition URI”, at least in theory. In practice, their computer will run out of memory or their client software will timeout after a while.
|
||||
|
||||
|
@ -68,6 +68,22 @@ You can also use the `--dev-start-rethinkdb` command line option to automaticall
|
||||
e.g. `bigchaindb --dev-start-rethinkdb start`. Note that this will also shutdown rethinkdb when the bigchaindb process stops.
|
||||
The option `--dev-allow-temp-keypair` will generate a keypair on the fly if no keypair is found, this is useful when you want to run a temporary instance of BigchainDB in a Docker container, for example.
|
||||
|
||||
### Options
|
||||
The log level for the console can be set via the option `--log-level` or its
|
||||
abbreviation `-l`. Example:
|
||||
|
||||
```bash
|
||||
$ bigchaindb --log-level INFO start
|
||||
```
|
||||
|
||||
The allowed levels are `DEBUG`, `INFO` , `WARNING`, `ERROR`, and `CRITICAL`.
|
||||
For an explanation regarding these levels please consult the
|
||||
[Logging Levels](https://docs.python.org/3.6/library/logging.html#levels)
|
||||
section of Python's documentation.
|
||||
|
||||
For a more fine-grained control over the logging configuration you can use the
|
||||
configuration file as documented under
|
||||
[Configuration Settings](configuration.html).
|
||||
|
||||
## bigchaindb set-shards
|
||||
|
||||
|
@ -22,6 +22,15 @@ For convenience, here's a list of all the relevant environment variables (docume
|
||||
`BIGCHAINDB_CONFIG_PATH`<br>
|
||||
`BIGCHAINDB_BACKLOG_REASSIGN_DELAY`<br>
|
||||
`BIGCHAINDB_CONSENSUS_PLUGIN`<br>
|
||||
`BIGCHAINDB_LOG`<br>
|
||||
`BIGCHAINDB_LOG_FILE`<br>
|
||||
`BIGCHAINDB_LOG_LEVEL_CONSOLE`<br>
|
||||
`BIGCHAINDB_LOG_LEVEL_LOGFILE`<br>
|
||||
`BIGCHAINDB_LOG_DATEFMT_CONSOLE`<br>
|
||||
`BIGCHAINDB_LOG_DATEFMT_LOGFILE`<br>
|
||||
`BIGCHAINDB_LOG_FMT_CONSOLE`<br>
|
||||
`BIGCHAINDB_LOG_FMT_LOGFILE`<br>
|
||||
`BIGCHAINDB_LOG_GRANULAR_LEVELS`<br>
|
||||
|
||||
The local config file is `$HOME/.bigchaindb` by default (a file which might not even exist), but you can tell BigchainDB to use a different file by using the `-c` command-line option, e.g. `bigchaindb -c path/to/config_file.json start`
|
||||
or using the `BIGCHAINDB_CONFIG_PATH` environment variable, e.g. `BIGCHAINDB_CONFIG_PATH=.my_bigchaindb_config bigchaindb start`.
|
||||
@ -173,3 +182,211 @@ export BIGCHAINDB_CONSENSUS_PLUGIN=default
|
||||
```js
|
||||
"consensus_plugin": "default"
|
||||
```
|
||||
|
||||
## log
|
||||
The `log` key is expected to point to a mapping (set of key/value pairs)
|
||||
holding the logging configuration.
|
||||
|
||||
**Example**:
|
||||
|
||||
```
|
||||
{
|
||||
"log": {
|
||||
"file": "/var/log/bigchaindb.log",
|
||||
"level_console": "info",
|
||||
"level_logfile": "info",
|
||||
"datefmt_console": "%Y-%m-%d %H:%M:%S",
|
||||
"datefmt_logfile": "%Y-%m-%d %H:%M:%S",
|
||||
"fmt_console": "%(asctime)s [%(levelname)s] (%(name)s) %(message)s",
|
||||
"fmt_logfile": "%(asctime)s [%(levelname)s] (%(name)s) %(message)s",
|
||||
"granular_levels": {
|
||||
"bichaindb.backend": "info",
|
||||
"bichaindb.core": "info"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Defaults to**: `"{}"`.
|
||||
|
||||
Please note that although the default is `"{}"` as per the configuration file,
|
||||
internal defaults are used, such that the actual operational default is:
|
||||
|
||||
```
|
||||
{
|
||||
"log": {
|
||||
"file": "~/bigchaindb.log",
|
||||
"level_console": "info",
|
||||
"level_logfile": "info",
|
||||
"datefmt_console": "%Y-%m-%d %H:%M:%S",
|
||||
"datefmt_logfile": "%Y-%m-%d %H:%M:%S",
|
||||
"fmt_console": "%(asctime)s [%(levelname)s] (%(name)s) %(message)s",
|
||||
"fmt_logfile": "%(asctime)s [%(levelname)s] (%(name)s) %(message)s",
|
||||
"granular_levels": {}
|
||||
}
}
|
||||
```
|
||||
|
||||
The next subsections explain each field of the `log` configuration.
|
||||
|
||||
|
||||
### log.file
|
||||
The full path to the file where logs should be written to.
|
||||
|
||||
**Example**:
|
||||
|
||||
```
|
||||
{
|
||||
"log": {
|
||||
"file": "/var/log/bigchaindb/bigchaindb.log"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Defaults to**: `"~/bigchaindb.log"`.
|
||||
|
||||
Please note that the user running `bigchaindb` must have write access to the
|
||||
location.
|
||||
|
||||
|
||||
### log.level_console
|
||||
The log level used to log to the console. Possible allowed values are the ones
|
||||
defined by [Python](https://docs.python.org/3.6/library/logging.html#levels),
|
||||
but case insensitive for convenience's sake:
|
||||
|
||||
```
|
||||
"critical", "error", "warning", "info", "debug", "notset"
|
||||
```
|
||||
|
||||
**Example**:
|
||||
|
||||
```
|
||||
{
|
||||
"log": {
|
||||
"level_console": "info"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Defaults to**: `"info"`.
|
||||
|
||||
|
||||
### log.level_logfile
|
||||
The log level used to log to the log file. Possible allowed values are the ones
|
||||
defined by [Python](https://docs.python.org/3.6/library/logging.html#levels),
|
||||
but case insensitive for convenience's sake:
|
||||
|
||||
```
|
||||
"critical", "error", "warning", "info", "debug", "notset"
|
||||
```
|
||||
|
||||
**Example**:
|
||||
|
||||
```
|
||||
{
|
||||
"log": {
|
||||
"level_file": "info"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Defaults to**: `"info"`.
|
||||
|
||||
|
||||
### log.datefmt_console
|
||||
The format string for the date/time portion of a message, when logged to the
|
||||
console.
|
||||
|
||||
**Example**:
|
||||
|
||||
```
|
||||
{
|
||||
"log": {
|
||||
"datefmt_console": "%x %X %Z"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Defaults to**: `"%Y-%m-%d %H:%M:%S"`.
|
||||
|
||||
For more information on how to construct the format string please consult the
|
||||
table under Python's documentation of
|
||||
[`time.strftime(format[, t])`](https://docs.python.org/3.6/library/time.html#time.strftime)
|
||||
|
||||
### log.datefmt_logfile
|
||||
The format string for the date/time portion of a message, when logged to a log
|
||||
file.
|
||||
|
||||
**Example**:
|
||||
|
||||
```
|
||||
{
|
||||
"log": {
|
||||
"datefmt_logfile": "%c %z"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Defaults to**: `"%Y-%m-%d %H:%M:%S"`.
|
||||
|
||||
For more information on how to construct the format string please consult the
|
||||
table under Python's documentation of
|
||||
[`time.strftime(format[, t])`](https://docs.python.org/3.6/library/time.html#time.strftime)
|
||||
|
||||
|
||||
### log.fmt_console
|
||||
A string used to format the log messages when logged to the console.
|
||||
|
||||
**Example**:
|
||||
|
||||
```
|
||||
{
|
||||
"log": {
|
||||
"fmt_console": "%(asctime)s [%(levelname)s] %(message)s %(process)d"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Defaults to**: `"[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s (%(processName)-10s - pid: %(process)d)"`
|
||||
|
||||
For more information on possible formatting options please consult Python's
|
||||
documentation on
|
||||
[LogRecord attributes](https://docs.python.org/3.6/library/logging.html#logrecord-attributes)
|
||||
|
||||
|
||||
### log.fmt_logfile
|
||||
A string used to format the log messages when logged to a log file.
|
||||
|
||||
**Example**:
|
||||
|
||||
```
|
||||
{
|
||||
"log": {
|
||||
"fmt_logfile": "%(asctime)s [%(levelname)s] %(message)s %(process)d"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Defaults to**: `"[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s (%(processName)-10s - pid: %(process)d)"`
|
||||
|
||||
For more information on possible formatting options please consult Python's
|
||||
documentation on
|
||||
[LogRecord attributes](https://docs.python.org/3.6/library/logging.html#logrecord-attributes)
|
||||
|
||||
|
||||
### log.granular_levels
|
||||
Log levels for BigchainDB's modules. This can be useful to control the log
|
||||
level of specific parts of the application. As an example, if you wanted the
|
||||
logging of the `core.py` module to be more verbose, you would set the
|
||||
configuration shown in the example below.
|
||||
|
||||
**Example**:
|
||||
|
||||
```
|
||||
{
|
||||
"log": {
|
||||
"granular_levels": {
|
||||
"bichaindb.core": "debug"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Defaults to**: `"{}"`
|
||||
|
@ -1,44 +1,47 @@
|
||||
###############################################################
|
||||
# This config file runs bigchaindb:master as a k8s Deployment #
|
||||
# and it connects to the mongodb backend on a separate pod #
|
||||
# and it connects to the mongodb backend running as a #
|
||||
# separate pod #
|
||||
###############################################################
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: bdb
|
||||
name: bdb-svc
|
||||
namespace: default
|
||||
labels:
|
||||
name: bdb
|
||||
name: bdb-svc
|
||||
spec:
|
||||
selector:
|
||||
app: bdb
|
||||
app: bdb-dep
|
||||
ports:
|
||||
- port: 9984
|
||||
targetPort: 9984
|
||||
name: bdb-port
|
||||
type: LoadBalancer
|
||||
type: ClusterIP
|
||||
clusterIP: None
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: bdb
|
||||
name: bdb-dep
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: bdb
|
||||
app: bdb-dep
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: bigchaindb
|
||||
image: bigchaindb/bigchaindb:master
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- start
|
||||
env:
|
||||
- name: BIGCHAINDB_DATABASE_HOST
|
||||
value: mdb
|
||||
value: mdb-svc
|
||||
- name: BIGCHAINDB_DATABASE_PORT
|
||||
# TODO(Krish): remove hardcoded port
|
||||
value: "27017"
|
||||
@ -58,7 +61,6 @@ spec:
|
||||
value: "120"
|
||||
- name: BIGCHAINDB_KEYRING
|
||||
value: ""
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9984
|
||||
hostPort: 9984
|
||||
|
@ -1,89 +0,0 @@
|
||||
###############################################################
|
||||
# This config file runs bigchaindb:latest and connects to the #
|
||||
# mongodb backend as a service #
|
||||
###############################################################
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: bdb-mdb-service
|
||||
namespace: default
|
||||
labels:
|
||||
name: bdb-mdb-service
|
||||
spec:
|
||||
selector:
|
||||
app: bdb-mdb
|
||||
ports:
|
||||
- port: 9984
|
||||
targetPort: 9984
|
||||
name: bdb-api
|
||||
type: LoadBalancer
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: bdb-mdb
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: bdb-mdb
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: bdb-mdb
|
||||
image: bigchaindb/bigchaindb:latest
|
||||
args:
|
||||
- start
|
||||
env:
|
||||
- name: BIGCHAINDB_DATABASE_HOST
|
||||
value: mdb-service
|
||||
- name: BIGCHAINDB_DATABASE_PORT
|
||||
value: "27017"
|
||||
- name: BIGCHAINDB_DATABASE_REPLICASET
|
||||
value: bigchain-rs
|
||||
- name: BIGCHIANDB_DATABASE_BACKEND
|
||||
value: mongodb
|
||||
- name: BIGCHAINDB_DATABASE_NAME
|
||||
value: bigchain
|
||||
- name: BIGCHAINDB_SERVER_BIND
|
||||
value: 0.0.0.0:9984
|
||||
- name: BIGCHAINDB_KEYPAIR_PUBLIC
|
||||
value: EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ
|
||||
- name: BIGCHAINDB_KEYPAIR_PRIVATE
|
||||
value: 3CjmRhu718gT1Wkba3LfdqX5pfYuBdaMPLd7ENUga5dm
|
||||
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
|
||||
value: "120"
|
||||
- name: BIGCHAINDB_KEYRING
|
||||
value: ""
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9984
|
||||
hostPort: 9984
|
||||
name: bdb-port
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: bigchaindb-data
|
||||
mountPath: /data
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: bigchaindb-data
|
||||
hostPath:
|
||||
path: /disk/bigchaindb-data
|
@ -1,87 +0,0 @@
|
||||
###############################################################
|
||||
# This config file runs bigchaindb:latest and connects to the #
|
||||
# rethinkdb backend as a service #
|
||||
###############################################################
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: bdb-rdb-service
|
||||
namespace: default
|
||||
labels:
|
||||
name: bdb-rdb-service
|
||||
spec:
|
||||
selector:
|
||||
app: bdb-rdb
|
||||
ports:
|
||||
- port: 9984
|
||||
targetPort: 9984
|
||||
name: bdb-rdb-api
|
||||
type: LoadBalancer
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: bdb-rdb
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: bdb-rdb
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: bdb-rdb
|
||||
image: bigchaindb/bigchaindb:latest
|
||||
args:
|
||||
- start
|
||||
env:
|
||||
- name: BIGCHAINDB_DATABASE_HOST
|
||||
value: rdb-service
|
||||
- name: BIGCHAINDB_DATABASE_PORT
|
||||
value: "28015"
|
||||
- name: BIGCHIANDB_DATABASE_BACKEND
|
||||
value: rethinkdb
|
||||
- name: BIGCHAINDB_DATABASE_NAME
|
||||
value: bigchain
|
||||
- name: BIGCHAINDB_SERVER_BIND
|
||||
value: 0.0.0.0:9984
|
||||
- name: BIGCHAINDB_KEYPAIR_PUBLIC
|
||||
value: EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ
|
||||
- name: BIGCHAINDB_KEYPAIR_PRIVATE
|
||||
value: 3CjmRhu718gT1Wkba3LfdqX5pfYuBdaMPLd7ENUga5dm
|
||||
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
|
||||
value: "120"
|
||||
- name: BIGCHAINDB_KEYRING
|
||||
value: ""
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9984
|
||||
hostPort: 9984
|
||||
name: bdb-port
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: bigchaindb-data
|
||||
mountPath: /data
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: bigchaindb-data
|
||||
hostPath:
|
||||
path: /disk/bigchaindb-data
|
@ -1,57 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: mongodb
|
||||
labels:
|
||||
name: mongodb
|
||||
spec:
|
||||
ports:
|
||||
- port: 27017
|
||||
targetPort: 27017
|
||||
clusterIP: None
|
||||
selector:
|
||||
role: mongodb
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: mongodb
|
||||
spec:
|
||||
serviceName: mongodb
|
||||
replicas: 3
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
role: mongodb
|
||||
environment: staging
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: mongo
|
||||
image: mongo:3.4.1
|
||||
command:
|
||||
- mongod
|
||||
- "--replSet"
|
||||
- bigchain-rs
|
||||
#- "--smallfiles"
|
||||
#- "--noprealloc"
|
||||
ports:
|
||||
- containerPort: 27017
|
||||
volumeMounts:
|
||||
- name: mongo-persistent-storage
|
||||
mountPath: /data/db
|
||||
- name: mongo-sidecar
|
||||
image: cvallance/mongo-k8s-sidecar
|
||||
env:
|
||||
- name: MONGO_SIDECAR_POD_LABELS
|
||||
value: "role=mongo,environment=staging"
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: mongo-persistent-storage
|
||||
annotations:
|
||||
volume.beta.kubernetes.io/storage-class: "fast"
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
@ -1,114 +0,0 @@
|
||||
#################################################################
|
||||
# This YAML file desribes a StatefulSet with two containers: #
|
||||
# bigchaindb/bigchaindb:latest and mongo:3.4.1 #
|
||||
# It also describes a Service to expose BigchainDB and MongoDB. #
|
||||
#################################################################
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: bdb-service
|
||||
namespace: default
|
||||
labels:
|
||||
name: bdb-service
|
||||
spec:
|
||||
selector:
|
||||
app: bdb
|
||||
ports:
|
||||
- port: 9984
|
||||
targetPort: 9984
|
||||
name: bdb-http-api
|
||||
- port: 27017
|
||||
targetPort: 27017
|
||||
name: mongodb-port
|
||||
type: LoadBalancer
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: bdb
|
||||
namespace: default
|
||||
spec:
|
||||
serviceName: bdb
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
name: bdb
|
||||
labels:
|
||||
app: bdb
|
||||
#annotations:
|
||||
#pod.beta.kubernetes.io/init-containers: '[
|
||||
# TODO mongodb user and group; id = 999
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: bigchaindb
|
||||
image: bigchaindb/bigchaindb:master
|
||||
args:
|
||||
- start
|
||||
env:
|
||||
- name: BIGCHAINDB_KEYPAIR_PRIVATE
|
||||
value: 3CjmRhu718gT1Wkba3LfdqX5pfYuBdaMPLd7ENUga5dm
|
||||
- name: BIGCHAINDB_KEYPAIR_PUBLIC
|
||||
value: EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ
|
||||
- name: BIGCHAINDB_KEYRING
|
||||
value: ""
|
||||
- name: BIGCHAINDB_DATABASE_BACKEND
|
||||
value: mongodb
|
||||
- name: BIGCHAINDB_DATABASE_HOST
|
||||
value: localhost
|
||||
- name: BIGCHAINDB_DATABASE_PORT
|
||||
value: "27017"
|
||||
- name: BIGCHAINDB_SERVER_BIND
|
||||
value: "0.0.0.0:9984"
|
||||
- name: BIGCHAINDB_DATABASE_REPLICASET
|
||||
value: bigchain-rs
|
||||
- name: BIGCHAINDB_DATABASE_NAME
|
||||
value: bigchain
|
||||
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
|
||||
value: "120"
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9984
|
||||
hostPort: 9984
|
||||
name: bdb-port
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: bdb-port
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
- name: mongodb
|
||||
image: mongo:3.4.1
|
||||
args:
|
||||
- --replSet=bigchain-rs
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 27017
|
||||
hostPort: 27017
|
||||
name: mdb-port
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: mdb-data
|
||||
mountPath: /data
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: mdb-port
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
periodSeconds: 15
|
||||
timeoutSeconds: 1
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: mdb-data
|
||||
persistentVolumeClaim:
|
||||
claimName: mongoclaim
|
@ -1,131 +0,0 @@
|
||||
##############################################################
|
||||
# This YAML file desribes a StatefulSet with two containers: #
|
||||
# bigchaindb/bigchaindb:latest and rethinkdb:2.3 #
|
||||
# It also describes a Service to expose BigchainDB, #
|
||||
# the RethinkDB intracluster communications port, and #
|
||||
# the RethinkDB web interface port. #
|
||||
##############################################################
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: bdb-service
|
||||
namespace: default
|
||||
labels:
|
||||
name: bdb-service
|
||||
spec:
|
||||
selector:
|
||||
app: bdb
|
||||
ports:
|
||||
- port: 9984
|
||||
targetPort: 9984
|
||||
name: bdb-http-api
|
||||
- port: 29015
|
||||
targetPort: 29015
|
||||
name: rdb-intracluster-comm-port
|
||||
- port: 8080
|
||||
targetPort: 8080
|
||||
name: rdb-web-interface-port
|
||||
type: LoadBalancer
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: bdb
|
||||
namespace: default
|
||||
spec:
|
||||
serviceName: bdb
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
name: bdb
|
||||
labels:
|
||||
app: bdb
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: bdb-server
|
||||
image: bigchaindb/bigchaindb:latest
|
||||
args:
|
||||
- start
|
||||
env:
|
||||
- name: BIGCHAINDB_KEYPAIR_PRIVATE
|
||||
value: 56mEvwwVxcYsFQ3Y8UTFB8DVBv38yoUhxzDW3DAdLVd2
|
||||
- name: BIGCHAINDB_KEYPAIR_PUBLIC
|
||||
value: 9DsHwiEtvk51UHmNM2eV66czFha69j3CdtNrCj1RcZWR
|
||||
- name: BIGCHAINDB_KEYRING
|
||||
value: ""
|
||||
- name: BIGCHAINDB_DATABASE_BACKEND
|
||||
value: rethinkdb
|
||||
- name: BIGCHAINDB_DATABASE_HOST
|
||||
value: localhost
|
||||
- name: BIGCHAINDB_DATABASE_PORT
|
||||
value: "28015"
|
||||
- name: BIGCHAINDB_SERVER_BIND
|
||||
value: "0.0.0.0:9984"
|
||||
- name: BIGCHAINDB_DATABASE_NAME
|
||||
value: bigchain
|
||||
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
|
||||
value: "120"
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9984
|
||||
hostPort: 9984
|
||||
name: bdb-port
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
- name: rethinkdb
|
||||
image: rethinkdb:2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
hostPort: 8080
|
||||
name: rdb-web-interface-port
|
||||
protocol: TCP
|
||||
- containerPort: 29015
|
||||
hostPort: 29015
|
||||
name: rdb-intra-port
|
||||
protocol: TCP
|
||||
- containerPort: 28015
|
||||
hostPort: 28015
|
||||
name: rdb-client-port
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: rdb-data
|
||||
mountPath: /data
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 8080
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 8080
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: rdb-data
|
||||
persistentVolumeClaim:
|
||||
claimName: mongoclaim
|
@ -1,89 +0,0 @@
|
||||
#####################################################
|
||||
# This config file uses bdb v0.9.1 with bundled rdb #
|
||||
#####################################################
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: bdb-service
|
||||
namespace: default
|
||||
labels:
|
||||
name: bdb-service
|
||||
spec:
|
||||
selector:
|
||||
app: bdb
|
||||
ports:
|
||||
- port: 9984
|
||||
targetPort: 9984
|
||||
name: bdb-http-api
|
||||
- port: 8080
|
||||
targetPort: 8080
|
||||
name: bdb-rethinkdb-api
|
||||
type: LoadBalancer
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: bdb
|
||||
namespace: default
|
||||
spec:
|
||||
serviceName: bdb
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
name: bdb
|
||||
labels:
|
||||
app: bdb
|
||||
annotations:
|
||||
pod.beta.kubernetes.io/init-containers: '[
|
||||
{
|
||||
"name": "bdb091-configure",
|
||||
"image": "bigchaindb/bigchaindb:0.9.1",
|
||||
"command": ["bigchaindb", "-y", "configure", "rethinkdb"],
|
||||
"volumeMounts": [
|
||||
{
|
||||
"name": "bigchaindb-data",
|
||||
"mountPath": "/data"
|
||||
}
|
||||
]
|
||||
}
|
||||
]'
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: bdb091-server
|
||||
image: bigchaindb/bigchaindb:0.9.1
|
||||
args:
|
||||
- -c
|
||||
- /data/.bigchaindb
|
||||
- start
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9984
|
||||
hostPort: 9984
|
||||
name: bdb-port
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: bigchaindb-data
|
||||
mountPath: /data
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: bigchaindb-data
|
||||
hostPath:
|
||||
path: /disk/bigchaindb-data
|
@ -1,75 +0,0 @@
####################################################
# This config file runs rethinkdb:2.3 as a service #
####################################################

apiVersion: v1
kind: Service
metadata:
  name: rdb-service
  namespace: default
  labels:
    name: rdb-service
spec:
  selector:
    app: rdb
  ports:
  - port: 8080
    targetPort: 8080
    name: rethinkdb-http-port
  - port: 28015
    targetPort: 28015
    name: rethinkdb-driver-port
  type: LoadBalancer
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: rdb
  namespace: default
spec:
  serviceName: rdb
  replicas: 1
  template:
    metadata:
      name: rdb
      labels:
        app: rdb
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: rethinkdb
        image: rethinkdb:2.3
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
          hostPort: 8080
          name: rdb-http-port
          protocol: TCP
        - containerPort: 28015
          hostPort: 28015
          name: rdb-client-port
          protocol: TCP
        volumeMounts:
        - name: rdb-data
          mountPath: /data
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 15
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 15
          timeoutSeconds: 10
      restartPolicy: Always
      volumes:
      - name: rdb-data
        hostPath:
          path: /disk/rdb-data
@ -19,7 +19,7 @@
```
docker run \
--name=mdb1 \
--publish=17017:17017 \
--publish=<mongo port number for external connections>:<corresponding host port> \
--rm=true \
bigchaindb/mongodb \
--replica-set-name <replica set name> \
@ -1,38 +1,39 @@
########################################################################
# This YAML file describes a StatefulSet with a service for running and #
# exposing a MongoDB service. #
# exposing a MongoDB instance. #
# It depends on the configdb and db k8s pvc. #
########################################################################

apiVersion: v1
kind: Service
metadata:
  name: mdb
  name: mdb-svc
  namespace: default
  labels:
    name: mdb
    name: mdb-svc
spec:
  selector:
    app: mdb
    app: mdb-ss
  ports:
  - port: 27017
    targetPort: 27017
    name: mdb-port
  type: LoadBalancer
  type: ClusterIP
  clusterIP: None
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: mdb
  name: mdb-ss
  namespace: default
spec:
  serviceName: mdb
  serviceName: mdb-svc
  replicas: 1
  template:
    metadata:
      name: mdb
      name: mdb-ss
      labels:
        app: mdb
        app: mdb-ss
    spec:
      terminationGracePeriodSeconds: 10
      containers:
@ -41,6 +42,7 @@ spec:
      # versions during updates and rollbacks. Also, once fixed, change the
      # imagePullPolicy to IfNotPresent for faster bootup
        image: bigchaindb/mongodb:latest
        imagePullPolicy: Always
        env:
        - name: MONGODB_FQDN
          valueFrom:
@ -60,7 +62,6 @@ spec:
          capabilities:
            add:
            - FOWNER
        imagePullPolicy: Always
        ports:
        - containerPort: 27017
          hostPort: 27017
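Because the MongoDB Service above changes from a `LoadBalancer` to a headless `ClusterIP` Service (`clusterIP: None`), clients inside the cluster reach the StatefulSet pod through a stable DNS name rather than a virtual IP. A quick way to check the DNS record, as a sketch only (it assumes the default `cluster.local` cluster domain and a working cluster DNS):

```
# resolve the headless service from a throwaway busybox pod
kubectl run -it --rm dns-test --image=busybox --restart=Never -- \
  nslookup mdb-svc.default.svc.cluster.local
```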
11
k8s/nginx/container/Dockerfile
Normal file
@ -0,0 +1,11 @@
FROM nginx:1.11.10
LABEL maintainer "dev@bigchaindb.com"
WORKDIR /
RUN apt-get update \
    && apt-get -y upgrade \
    && apt-get autoremove \
    && apt-get clean
COPY nginx.conf.template /etc/nginx/nginx.conf
COPY nginx_entrypoint.bash /
EXPOSE 80 443 27017
ENTRYPOINT ["/nginx_entrypoint.bash"]
70
k8s/nginx/container/README.md
Normal file
@ -0,0 +1,70 @@
## Custom Nginx container for a Node

### Need

* Since BigchainDB and MongoDB both need to expose ports to the outside
  world (inter- and intra-cluster), we need a basic DDoS mitigation
  strategy to ensure that we can provide proper uptime and security for these
  core services.

* We can have a proxy like nginx/haproxy in every node that listens to
  global connections and applies cluster-level entry policy.

### Implementation

* For MongoDB cluster communication, we will use nginx with an environment
  variable specifying a ":"-separated list of whitelisted IPs. This list
  contains the IPs of existing instances in the MongoDB replica set, so that
  only whitelisted hosts can connect and a DDoS is avoided.

* For BigchainDB connections, nginx needs rules to throttle connections
  that use resources over a threshold.


### Step 1: Build the Latest Container

Run `docker build -t bigchaindb/nginx .` from this folder.

Optional: Upload the container to Docker Hub:
`docker push bigchaindb/nginx:<tag>`

### Step 2: Run the Container

Note that the whitelist IPs must be specified with their subnet in CIDR
format, e.g. `1.2.3.4/16`.

```
docker run \
--env "MONGODB_FRONTEND_PORT=<port where nginx listens for MongoDB connections>" \
--env "MONGODB_BACKEND_HOST=<ip/hostname of instance where MongoDB is running>" \
--env "MONGODB_BACKEND_PORT=<port where MongoDB is listening for connections>" \
--env "BIGCHAINDB_FRONTEND_PORT=<port where nginx listens for BigchainDB connections>" \
--env "BIGCHAINDB_BACKEND_HOST=<ip/hostname of instance where BigchainDB is running>" \
--env "BIGCHAINDB_BACKEND_PORT=<port where BigchainDB is listening for connections>" \
--env "MONGODB_WHITELIST=<a ':' separated list of IPs that can connect to MongoDB>" \
--name=ngx \
--publish=<port where nginx listens for MongoDB connections as specified above>:<corresponding host port> \
--publish=<port where nginx listens for BigchainDB connections as specified above>:<corresponding host port> \
--rm=true \
bigchaindb/nginx
```

For example:
```
docker run \
--env "MONGODB_FRONTEND_PORT=17017" \
--env "MONGODB_BACKEND_HOST=localhost" \
--env "MONGODB_BACKEND_PORT=27017" \
--env "BIGCHAINDB_FRONTEND_PORT=80" \
--env "BIGCHAINDB_BACKEND_HOST=localhost" \
--env "BIGCHAINDB_BACKEND_PORT=9984" \
--env "MONGODB_WHITELIST=192.168.0.0/16:10.0.2.0/24" \
--name=ngx \
--publish=80:80 \
--publish=17017:17017 \
--rm=true \
bigchaindb/nginx
```
108
k8s/nginx/container/nginx.conf.template
Normal file
@ -0,0 +1,108 @@
worker_processes 2;
daemon off;
user nobody nogroup;
pid /tmp/nginx.pid;
error_log /etc/nginx/nginx.error.log;

events {
  worker_connections 256;
  accept_mutex on;
  use epoll;
}

http {
  server_names_hash_bucket_size 128;
  resolver 8.8.8.8 8.8.4.4;
  access_log /etc/nginx/nginx.access.log combined buffer=16k flush=5s;

  # allow 10 req/sec from the same IP address, and store the counters in a
  # `zone` or shared memory location tagged as 'one'.
  limit_req_zone $binary_remote_addr zone=one:10m rate=10r/s;

  # enable logging when requests are being throttled
  limit_req_log_level notice;

  # the http status code to return to the client when throttling;
  # 429 is for TooManyRequests,
  # ref. RFC 6585
  limit_req_status 429;

  upstream bdb_backend {
    server BIGCHAINDB_BACKEND_HOST:BIGCHAINDB_BACKEND_PORT max_fails=5 fail_timeout=30;
  }

  server {
    listen BIGCHAINDB_FRONTEND_PORT;
    # server_name "FRONTEND_DNS_NAME";
    underscores_in_headers on;

    # max client request body size: avg transaction size
    client_max_body_size 15k;

    # keepalive connection settings
    keepalive_timeout 20s;

    # `slowloris` attack mitigation settings
    client_body_timeout 10s;
    client_header_timeout 10s;

    location / {
      proxy_ignore_client_abort on;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header Host $http_host;
      proxy_redirect off;

      # TODO proxy_set_header X-Forwarded-Proto https;

      # limit requests from the same client, allow `burst` to 20 r/s,
      # `nodelay` or drop connection immediately in case it exceeds this
      # threshold.
      limit_req zone=one burst=20 nodelay;

      proxy_pass http://bdb_backend;
    }

    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
      root /etc/nginx/50x.html;
    }
  }
}

# NGINX stream block for TCP and UDP proxies
stream {
  log_format mdb_log '[$time_iso8601] $realip_remote_addr $remote_addr '
                     '$proxy_protocol_addr $proxy_protocol_port '
                     '$protocol $status $session_time $bytes_sent '
                     '$bytes_received "$upstream_addr" "$upstream_bytes_sent" '
                     '"$upstream_bytes_received" "$upstream_connect_time" ';

  access_log /etc/nginx/nginx.stream.access.log mdb_log buffer=16k flush=5s;

  # define a zone 'two' of size 10 megabytes to store the counters
  # that hold number of TCP connections from a specific IP address
  limit_conn_zone $binary_remote_addr zone=two:10m;

  # enable logging when connections are being throttled
  limit_conn_log_level notice;

  upstream mdb_backend {
    server MONGODB_BACKEND_HOST:MONGODB_BACKEND_PORT max_fails=5 fail_timeout=30 max_conns=1024;
  }

  server {
    listen MONGODB_FRONTEND_PORT so_keepalive=10m:1m:5;
    preread_timeout 30s;
    tcp_nodelay on;

    # whitelist
    MONGODB_WHITELIST
    # deny access to everyone else
    deny all;

    # allow 512 connections from the same IP address
    limit_conn two 512;

    proxy_pass mdb_backend;
  }
}
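The `limit_req` settings above can be sanity-checked with a quick burst of requests. This is only an illustrative sketch; it assumes nginx is reachable on `localhost:80` (the `BIGCHAINDB_FRONTEND_PORT` used in the README example) and that a BigchainDB backend is answering behind it:

```
# fire a burst of requests and print only the HTTP status codes;
# once the 10 req/sec limit (plus burst=20) is exceeded, nginx should
# start replying with 429 Too Many Requests
for i in $(seq 1 40); do
  curl -s -o /dev/null -w "%{http_code}\n" http://localhost:80/
done
```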
44
k8s/nginx/container/nginx_entrypoint.bash
Executable file
@ -0,0 +1,44 @@
#!/bin/bash
set -euo pipefail

mongo_frontend_port=`printenv MONGODB_FRONTEND_PORT`
mongo_backend_host=`printenv MONGODB_BACKEND_HOST`
mongo_backend_port=`printenv MONGODB_BACKEND_PORT`
bdb_frontend_port=`printenv BIGCHAINDB_FRONTEND_PORT`
bdb_backend_host=`printenv BIGCHAINDB_BACKEND_HOST`
bdb_backend_port=`printenv BIGCHAINDB_BACKEND_PORT`
mongo_whitelist=`printenv MONGODB_WHITELIST`

# sanity checks
if [[ -z "${mongo_frontend_port}" || \
    -z "${mongo_backend_host}" || \
    -z "${mongo_backend_port}" || \
    -z "${bdb_frontend_port}" || \
    -z "${bdb_backend_host}" || \
    -z "${bdb_backend_port}" ]] ; then
  echo "Invalid environment settings detected. Exiting!"
  exit 1
fi

NGINX_CONF_FILE=/etc/nginx/nginx.conf

# configure the nginx.conf file with env variables
sed -i "s|MONGODB_FRONTEND_PORT|${mongo_frontend_port}|g" $NGINX_CONF_FILE
sed -i "s|MONGODB_BACKEND_HOST|${mongo_backend_host}|g" $NGINX_CONF_FILE
sed -i "s|MONGODB_BACKEND_PORT|${mongo_backend_port}|g" $NGINX_CONF_FILE
sed -i "s|BIGCHAINDB_FRONTEND_PORT|${bdb_frontend_port}|g" $NGINX_CONF_FILE
sed -i "s|BIGCHAINDB_BACKEND_HOST|${bdb_backend_host}|g" $NGINX_CONF_FILE
sed -i "s|BIGCHAINDB_BACKEND_PORT|${bdb_backend_port}|g" $NGINX_CONF_FILE

# populate the whitelist in the conf file as per MONGODB_WHITELIST env var
hosts=$(echo ${mongo_whitelist} | tr ":" "\n")
for host in $hosts; do
  sed -i "s|MONGODB_WHITELIST|allow ${host};\n MONGODB_WHITELIST|g" $NGINX_CONF_FILE
done

# remove the MONGODB_WHITELIST marker string from template
sed -i "s|MONGODB_WHITELIST||g" $NGINX_CONF_FILE

# start nginx
echo "INFO: starting nginx..."
exec nginx -c /etc/nginx/nginx.conf
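To make the whitelist substitution above concrete, here is a small stand-alone sketch (not part of the repo) showing what the loop produces for the example whitelist used elsewhere in this changeset:

```
# illustration only: expand a ':'-separated whitelist into nginx 'allow' lines
mongo_whitelist="192.168.0.0/16:10.0.2.0/24"
hosts=$(echo ${mongo_whitelist} | tr ":" "\n")
for host in $hosts; do
  echo "allow ${host};"
done
# prints:
#   allow 192.168.0.0/16;
#   allow 10.0.2.0/24;
```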
13
k8s/nginx/nginx-cm.yaml
Normal file
@ -0,0 +1,13 @@
#########################################################################
# This YAML file describes a ConfigMap with a valid list of IP addresses #
# that can connect to the MongoDB instance. #
#########################################################################

apiVersion: v1
kind: ConfigMap
metadata:
  name: mongodb-whitelist
  namespace: default
data:
  # ':' separated list of allowed hosts
  allowed-hosts: 192.168.0.0/16:10.0.2.0/24
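As a usage sketch (assuming `kubectl` is configured to talk to the target cluster), the ConfigMap can be created and inspected like this; the nginx deployment below reads it through a `configMapKeyRef`:

```
kubectl apply -f nginx-cm.yaml
# confirm the whitelist value that will be injected as MONGODB_WHITELIST
kubectl get configmap mongodb-whitelist -o yaml
```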
82
k8s/nginx/nginx-dep.yaml
Normal file
@ -0,0 +1,82 @@
###############################################################
# This config file runs nginx as a k8s deployment and exposes #
# it using an external load balancer. #
# This deployment is used as a front end to both BigchainDB #
# and MongoDB. #
###############################################################

apiVersion: v1
kind: Service
metadata:
  name: ngx-svc
  namespace: default
  labels:
    name: ngx-svc
  annotations:
    # NOTE: the following annotation is a beta feature and
    # only available in GCE/GKE and Azure as of now
    service.beta.kubernetes.io/external-traffic: OnlyLocal
spec:
  selector:
    app: ngx-dep
  ports:
  - port: 27017
    targetPort: 27017
    name: ngx-public-mdb-port
    protocol: TCP
  - port: 80
    targetPort: 80
    name: ngx-public-bdb-port
    protocol: TCP
  type: LoadBalancer
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: ngx-dep
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: ngx-dep
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: bigchaindb/nginx:latest
        imagePullPolicy: Always
        env:
        - name: MONGODB_FRONTEND_PORT
          value: "27017"
        - name: MONGODB_BACKEND_HOST
          value: mdb-svc
        - name: MONGODB_BACKEND_PORT
          value: "27017"
        - name: BIGCHAINDB_FRONTEND_PORT
          value: "80"
        - name: BIGCHAINDB_BACKEND_HOST
          value: bdb-svc
        - name: BIGCHAINDB_BACKEND_PORT
          value: "9984"
        - name: MONGODB_WHITELIST
          valueFrom:
            configMapKeyRef:
              name: mongodb-whitelist
              key: allowed-hosts
        ports:
        - containerPort: 27017
          hostPort: 27017
          name: public-mdb-port
          protocol: TCP
        - containerPort: 80
          hostPort: 80
          name: public-bdb-port
          protocol: TCP
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        #livenessProbe: TODO(Krish)
        #readinessProbe: TODO(Krish)
      restartPolicy: Always
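A deployment sketch, again assuming a configured `kubectl` context and that the ConfigMap above has already been applied:

```
kubectl apply -f nginx-dep.yaml
# wait until the cloud provider assigns an external IP to the LoadBalancer service
kubectl get svc ngx-svc -w
# port 80 on that IP should then proxy to BigchainDB, port 27017 to MongoDB
```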
2
setup.py
@ -67,7 +67,7 @@ install_requires = [
    'pymongo~=3.4',
    'pysha3~=1.0.2',
    'cryptoconditions>=0.5.0',
    'python-rapidjson==0.0.8',
    'python-rapidjson==0.0.11',
    'logstats>=0.2.1',
    'flask>=0.10.1',
    'flask-cors==2.1.2',
@ -99,6 +99,18 @@ def test_connection_run_errors(mock_client, mock_init_repl_set):
    assert query.run.call_count == 1


@mock.patch('pymongo.database.Database.authenticate')
def test_connection_with_credentials(mock_authenticate):
    import bigchaindb
    from bigchaindb.backend.mongodb.connection import MongoDBConnection
    conn = MongoDBConnection(host=bigchaindb.config['database']['host'],
                             port=bigchaindb.config['database']['port'],
                             login='theplague',
                             password='secret')
    conn.connect()
    assert mock_authenticate.call_count == 2


def test_check_replica_set_not_enabled(mongodb_connection):
    from bigchaindb.backend.mongodb.connection import _check_replica_set
    from bigchaindb.common.exceptions import ConfigurationError
@ -168,7 +180,7 @@ def test_initialize_replica_set(mock_cmd_line_opts):
    ]

    # check that it returns
    assert initialize_replica_set('host', 1337, 1000) is None
    assert initialize_replica_set('host', 1337, 1000, 'dbname', False, None, None) is None

    # test it raises OperationError if anything wrong
    with mock.patch.object(Database, 'command') as mock_command:
@ -178,4 +190,4 @@ def test_initialize_replica_set(mock_cmd_line_opts):
    ]

    with pytest.raises(pymongo.errors.OperationFailure):
        initialize_replica_set('host', 1337, 1000)
        initialize_replica_set('host', 1337, 1000, 'dbname', False, None, None)
@ -212,6 +212,7 @@ def test_get_owned_ids(signed_create_tx, user_pk):


def test_get_votes_by_block_id(signed_create_tx, structurally_valid_vote):
    from bigchaindb.common.crypto import generate_key_pair
    from bigchaindb.backend import connect, query
    from bigchaindb.models import Block
    conn = connect()
@ -219,10 +220,14 @@ def test_get_votes_by_block_id(signed_create_tx, structurally_valid_vote):
    # create and insert a block
    block = Block(transactions=[signed_create_tx])
    conn.db.bigchain.insert_one(block.to_dict())

    # create and insert some votes
    structurally_valid_vote['vote']['voting_for_block'] = block.id
    conn.db.votes.insert_one(structurally_valid_vote)
    # create a second vote under a different key
    _, pk = generate_key_pair()
    structurally_valid_vote['vote']['voting_for_block'] = block.id
    structurally_valid_vote['node_pubkey'] = pk
    structurally_valid_vote.pop('_id')
    conn.db.votes.insert_one(structurally_valid_vote)

@ -325,6 +330,19 @@ def test_write_vote(structurally_valid_vote):
    assert vote_db == structurally_valid_vote


def test_duplicate_vote_raises_duplicate_key(structurally_valid_vote):
    from bigchaindb.backend import connect, query
    from bigchaindb.backend.exceptions import DuplicateKeyError
    conn = connect()

    # write a vote
    query.write_vote(conn, structurally_valid_vote)

    # write the same vote a second time
    with pytest.raises(DuplicateKeyError):
        query.write_vote(conn, structurally_valid_vote)


def test_get_genesis_block(genesis_block):
    from bigchaindb.backend import connect, query
    conn = connect()
@ -38,7 +38,7 @@ def test_start_rethinkdb_exits_when_cannot_start(mock_popen):


@patch('rethinkdb.ast.Table.reconfigure')
def test_set_shards(mock_reconfigure, monkeypatch, b, mocked_setup_logging):
def test_set_shards(mock_reconfigure, monkeypatch, b):
    from bigchaindb.commands.bigchain import run_set_shards

    # this will mock the call to retrieve the database config
@ -50,8 +50,6 @@ def test_set_shards(mock_reconfigure, monkeypatch, b, mocked_setup_logging):
    args = Namespace(num_shards=3, config=None)
    run_set_shards(args)
    mock_reconfigure.assert_called_with(replicas=1, shards=3, dry_run=False)
    mocked_setup_logging.assert_called_once_with(user_log_config={})
    mocked_setup_logging.reset_mock()

    # this will mock the call to retrieve the database config
    # we will set it to return three replica
@ -61,10 +59,9 @@ def test_set_shards(mock_reconfigure, monkeypatch, b, mocked_setup_logging):
    monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_three_replicas)
    run_set_shards(args)
    mock_reconfigure.assert_called_with(replicas=3, shards=3, dry_run=False)
    mocked_setup_logging.assert_called_once_with(user_log_config={})


def test_set_shards_raises_exception(monkeypatch, b, mocked_setup_logging):
def test_set_shards_raises_exception(monkeypatch, b):
    from bigchaindb.commands.bigchain import run_set_shards

    # test that we are correctly catching the exception
@ -81,11 +78,10 @@ def test_set_shards_raises_exception(monkeypatch, b, mocked_setup_logging):
    with pytest.raises(SystemExit) as exc:
        run_set_shards(args)
    assert exc.value.args == ('Failed to reconfigure tables.',)
    mocked_setup_logging.assert_called_once_with(user_log_config={})


@patch('rethinkdb.ast.Table.reconfigure')
def test_set_replicas(mock_reconfigure, monkeypatch, b, mocked_setup_logging):
def test_set_replicas(mock_reconfigure, monkeypatch, b):
    from bigchaindb.commands.bigchain import run_set_replicas

    # this will mock the call to retrieve the database config
@ -97,8 +93,6 @@ def test_set_replicas(mock_reconfigure, monkeypatch, b, mocked_setup_logging):
    args = Namespace(num_replicas=2, config=None)
    run_set_replicas(args)
    mock_reconfigure.assert_called_with(replicas=2, shards=2, dry_run=False)
    mocked_setup_logging.assert_called_once_with(user_log_config={})
    mocked_setup_logging.reset_mock()

    # this will mock the call to retrieve the database config
    # we will set it to return three shards
@ -108,10 +102,9 @@ def test_set_replicas(mock_reconfigure, monkeypatch, b, mocked_setup_logging):
    monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_three_shards)
    run_set_replicas(args)
    mock_reconfigure.assert_called_with(replicas=2, shards=3, dry_run=False)
    mocked_setup_logging.assert_called_once_with(user_log_config={})


def test_set_replicas_raises_exception(monkeypatch, b, mocked_setup_logging):
def test_set_replicas_raises_exception(monkeypatch, b):
    from bigchaindb.commands.bigchain import run_set_replicas

    # test that we are correctly catching the exception
@ -128,4 +121,3 @@ def test_set_replicas_raises_exception(monkeypatch, b, mocked_setup_logging):
    with pytest.raises(SystemExit) as exc:
        run_set_replicas(args)
    assert exc.value.args == ('Failed to reconfigure tables.',)
    mocked_setup_logging.assert_called_once_with(user_log_config={})
@ -74,7 +74,7 @@ def test_bigchain_run_start_assume_yes_create_default_config(
# interfere with capsys.
# See related issue: https://github.com/pytest-dev/pytest/issues/128
@pytest.mark.usefixtures('ignore_local_config_file')
def test_bigchain_show_config(capsys, mocked_setup_logging):
def test_bigchain_show_config(capsys):
    from bigchaindb import config
    from bigchaindb.commands.bigchain import run_show_config

@ -85,11 +85,9 @@ def test_bigchain_show_config(capsys, mocked_setup_logging):
    del config['CONFIGURED']
    config['keypair']['private'] = 'x' * 45
    assert output_config == config
    mocked_setup_logging.assert_called_once_with(user_log_config={})


def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch,
                                                   mocked_setup_logging):
def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch):
    from bigchaindb import config
    from bigchaindb.commands.bigchain import run_export_my_pubkey

@ -106,11 +104,9 @@ def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch,
    lines = out.splitlines()
    assert config['keypair']['public'] in lines
    assert 'Charlie_Bucket' in lines
    mocked_setup_logging.assert_called_once_with(user_log_config={})


def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch,
                                                       mocked_setup_logging):
def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch):
    from bigchaindb import config
    from bigchaindb.commands.bigchain import run_export_my_pubkey

@ -126,49 +122,41 @@ def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch,
    # https://docs.python.org/3/library/exceptions.html#SystemExit
    assert exc_info.value.code == \
        "This node's public key wasn't set anywhere so it can't be exported"
    mocked_setup_logging.assert_called_once_with(user_log_config={})


def test_bigchain_run_init_when_db_exists(mocked_setup_logging,
                                          mock_db_init_with_existing_db):
def test_bigchain_run_init_when_db_exists(mock_db_init_with_existing_db):
    from bigchaindb.commands.bigchain import run_init
    args = Namespace(config=None)
    run_init(args)
    mocked_setup_logging.assert_called_once_with(user_log_config={})


@patch('bigchaindb.backend.schema.drop_database')
def test_drop_db_when_assumed_yes(mock_db_drop, mocked_setup_logging):
def test_drop_db_when_assumed_yes(mock_db_drop):
    from bigchaindb.commands.bigchain import run_drop
    args = Namespace(config=None, yes=True)

    run_drop(args)
    assert mock_db_drop.called
    mocked_setup_logging.assert_called_once_with(user_log_config={})


@patch('bigchaindb.backend.schema.drop_database')
def test_drop_db_when_interactive_yes(mock_db_drop, monkeypatch,
                                      mocked_setup_logging):
def test_drop_db_when_interactive_yes(mock_db_drop, monkeypatch):
    from bigchaindb.commands.bigchain import run_drop
    args = Namespace(config=None, yes=False)
    monkeypatch.setattr('bigchaindb.commands.bigchain.input_on_stderr', lambda x: 'y')

    run_drop(args)
    assert mock_db_drop.called
    mocked_setup_logging.assert_called_once_with(user_log_config={})


@patch('bigchaindb.backend.schema.drop_database')
def test_drop_db_does_not_drop_when_interactive_no(mock_db_drop, monkeypatch,
                                                   mocked_setup_logging):
def test_drop_db_does_not_drop_when_interactive_no(mock_db_drop, monkeypatch):
    from bigchaindb.commands.bigchain import run_drop
    args = Namespace(config=None, yes=False)
    monkeypatch.setattr('bigchaindb.commands.bigchain.input_on_stderr', lambda x: 'n')

    run_drop(args)
    assert not mock_db_drop.called
    mocked_setup_logging.assert_called_once_with(user_log_config={})


def test_run_configure_when_config_exists_and_skipping(monkeypatch):
@ -417,7 +405,7 @@ def test_calling_main(start_mock, base_parser_mock, parse_args_mock,

@pytest.mark.usefixtures('ignore_local_config_file')
@patch('bigchaindb.commands.bigchain.add_replicas')
def test_run_add_replicas(mock_add_replicas, mocked_setup_logging):
def test_run_add_replicas(mock_add_replicas):
    from bigchaindb.commands.bigchain import run_add_replicas
    from bigchaindb.backend.exceptions import OperationError

@ -427,9 +415,7 @@ def test_run_add_replicas(mock_add_replicas, mocked_setup_logging):
    mock_add_replicas.return_value = None
    assert run_add_replicas(args) is None
    assert mock_add_replicas.call_count == 1
    mocked_setup_logging.assert_called_once_with(user_log_config={})
    mock_add_replicas.reset_mock()
    mocked_setup_logging.reset_mock()

    # test add_replicas with `OperationError`
    mock_add_replicas.side_effect = OperationError('err')
@ -437,9 +423,7 @@ def test_run_add_replicas(mock_add_replicas, mocked_setup_logging):
        run_add_replicas(args)
    assert exc.value.args == ('err',)
    assert mock_add_replicas.call_count == 1
    mocked_setup_logging.assert_called_once_with(user_log_config={})
    mock_add_replicas.reset_mock()
    mocked_setup_logging.reset_mock()

    # test add_replicas with `NotImplementedError`
    mock_add_replicas.side_effect = NotImplementedError('err')
@ -447,14 +431,12 @@ def test_run_add_replicas(mock_add_replicas, mocked_setup_logging):
        run_add_replicas(args)
    assert exc.value.args == ('err',)
    assert mock_add_replicas.call_count == 1
    mocked_setup_logging.assert_called_once_with(user_log_config={})
    mock_add_replicas.reset_mock()
    mocked_setup_logging.reset_mock()


@pytest.mark.usefixtures('ignore_local_config_file')
@patch('bigchaindb.commands.bigchain.remove_replicas')
def test_run_remove_replicas(mock_remove_replicas, mocked_setup_logging):
def test_run_remove_replicas(mock_remove_replicas):
    from bigchaindb.commands.bigchain import run_remove_replicas
    from bigchaindb.backend.exceptions import OperationError

@ -464,8 +446,6 @@ def test_run_remove_replicas(mock_remove_replicas, mocked_setup_logging):
    mock_remove_replicas.return_value = None
    assert run_remove_replicas(args) is None
    assert mock_remove_replicas.call_count == 1
    mocked_setup_logging.assert_called_once_with(user_log_config={})
    mocked_setup_logging.reset_mock()
    mock_remove_replicas.reset_mock()

    # test add_replicas with `OperationError`
@ -474,8 +454,6 @@ def test_run_remove_replicas(mock_remove_replicas, mocked_setup_logging):
        run_remove_replicas(args)
    assert exc.value.args == ('err',)
    assert mock_remove_replicas.call_count == 1
    mocked_setup_logging.assert_called_once_with(user_log_config={})
    mocked_setup_logging.reset_mock()
    mock_remove_replicas.reset_mock()

    # test add_replicas with `NotImplementedError`
@ -484,6 +462,4 @@ def test_run_remove_replicas(mock_remove_replicas, mocked_setup_logging):
        run_remove_replicas(args)
    assert exc.value.args == ('err',)
    assert mock_remove_replicas.call_count == 1
    mocked_setup_logging.assert_called_once_with(user_log_config={})
    mocked_setup_logging.reset_mock()
    mock_remove_replicas.reset_mock()
@ -1,7 +1,6 @@
import argparse
from argparse import ArgumentTypeError, Namespace
import logging
from logging import getLogger

import pytest

@ -15,7 +14,7 @@ def reset_bigchaindb_config(monkeypatch):


@pytest.mark.usefixtures('ignore_local_config_file', 'reset_bigchaindb_config')
def test_configure_bigchaindb_configures_bigchaindb(mocked_setup_logging):
def test_configure_bigchaindb_configures_bigchaindb():
    from bigchaindb.commands.utils import configure_bigchaindb
    from bigchaindb.config_utils import is_configured
    assert not is_configured()
@ -26,7 +25,6 @@ def test_configure_bigchaindb_configures_bigchaindb(mocked_setup_logging):

    args = Namespace(config=None)
    test_configure(args)
    mocked_setup_logging.assert_called_once_with(user_log_config={})


@pytest.mark.usefixtures('ignore_local_config_file',
@ -39,22 +37,19 @@ def test_configure_bigchaindb_configures_bigchaindb(mocked_setup_logging):
    logging.ERROR,
    logging.CRITICAL,
))
def test_configure_bigchaindb_configures_logging(log_level,
                                                 mocked_setup_sub_logger):
def test_configure_bigchaindb_logging(log_level):
    from bigchaindb.commands.utils import configure_bigchaindb
    from bigchaindb.log.configs import PUBLISHER_LOGGING_CONFIG
    root_logger = getLogger()
    assert root_logger.level == logging.NOTSET
    from bigchaindb import config
    assert not config['log']

    @configure_bigchaindb
    def test_configure_logger(args):
        root_logger = getLogger()
        assert root_logger.level == PUBLISHER_LOGGING_CONFIG['root']['level']
        pass

    args = Namespace(config=None, log_level=log_level)
    test_configure_logger(args)
    mocked_setup_sub_logger.assert_called_once_with(
        user_log_config={'level_console': log_level})
    from bigchaindb import config
    assert config['log'] == {'level_console': log_level}


def test_start_raises_if_command_not_implemented():
@ -446,23 +446,6 @@ class TestBigchainApi(object):
        b.write_vote(b.vote(block_3.id, b.get_last_voted_block().id, True))
        assert b.get_last_voted_block().id == block_3.id

    def test_no_vote_written_if_block_already_has_vote(self, b, genesis_block):
        from bigchaindb.models import Block

        block_1 = dummy_block()
        b.write_block(block_1)

        b.write_vote(b.vote(block_1.id, genesis_block.id, True))
        retrieved_block_1 = b.get_block(block_1.id)
        retrieved_block_1 = Block.from_dict(retrieved_block_1)

        # try to vote again on the retrieved block, should do nothing
        b.write_vote(b.vote(retrieved_block_1.id, genesis_block.id, True))
        retrieved_block_2 = b.get_block(block_1.id)
        retrieved_block_2 = Block.from_dict(retrieved_block_2)

        assert retrieved_block_1 == retrieved_block_2

    @pytest.mark.usefixtures('inputs')
    def test_assign_transaction_one_node(self, b, user_pk, user_sk):
        from bigchaindb.backend import query
18
tests/log/test_loggers.py
Normal file
@ -0,0 +1,18 @@
from logging.handlers import SocketHandler


class TestHttpServerLogger:

    def test_init(self, mocker):
        from bigchaindb.log.configs import (
            DEFAULT_SOCKET_LOGGING_ADDR as expected_socket_address)
        from bigchaindb.log.loggers import HttpServerLogger
        mocked_config = mocker.patch(
            'gunicorn.config.Config', autospec=True, spec_set=True)
        logger = HttpServerLogger(mocked_config.return_value)
        assert len(logger.access_log.handlers) == 1
        assert len(logger.error_log.handlers) == 1
        assert isinstance(logger.access_log.handlers[0], SocketHandler)
        assert isinstance(logger.error_log.handlers[0], SocketHandler)
        assert logger.access_log.handlers[0].address == expected_socket_address
        assert logger.error_log.handlers[0].address == expected_socket_address
@ -27,6 +27,8 @@ def test_get_votes_endpoint(b, client):
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_votes_endpoint_multiple_votes(b, client):
    from bigchaindb.common.crypto import generate_key_pair

    tx = Transaction.create([b.me], [([b.me], 1)])
    tx = tx.sign([b.me_private])

@ -37,8 +39,12 @@ def test_get_votes_endpoint_multiple_votes(b, client):
    vote_valid = b.vote(block.id, last_block, True)
    b.write_vote(vote_valid)

    # vote the block valid
    # vote the block invalid
    # a node can only vote once, so we need a new node_pubkey for the second
    # vote
    _, pk = generate_key_pair()
    vote_invalid = b.vote(block.id, last_block, False)
    vote_invalid['node_pubkey'] = pk
    b.write_vote(vote_invalid)

    res = client.get(VOTES_ENDPOINT + '?block_id=' + block.id)