Merge remote-tracking branch 'origin/master' into federation-tests
Commit: a6ba13dc3e
.gitattributes (vendored) | 2 changes
@@ -1,8 +1,6 @@
-benchmarking-tests export-ignore
 deploy-cluster-aws export-ignore
 docs export-ignore
 ntools export-ignore
-speed-tests export-ignore
 tests export-ignore
 .gitattributes export-ignore
 .gitignore export-ignore
.gitignore (vendored) | 2 changes
@@ -71,8 +71,6 @@ deploy-cluster-aws/confiles/
 deploy-cluster-aws/client_confile
 deploy-cluster-aws/hostlist.py
 deploy-cluster-aws/ssh_key.py
-benchmarking-tests/hostlist.py
-benchmarking-tests/ssh_key.py
 
 # Ansible-specific files
 ntools/one-m/ansible/hosts
.travis.yml | 13 changes
@@ -4,6 +4,7 @@ cache: pip
 python:
   - 3.4
   - 3.5
+  - 3.6
 
 env:
   - TOXENV=flake8
@@ -19,6 +20,12 @@ matrix:
       env: TOXENV=docsroot
     - python: 3.4
       env: TOXENV=docsserver
+    - python: 3.5
+      env: TOXENV=flake8
+    - python: 3.5
+      env: TOXENV=docsroot
+    - python: 3.5
+      env: TOXENV=docsserver
   include:
     - python: 3.4
       addons:
@@ -30,6 +37,12 @@ matrix:
       env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb
     - python: 3.5
       env: BIGCHAINDB_DATABASE_BACKEND=mongodb
+    - python: 3.6
+      addons:
+        rethinkdb: '2.3.5'
+      env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb
+    - python: 3.6
+      env: BIGCHAINDB_DATABASE_BACKEND=mongodb
 
 before_install: sudo .ci/travis-before-install.sh
 
CHANGELOG.md | 14 changes
@@ -15,6 +15,20 @@ For reference, the possible headings are:
 * **External Contributors** to list contributors outside of BigchainDB GmbH.
 * **Notes**
 
+## [0.9.5] - 2017-03-29
+Tag name: v0.9.5
+
+### Fixed
+Upgrade `python-rapidjson` to `0.0.11` (fixes #1350 - thanks to @ferOnti for
+reporting).
+
+## [0.9.4] - 2017-03-16
+Tag name: v0.9.4
+
+### Fixed
+Fixed #1271 (false double spend error). Thanks to @jmduque for reporting the
+problem along with a very detailed diagnosis and useful recommendations.
+
 ## [0.9.3] - 2017-03-06
 Tag name: v0.9.3
 
Dockerfile | 39 changes
@@ -1,33 +1,32 @@
 FROM ubuntu:xenial
 
-# From http://stackoverflow.com/a/38553499
-
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y locales
-
-RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
-    echo 'LANG="en_US.UTF-8"'>/etc/default/locale && \
-    dpkg-reconfigure --frontend=noninteractive locales && \
-    update-locale LANG=en_US.UTF-8
-
 ENV LANG en_US.UTF-8
+ENV DEBIAN_FRONTEND noninteractive
-# The `apt-get update` command executed with the install instructions should
-# not use a locally cached storage layer. Force update the cache again.
-# https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#run
-RUN apt-get update && apt-get -y install python3 python3-pip libffi-dev \
-    && pip3 install --upgrade pip \
-    && pip3 install --upgrade setuptools
 
 RUN mkdir -p /usr/src/app
 
 COPY . /usr/src/app/
 
 WORKDIR /usr/src/app
 
-RUN pip3 install --no-cache-dir -e .
+RUN locale-gen en_US.UTF-8 && \
+    apt-get -q update && \
+    apt-get install -qy --no-install-recommends \
+        python3 \
+        python3-pip \
+        libffi-dev \
+        python3-dev \
+        build-essential && \
+    \
+    pip3 install --upgrade --no-cache-dir pip setuptools && \
+    \
+    pip3 install --no-cache-dir -e . && \
+    \
+    apt-get remove -qy --purge gcc cpp binutils perl && \
+    apt-get -qy autoremove && \
+    apt-get -q clean all && \
+    rm -rf /usr/share/perl /usr/share/perl5 /usr/share/man /usr/share/info /usr/share/doc && \
+    rm -rf /var/lib/apt/lists/*
 
 VOLUME ["/data"]
 
 WORKDIR /data
 
 ENV BIGCHAINDB_CONFIG_PATH /data/.bigchaindb
@@ -51,3 +51,15 @@ END BLOCK
 (END OF EMAIL)
 
 The next step is to wait for them to copy that comment into the comments of the indicated pull request. Once they do so, it's safe to merge the pull request.
+
+## How to Handle CLA Agreement Emails with No Associated Pull Request
+
+Reply with an email like this:
+
+Hi [First Name],
+
+Today I got an email (copied below) to tell me that you agreed to the BigchainDB Contributor License Agreement. Did you intend to do that?
+
+If no, then you can ignore this email.
+
+If yes, then there's another step to connect your email address with your GitHub account. To do that, you must first create a pull request in one of the BigchainDB repositories on GitHub. Once you've done that, please reply to this email with a link to the pull request. Then I'll send you a special block of text to paste into the comments on that pull request.
Makefile | 8 changes
@@ -51,17 +51,13 @@ lint: ## check style with flake8
	flake8 bigchaindb tests
 
 test: ## run tests quickly with the default Python
-	py.test
+	pytest -v -n auto
 
 test-all: ## run tests on every Python version with tox
	tox
 
 coverage: ## check code coverage quickly with the default Python
-	coverage run --source bigchaindb py.test
-	coverage report -m
-	coverage html
+	pytest -v -n auto --cov=bigchaindb --cov-report term --cov-report html
	$(BROWSER) htmlcov/index.html
 
 docs: ## generate Sphinx HTML documentation, including API docs
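Note: the rewritten `test` and `coverage` targets assume the `pytest-xdist` plugin (which provides `-n auto` for parallel runs) and the `pytest-cov` plugin (which provides `--cov` and `--cov-report`) are installed in the test environment.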
@@ -82,6 +82,6 @@ flake8 --max-line-length 119 bigchaindb/
 
 ## Writing and Running (Python) Tests
 
-The content of this section was moved to [`bigchiandb/tests/README.md`](./tests/README.md).
+The content of this section was moved to [`bigchaindb/tests/README.md`](./tests/README.md).
 
 Note: We automatically run all tests on all pull requests (using Travis CI), so you should definitely run all tests locally before you submit a pull request. See the above-linked README file for instructions.
@@ -45,10 +45,16 @@ These steps are common between minor and patch releases:
 1. Make sure your local Git is in the same state as the release: e.g. `git fetch <remote-name>` and `git checkout v0.9.1`
 1. Make sure you have a `~/.pypirc` file containing credentials for PyPI
 1. Do a `make release` to build and publish the new `bigchaindb` package on PyPI
-1. Login to readthedocs.org as a maintainer of the BigchainDB Server docs.
-   Go to Admin --> Versions and under **Choose Active Versions**, make sure that the new version's tag is
-   "Active" and "Public", and make sure the new version's branch
-   (without the 'v' in front) is _not_ active
-1. Also in readthedocs.org, go to Admin --> Advanced Settings
-   and make sure that "Default branch:" (i.e. what "latest" points to)
-   is set to the new release's tag, e.g. `v0.9.1`. (Don't miss the 'v' in front.)
+1. [Login to readthedocs.org](https://readthedocs.org/accounts/login/)
+   as a maintainer of the BigchainDB Server docs, and:
+   - Go to Admin --> Advanced Settings
+     and make sure that "Default branch:" (i.e. what "latest" points to)
+     is set to the new release's tag, e.g. `v0.9.1`.
+     (Don't miss the 'v' in front.)
+   - Go to Admin --> Versions
+     and under **Choose Active Versions**, do these things:
+     1. Make sure that the new version's tag is "Active" and "Public"
+     2. Make sure the new version's branch
+        (without the 'v' in front) is _not_ active.
+     3. Make sure the **stable** branch is _not_ active.
+     4. Scroll to the bottom of the page and click the Submit button.
@@ -1,3 +0,0 @@
-# Benchmarking tests
-
-This folder contains util files and test case folders to benchmark the performance of a BigchainDB federation.
@@ -1,154 +0,0 @@
-import multiprocessing as mp
-import uuid
-import argparse
-import csv
-import time
-import logging
-import rethinkdb as r
-
-from bigchaindb.common.transaction import Transaction
-
-from bigchaindb import Bigchain
-from bigchaindb.utils import ProcessGroup
-from bigchaindb.commands import utils
-
-
-SIZE_OF_FILLER = {'minimal': 0,
-                  'small': 10**3,
-                  'medium': 10**4,
-                  'large': 10**5}
-
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-
-def create_write_transaction(tx_left, payload_filler):
-    b = Bigchain()
-    payload_dict = {}
-    if payload_filler:
-        payload_dict['filler'] = payload_filler
-    while tx_left > 0:
-        # Include a random uuid string in the payload
-        # to prevent duplicate transactions
-        # (i.e. transactions with the same hash)
-        payload_dict['msg'] = str(uuid.uuid4())
-        tx = Transaction.create([b.me], [b.me], payload=payload_dict)
-        tx = tx.sign([b.me_private])
-        b.write_transaction(tx)
-        tx_left -= 1
-
-
-def run_add_backlog(args):
-    tx_left = args.num_transactions // mp.cpu_count()
-    payload_filler = 'x' * SIZE_OF_FILLER[args.payload_size]
-    workers = ProcessGroup(target=create_write_transaction,
-                           args=(tx_left, payload_filler))
-    workers.start()
-
-
-def run_gather_metrics(args):
-    # setup a rethinkdb connection
-    conn = r.connect(args.bigchaindb_host, 28015, 'bigchain')
-
-    # setup csv writer
-    csv_file = open(args.csvfile, 'w')
-    csv_writer = csv.writer(csv_file)
-
-    # query for the number of transactions on the backlog
-    num_transactions = r.table('backlog').count().run(conn)
-    num_transactions_received = 0
-    initial_time = None
-    logger.info('Starting gathering metrics.')
-    logger.info('{} transasctions in the backlog'.format(num_transactions))
-    logger.info('This process should exit automatically. '
-                'If this does not happen you can exit at any time using Ctrl-C '
-                'saving all the metrics gathered up to this point.')
-
-    logger.info('\t{:<20} {:<20} {:<20} {:<20}'.format(
-        'timestamp',
-        'tx in block',
-        'tx/s',
-        '% complete'
-    ))
-
-    # listen to the changefeed
-    try:
-        for change in r.table('bigchain').changes().run(conn):
-            # check only for new blocks
-            if change['old_val'] is None:
-                block_num_transactions = len(
-                    change['new_val']['block']['transactions']
-                )
-                time_now = time.time()
-                csv_writer.writerow(
-                    [str(time_now), str(block_num_transactions)]
-                )
-
-                # log statistics
-                if initial_time is None:
-                    initial_time = time_now
-
-                num_transactions_received += block_num_transactions
-                elapsed_time = time_now - initial_time
-                percent_complete = round(
-                    (num_transactions_received / num_transactions) * 100
-                )
-
-                if elapsed_time != 0:
-                    transactions_per_second = round(
-                        num_transactions_received / elapsed_time
-                    )
-                else:
-                    transactions_per_second = float('nan')
-
-                logger.info('\t{:<20} {:<20} {:<20} {:<20}'.format(
-                    time_now,
-                    block_num_transactions,
-                    transactions_per_second,
-                    percent_complete
-                ))
-
-                if (num_transactions - num_transactions_received) == 0:
-                    break
-    except KeyboardInterrupt:
-        logger.info('Interrupted. Exiting early...')
-    finally:
-        # close files
-        csv_file.close()
-
-
-def main():
-    parser = argparse.ArgumentParser(description='BigchainDB benchmarking utils')
-    subparsers = parser.add_subparsers(title='Commands', dest='command')
-
-    # add transactions to backlog
-    backlog_parser = subparsers.add_parser('add-backlog',
-                                           help='Add transactions to the backlog')
-    backlog_parser.add_argument('num_transactions',
-                                metavar='num_transactions',
-                                type=int, default=0,
-                                help='Number of transactions to add to the backlog')
-    backlog_parser.add_argument('-s', '--payload-size',
-                                choices=SIZE_OF_FILLER.keys(),
-                                default='minimal',
-                                help='Payload size')
-
-    # metrics
-    metrics_parser = subparsers.add_parser('gather-metrics',
-                                           help='Gather metrics to a csv file')
-
-    metrics_parser.add_argument('-b', '--bigchaindb-host',
-                                required=True,
-                                help=('Bigchaindb node hostname to connect '
-                                      'to gather cluster metrics'))
-
-    metrics_parser.add_argument('-c', '--csvfile',
-                                required=True,
-                                help='Filename to save the metrics')
-
-    utils.start(parser, globals())
-
-
-if __name__ == '__main__':
-    main()
benchmarking-tests/fabfile.py (vendored) | 46 deletions
@@ -1,46 +0,0 @@
-from __future__ import with_statement, unicode_literals
-
-from fabric.api import sudo, env, hosts
-from fabric.api import task, parallel
-from fabric.contrib.files import sed
-from fabric.operations import run, put
-from fabric.context_managers import settings
-
-from hostlist import public_dns_names
-from ssh_key import ssh_key_path
-
-# Ignore known_hosts
-# http://docs.fabfile.org/en/1.10/usage/env.html#disable-known-hosts
-env.disable_known_hosts = True
-
-# What remote servers should Fabric connect to? With what usernames?
-env.user = 'ubuntu'
-env.hosts = public_dns_names
-
-# SSH key files to try when connecting:
-# http://docs.fabfile.org/en/1.10/usage/env.html#key-filename
-env.key_filename = ssh_key_path
-
-
-@task
-@parallel
-def put_benchmark_utils():
-    put('benchmark_utils.py')
-
-
-@task
-@parallel
-def prepare_backlog(num_transactions=10000):
-    run('python3 benchmark_utils.py add-backlog {}'.format(num_transactions))
-
-
-@task
-@parallel
-def start_bigchaindb():
-    run('screen -d -m bigchaindb start &', pty=False)
-
-
-@task
-@parallel
-def kill_bigchaindb():
-    run('killall bigchaindb')
@@ -1,20 +0,0 @@
-# Transactions per second
-
-Measure how many blocks per second are created on the _bigchain_ with a pre filled backlog.
-
-1. Deploy an aws cluster https://docs.bigchaindb.com/projects/server/en/latest/clusters-feds/aws-testing-cluster.html
-2. Make a symbolic link to hostlist.py: `ln -s ../deploy-cluster-aws/hostlist.py .`
-3. Make a symbolic link to bigchaindb.pem:
-```bash
-mkdir pem
-cd pem
-ln -s ../deploy-cluster-aws/pem/bigchaindb.pem .
-```
-
-Then:
-
-```bash
-fab put_benchmark_utils
-fab prepare_backlog:<num txs per node> # wait for process to finish
-fab start_bigchaindb
-```
@@ -5,24 +5,46 @@ import os
 # PORT_NUMBER = reduce(lambda x, y: x * y, map(ord, 'BigchainDB')) % 2**16
 # basically, the port number is 9984
 
-_database_rethinkdb = {
-    'backend': os.environ.get('BIGCHAINDB_DATABASE_BACKEND', 'rethinkdb'),
+_base_database_rethinkdb = {
     'host': os.environ.get('BIGCHAINDB_DATABASE_HOST', 'localhost'),
     'port': int(os.environ.get('BIGCHAINDB_DATABASE_PORT', 28015)),
     'name': os.environ.get('BIGCHAINDB_DATABASE_NAME', 'bigchain'),
-    'connection_timeout': 5000,
-    'max_tries': 3,
 }
 
-_database_mongodb = {
-    'backend': os.environ.get('BIGCHAINDB_DATABASE_BACKEND', 'mongodb'),
+# The following variable is used by `bigchaindb configure` to
+# prompt the user for database values. We cannot rely on
+# _base_database_rethinkdb.keys() or _base_database_mongodb.keys()
+# because dicts are unordered. I tried to configure
+
+_database_keys_map = {
+    'mongodb': ('host', 'port', 'name', 'replicaset'),
+    'rethinkdb': ('host', 'port', 'name')
+}
+
+_base_database_mongodb = {
     'host': os.environ.get('BIGCHAINDB_DATABASE_HOST', 'localhost'),
     'port': int(os.environ.get('BIGCHAINDB_DATABASE_PORT', 27017)),
     'name': os.environ.get('BIGCHAINDB_DATABASE_NAME', 'bigchain'),
     'replicaset': os.environ.get('BIGCHAINDB_DATABASE_REPLICASET', 'bigchain-rs'),
+    'ssl': bool(os.environ.get('BIGCHAINDB_DATABASE_SSL', False)),
+    'login': os.environ.get('BIGCHAINDB_DATABASE_LOGIN'),
+    'password': os.environ.get('BIGCHAINDB_DATABASE_PASSWORD')
+}
+
+_database_rethinkdb = {
+    'backend': os.environ.get('BIGCHAINDB_DATABASE_BACKEND', 'rethinkdb'),
     'connection_timeout': 5000,
     'max_tries': 3,
 }
+_database_rethinkdb.update(_base_database_rethinkdb)
+
+_database_mongodb = {
+    'backend': os.environ.get('BIGCHAINDB_DATABASE_BACKEND', 'mongodb'),
+    'connection_timeout': 5000,
+    'max_tries': 3,
+}
+_database_mongodb.update(_base_database_mongodb)
 
 _database_map = {
     'mongodb': _database_mongodb,
@@ -45,7 +67,22 @@ config = {
         'private': None,
     },
     'keyring': [],
-    'backlog_reassign_delay': 120
+    'backlog_reassign_delay': 120,
+    'log': {
+        # TODO Document here or elsewhere.
+        # Example of config:
+        # 'file': '/var/log/bigchaindb.log',
+        # 'level_console': 'info',
+        # 'level_logfile': 'info',
+        # 'datefmt_console': '%Y-%m-%d %H:%M:%S',
+        # 'datefmt_logfile': '%Y-%m-%d %H:%M:%S',
+        # 'fmt_console': '%(asctime)s [%(levelname)s] (%(name)s) %(message)s',
+        # 'fmt_logfile': '%(asctime)s [%(levelname)s] (%(name)s) %(message)s',
+        # 'granular_levels': {
+        #     'bichaindb.backend': 'info',
+        #     'bichaindb.core': 'info',
+        # },
+    },
 }
 
 # We need to maintain a backup copy of the original config dict in case
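A note on the `ssl` default above: `bool()` over an environment *string* is truthy for any non-empty value, including `'false'`. A minimal sketch (illustrative, not part of the commit) of how the new entries resolve:

```python
import os

os.environ['BIGCHAINDB_DATABASE_SSL'] = 'false'   # intent: disable SSL

# Mirrors the diff: bool() over a string is truthy for any non-empty value,
# so 'false' still yields True. The flag is effectively "set to enable".
ssl = bool(os.environ.get('BIGCHAINDB_DATABASE_SSL', False))
assert ssl is True

# Unset optional credentials fall through as None, which connect() later
# treats as "not configured".
login = os.environ.get('BIGCHAINDB_DATABASE_LOGIN')
assert login is None
```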
@@ -16,7 +16,7 @@ logger = logging.getLogger(__name__)
 
 
 def connect(backend=None, host=None, port=None, name=None, max_tries=None,
-            connection_timeout=None, replicaset=None):
+            connection_timeout=None, replicaset=None, ssl=None, login=None, password=None):
     """Create a new connection to the database backend.
 
     All arguments default to the current configuration's values if not
@@ -50,6 +50,9 @@ def connect(backend=None, host=None, port=None, name=None, max_tries=None,
     # to handle these these additional args. In case of RethinkDBConnection
     # it just does not do anything with it.
     replicaset = replicaset or bigchaindb.config['database'].get('replicaset')
+    ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
+    login = login or bigchaindb.config['database'].get('login')
+    password = password or bigchaindb.config['database'].get('password')
 
     try:
         module_name, _, class_name = BACKENDS[backend].rpartition('.')
@@ -63,7 +66,7 @@ def connect(backend=None, host=None, port=None, name=None, max_tries=None,
     logger.debug('Connection: {}'.format(Class))
     return Class(host=host, port=port, dbname=dbname,
                  max_tries=max_tries, connection_timeout=connection_timeout,
-                 replicaset=replicaset)
+                 replicaset=replicaset, ssl=ssl, login=login, password=password)
 
 
 class Connection:
@@ -16,7 +16,7 @@ logger = logging.getLogger(__name__)
 
 class MongoDBConnection(Connection):
 
-    def __init__(self, replicaset=None, **kwargs):
+    def __init__(self, replicaset=None, ssl=None, login=None, password=None, **kwargs):
         """Create a new Connection instance.
 
         Args:
@@ -28,6 +28,9 @@ class MongoDBConnection(Connection):
 
         super().__init__(**kwargs)
         self.replicaset = replicaset or bigchaindb.config['database']['replicaset']
+        self.ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
+        self.login = login or bigchaindb.config['database'].get('login')
+        self.password = password or bigchaindb.config['database'].get('password')
 
     @property
     def db(self):
@@ -71,14 +74,21 @@ class MongoDBConnection(Connection):
             # we should only return a connection if the replica set is
             # initialized. initialize_replica_set will check if the
             # replica set is initialized else it will initialize it.
-            initialize_replica_set(self.host, self.port, self.connection_timeout)
+            initialize_replica_set(self.host, self.port, self.connection_timeout,
+                                   self.dbname, self.ssl, self.login, self.password)
 
             # FYI: this might raise a `ServerSelectionTimeoutError`,
             # that is a subclass of `ConnectionFailure`.
-            return pymongo.MongoClient(self.host,
-                                       self.port,
-                                       replicaset=self.replicaset,
-                                       serverselectiontimeoutms=self.connection_timeout)
+            client = pymongo.MongoClient(self.host,
+                                         self.port,
+                                         replicaset=self.replicaset,
+                                         serverselectiontimeoutms=self.connection_timeout,
+                                         ssl=self.ssl)
+
+            if self.login is not None and self.password is not None:
+                client[self.dbname].authenticate(self.login, self.password)
+
+            return client
 
         # `initialize_replica_set` might raise `ConnectionFailure` or `OperationFailure`.
         except (pymongo.errors.ConnectionFailure,
@@ -86,7 +96,7 @@ class MongoDBConnection(Connection):
             raise ConnectionError() from exc
 
 
-def initialize_replica_set(host, port, connection_timeout):
+def initialize_replica_set(host, port, connection_timeout, dbname, ssl, login, password):
     """Initialize a replica set. If already initialized skip."""
 
     # Setup a MongoDB connection
@@ -95,7 +105,12 @@ def initialize_replica_set(host, port, connection_timeout):
     # you try to connect to a replica set that is not yet initialized
     conn = pymongo.MongoClient(host=host,
                                port=port,
-                               serverselectiontimeoutms=connection_timeout)
+                               serverselectiontimeoutms=connection_timeout,
+                               ssl=ssl)
+
+    if login is not None and password is not None:
+        conn[dbname].authenticate(login, password)
+
     _check_replica_set(conn)
     host = '{}:{}'.format(bigchaindb.config['database']['host'],
                           bigchaindb.config['database']['port'])
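For context, a minimal usage sketch of the extended connection API. The hostname and credentials below are hypothetical, and `backend.connect` is the entry point already used elsewhere in this commit (`run_drop` calls `backend.connect()`); any argument left as `None` falls back to `bigchaindb.config`, per the diff above.

```python
from bigchaindb import backend

# Illustrative only: host, replica set and credentials are made up.
conn = backend.connect(
    backend='mongodb',
    host='mongodb.example.com',
    port=27017,
    name='bigchain',
    replicaset='bigchain-rs',
    ssl=True,            # new: TLS on the client connection
    login='bdb_user',    # new: authenticated against the `name` database
    password='s3cret',
)
```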
@@ -153,14 +153,22 @@ def get_spent(conn, transaction_id, output):
     cursor = conn.run(
         conn.collection('bigchain').aggregate([
             {'$match': {
-                'block.transactions.inputs.fulfills.txid': transaction_id,
-                'block.transactions.inputs.fulfills.output': output
+                'block.transactions.inputs': {
+                    '$elemMatch': {
+                        'fulfills.txid': transaction_id,
+                        'fulfills.output': output,
+                    },
+                },
             }},
             {'$unwind': '$block.transactions'},
             {'$match': {
-                'block.transactions.inputs.fulfills.txid': transaction_id,
-                'block.transactions.inputs.fulfills.output': output
-            }}
+                'block.transactions.inputs': {
+                    '$elemMatch': {
+                        'fulfills.txid': transaction_id,
+                        'fulfills.output': output,
+                    },
+                },
+            }},
         ]))
     # we need to access some nested fields before returning so lets use a
     # generator to avoid having to read all records on the cursor at this point
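Why `$elemMatch` matters here: with plain dot notation, each condition may be satisfied by a *different* array element, which is exactly the false double spend reported in #1271 (see the CHANGELOG entry above). A pure-Python sketch of the two matching semantics:

```python
# Illustrative, not from the commit: two inputs spending different outputs.
inputs = [
    {'fulfills': {'txid': 'aaa', 'output': 0}},
    {'fulfills': {'txid': 'bbb', 'output': 1}},
]

# Dot-notation semantics: "some element has txid == 'aaa'" AND "some element
# has output == 1" -- True for the document above, even though no single
# input spends ('aaa', 1).
dot_match = (any(i['fulfills']['txid'] == 'aaa' for i in inputs) and
             any(i['fulfills']['output'] == 1 for i in inputs))

# $elemMatch semantics: one element must satisfy both conditions at once.
elem_match = any(i['fulfills']['txid'] == 'aaa' and
                 i['fulfills']['output'] == 1 for i in inputs)

assert dot_match is True and elem_match is False
```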
@@ -100,4 +100,5 @@ def create_votes_secondary_index(conn, dbname):
                                  ASCENDING),
                                 ('node_pubkey',
                                  ASCENDING)],
-                                name='block_and_voter')
+                                name='block_and_voter',
+                                unique=True)
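A sketch of what `unique=True` buys: MongoDB itself now rejects a second vote by the same node for the same block. Illustrative only; it assumes a MongoDB reachable on localhost, and the first index field name (`vote.voting_for_block`) is a hypothetical stand-in, since only `node_pubkey` is visible in the hunk above.

```python
import pymongo

client = pymongo.MongoClient()  # assumes a local MongoDB for the demo
votes = client['bigchain_demo']['votes']
votes.create_index([('vote.voting_for_block', pymongo.ASCENDING),
                    ('node_pubkey', pymongo.ASCENDING)],
                   name='block_and_voter', unique=True)

votes.insert_one({'vote': {'voting_for_block': 'b1'}, 'node_pubkey': 'k1'})
try:
    # Same (block, voter) pair: the unique index raises instead of inserting.
    votes.insert_one({'vote': {'voting_for_block': 'b1'}, 'node_pubkey': 'k1'})
except pymongo.errors.DuplicateKeyError:
    print('second vote by the same voter for the same block is rejected')
```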
@@ -12,7 +12,8 @@ import sys
 from bigchaindb.common import crypto
 from bigchaindb.common.exceptions import (StartupError,
                                           DatabaseAlreadyExists,
-                                          KeypairNotFoundException)
+                                          KeypairNotFoundException,
+                                          DatabaseDoesNotExist)
 import bigchaindb
 from bigchaindb import backend, processes
 from bigchaindb.backend import schema
@@ -24,7 +25,8 @@ from bigchaindb.commands.messages import (
     CANNOT_START_KEYPAIR_NOT_FOUND,
     RETHINKDB_STARTUP_ERROR,
 )
-from bigchaindb.commands.utils import configure_bigchaindb, input_on_stderr
+from bigchaindb.commands.utils import (
+    configure_bigchaindb, start_logging_process, input_on_stderr)
 
 
 logging.basicConfig(level=logging.INFO)
@@ -86,26 +88,21 @@ def run_configure(args, skip_if_exists=False):
     # select the correct config defaults based on the backend
     print('Generating default configuration for backend {}'
           .format(args.backend), file=sys.stderr)
+    database_keys = bigchaindb._database_keys_map[args.backend]
     conf['database'] = bigchaindb._database_map[args.backend]
 
     if not args.yes:
         for key in ('bind', ):
             val = conf['server'][key]
-            conf['server'][key] = \
-                input_on_stderr('API Server {}? (default `{}`): '.format(key, val)) \
-                or val
+            conf['server'][key] = input_on_stderr('API Server {}? (default `{}`): '.format(key, val), val)
 
-        for key in ('host', 'port', 'name'):
+        for key in database_keys:
             val = conf['database'][key]
-            conf['database'][key] = \
-                input_on_stderr('Database {}? (default `{}`): '.format(key, val)) \
-                or val
+            conf['database'][key] = input_on_stderr('Database {}? (default `{}`): '.format(key, val), val)
 
         val = conf['backlog_reassign_delay']
-        conf['backlog_reassign_delay'] = \
-            input_on_stderr(('Stale transaction reassignment delay (in '
-                             'seconds)? (default `{}`): '.format(val))) \
-            or val
+        conf['backlog_reassign_delay'] = input_on_stderr(
+            'Stale transaction reassignment delay (in seconds)? (default `{}`): '.format(val), val)
 
     if config_path != '-':
         bigchaindb.config_utils.write_config(conf, config_path)
@@ -165,10 +162,14 @@ def run_drop(args):
 
     conn = backend.connect()
     dbname = bigchaindb.config['database']['name']
-    schema.drop_database(conn, dbname)
+    try:
+        schema.drop_database(conn, dbname)
+    except DatabaseDoesNotExist:
+        print("Cannot drop '{name}'. The database does not exist.".format(name=dbname), file=sys.stderr)
 
 
 @configure_bigchaindb
+@start_logging_process
 def run_start(args):
     """Start the processes to run the node"""
     logger.info('BigchainDB Version %s', bigchaindb.__version__)
@@ -16,24 +16,108 @@ import bigchaindb
 import bigchaindb.config_utils
 from bigchaindb import backend
 from bigchaindb.common.exceptions import StartupError
+from bigchaindb.log.setup import setup_logging
 from bigchaindb.version import __version__
 
 
 def configure_bigchaindb(command):
+    """Decorator to be used by command line functions, such that the
+    configuration of bigchaindb is performed before the execution of
+    the command.
+
+    Args:
+        command: The command to decorate.
+
+    Returns:
+        The command wrapper function.
+
+    """
     @functools.wraps(command)
     def configure(args):
-        bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
+        try:
+            config_from_cmdline = {
+                'log': {'level_console': args.log_level},
+                'server': {'loglevel': args.log_level},
+            }
+        except AttributeError:
+            config_from_cmdline = None
+        bigchaindb.config_utils.autoconfigure(
+            filename=args.config, config=config_from_cmdline, force=True)
         command(args)
 
     return configure
 
 
+def start_logging_process(command):
+    """Decorator to start the logging subscriber process.
+
+    Args:
+        command: The command to decorate.
+
+    Returns:
+        The command wrapper function.
+
+    .. important::
+
+        Configuration, if needed, should be applied before invoking this
+        decorator, as starting the subscriber process for logging will
+        configure the root logger for the child process based on the
+        state of :obj:`bigchaindb.config` at the moment this decorator
+        is invoked.
+
+    """
+    @functools.wraps(command)
+    def start_logging(args):
+        from bigchaindb import config
+        setup_logging(user_log_config=config.get('log'))
+        command(args)
+    return start_logging
+
+
+def _convert(value, default=None, convert=None):
+    def convert_bool(value):
+        if value.lower() in ('true', 't', 'yes', 'y'):
+            return True
+        if value.lower() in ('false', 'f', 'no', 'n'):
+            return False
+        raise ValueError('{} cannot be converted to bool'.format(value))
+
+    if value == '':
+        value = None
+
+    if convert is None:
+        if default is not None:
+            convert = type(default)
+        else:
+            convert = str
+
+    if convert == bool:
+        convert = convert_bool
+
+    if value is None:
+        return default
+    else:
+        return convert(value)
+
+
 # We need this because `input` always prints on stdout, while it should print
 # to stderr. It's a very old bug, check it out here:
 # - https://bugs.python.org/issue1927
-def input_on_stderr(prompt=''):
+def input_on_stderr(prompt='', default=None, convert=None):
+    """Output a string to stderr and wait for input.
+
+    Args:
+        prompt (str): the message to display.
+        default: the default value to return if the user
+            leaves the field empty
+        convert (callable): a callable to be used to convert
+            the value the user inserted. If None, the type of
+            ``default`` will be used.
+    """
     print(prompt, end='', file=sys.stderr)
-    return builtins.input()
+    value = builtins.input()
+    return _convert(value, default, convert)
 
 
 def start_rethinkdb():
@@ -151,6 +235,12 @@ base_parser.add_argument('-c', '--config',
                          help='Specify the location of the configuration file '
                               '(use "-" for stdout)')
 
+base_parser.add_argument('-l', '--log-level',
+                         type=str.upper,  # convert to uppercase for comparison to choices
+                         choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
+                         default='INFO',
+                         help='Log level')
+
 base_parser.add_argument('-y', '--yes', '--yes-please',
                          action='store_true',
                          help='Assume "yes" as answer to all prompts and run '
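Behavior sketch for the new `_convert` helper backing `input_on_stderr` (illustrative; the import path is taken from the commit's own `from bigchaindb.commands.utils import ...` line): an empty reply means "take the default", and the conversion callable is inferred from the default's type, with a special case for booleans.

```python
from bigchaindb.commands.utils import _convert  # module-private helper

assert _convert('', default=120) == 120        # empty input -> default
assert _convert('300', default=120) == 300     # converted via type(default)
assert _convert('yes', default=False) is True  # bool defaults use convert_bool
assert _convert('n', default=True) is False
assert _convert('', default=None) is None      # nothing entered, no default
```

This is what lets `run_configure` above collapse the old `input_on_stderr(...) or val` pattern into a single call with an explicit default.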
@@ -1,18 +1,31 @@
 # Separate all crypto code so that we can easily test several implementations
+from collections import namedtuple
+
 import sha3
 from cryptoconditions import crypto
 
 
+CryptoKeypair = namedtuple('CryptoKeypair', ('private_key', 'public_key'))
+
+
 def hash_data(data):
     """Hash the provided data using SHA3-256"""
     return sha3.sha3_256(data.encode()).hexdigest()
 
 
 def generate_key_pair():
+    """Generates a cryptographic key pair.
+
+    Returns:
+        :class:`~bigchaindb.common.crypto.CryptoKeypair`: A
+        :obj:`collections.namedtuple` with named fields
+        :attr:`~bigchaindb.common.crypto.CryptoKeypair.private_key` and
+        :attr:`~bigchaindb.common.crypto.CryptoKeypair.public_key`.
+
+    """
     # TODO FOR CC: Adjust interface so that this function becomes unnecessary
-    private_key, public_key = crypto.ed25519_generate_key_pair()
-    return private_key.decode(), public_key.decode()
+    return CryptoKeypair(
+        *(k.decode() for k in crypto.ed25519_generate_key_pair()))
 
 
 PrivateKey = crypto.Ed25519SigningKey
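Compatibility note: because `CryptoKeypair` is a namedtuple, existing tuple-unpacking callers keep working unchanged while new code gets named access. A small sketch (assumes the package and its `cryptoconditions` dependency are installed):

```python
from bigchaindb.common.crypto import generate_key_pair

keypair = generate_key_pair()
private_key, public_key = keypair          # old tuple unpacking still works
assert keypair.private_key == private_key  # named access is now available
assert keypair.public_key == public_key
```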
@@ -768,8 +768,7 @@ class Transaction(object):
             key_pairs (dict): The keys to sign the Transaction with.
         """
         input_ = deepcopy(input_)
-        for owner_before in input_.owners_before:
-            try:
+        for owner_before in set(input_.owners_before):
             # TODO: CC should throw a KeypairMismatchException, instead of
             # our manual mapping here
 
@@ -780,8 +779,8 @@ class Transaction(object):
             # TODO FOR CC: `get_subcondition` is singular. One would not
             # expect to get a list back.
             ccffill = input_.fulfillment
-                subffill = ccffill.get_subcondition_from_vk(owner_before)[0]
-            except IndexError:
+            subffills = ccffill.get_subcondition_from_vk(owner_before)
+            if not subffills:
                 raise KeypairMismatchException('Public key {} cannot be found '
                                                'in the fulfillment'
                                                .format(owner_before))
@@ -794,6 +793,7 @@ class Transaction(object):
 
             # cryptoconditions makes no assumptions of the encoding of the
             # message to sign or verify. It only accepts bytestrings
-            subffill.sign(tx_serialized.encode(), private_key)
+            for subffill in subffills:
+                subffill.sign(tx_serialized.encode(), private_key)
             self.inputs[index] = input_
 
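What the two fixes above change, as a self-contained toy (toy objects, not the real cryptoconditions API): deduplicating `owners_before` visits each distinct signer once, and the inner loop signs every subcondition slot held by that key instead of only the first one.

```python
class ToySubcondition:
    def __init__(self):
        self.signed = 0
    def sign(self, message, private_key):
        self.signed += 1

# One key controls two slots; the owners list repeats a key, as can happen
# when the same public key appears more than once in a threshold condition.
subconditions = {'pubkey_A': [ToySubcondition(), ToySubcondition()],
                 'pubkey_B': [ToySubcondition()]}
owners_before = ['pubkey_A', 'pubkey_A', 'pubkey_B']

for owner_before in set(owners_before):          # each distinct signer once
    subffills = subconditions.get(owner_before, [])
    if not subffills:  # replaces the old `[0]` lookup + IndexError handling
        raise KeyError(owner_before)
    for subffill in subffills:                   # sign *every* slot
        subffill.sign(b'tx-serialized', 'private-key')

# Every slot is signed exactly once, including pubkey_A's second slot,
# which the old single-subcondition code skipped.
assert all(s.signed == 1 for slots in subconditions.values() for s in slots)
```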
@@ -220,11 +220,14 @@ def write_config(config, filename=None):
         json.dump(config, f, indent=4)
 
 
+def is_configured():
+    return bool(bigchaindb.config.get('CONFIGURED'))
+
+
 def autoconfigure(filename=None, config=None, force=False):
     """Run ``file_config`` and ``env_config`` if the module has not
     been initialized."""
-    if not force and bigchaindb.config.get('CONFIGURED'):
+    if not force and is_configured():
         logger.debug('System already configured, skipping autoconfiguration')
         return
@@ -19,14 +19,17 @@ class Bigchain(object):
     Create, read, sign, write transactions to the database
     """
 
-    # return if a block has been voted invalid
     BLOCK_INVALID = 'invalid'
-    # return if a block is valid, or tx is in valid block
+    """return if a block has been voted invalid"""
+
     BLOCK_VALID = TX_VALID = 'valid'
-    # return if block is undecided, or tx is in undecided block
+    """return if a block is valid, or tx is in valid block"""
+
     BLOCK_UNDECIDED = TX_UNDECIDED = 'undecided'
-    # return if transaction is in backlog
+    """return if block is undecided, or tx is in undecided block"""
+
     TX_IN_BACKLOG = 'backlog'
+    """return if transaction is in backlog"""
 
     def __init__(self, public_key=None, private_key=None, keyring=[], connection=None, backlog_reassign_delay=None):
         """Initialize the Bigchain instance
@@ -321,43 +324,57 @@ class Bigchain(object):
     def get_spent(self, txid, output):
         """Check if a `txid` was already used as an input.
 
-        A transaction can be used as an input for another transaction. Bigchain needs to make sure that a
-        given `txid` is only used once.
+        A transaction can be used as an input for another transaction. Bigchain
+        needs to make sure that a given `(txid, output)` is only used once.
+
+        This method will check if the `(txid, output)` has already been
+        spent in a transaction that is in either the `VALID`, `UNDECIDED` or
+        `BACKLOG` state.
 
         Args:
             txid (str): The id of the transaction
             output (num): the index of the output in the respective transaction
 
         Returns:
-            The transaction (Transaction) that used the `txid` as an input else
-            `None`
+            The transaction (Transaction) that used the `(txid, output)` as an
+            input else `None`
+
+        Raises:
+            CriticalDoubleSpend: If the given `(txid, output)` was spent in
+                more than one valid transaction.
         """
         # checks if an input was already spent
         # checks if the bigchain has any transaction with input {'txid': ...,
         # 'output': ...}
-        transactions = list(backend.query.get_spent(self.connection, txid, output))
+        transactions = list(backend.query.get_spent(self.connection, txid,
                                                     output))
 
         # a transaction_id should have been spent at most one time
-        if transactions:
-            # determine if these valid transactions appear in more than one valid block
-            num_valid_transactions = 0
-            for transaction in transactions:
-                # ignore invalid blocks
-                # FIXME: Isn't there a faster solution than doing I/O again?
-                if self.get_transaction(transaction['id']):
-                    num_valid_transactions += 1
-                if num_valid_transactions > 1:
-                    raise core_exceptions.CriticalDoubleSpend(
-                        '`{}` was spent more than once. There is a problem'
-                        ' with the chain'.format(txid))
-            if num_valid_transactions:
-                return Transaction.from_dict(transactions[0])
-            else:
-                # all queried transactions were invalid
-                return None
-        else:
-            return None
+        # determine if these valid transactions appear in more than one valid
+        # block
+        num_valid_transactions = 0
+        non_invalid_transactions = []
+        for transaction in transactions:
+            # ignore transactions in invalid blocks
+            # FIXME: Isn't there a faster solution than doing I/O again?
+            _, status = self.get_transaction(transaction['id'],
+                                             include_status=True)
+            if status == self.TX_VALID:
+                num_valid_transactions += 1
+            # `txid` can only have been spent in at most on valid block.
+            if num_valid_transactions > 1:
+                raise core_exceptions.CriticalDoubleSpend(
+                    '`{}` was spent more than once. There is a problem'
+                    ' with the chain'.format(txid))
+            # if its not and invalid transaction
+            if status is not None:
+                non_invalid_transactions.append(transaction)
+
+        if non_invalid_transactions:
+            return Transaction.from_dict(non_invalid_transactions[0])
+
+        # Either no transaction was returned spending the `(txid, output)` as
+        # input or the returned transactions are not valid.
 
     def get_outputs(self, owner):
         """Retrieve a list of links to transaction outputs for a given public
@@ -372,32 +389,37 @@ class Bigchain(object):
         """
         # get all transactions in which owner is in the `owners_after` list
         response = backend.query.get_owned_ids(self.connection, owner)
-        links = []
-
-        for tx in response:
-            # disregard transactions from invalid blocks
-            validity = self.get_blocks_status_containing_tx(tx['id'])
-            if Bigchain.BLOCK_VALID not in validity.values():
-                if Bigchain.BLOCK_UNDECIDED not in validity.values():
-                    continue
-
-            # NOTE: It's OK to not serialize the transaction here, as we do not
-            # use it after the execution of this function.
-            # a transaction can contain multiple outputs so we need to iterate over all of them
-            # to get a list of outputs available to spend
-            for index, output in enumerate(tx['outputs']):
-                # for simple signature conditions there are no subfulfillments
-                # check if the owner is in the condition `owners_after`
-                if len(output['public_keys']) == 1:
-                    if output['condition']['details']['public_key'] == owner:
-                        links.append(TransactionLink(tx['id'], index))
-                else:
-                    # for transactions with multiple `public_keys` there will be several subfulfillments nested
-                    # in the condition. We need to iterate the subfulfillments to make sure there is a
-                    # subfulfillment for `owner`
-                    if utils.condition_details_has_owner(output['condition']['details'], owner):
-                        links.append(TransactionLink(tx['id'], index))
-        return links
+        return [
+            TransactionLink(tx['id'], index)
+            for tx in response
+            if not self.is_tx_strictly_in_invalid_block(tx['id'])
+            for index, output in enumerate(tx['outputs'])
+            if utils.output_has_owner(output, owner)
+        ]
+
+    def is_tx_strictly_in_invalid_block(self, txid):
+        """
+        Checks whether the transaction with the given ``txid``
+        *strictly* belongs to an invalid block.
+
+        Args:
+            txid (str): Transaction id.
+
+        Returns:
+            bool: ``True`` if the transaction *strictly* belongs to a
+                block that is invalid. ``False`` otherwise.
+
+        Note:
+            Since a transaction may be in multiple blocks, with
+            different statuses, the term "strictly" is used to
+            emphasize that if a transaction is said to be in an invalid
+            block, it means that it is not in any other block that is
+            either valid or undecided.
+
+        """
+        validity = self.get_blocks_status_containing_tx(txid)
+        return (Bigchain.BLOCK_VALID not in validity.values() and
+                Bigchain.BLOCK_UNDECIDED not in validity.values())
 
     def get_owned_ids(self, owner):
         """Retrieve a list of ``txid`` s that can be used as inputs.
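The "strictly invalid" predicate added above, restated as a self-contained sketch. A transaction may sit in several blocks with different voting statuses, so it only counts as invalid if *no* containing block is valid or undecided:

```python
# Illustrative, mirrors is_tx_strictly_in_invalid_block's logic.
def is_strictly_invalid(validity):
    return ('valid' not in validity.values() and
            'undecided' not in validity.values())

assert is_strictly_invalid({'block_a': 'invalid'}) is True
assert is_strictly_invalid({'block_a': 'invalid', 'block_b': 'undecided'}) is False
assert is_strictly_invalid({'block_a': 'invalid', 'block_b': 'valid'}) is False
```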
bigchaindb/log/__init__.py (new file) | 0 lines
bigchaindb/log/configs.py (new file) | 63 lines
@@ -0,0 +1,63 @@
+import logging
+from logging.handlers import DEFAULT_TCP_LOGGING_PORT
+from os.path import expanduser, join
+
+
+DEFAULT_SOCKET_LOGGING_HOST = 'localhost'
+DEFAULT_SOCKET_LOGGING_PORT = DEFAULT_TCP_LOGGING_PORT
+DEFAULT_SOCKET_LOGGING_ADDR = (DEFAULT_SOCKET_LOGGING_HOST,
+                               DEFAULT_SOCKET_LOGGING_PORT)
+DEFAULT_LOG_DIR = expanduser('~')
+
+PUBLISHER_LOGGING_CONFIG = {
+    'version': 1,
+    'disable_existing_loggers': False,
+    'root': {
+        'level': logging.DEBUG,
+    },
+}
+
+SUBSCRIBER_LOGGING_CONFIG = {
+    'version': 1,
+    'disable_existing_loggers': False,
+    'formatters': {
+        'console': {
+            'class': 'logging.Formatter',
+            'format': ('[%(asctime)s] [%(levelname)s] (%(name)s) '
+                       '%(message)s (%(processName)-10s - pid: %(process)d)'),
+            'datefmt': '%Y-%m-%d %H:%M:%S',
+        },
+        'file': {
+            'class': 'logging.Formatter',
+            'format': ('[%(asctime)s] [%(levelname)s] (%(name)s) '
+                       '%(message)s (%(processName)-10s - pid: %(process)d)'),
+            'datefmt': '%Y-%m-%d %H:%M:%S',
+        },
+    },
+    'handlers': {
+        'console': {
+            'class': 'logging.StreamHandler',
+            'formatter': 'console',
+            'level': logging.INFO,
+        },
+        'file': {
+            'class': 'logging.FileHandler',
+            'filename': join(DEFAULT_LOG_DIR, 'bigchaindb.log'),
+            'mode': 'w',
+            'formatter': 'file',
+            'level': logging.INFO,
+        },
+        'errors': {
+            'class': 'logging.FileHandler',
+            'filename': join(DEFAULT_LOG_DIR, 'bigchaindb-errors.log'),
+            'mode': 'w',
+            'level': logging.ERROR,
+            'formatter': 'file',
+        },
+    },
+    'loggers': {},
+    'root': {
+        'level': logging.DEBUG,
+        'handlers': ['console', 'file', 'errors']
+    },
+}
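Usage note: the subscriber dict is standard `logging.config.dictConfig` input, so a caller can override and apply it directly. An illustrative sketch (the override shown is an example, not part of the commit):

```python
from copy import deepcopy
from logging.config import dictConfig
import logging

from bigchaindb.log.configs import SUBSCRIBER_LOGGING_CONFIG

config = deepcopy(SUBSCRIBER_LOGGING_CONFIG)
config['handlers']['console']['level'] = logging.DEBUG  # example override
dictConfig(config)  # also creates the two log files in the home directory
logging.getLogger('bigchaindb.demo').info('logging configured')
```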
32
bigchaindb/log/loggers.py
Normal file
32
bigchaindb/log/loggers.py
Normal file
@ -0,0 +1,32 @@
import logging.handlers

from gunicorn.glogging import Logger

from .configs import DEFAULT_SOCKET_LOGGING_HOST, DEFAULT_SOCKET_LOGGING_PORT


class HttpServerLogger(Logger):
    """Custom logger class for ``gunicorn`` logs.

    Meant for internal usage only, to set the ``logger_class``
    configuration setting on gunicorn.

    """
    def setup(self, cfg):
        """Setup the gunicorn access and error loggers. This overrides
        the parent method. Its main goal is to simply pipe all the logs to
        the TCP socket used throughout BigchainDB.

        Args:
            cfg (:obj:`gunicorn.config.Config`): Gunicorn configuration
                object. *Ignored*.

        """
        self._set_socklog_handler(self.error_log)
        self._set_socklog_handler(self.access_log)

    def _set_socklog_handler(self, log):
        socket_handler = logging.handlers.SocketHandler(
            DEFAULT_SOCKET_LOGGING_HOST, DEFAULT_SOCKET_LOGGING_PORT)
        socket_handler._gunicorn = True
        log.addHandler(socket_handler)
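
For orientation (a sketch, not part of this commit): gunicorn resolves ``logger_class`` from a dotted path, which is how ``create_server()`` later in this diff hooks the class in. A minimal settings dict might look like the one below; the bind address is a placeholder.

```python
# Sketch only: handing the custom logger class to gunicorn by dotted path,
# mirroring the settings['logger_class'] assignment made in create_server()
# further down in this diff.
gunicorn_settings = {
    'bind': 'localhost:9984',  # placeholder bind address
    'logger_class': 'bigchaindb.log.loggers.HttpServerLogger',
}
```
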
174
bigchaindb/log/setup.py
Normal file
@@ -0,0 +1,174 @@
"""Setup logging."""
from copy import deepcopy
import logging
from logging.config import dictConfig
import logging.handlers
import pickle
from socketserver import StreamRequestHandler, ThreadingTCPServer
import struct
import sys
from multiprocessing import Process

from .configs import (
    DEFAULT_SOCKET_LOGGING_HOST,
    DEFAULT_SOCKET_LOGGING_PORT,
    PUBLISHER_LOGGING_CONFIG,
    SUBSCRIBER_LOGGING_CONFIG,
)
from bigchaindb.common.exceptions import ConfigurationError


def _normalize_log_level(level):
    try:
        return level.upper()
    except AttributeError as exc:
        raise ConfigurationError('Log level must be a string!') from exc


def setup_pub_logger():
    dictConfig(PUBLISHER_LOGGING_CONFIG)
    socket_handler = logging.handlers.SocketHandler(
        DEFAULT_SOCKET_LOGGING_HOST, DEFAULT_SOCKET_LOGGING_PORT)
    socket_handler.setLevel(logging.DEBUG)
    logger = logging.getLogger()
    logger.addHandler(socket_handler)


def setup_sub_logger(*, user_log_config=None):
    server = LogRecordSocketServer()
    with server:
        server_proc = Process(
            target=server.serve_forever,
            kwargs={'log_config': user_log_config},
        )
        server_proc.start()


def setup_logging(*, user_log_config=None):
    setup_pub_logger()
    setup_sub_logger(user_log_config=user_log_config)


def create_subscriber_logging_config(*, user_log_config=None):
    sub_log_config = deepcopy(SUBSCRIBER_LOGGING_CONFIG)

    if not user_log_config:
        return sub_log_config

    if 'file' in user_log_config:
        filename = user_log_config['file']
        sub_log_config['handlers']['file']['filename'] = filename

    if 'level_console' in user_log_config:
        level = _normalize_log_level(user_log_config['level_console'])
        sub_log_config['handlers']['console']['level'] = level

    if 'level_logfile' in user_log_config:
        level = _normalize_log_level(user_log_config['level_logfile'])
        sub_log_config['handlers']['file']['level'] = level

    if 'fmt_console' in user_log_config:
        fmt = user_log_config['fmt_console']
        sub_log_config['formatters']['console']['format'] = fmt

    if 'fmt_logfile' in user_log_config:
        fmt = user_log_config['fmt_logfile']
        sub_log_config['formatters']['file']['format'] = fmt

    if 'datefmt_console' in user_log_config:
        fmt = user_log_config['datefmt_console']
        sub_log_config['formatters']['console']['datefmt'] = fmt

    if 'datefmt_logfile' in user_log_config:
        fmt = user_log_config['datefmt_logfile']
        sub_log_config['formatters']['file']['datefmt'] = fmt

    log_levels = user_log_config.get('granular_levels', {})

    for logger_name, level in log_levels.items():
        level = _normalize_log_level(level)
        try:
            sub_log_config['loggers'][logger_name]['level'] = level
        except KeyError:
            sub_log_config['loggers'][logger_name] = {'level': level}

    return sub_log_config


class LogRecordStreamHandler(StreamRequestHandler):
    """Handler for a streaming logging request.

    This basically logs the record using whatever logging policy is
    configured locally.
    """

    def handle(self):
        """
        Handle multiple requests - each expected to be a 4-byte length,
        followed by the LogRecord in pickle format. Logs the record
        according to whatever policy is configured locally.
        """
        while True:
            chunk = self.connection.recv(4)
            if len(chunk) < 4:
                break
            slen = struct.unpack('>L', chunk)[0]
            chunk = self.connection.recv(slen)
            while len(chunk) < slen:
                chunk = chunk + self.connection.recv(slen - len(chunk))
            obj = self.unpickle(chunk)
            record = logging.makeLogRecord(obj)
            self.handle_log_record(record)

    def unpickle(self, data):
        try:
            return pickle.loads(data)
        except (pickle.UnpicklingError,
                AttributeError, EOFError, TypeError) as exc:
            return {
                'msg': '({}) Log handling error: un-pickling failed!'.format(
                    exc.__class__.__name__),
                'exc_info': exc.args,
                'level': logging.ERROR,
                'func': self.unpickle.__name__,
            }

    def handle_log_record(self, record):
        logger = logging.getLogger(record.name)
        logger.handle(record)


class LogRecordSocketServer(ThreadingTCPServer):
    """
    Simple TCP socket-based logging server.

    """
    allow_reuse_address = True

    def __init__(self,
                 host='localhost',
                 port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
                 handler=LogRecordStreamHandler):
        super().__init__((host, port), handler)

    def serve_forever(self, *, poll_interval=0.5, log_config=None):
        sub_logging_config = create_subscriber_logging_config(
            user_log_config=log_config)
        dictConfig(sub_logging_config)
        try:
            super().serve_forever(poll_interval=poll_interval)
        except KeyboardInterrupt:
            pass


# NOTE: Because the context manager is only available
# from 3.6 and up, we add it for lower versions.
if sys.version_info < (3, 6):
    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.server_close()

    LogRecordSocketServer.__enter__ = __enter__
    LogRecordSocketServer.__exit__ = __exit__
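
A short sketch (not part of this commit) of how ``create_subscriber_logging_config()`` merges a user-supplied config; the key names are the ones handled above, the values are made up:

```python
# Sketch only: exercising create_subscriber_logging_config() with a
# hypothetical user config; key names match the ones handled above.
from bigchaindb.log.setup import create_subscriber_logging_config

user_config = {
    'file': '/var/log/bigchaindb.log',               # hypothetical path
    'level_console': 'warning',                      # normalized to 'WARNING'
    'granular_levels': {'bigchaindb.core': 'debug'}, # hypothetical logger name
}
config = create_subscriber_logging_config(user_log_config=user_config)
assert config['handlers']['file']['filename'] == '/var/log/bigchaindb.log'
assert config['handlers']['console']['level'] == 'WARNING'
assert config['loggers']['bigchaindb.core'] == {'level': 'DEBUG'}
```
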
@@ -187,6 +187,11 @@ class Block(object):
         if not self.is_signature_valid():
             raise InvalidSignature('Invalid block signature')
 
+        # Check that the block contains no duplicated transactions
+        txids = [tx.id for tx in self.transactions]
+        if len(txids) != len(set(txids)):
+            raise DuplicateTransaction('Block has duplicate transaction')
+
     def _validate_block_transactions(self, bigchain):
         """Validate Block transactions.
 
@@ -196,10 +201,6 @@ class Block(object):
         Raises:
             ValidationError: If an invalid transaction is found
         """
-        txids = [tx.id for tx in self.transactions]
-        if len(txids) != len(set(txids)):
-            raise DuplicateTransaction('Block has duplicate transaction')
-
         for tx in self.transactions:
            # If a transaction is not valid, `validate_transactions` will
            # throw an exception and block validation will be canceled.
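
The check moved into ``validate`` is a plain set-cardinality test; a standalone sketch (with stand-in ids) of the property it enforces:

```python
# Sketch only: a block with a repeated transaction id fails the
# length-vs-set-cardinality comparison used above.
txids = ['tx-a', 'tx-b', 'tx-b']  # stand-in ids; real ones are tx hashes
assert len(txids) != len(set(txids))  # duplicate present, block is invalid
```
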
@@ -113,6 +113,19 @@ def condition_details_has_owner(condition_details, owner):
     return False
 
 
+def output_has_owner(output, owner):
+    # TODO
+    # Check whether it is really necessary to treat the single key case
+    # differently from the multiple keys case, and why not just use the same
+    # function for both cases.
+    if len(output['public_keys']) > 1:
+        return condition_details_has_owner(
+            output['condition']['details'], owner)
+    elif len(output['public_keys']) == 1:
+        return output['condition']['details']['public_key'] == owner
+    # TODO raise proper exception, e.g. invalid tx payload?
+
+
 def is_genesis_block(block):
     """Check if the block is the genesis block.
 
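
A sketch (not part of this commit) of calling the new helper with a hypothetical single-owner output, shaped the way the function above expects; the key and module path are assumptions:

```python
# Sketch only: a hypothetical single-owner output dict; the key layout
# matches what output_has_owner() above reads.
from bigchaindb.utils import output_has_owner  # assumed module path

owner = 'FAKE_PUBLIC_KEY'  # placeholder public key
output = {
    'public_keys': [owner],
    'condition': {'details': {'public_key': owner}},
}
assert output_has_owner(output, owner)
```
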
@@ -22,7 +22,7 @@ class StandaloneApplication(gunicorn.app.base.BaseApplication):
     - http://docs.gunicorn.org/en/latest/custom.html
     """
 
-    def __init__(self, app, options=None):
+    def __init__(self, app, *, options=None):
         '''Initialize a new standalone application.
 
         Args:
@@ -32,7 +32,7 @@ class StandaloneApplication(gunicorn.app.base.BaseApplication):
         '''
         self.options = options or {}
         self.application = app
-        super(StandaloneApplication, self).__init__()
+        super().__init__()
 
     def load_config(self):
         config = dict((key, value) for key, value in self.options.items()
@@ -88,7 +88,8 @@ def create_server(settings):
     if not settings.get('threads'):
         settings['threads'] = (multiprocessing.cpu_count() * 2) + 1
 
+    settings['logger_class'] = 'bigchaindb.log.loggers.HttpServerLogger'
     app = create_app(debug=settings.get('debug', False),
                      threads=settings['threads'])
-    standalone = StandaloneApplication(app, settings)
+    standalone = StandaloneApplication(app, options=settings)
     return standalone
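
Since ``options`` is keyword-only after this change, the old positional call style raises ``TypeError``; a sketch with a trivial WSGI placeholder app (the import path is assumed):

```python
# Sketch only: `options` must now be passed by keyword.
from bigchaindb.web.server import StandaloneApplication  # assumed module path

def app(environ, start_response):  # trivial WSGI placeholder
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

standalone = StandaloneApplication(app, options={'workers': 2})
# StandaloneApplication(app, {'workers': 2}) would now raise TypeError
```
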
@@ -29,9 +29,8 @@ coverage:
     - "docs/*"
     - "tests/*"
     - "bigchaindb/version.py"
-    - "benchmarking-tests/*"
-    - "speed-tests/*"
     - "ntools/*"
+    - "k8s/*"
 
 comment:
   # @stevepeak (from codecov.io) suggested we change 'suggestions' to 'uncovered'
@@ -39,7 +39,6 @@ fi
 
 echo "NUM_NODES = "$NUM_NODES
 echo "BRANCH = "$BRANCH
-echo "WHAT_TO_DEPLOY = "$WHAT_TO_DEPLOY
 echo "SSH_KEY_NAME" = $SSH_KEY_NAME
 echo "USE_KEYPAIRS_FILE = "$USE_KEYPAIRS_FILE
 echo "IMAGE_ID = "$IMAGE_ID
@@ -85,7 +84,7 @@ if [[ $CONFILES_COUNT != $NUM_NODES ]]; then
 fi
 
 # Auto-generate the tag to apply to all nodes in the cluster
-TAG="BDB-"$WHAT_TO_DEPLOY"-"`date +%m-%d@%H:%M`
+TAG="BDB-Server-"`date +%m-%d@%H:%M`
 echo "TAG = "$TAG
 
 # Change the file permissions on the SSH private key file
@@ -121,7 +120,6 @@ fab install_base_software
 fab get_pip3
 fab upgrade_setuptools
 
-if [ "$WHAT_TO_DEPLOY" == "servers" ]; then
 # (Re)create the RethinkDB configuration file conf/rethinkdb.conf
 if [ "$ENABLE_WEB_ADMIN" == "True" ]; then
 if [ "$BIND_HTTP_TO_LOCALHOST" == "True" ]; then
@@ -132,13 +130,13 @@ if [ "$WHAT_TO_DEPLOY" == "servers" ]; then
 else
     python create_rethinkdb_conf.py
 fi
 
 # Rollout RethinkDB and start it
 fab prep_rethinkdb_storage:$USING_EBS
 fab install_rethinkdb
 fab configure_rethinkdb
 fab delete_rethinkdb_data
 fab start_rethinkdb
-fi
 
 # Rollout BigchainDB (but don't start it yet)
 if [ "$BRANCH" == "pypi" ]; then
@@ -156,7 +154,6 @@ fi
 
 # Configure BigchainDB on all nodes
 
-if [ "$WHAT_TO_DEPLOY" == "servers" ]; then
 # The idea is to send a bunch of locally-created configuration
 # files out to each of the instances / nodes.
 
@@ -191,13 +188,6 @@ if [ "$WHAT_TO_DEPLOY" == "servers" ]; then
 fab set_shards:$NUM_NODES
 echo "To set the replication factor to 3, do: fab set_replicas:3"
 echo "To start BigchainDB on all the nodes, do: fab start_bigchaindb"
-else
-    # Deploying clients
-    fab send_client_confile:client_confile
-
-    # Start sending load from the clients to the servers
-    fab start_bigchaindb_load
-fi
 
 # cleanup
 rm add2known_hosts.sh
@@ -23,10 +23,6 @@ NUM_NODES=3
 # It's where to get the BigchainDB code to be deployed on the nodes
 BRANCH="master"
 
-# WHAT_TO_DEPLOY is either "servers" or "clients"
-# What do you want to deploy?
-WHAT_TO_DEPLOY="servers"
-
 # SSH_KEY_NAME is the name of the SSH private key file
 # in $HOME/.ssh/
 # It is used for SSH communications with AWS instances.
15
deploy-cluster-aws/fabfile.py
vendored
@@ -237,15 +237,6 @@ def send_confile(confile):
     run('bigchaindb show-config')
 
 
-@task
-@parallel
-def send_client_confile(confile):
-    put(confile, 'tempfile')
-    run('mv tempfile ~/.bigchaindb')
-    print('For this node, bigchaindb show-config says:')
-    run('bigchaindb show-config')
-
-
 # Initialize BigchainDB
 # i.e. create the database, the tables,
 # the indexes, and the genesis block.
@@ -278,12 +269,6 @@ def start_bigchaindb():
     sudo('screen -d -m bigchaindb -y start &', pty=False)
 
 
-@task
-@parallel
-def start_bigchaindb_load():
-    sudo('screen -d -m bigchaindb load &', pty=False)
-
-
 # Install and run New Relic
 @task
 @parallel
@@ -26,7 +26,7 @@ import boto3
 from awscommon import get_naeips
 
 
-SETTINGS = ['NUM_NODES', 'BRANCH', 'WHAT_TO_DEPLOY', 'SSH_KEY_NAME',
+SETTINGS = ['NUM_NODES', 'BRANCH', 'SSH_KEY_NAME',
             'USE_KEYPAIRS_FILE', 'IMAGE_ID', 'INSTANCE_TYPE', 'SECURITY_GROUP',
             'USING_EBS', 'EBS_VOLUME_SIZE', 'EBS_OPTIMIZED',
             'ENABLE_WEB_ADMIN', 'BIND_HTTP_TO_LOCALHOST']
@@ -77,9 +77,6 @@ if not isinstance(NUM_NODES, int):
 if not isinstance(BRANCH, str):
     raise SettingsTypeError('BRANCH should be a string')
 
-if not isinstance(WHAT_TO_DEPLOY, str):
-    raise SettingsTypeError('WHAT_TO_DEPLOY should be a string')
-
 if not isinstance(SSH_KEY_NAME, str):
     raise SettingsTypeError('SSH_KEY_NAME should be a string')
 
@@ -117,11 +114,6 @@ if NUM_NODES > 64:
                      'The AWS deployment configuration file sets it to {}'.
                      format(NUM_NODES))
 
-if WHAT_TO_DEPLOY not in ['servers', 'clients']:
-    raise ValueError('WHAT_TO_DEPLOY should be either "servers" or "clients". '
-                     'The AWS deployment configuration file sets it to {}'.
-                     format(WHAT_TO_DEPLOY))
-
 if SSH_KEY_NAME in ['not-set-yet', '', None]:
     raise ValueError('SSH_KEY_NAME should be set. '
                      'The AWS deployment configuration file sets it to {}'.
@@ -298,7 +290,7 @@ print('Writing hostlist.py')
 with open('hostlist.py', 'w') as f:
     f.write('# -*- coding: utf-8 -*-\n')
     f.write('"""A list of the public DNS names of all the nodes in this\n')
-    f.write('BigchainDB cluster/federation.\n')
+    f.write('BigchainDB cluster.\n')
     f.write('"""\n')
     f.write('\n')
     f.write('from __future__ import unicode_literals\n')
@@ -3,7 +3,7 @@ How BigchainDB is Good for Asset Registrations & Transfers
 
 BigchainDB can store data of any kind (within reason), but it's designed to be particularly good for storing asset registrations and transfers:
 
-* The fundamental thing that one submits to a BigchainDB federation to be checked and stored (if valid) is a *transaction*, and there are two kinds: CREATE transactions and TRANSFER transactions.
+* The fundamental thing that one sends to a BigchainDB cluster, to be checked and stored (if valid), is a *transaction*, and there are two kinds: CREATE transactions and TRANSFER transactions.
 * A CREATE transaction can be used to register any kind of asset (divisible or indivisible), along with arbitrary metadata.
 * An asset can have zero, one, or several owners.
 * The owners of an asset can specify (crypto-)conditions which must be satisfied by anyone wishing to transfer the asset to new owners. For example, a condition might be that at least 3 of the 5 current owners must cryptographically sign a transfer transaction.
@@ -58,7 +58,7 @@ master_doc = 'index'
 
 # General information about the project.
 project = 'BigchainDB'
-copyright = '2016, BigchainDB Contributors'
+copyright = '2017, BigchainDB Contributors'
 author = 'BigchainDB Contributors'
 
 # The version info for the project you're documenting, acts as replacement for
@@ -4,18 +4,18 @@ Decentralization means that no one owns or controls everything, and there is no
 
 Ideally, each node in a BigchainDB cluster is owned and controlled by a different person or organization. Even if the cluster lives within one organization, it's still preferable to have each node controlled by a different person or subdivision.
 
-We use the phrase "BigchainDB federation" (or just "federation") to refer to the set of people and/or organizations who run the nodes of a BigchainDB cluster. A federation requires some form of governance to make decisions such as membership and policies. The exact details of the governance process are determined by each federation, but it can be very decentralized (e.g. purely vote-based, where each node gets a vote, and there are no special leadership roles).
+We use the phrase "BigchainDB consortium" (or just "consortium") to refer to the set of people and/or organizations who run the nodes of a BigchainDB cluster. A consortium requires some form of governance to make decisions such as membership and policies. The exact details of the governance process are determined by each consortium, but it can be very decentralized (e.g. purely vote-based, where each node gets a vote, and there are no special leadership roles).
 
-The actual data is decentralized in that it doesn’t all get stored in one place. Each federation node stores the primary of one shard and replicas of some other shards. (A shard is a subset of the total set of documents.) Sharding and replication are handled by RethinkDB.
+If sharding is turned on (i.e. if the number of shards is larger than one), then the actual data is decentralized in that no one node stores all the data.
 
-Every node has its own locally-stored list of the public keys of other federation members: the so-called keyring. There's no centrally-stored or centrally-shared keyring.
+Every node has its own locally-stored list of the public keys of other consortium members: the so-called keyring. There's no centrally-stored or centrally-shared keyring.
 
-A federation can increase its decentralization (and its resilience) by increasing its jurisdictional diversity, geographic diversity, and other kinds of diversity. This idea is expanded upon in [the section on node diversity](diversity.html).
+A consortium can increase its decentralization (and its resilience) by increasing its jurisdictional diversity, geographic diversity, and other kinds of diversity. This idea is expanded upon in [the section on node diversity](diversity.html).
 
-There’s no node that has a long-term special position in the federation. All nodes run the same software and perform the same duties.
+There’s no node that has a long-term special position in the cluster. All nodes run the same software and perform the same duties.
 
-RethinkDB has an “admin” user which can’t be deleted and which can make big changes to the database, such as dropping a table. Right now, that’s a big security vulnerability, but we have plans to mitigate it by:
+RethinkDB and MongoDB have an “admin” user which can’t be deleted and which can make big changes to the database, such as dropping a table. Right now, that’s a big security vulnerability, but we have plans to mitigate it by:
 1. Locking down the admin user as much as possible.
-2. Having all nodes inspect RethinkDB admin-type requests before acting on them. Requests can be checked against an evolving whitelist of allowed actions (voted on by federation nodes).
+2. Having all nodes inspect admin-type requests before acting on them. Requests can be checked against an evolving whitelist of allowed actions. Nodes making non-allowed requests can be removed from the list of cluster nodes.
 
 It’s worth noting that the RethinkDB admin user can’t transfer assets, even today. The only way to create a valid transfer transaction is to fulfill the current (crypto) conditions on the asset, and the admin user can’t do that because the admin user doesn’t have the necessary private keys (or preimages, in the case of hashlock conditions). They’re not stored in the database.
@@ -6,6 +6,6 @@ Steps should be taken to make it difficult for any one actor or event to control
 2. **Geographic diversity.** The servers should be physically located at multiple geographic locations, so that it becomes difficult for a natural disaster (such as a flood or earthquake) to damage enough of them to cause problems.
 3. **Hosting diversity.** The servers should be hosted by multiple hosting providers (e.g. Amazon Web Services, Microsoft Azure, Digital Ocean, Rackspace), so that it becomes difficult for one hosting provider to influence enough of the nodes.
 4. **Operating system diversity.** The servers should use a variety of operating systems, so that a security bug in one OS can’t be used to exploit enough of the nodes.
-5. **Diversity in general.** In general, membership diversity (of all kinds) confers many advantages on a federation. For example, it provides the federation with a source of various ideas for addressing challenges.
+5. **Diversity in general.** In general, membership diversity (of all kinds) confers many advantages on a consortium. For example, it provides the consortium with a source of various ideas for addressing challenges.
 
-Note: If all the nodes are running the same code, i.e. the same implementation of BigchainDB, then a bug in that code could be used to compromise all of the nodes. Ideally, there would be several different, well-maintained implementations of BigchainDB Server (e.g. one in Python, one in Go, etc.), so that a federation could also have a diversity of server implementations.
+Note: If all the nodes are running the same code, i.e. the same implementation of BigchainDB, then a bug in that code could be used to compromise all of the nodes. Ideally, there would be several different, well-maintained implementations of BigchainDB Server (e.g. one in Python, one in Go, etc.), so that a consortium could also have a diversity of server implementations.
@@ -8,12 +8,12 @@ It’s true that blockchain data is more difficult to change than usual: it’s
 
 BigchainDB achieves strong tamper-resistance in the following ways:
 
-1. **Replication.** All data is sharded and shards are replicated in several (different) places. The replication factor can be set by the federation. The higher the replication factor, the more difficult it becomes to change or delete all replicas.
+1. **Replication.** All data is sharded and shards are replicated in several (different) places. The replication factor can be set by the consortium. The higher the replication factor, the more difficult it becomes to change or delete all replicas.
 2. **Internal watchdogs.** All nodes monitor all changes and if some unallowed change happens, then appropriate action is taken. For example, if a valid block is deleted, then it is put back.
-3. **External watchdogs.** Federations may opt to have trusted third-parties to monitor and audit their data, looking for irregularities. For federations with publicly-readable data, the public can act as an auditor.
+3. **External watchdogs.** A consortium may opt to have trusted third-parties to monitor and audit their data, looking for irregularities. For a consortium with publicly-readable data, the public can act as an auditor.
 4. **Cryptographic signatures** are used throughout BigchainDB as a way to check if messages (transactions, blocks and votes) have been tampered with enroute, and as a way to verify who signed the messages. Each block is signed by the node that created it. Each vote is signed by the node that cast it. A creation transaction is signed by the node that created it, although there are plans to improve that by adding signatures from the sending client and multiple nodes; see [Issue #347](https://github.com/bigchaindb/bigchaindb/issues/347). Transfer transactions can contain multiple inputs (fulfillments, one per asset transferred). Each fulfillment will typically contain one or more signatures from the owners (i.e. the owners before the transfer). Hashlock fulfillments are an exception; there’s an open issue ([#339](https://github.com/bigchaindb/bigchaindb/issues/339)) to address that.
 5. **Full or partial backups** of the database may be recorded from time to time, possibly on magnetic tape storage, other blockchains, printouts, etc.
 6. **Strong security.** Node owners can adopt and enforce strong security policies.
 7. **Node diversity.** Diversity makes it so that no one thing (e.g. natural disaster or operating system bug) can compromise enough of the nodes. See [the section on the kinds of node diversity](diversity.html).
 
-Some of these things come "for free" as part of the BigchainDB software, and others require some extra effort from the federation and node owners.
+Some of these things come "for free" as part of the BigchainDB software, and others require some extra effort from the consortium and node owners.
@@ -1,11 +1,11 @@
 # Terminology
 
-There is some specialized terminology associated with BigchainDB. To get started, you should at least know what we mean by a BigchainDB *node*, *cluster* and *federation*.
+There is some specialized terminology associated with BigchainDB. To get started, you should at least know what we mean by a BigchainDB *node*, *cluster* and *consortium*.
 
 
 ## Node
 
-A **BigchainDB node** is a machine or set of closely-linked machines running RethinkDB Server, BigchainDB Server, and related software. (A "machine" might be a bare-metal server, a virtual machine or a container.) Each node is controlled by one person or organization.
+A **BigchainDB node** is a machine or set of closely-linked machines running RethinkDB/MongoDB Server, BigchainDB Server, and related software. (A "machine" might be a bare-metal server, a virtual machine or a container.) Each node is controlled by one person or organization.
 
 
 ## Cluster
@@ -13,10 +13,10 @@ A **BigchainDB node** is a machine or set of closely-linked machines running Ret
 A set of BigchainDB nodes can connect to each other to form a **cluster**. Each node in the cluster runs the same software. A cluster contains one logical RethinkDB datastore. A cluster may have additional machines to do things such as cluster monitoring.
 
 
-## Federation
+## Consortium
 
-The people and organizations that run the nodes in a cluster belong to a **federation** (i.e. another organization). A federation must have some sort of governance structure to make decisions. If a cluster is run by a single company, then the federation is just that company.
+The people and organizations that run the nodes in a cluster belong to a **consortium** (i.e. another organization). A consortium must have some sort of governance structure to make decisions. If a cluster is run by a single company, then the "consortium" is just that company.
 
-**What's the Difference Between a Cluster and a Federation?**
+**What's the Difference Between a Cluster and a Consortium?**
 
-A cluster is just a bunch of connected nodes. A federation is an organization which has a cluster, and where each node in the cluster has a different operator. Confusingly, we sometimes call a federation's cluster its "federation." You can probably tell what we mean from context.
+A cluster is just a bunch of connected nodes. A consortium is an organization which has a cluster, and where each node in the cluster has a different operator.
18
docs/server/source/appendices/commands.rst
Normal file
@@ -0,0 +1,18 @@
######################
Command Line Interface
######################

.. automodule:: bigchaindb.commands
   :special-members: __init__


:mod:`bigchaindb.commands.bigchaindb`
-------------------------------------

.. automodule:: bigchaindb.commands.bigchaindb


:mod:`bigchaindb.commands.utils`
--------------------------------

.. automodule:: bigchaindb.commands.utils
@@ -16,6 +16,7 @@ Appendices
    consensus
    pipelines
    backend
+   commands
    aws-setup
    generate-key-pair-for-ssh
    firewall-notes
@@ -140,38 +140,6 @@ machine running the Docker engine. If you are running docker-machine (e.g. on
 Mac OS X) this will be the IP of the Docker machine (`docker-machine ip
 machine_name`).
 
-### Load Testing with Docker
-
-Now that we have BigchainDB running in the Docker container named `bigchaindb`, we can
-start another BigchainDB container to generate a load test for it.
-
-First, make sure the container named `bigchaindb` is still running. You can check that using:
-```text
-docker ps
-```
-
-You should see a container named `bigchaindb` in the list.
-
-You can load test the BigchainDB running in that container by running the `bigchaindb load` command in a second container:
-
-```text
-docker run \
-  --env BIGCHAINDB_DATABASE_HOST=bigchaindb \
-  --link bigchaindb \
-  --rm \
-  --volume "$HOME/bigchaindb_docker:/data" \
-  bigchaindb/bigchaindb \
-  load
-```
-
-Note the `--link` option to link to the first container (named `bigchaindb`).
-
-Aside: The `bigchaindb load` command has several options (e.g. `-m`). You can read more about it in [the documentation about the BigchainDB command line interface](../server-reference/bigchaindb-cli.html).
-
-If you look at the RethinkDB dashboard (in your web browser), you should see the effects of the load test. You can also see some effects in the Docker logs using:
-```text
-docker logs -f bigchaindb
-```
-
 ## Building Your Own Image
 
@@ -0,0 +1,178 @@
Kubernetes Template: Add a BigchainDB Node to an Existing BigchainDB Cluster
============================================================================

This page describes how to deploy a BigchainDB node using Kubernetes,
and how to add that node to an existing BigchainDB cluster.
It assumes you already have a running Kubernetes cluster
where you can deploy the new BigchainDB node.

If you want to deploy the first BigchainDB node in a BigchainDB cluster,
or a stand-alone BigchainDB node,
then see :doc:`the page about that <node-on-kubernetes>`.


Terminology Used
----------------

``existing cluster`` will refer to one of the existing Kubernetes clusters
hosting one of the existing BigchainDB nodes.

``ctx-1`` will refer to the kubectl context of the existing cluster.

``new cluster`` will refer to the new Kubernetes cluster that will run a new
BigchainDB node (including a BigchainDB instance and a MongoDB instance).

``ctx-2`` will refer to the kubectl context of the new cluster.

``new MongoDB instance`` will refer to the MongoDB instance in the new cluster.

``existing MongoDB instance`` will refer to the MongoDB instance in the
existing cluster.

``new BigchainDB instance`` will refer to the BigchainDB instance in the new
cluster.

``existing BigchainDB instance`` will refer to the BigchainDB instance in the
existing cluster.


Step 1: Prerequisites
---------------------

* A public/private key pair for the new BigchainDB instance.

* The public key should be shared offline with the other existing BigchainDB
  nodes in the existing BigchainDB cluster.

* You will need the public keys of all the existing BigchainDB nodes.

* A new Kubernetes cluster set up with kubectl configured to access it.

* Some familiarity with deploying a BigchainDB node on Kubernetes.
  See our :doc:`other docs about that <node-on-kubernetes>`.

Note: If you are managing multiple Kubernetes clusters from your local
system, you can run ``kubectl config view`` to list all the contexts that
are available for the local kubectl.
To target a specific cluster, add a ``--context`` flag to the kubectl CLI. For
example:

.. code:: bash

   $ kubectl --context ctx-1 apply -f example.yaml
   $ kubectl --context ctx-2 apply -f example.yaml
   $ kubectl --context ctx-1 proxy --port 8001
   $ kubectl --context ctx-2 proxy --port 8002


Step 2: Prepare the New Kubernetes Cluster
------------------------------------------

Follow the steps in the sections below to set up Storage Classes and Persistent
Volume Claims, and to run MongoDB in the new cluster:

1. :ref:`Add Storage Classes <Step 3: Create Storage Classes>`
2. :ref:`Add Persistent Volume Claims <Step 4: Create Persistent Volume Claims>`
3. :ref:`Create the Config Map <Step 5: Create the Config Map - Optional>`
4. :ref:`Run MongoDB instance <Step 6: Run MongoDB as a StatefulSet>`


Step 3: Add the New MongoDB Instance to the Existing Replica Set
----------------------------------------------------------------

Note that by ``replica set`` we are referring to the MongoDB replica set,
not a Kubernetes ``ReplicaSet``.

If you are not the administrator of an existing BigchainDB node, you
will have to coordinate offline with an existing administrator so that they can
add the new MongoDB instance to the replica set.

Add the new instance of MongoDB from an existing instance by accessing the
``mongo`` shell.

.. code:: bash

   $ kubectl --context ctx-1 exec -it mdb-0 -c mongodb -- /bin/bash
   root@mdb-0# mongo --port 27017

One can only add members to a replica set from the ``PRIMARY`` instance.
The ``mongo`` shell prompt should state that this is the primary member in the
replica set.
If not, then you can use the ``rs.status()`` command to find out who the
primary is, and log in to the ``mongo`` shell on the primary.

Run the ``rs.add()`` command with the FQDN and port number of the other instances:

.. code:: bash

   PRIMARY> rs.add("<fqdn>:<port>")


Step 4: Verify the Replica Set Membership
-----------------------------------------

You can use the ``rs.conf()`` and the ``rs.status()`` commands available in the
mongo shell to verify the replica set membership.

The new MongoDB instance should be listed in the membership information
displayed.


Step 5: Start the New BigchainDB Instance
-----------------------------------------

Get the file ``bigchaindb-dep.yaml`` from GitHub using:

.. code:: bash

   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/bigchaindb/bigchaindb-dep.yaml

Note that we set the ``BIGCHAINDB_DATABASE_HOST`` to ``mdb`` which is the name
of the MongoDB service defined earlier.

Edit the ``BIGCHAINDB_KEYPAIR_PUBLIC`` with the public key of this instance,
the ``BIGCHAINDB_KEYPAIR_PRIVATE`` with the private key of this instance and
the ``BIGCHAINDB_KEYRING`` with a ``:`` delimited list of all the public keys
in the BigchainDB cluster.

Create the required Deployment using:

.. code:: bash

   $ kubectl --context ctx-2 apply -f bigchaindb-dep.yaml

You can check its status using the command ``kubectl get deploy -w``.


Step 6: Restart the Existing BigchainDB Instance(s)
---------------------------------------------------

Add the public key of the new BigchainDB instance to the keyring of all the
existing BigchainDB instances and update the BigchainDB instances using:

.. code:: bash

   $ kubectl --context ctx-1 replace -f bigchaindb-dep.yaml

This will create a "rolling deployment" in Kubernetes where a new instance of
BigchainDB will be created, and if the health check on the new instance is
successful, the earlier one will be terminated. This ensures that there is
zero downtime during updates.

You can SSH to an existing BigchainDB instance and run the ``bigchaindb
show-config`` command to check that the keyring is updated.


Step 7: Run NGINX as a Deployment
---------------------------------

Please refer to :ref:`this section <Step 10: Run NGINX as a Deployment>` to
set up NGINX in your new node.


Step 8: Test Your New BigchainDB Node
-------------------------------------

Please refer to the testing steps :ref:`here <Step 11: Verify the BigchainDB
Node Setup>` to verify that your new BigchainDB node is working as expected.
@@ -5,7 +5,7 @@ We have some "templates" to deploy a basic, working, but bare-bones BigchainDB n
 
 You don't have to use the tools we use in the templates. You can use whatever tools you prefer.
 
-If you find the cloud deployment templates for nodes helpful, then you may also be interested in our scripts for :doc:`deploying a testing cluster on AWS <../clusters-feds/aws-testing-cluster>` (documented in the Clusters & Federations section).
+If you find the cloud deployment templates for nodes helpful, then you may also be interested in our scripts for :doc:`deploying a testing cluster on AWS <../clusters-feds/aws-testing-cluster>` (documented in the Clusters section).
 
 .. toctree::
    :maxdepth: 1
@@ -15,4 +15,6 @@ If you find the cloud deployment templates for nodes helpful, then you may also
    azure-quickstart-template
    template-kubernetes-azure
    node-on-kubernetes
+   add-node-on-kubernetes
+   upgrade-on-kubernetes
@ -1,9 +1,13 @@
|
|||||||
Run a BigchainDB Node in a Kubernetes Cluster
|
Kubernetes Template: Deploy a Single BigchainDB Node
|
||||||
=============================================
|
====================================================
|
||||||
|
|
||||||
Assuming you already have a `Kubernetes <https://kubernetes.io/>`_
|
This page describes how to deploy the first BigchainDB node
|
||||||
cluster up and running, this page describes how to run a
|
in a BigchainDB cluster, or a stand-alone BigchainDB node,
|
||||||
BigchainDB node in it.
|
using `Kubernetes <https://kubernetes.io/>`_.
|
||||||
|
It assumes you already have a running Kubernetes cluster.
|
||||||
|
|
||||||
|
If you want to add a new BigchainDB node to an existing BigchainDB cluster,
|
||||||
|
refer to :doc:`the page about that <add-node-on-kubernetes>`.
|
||||||
|
|
||||||
|
|
||||||
Step 1: Install kubectl
|
Step 1: Install kubectl
|
||||||
@ -31,24 +35,29 @@ then you can get the ``~/.kube/config`` file using:
|
|||||||
--resource-group <name of resource group containing the cluster> \
|
--resource-group <name of resource group containing the cluster> \
|
||||||
--name <ACS cluster name>
|
--name <ACS cluster name>
|
||||||
|
|
||||||
|
If it asks for a password (to unlock the SSH key)
|
||||||
|
and you enter the correct password,
|
||||||
|
but you get an error message,
|
||||||
|
then try adding ``--ssh-key-file ~/.ssh/<name>``
|
||||||
|
to the above command (i.e. the path to the private key).
|
||||||
|
|
||||||
|
|
||||||
Step 3: Create Storage Classes
|
Step 3: Create Storage Classes
|
||||||
------------------------------
|
------------------------------
|
||||||
|
|
||||||
MongoDB needs somewhere to store its data persistently,
|
MongoDB needs somewhere to store its data persistently,
|
||||||
outside the container where MongoDB is running.
|
outside the container where MongoDB is running.
|
||||||
|
Our MongoDB Docker container
|
||||||
The official MongoDB Docker container exports two volume mounts with correct
|
(based on the official MongoDB Docker container)
|
||||||
|
exports two volume mounts with correct
|
||||||
permissions from inside the container:
|
permissions from inside the container:
|
||||||
|
|
||||||
|
* The directory where the mongod instance stores its data: ``/data/db``.
|
||||||
|
There's more explanation in the MongoDB docs about `storage.dbpath <https://docs.mongodb.com/manual/reference/configuration-options/#storage.dbPath>`_.
|
||||||
|
|
||||||
* The directory where the mongod instance stores its data - ``/data/db``,
|
* The directory where the mongodb instance stores the metadata for a sharded
|
||||||
described at `storage.dbpath <https://docs.mongodb.com/manual/reference/configuration-options/#storage.dbPath>`_.
|
cluster: ``/data/configdb/``.
|
||||||
|
There's more explanation in the MongoDB docs about `sharding.configDB <https://docs.mongodb.com/manual/reference/configuration-options/#sharding.configDB>`_.
|
||||||
* The directory where mongodb instance stores the metadata for a sharded
|
|
||||||
cluster - ``/data/configdb/``, described at
|
|
||||||
`sharding.configDB <https://docs.mongodb.com/manual/reference/configuration-options/#sharding.configDB>`_.
|
|
||||||
|
|
||||||
|
|
||||||
Explaining how Kubernetes handles persistent volumes,
|
Explaining how Kubernetes handles persistent volumes,
|
||||||
and the associated terminology,
|
and the associated terminology,
|
||||||
@ -57,9 +66,6 @@ see `the Kubernetes docs about persistent volumes
|
|||||||
<https://kubernetes.io/docs/user-guide/persistent-volumes>`_.
|
<https://kubernetes.io/docs/user-guide/persistent-volumes>`_.
|
||||||
|
|
||||||
The first thing to do is create the Kubernetes storage classes.
|
The first thing to do is create the Kubernetes storage classes.
|
||||||
We will accordingly create two storage classes and persistent volume claims in
|
|
||||||
Kubernetes.
|
|
||||||
|
|
||||||
|
|
||||||
**Azure.** First, you need an Azure storage account.
|
**Azure.** First, you need an Azure storage account.
|
||||||
If you deployed your Kubernetes cluster on Azure
|
If you deployed your Kubernetes cluster on Azure
|
||||||
@ -73,7 +79,6 @@ Standard storage is lower-cost and lower-performance.
|
|||||||
It uses hard disk drives (HDD).
|
It uses hard disk drives (HDD).
|
||||||
LRS means locally-redundant storage: three replicas
|
LRS means locally-redundant storage: three replicas
|
||||||
in the same data center.
|
in the same data center.
|
||||||
|
|
||||||
Premium storage is higher-cost and higher-performance.
|
Premium storage is higher-cost and higher-performance.
|
||||||
It uses solid state drives (SSD).
|
It uses solid state drives (SSD).
|
||||||
At the time of writing,
|
At the time of writing,
|
||||||
@ -84,29 +89,25 @@ For future reference, the command to create a storage account is
|
|||||||
`az storage account create <https://docs.microsoft.com/en-us/cli/azure/storage/account#create>`_.
|
`az storage account create <https://docs.microsoft.com/en-us/cli/azure/storage/account#create>`_.
|
||||||
|
|
||||||
|
|
||||||
Get the files ``mongo-data-db-sc.yaml`` and ``mongo-data-configdb-sc.yaml``
|
Get the file ``mongo-sc.yaml`` from GitHub using:
|
||||||
from GitHub using:
|
|
||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-data-db-sc.yaml
|
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-sc.yaml
|
||||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-data-configdb-sc.yaml
|
|
||||||
|
|
||||||
You may want to update the ``parameters.location`` field in both the files to
|
You may have to update the ``parameters.location`` field in both the files to
|
||||||
specify the location you are using in Azure.
|
specify the location you are using in Azure.
|
||||||
|
|
||||||
|
Create the required storage classes using:
|
||||||
Create the required StorageClass using
|
|
||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ kubectl apply -f mongo-data-db-sc.yaml
|
$ kubectl apply -f mongo-sc.yaml
|
||||||
$ kubectl apply -f mongo-data-configdb-sc.yaml
|
|
||||||
|
|
||||||
|
|
||||||
You can check if it worked using ``kubectl get storageclasses``.
|
You can check if it worked using ``kubectl get storageclasses``.
|
||||||
|
|
||||||
Note that there is no line of the form
|
**Azure.** Note that there is no line of the form
|
||||||
``storageAccount: <azure storage account name>``
|
``storageAccount: <azure storage account name>``
|
||||||
under ``parameters:``. When we included one
|
under ``parameters:``. When we included one
|
||||||
and then created a PersistentVolumeClaim based on it,
|
and then created a PersistentVolumeClaim based on it,
|
||||||
@ -119,16 +120,13 @@ with the specified skuName and location.

Step 4: Create Persistent Volume Claims
---------------------------------------

Next, you will create two PersistentVolumeClaim objects ``mongo-db-claim`` and
``mongo-configdb-claim``.

Get the file ``mongo-pvc.yaml`` from GitHub using:

.. code:: bash

   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-pvc.yaml

Note how there's no explicit mention of Azure, AWS or whatever.
``ReadWriteOnce`` (RWO) means the volume can be mounted as
@ -141,12 +139,11 @@ by AzureDisk.)

You may want to update the ``spec.resources.requests.storage`` field in both
PersistentVolumeClaim definitions to specify a different disk size.

Create the required Persistent Volume Claims using:

.. code:: bash

   $ kubectl apply -f mongo-pvc.yaml


You can check their status using: ``kubectl get pvc -w``

@ -155,9 +152,73 @@ Initially, the status of persistent volume claims might be "Pending"
but it should become "Bound" fairly quickly.

Step 5: Create the Config Map - Optional
----------------------------------------

This step is required only if you are planning to set up multiple
`BigchainDB nodes
<https://docs.bigchaindb.com/en/latest/terminology.html#node>`_.

MongoDB reads the local ``/etc/hosts`` file while bootstrapping a replica set
to resolve the hostname provided to the ``rs.initiate()`` command. It needs to
ensure that the replica set is being initialized in the same instance where
the MongoDB instance is running.

To achieve this, you will create a ConfigMap with the FQDN of the MongoDB instance
and populate the ``/etc/hosts`` file with this value so that a replica set can
be created seamlessly.

Get the file ``mongo-cm.yaml`` from GitHub using:

.. code:: bash

   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-cm.yaml

You may want to update the ``data.fqdn`` field in the file before creating the
ConfigMap. The ``data.fqdn`` field will be the DNS name of your MongoDB instance.
This will be used by other MongoDB instances when forming a MongoDB
replica set. It should resolve to the MongoDB instance in your cluster when
you are done with the setup. This will help when you are adding more MongoDB
instances to the replica set in the future.


**Azure.**
In Kubernetes on ACS, the name you populate in the ``data.fqdn`` field
will be used to configure a DNS name for the public IP assigned to the
Kubernetes Service that is the frontend for the MongoDB instance.
We suggest using a name that is still available in Azure.
We use ``mdb-instance-0``, ``mdb-instance-1`` and so on in this document,
which gives us ``mdb-instance-0.<azure location>.cloudapp.azure.com``,
``mdb-instance-1.<azure location>.cloudapp.azure.com``, etc. as the FQDNs.
The ``<azure location>`` is the Azure datacenter location you are using,
which can also be obtained using the ``az account list-locations`` command.
You can also try to assign a name to a Public IP in Azure before starting
the process, or use ``nslookup`` with the name you have in mind to check
if it's available for use.

You should ensure that the name specified in the ``data.fqdn`` field is
a unique one.

**Kubernetes on bare-metal or other cloud providers.**
You need to provide the name resolution function
by other means (using DNS providers like GoDaddy, CloudFlare or your own
private DNS server). The DNS setup for other environments is currently
beyond the scope of this document.


Create the required ConfigMap using:

.. code:: bash

   $ kubectl apply -f mongo-cm.yaml


You can check its status using: ``kubectl get cm``
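
If you want to double-check the values before moving on, you can list the
ConfigMaps and inspect the one just created; the ConfigMap name below is a
placeholder, so use whatever name ``mongo-cm.yaml`` actually defines:

.. code:: bash

   $ kubectl get cm
   $ kubectl describe cm <configmap name>   # the data.fqdn value should be your DNS name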

Now you are ready to run MongoDB and BigchainDB on your Kubernetes cluster.


Step 6: Run MongoDB as a StatefulSet
------------------------------------

Get the file ``mongo-ss.yaml`` from GitHub using:
@ -172,17 +233,15 @@ Note how the MongoDB container uses the ``mongo-db-claim`` and the
``/data/configdb`` directories (mount path). Note also that we use the pod's
``securityContext.capabilities.add`` specification to add the ``FOWNER``
capability to the container.

That is because the MongoDB container has the user ``mongodb``, with uid ``999``
and group ``mongodb``, with gid ``999``.
When this container runs on a host with a mounted disk, the writes fail when
there is no user with uid ``999``.

To avoid this, we use the Docker feature of ``--cap-add=FOWNER``.
This bypasses the uid and gid permission checks during writes and allows data
to be persisted to disk.
Refer to the
`Docker docs <https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities>`_
for details.

As we gain more experience running MongoDB in testing and production, we will
@ -199,8 +258,83 @@ Create the required StatefulSet using:

You can check its status using the commands ``kubectl get statefulsets -w``
and ``kubectl get svc -w``

You may have to wait for up to 10 minutes for the disk to be created
and attached on the first run. The pod can fail several times with the message
saying that the timeout for mounting the disk was exceeded.
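
If the pod keeps restarting while the disk is being attached, its events show
the attach/mount progress; for example (the pod name ``mdb-0`` matches the one
used in the next step):

.. code:: bash

   $ kubectl describe pod mdb-0   # look at the Events section at the bottom
   $ kubectl get events -w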


Step 7: Initialize a MongoDB Replica Set - Optional
---------------------------------------------------

This step is required only if you are planning to set up multiple
`BigchainDB nodes
<https://docs.bigchaindb.com/en/latest/terminology.html#node>`_.


Log in to the running MongoDB instance and access the mongo shell using:

.. code:: bash

   $ kubectl exec -it mdb-0 -c mongodb -- /bin/bash
   root@mdb-0:/# mongo --port 27017

You will initiate the replica set by using the ``rs.initiate()`` command from the
mongo shell. Its syntax is:

.. code:: bash

   rs.initiate({
     _id : "<replica-set-name>",
     members: [ {
       _id : 0,
       host : "<fqdn of this instance>:<port number>"
     } ]
   })

An example command might look like:

.. code:: bash

   > rs.initiate({ _id : "bigchain-rs", members: [ { _id : 0, host : "mdb-instance-0.westeurope.cloudapp.azure.com:27017" } ] })


where ``mdb-instance-0.westeurope.cloudapp.azure.com`` is the value stored in
the ``data.fqdn`` field in the ConfigMap created using ``mongo-cm.yaml``.


You should see changes in the mongo shell prompt from ``>``
to ``bigchain-rs:OTHER>`` to ``bigchain-rs:SECONDARY>`` and finally
to ``bigchain-rs:PRIMARY>``.

You can use the ``rs.conf()`` and the ``rs.status()`` commands to check the
detailed replica set configuration now.

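As a quick sanity check from the same mongo shell, the member state reported
by ``rs.status()`` should be ``PRIMARY`` once initiation completes:

.. code:: bash

   bigchain-rs:PRIMARY> rs.status().members[0].stateStr
   PRIMARY
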

Step 8: Create a DNS record - Optional
--------------------------------------

This step is required only if you are planning to set up multiple
`BigchainDB nodes
<https://docs.bigchaindb.com/en/latest/terminology.html#node>`_.

**Azure.** Select the current Azure resource group and look for the ``Public IP``
resource. You should see at least 2 entries there - one for the Kubernetes
master and the other for the MongoDB instance. You may have to ``Refresh`` the
Azure web page listing the resources in a resource group for the latest
changes to be reflected.
Select the ``Public IP`` resource that is attached to your service (it should
have the Kubernetes cluster name along with a random string),
select ``Configuration``, add the DNS name that was added in the
ConfigMap earlier, click ``Save``, and wait for the changes to be applied.

To verify the DNS setting is operational, you can run ``nslookup <dns
name added in ConfigMap>`` from your local Linux shell.

This will ensure that when you scale the replica set later, other MongoDB
members in the replica set can reach this instance.
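
For example, with the naming scheme used earlier in this document, the check
would be (the FQDN is illustrative):

.. code:: bash

   $ nslookup mdb-instance-0.westeurope.cloudapp.azure.com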


Step 9: Run BigchainDB as a Deployment
--------------------------------------

Get the file ``bigchaindb-dep.yaml`` from GitHub using:
@ -209,8 +343,8 @@ Get the file ``bigchaindb-dep.yaml`` from GitHub using:

   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/bigchaindb/bigchaindb-dep.yaml

Note that we set the ``BIGCHAINDB_DATABASE_HOST`` to ``mdb-svc`` which is the
name of the MongoDB service defined earlier.

We also hardcode the ``BIGCHAINDB_KEYPAIR_PUBLIC``,
``BIGCHAINDB_KEYPAIR_PRIVATE`` and ``BIGCHAINDB_KEYRING`` for now.
@ -233,23 +367,56 @@ Create the required Deployment using:

You can check its status using the command ``kubectl get deploy -w``

Step 10: Run NGINX as a Deployment
----------------------------------

NGINX is used as a proxy to both the BigchainDB and MongoDB instances in the
node.
It proxies HTTP requests on port 80 to the BigchainDB backend, and TCP
connections on port 27017 to the MongoDB backend.

You can also configure a whitelist in NGINX to allow only connections from
other instances in the MongoDB replica set to access the backend MongoDB
instance.

Get the file ``nginx-cm.yaml`` from GitHub using:

.. code:: bash

   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/nginx/nginx-cm.yaml

The IP address whitelist can be explicitly configured in the ``nginx-cm.yaml``
file. You will need a list of the IP addresses of all the other MongoDB
instances in the cluster. If the MongoDB instances specify a hostname, then this
needs to be resolved to the corresponding IP addresses. If the IP address of
any MongoDB instance changes, we can start a 'rolling upgrade' of NGINX after
updating the corresponding ConfigMap without affecting availability.
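
A minimal sketch of such a rolling upgrade, assuming the NGINX Deployment
defined in ``nginx-dep.yaml`` is named ``ngx-dep`` (check the file for the
actual name; ``kubectl rollout restart`` needs a recent kubectl):

.. code:: bash

   $ kubectl apply -f nginx-cm.yaml           # push the updated whitelist
   $ kubectl rollout restart deploy/ngx-dep   # recreate the NGINX pods
   $ kubectl rollout status deploy/ngx-dep    # wait until the new pods are ready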

Create the ConfigMap for the whitelist using:

.. code:: bash

   $ kubectl apply -f nginx-cm.yaml

Get the file ``nginx-dep.yaml`` from GitHub using:

.. code:: bash

   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/nginx/nginx-dep.yaml

Create the NGINX deployment using:

.. code:: bash

   $ kubectl apply -f nginx-dep.yaml


Step 11: Verify the BigchainDB Node Setup
-----------------------------------------

Step 11.1: Testing Internally
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Run a container that provides utilities like ``nslookup``, ``curl`` and ``dig``
on the cluster and query the internal DNS and IP endpoints.
@ -258,23 +425,53 @@ on the cluster and query the internal DNS and IP endpoints.

   $ kubectl run -it toolbox --image <docker image to run> --restart=Never --rm

There is a generic image based on alpine:3.5 with the required utilities
hosted at Docker Hub under ``bigchaindb/toolbox``.
The corresponding Dockerfile is `here
<https://github.com/bigchaindb/bigchaindb/blob/master/k8s/toolbox/Dockerfile>`_.

You can use it as below to get started immediately:

.. code:: bash

   $ kubectl run -it toolbox --image bigchaindb/toolbox --restart=Never --rm

It will drop you to the shell prompt.
Now you can query for the ``mdb`` and ``bdb`` service details.

.. code:: bash

   # nslookup mdb-svc
   # nslookup bdb-svc
   # nslookup ngx-svc
   # dig +noall +answer _mdb-port._tcp.mdb-svc.default.svc.cluster.local SRV
   # dig +noall +answer _bdb-port._tcp.bdb-svc.default.svc.cluster.local SRV
   # dig +noall +answer _ngx-public-mdb-port._tcp.ngx-svc.default.svc.cluster.local SRV
   # dig +noall +answer _ngx-public-bdb-port._tcp.ngx-svc.default.svc.cluster.local SRV
   # curl -X GET http://mdb-svc:27017
   # curl -X GET http://bdb-svc:9984
   # curl -X GET http://ngx-svc:80
   # curl -X GET http://ngx-svc:27017

The ``nslookup`` commands should output the configured IP addresses of the
services in the cluster.

The ``dig`` commands should return the port numbers configured for the
various services in the cluster.

Finally, the ``curl`` commands test the availability of the services
themselves.

Step 11.2: Testing Externally
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Try to access the ``<dns/ip of your exposed bigchaindb service endpoint>:80``
on your browser. You must receive a json output that shows the BigchainDB
server version among other things.

Try to access the ``<dns/ip of your exposed mongodb service endpoint>:27017``
on your browser. If your IP is in the whitelist, you will receive a message
from the MongoDB instance stating that it doesn't allow HTTP connections to
the port anymore. If your IP is not in the whitelist, your access will be
blocked and you will not see any response from the MongoDB instance.

@ -168,7 +168,7 @@ using something like:

.. code:: bash

   $ ssh ubuntu@k8s-agent-4AC80E97-0

where ``k8s-agent-4AC80E97-0`` is the name
of a Kubernetes agent node in your Kubernetes cluster.

@ -0,0 +1,105 @@

Kubernetes Template: Upgrade all Software in a BigchainDB Node
==============================================================

This page outlines how to upgrade all the software associated
with a BigchainDB node running on Kubernetes,
including host operating systems, Docker, Kubernetes,
and BigchainDB-related software.


Upgrade Host OS, Docker and Kubernetes
--------------------------------------

Some Kubernetes installation & management systems
can do full or partial upgrades of host OSes, Docker,
or Kubernetes, e.g.
`Tectonic <https://coreos.com/tectonic/>`_,
`Rancher <https://docs.rancher.com/rancher/v1.5/en/>`_,
and
`Kubo <https://pivotal.io/kubo>`_.
Consult the documentation for your system.

**Azure Container Service (ACS).**
On Dec. 15, 2016, a Microsoft employee
`wrote <https://github.com/colemickens/azure-kubernetes-status/issues/15#issuecomment-267453251>`_:
"In the coming months we [the Azure Kubernetes team] will be building managed updates in the ACS service."
At the time of writing, managed updates were not yet available,
but you should check the latest
`ACS documentation <https://docs.microsoft.com/en-us/azure/container-service/>`_
to see what's available now.
Also at the time of writing, ACS only supported Ubuntu
as the host (master and agent) operating system.
You can upgrade Ubuntu and Docker on Azure
by SSHing into each of the hosts,
as documented on
:ref:`another page <Optional: SSH to Your New Kubernetes Cluster Nodes>`.

In general, you can SSH to each host in your Kubernetes cluster
to update the OS and Docker.

.. note::

   Once you are in an SSH session with a host,
   the ``docker info`` command is a handy way to determine the
   host OS (including version) and the Docker version.

When you want to upgrade the software on a Kubernetes node,
you should "drain" the node first,
i.e. tell Kubernetes to gracefully terminate all pods
on the node and mark it as unschedulable
(so no new pods get put on the node during its downtime).

.. code::

    kubectl drain $NODENAME

There are `more details in the Kubernetes docs <https://kubernetes.io/docs/admin/cluster-management/#maintenance-on-a-node>`_,
including instructions to make the node schedulable again.
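
When the maintenance is done, making the node schedulable again is a single
command:

.. code::

    kubectl uncordon $NODENAME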

To manually upgrade the host OS,
see the docs for that OS.

To manually upgrade Docker, see
`the Docker docs <https://docs.docker.com/>`_.

To manually upgrade all Kubernetes software in your Kubernetes cluster, see
`the Kubernetes docs <https://kubernetes.io/docs/admin/cluster-management/>`_.


Upgrade BigchainDB-Related Software
-----------------------------------

We use Kubernetes "Deployments" for NGINX, BigchainDB,
and most other BigchainDB-related software.
The only exception is MongoDB; we use a Kubernetes
StatefulSet for that.

The nice thing about Kubernetes Deployments
is that Kubernetes can manage most of the upgrade process.
A typical upgrade workflow for a single Deployment would be:

.. code::

    $ KUBE_EDITOR=nano kubectl edit deployment/<name of Deployment>

The `kubectl edit <https://kubernetes.io/docs/user-guide/kubectl/kubectl_edit/>`_
command opens the specified editor (nano in the above example),
allowing you to edit the specified Deployment *in the Kubernetes cluster*.
You can change the version tag on the Docker image, for example.
Don't forget to save your edits before exiting the editor.
The Kubernetes docs have more information about
`updating a Deployment <https://kubernetes.io/docs/user-guide/deployments/#updating-a-deployment>`_.
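
If all you want to change is the image tag, ``kubectl set image`` avoids the
interactive editor. For example, using the Deployment and container names from
the ``bigchaindb-dep.yaml`` file in this repo (the tag is illustrative):

.. code::

    $ kubectl set image deployment/bdb-dep bigchaindb=bigchaindb/bigchaindb:0.10.0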

The upgrade story for the MongoDB StatefulSet is *different*.
(This is because MongoDB has persistent state,
which is stored in some storage associated with a PersistentVolumeClaim.)
At the time of writing, StatefulSets were still in beta,
and they did not support automated image upgrade (Docker image tag upgrade).
We expect that to change.
Rather than trying to keep these docs up-to-date,
we advise you to check out the current
`Kubernetes docs about updating containers in StatefulSets
<https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-containers>`_.

@ -86,7 +86,6 @@ Step 2 is to make an AWS deployment configuration file, if necessary. There's an

```text
NUM_NODES=3
BRANCH="master"
SSH_KEY_NAME="not-set-yet"
USE_KEYPAIRS_FILE=False
IMAGE_ID="ami-8504fdea"
@ -64,7 +64,7 @@ In the future, it will be possible for clients to query for the blocks containin

**How could we be sure blocks and votes from a client are valid?**

All blocks and votes are signed by cluster nodes (owned and operated by consortium members). Only cluster nodes can produce valid signatures because only cluster nodes have the necessary private keys. A client can't produce a valid signature for a block or vote.

**Could we restore an entire BigchainDB database using client-saved blocks and votes?**
@ -109,7 +109,7 @@ Considerations for BigchainDB:

Although it's not advertised as such, RethinkDB's built-in replication feature is similar to continuous backup, except the "backup" (i.e. the set of replica shards) is spread across all the nodes. One could take that idea a bit farther by creating a set of backup-only servers with one full backup:

* Give all the original BigchainDB nodes (RethinkDB nodes) the server tag `original`. This is the default if you used the RethinkDB config file suggested in the section titled [Configure RethinkDB Server](../dev-and-test/setup-run-node.html#configure-rethinkdb-server).
* Set up a group of servers running RethinkDB only, and give them the server tag `backup`. The `backup` servers could be geographically separated from all the `original` nodes (or not; it's up to the consortium to decide).
* Clients shouldn't be able to read from or write to servers in the `backup` set.
* Send a RethinkDB reconfigure command to the RethinkDB cluster to make it so that the `original` set has the same number of replicas as before (or maybe one less), and the `backup` set has one replica. Also, make sure the `primary_replica_tag='original'` so that all primary shards live on the `original` nodes. (A sketch of one way to issue that command follows this list.)

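The sketch below is one way to issue such a reconfigure command from the RethinkDB Python driver; it assumes BigchainDB's default database and table names (`bigchain`), a RethinkDB node reachable on `localhost:28015`, and example replica counts, so adjust all of those to your cluster:

```bash
$ python -c "
import rethinkdb as r                        # RethinkDB Python driver
conn = r.connect('localhost', 28015)         # assumes a locally reachable RethinkDB node
print(r.db('bigchain').table('bigchain').reconfigure(
    shards=1,                                # keep your existing shard count here
    replicas={'original': 3, 'backup': 1},   # replicas per server tag
    primary_replica_tag='original').run(conn))
"
```
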
@ -1,10 +1,10 @@

Clusters
========

.. toctree::
   :maxdepth: 1

   set-up-a-cluster
   backup
   aws-testing-cluster

@ -1,11 +1,11 @@

# Set Up a Cluster

This section is about how to set up a BigchainDB cluster where each node is operated by a different operator. If you want to set up and run a testing cluster on AWS (where all nodes are operated by you), then see [the section about that](aws-testing-cluster.html).


## Initial Checklist

* Do you have a governance process for making consortium-level decisions, such as how to admit new members?
* What will you store in creation transactions (data payload)? Is there a data schema?
* Will you use transfer transactions? Will they include a non-empty data payload?
* Who will be allowed to submit transactions? Who will be allowed to read or query transactions? How will you enforce the access rules?
@ -13,7 +13,7 @@ This section is about how to set up a BigchainDB _federation_, where each node i

## Set Up the Initial Cluster

The consortium must decide some things before setting up the initial cluster (initial set of BigchainDB nodes):

1. Who will operate a node in the initial cluster?
2. What will the replication factor be? (It must be 3 or more for [RethinkDB failover](https://rethinkdb.com/docs/failover/) to work.)
@ -21,7 +21,7 @@ The federation must decide some things before setting up the initial cluster (in

Once those things have been decided, each node operator can begin setting up their BigchainDB (production) node.

Each node operator will eventually need two pieces of information from all other nodes:

1. Their RethinkDB hostname, e.g. `rdb.farm2.organization.org`
2. Their BigchainDB public key, e.g. `Eky3nkbxDTMgkmiJC8i5hKyVFiAQNmPP4a2G4JdDxJCK`
@ -82,7 +82,7 @@ master_doc = 'index'

# General information about the project.
project = 'BigchainDB Server'
copyright = '2017, BigchainDB Contributors'
author = 'BigchainDB Contributors'

# The version info for the project you're documenting, acts as replacement for

@ -11,7 +11,7 @@ A block has the following structure:
    "timestamp": "<block-creation timestamp>",
    "transactions": ["<list of transactions>"],
    "node_pubkey": "<public key of the node creating the block>",
    "voters": ["<list of public keys of all nodes in the cluster>"]
  },
  "signature": "<signature of block>"
}
@ -23,9 +23,9 @@ A block has the following structure:

- ``timestamp``: The Unix time when the block was created. It's provided by the node that created the block.
- ``transactions``: A list of the transactions included in the block.
- ``node_pubkey``: The public key of the node that created the block.
- ``voters``: A list of the public keys of all cluster nodes at the time the block was created.
  It's the list of nodes which can cast a vote on this block.
  This list can change from block to block, as nodes join and leave the cluster.

- ``signature``: :ref:`Cryptographic signature <Signature Algorithm and Keys>` of the block by the node that created the block (i.e. the node with public key ``node_pubkey``). To generate the signature, the node signs the serialized inner ``block`` (the same thing that was hashed to determine the ``id``) using the private key corresponding to ``node_pubkey``.

@ -22,7 +22,12 @@ One can also put different weights on the inputs to a threshold condition, along

The (single) output of a threshold condition can be used as one of the inputs of other threshold conditions. This means that one can combine threshold conditions to build complex logical expressions, e.g. (x OR y) AND (u OR v).

When one creates a condition, one can calculate its fulfillment length (e.g.
96). The more complex the condition, the larger its fulfillment length will be.
A BigchainDB federation can put an upper limit on the complexity of the
conditions, either directly by setting an allowed maximum fulfillment length,
or indirectly by setting a maximum allowed transaction size which would limit
the overall complexity across all inputs and outputs of a transaction.

If someone tries to make a condition where the output of a threshold condition feeds into the input of another “earlier” threshold condition (i.e. in a closed logical circuit), then their computer will take forever to calculate the (infinite) “condition URI”, at least in theory. In practice, their computer will run out of memory or their client software will timeout after a while.

@ -46,8 +46,12 @@ Transactions

Get the transaction with the ID ``tx_id``.

This endpoint returns a transaction if it was included in a ``VALID`` block,
if it is still waiting to be processed (``BACKLOG``) or is still in an
undecided block (``UNDECIDED``). All instances of a transaction in invalid
blocks are ignored and treated as if they don't exist. If a request is made
for a transaction and instances of that transaction are found only in
invalid blocks, then the response will be ``404 Not Found``.

:param tx_id: transaction ID
:type tx_id: hex string

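For example, a retrieval request might look like the following (the hostname
and transaction ID are placeholders, and the versioned ``/api/v1`` prefix is
assumed):

.. code:: bash

   $ curl -X GET http://<node hostname>:9984/api/v1/transactions/<tx_id>
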
@ -10,7 +10,7 @@ Note that there are a few kinds of nodes:

- A **bare-bones node** is a node deployed in the cloud, either as part of a testing cluster or as a starting point before upgrading the node to be production-ready. Our cloud deployment templates deploy a bare-bones node, as do our scripts for deploying a testing cluster on AWS.

- A **production node** is a node that is part of a consortium's BigchainDB cluster. A production node has the most components and requirements.


## Setup Instructions for Various Cases
@ -19,7 +19,7 @@ Note that there are a few kinds of nodes:

* [Set up and run a bare-bones node in the cloud](cloud-deployment-templates/index.html)
* [Set up and run a local dev/test node for developing and testing BigchainDB Server](dev-and-test/setup-run-node.html)
* [Deploy a testing cluster on AWS](clusters-feds/aws-testing-cluster.html)
* [Set up and run a cluster (including production nodes)](clusters-feds/set-up-a-cluster.html)

Instructions for setting up a client will be provided once there's a public test net.

@ -1,12 +1,12 @@

# Production Node Assumptions

If you're not sure what we mean by a BigchainDB *node*, *cluster*, *consortium*, or *production node*, then see [the section in the Introduction where we defined those terms](../introduction.html#some-basic-vocabulary).

We make some assumptions about production nodes:

1. **Each production node is set up and managed by an experienced professional system administrator (or a team of them).**
2. Each production node in a cluster is managed by a different person or team.

Because of the first assumption, we don't provide a detailed cookbook explaining how to secure a server, or other things that a sysadmin should know. (We do provide some [templates](../cloud-deployment-templates/index.html), but those are just a starting point.)

@ -19,7 +19,7 @@ There are some [notes on BigchainDB-specific firewall setup](../appendices/firew

A BigchainDB node uses its system clock to generate timestamps for blocks and votes, so that clock should be kept in sync with some standard clock(s). The standard way to do that is to run an NTP daemon (Network Time Protocol daemon) on the node. (You could also use tlsdate, which uses TLS timestamps rather than NTP, but don't: it's not very accurate and it will break with TLS 1.3, which removes the timestamp.)

NTP is a standard protocol. There are many NTP daemons implementing it. We don't recommend a particular one. On the contrary, we recommend that different nodes in a cluster run different NTP daemons, so that a problem with one daemon won't affect all nodes.

Please see the [notes on NTP daemon setup](../appendices/ntp-notes.html) in the Appendices.

@ -72,7 +72,7 @@ direct-io
join=node0_hostname:29015
join=node1_hostname:29015
join=node2_hostname:29015
# continue until there's a join= line for each node in the cluster
```

* `directory=/data` tells the RethinkDB node to store its share of the database data in `/data`.

|

* Open `$HOME/.bigchaindb` (the created config file) in your text editor.
* Change `"server": {"bind": "localhost:9984", ... }` to `"server": {"bind": "0.0.0.0:9984", ... }`. This makes it so traffic can come from any IP address to port 9984 (the HTTP Client-Server API port).
* Change `"keyring": []` to `"keyring": ["public_key_of_other_node_A", "public_key_of_other_node_B", "..."]` i.e. a list of the public keys of all the other nodes in the cluster. The keyring should _not_ include your node's public key.

For more information about the BigchainDB config file, see [Configuring a BigchainDB Node](configuration.html).

@ -185,7 +185,7 @@ where:

* `bigchaindb init` creates the database within RethinkDB, the tables, the indexes, and the genesis block.
* `numshards` should be set to the number of nodes in the initial cluster.
* `numreplicas` should be set to the database replication factor decided by the consortium. It must be 3 or more for [RethinkDB failover](https://rethinkdb.com/docs/failover/) to work.

Once the RethinkDB database is configured, every node operator can start BigchainDB using:
```text
@ -68,6 +68,22 @@ You can also use the `--dev-start-rethinkdb` command line option to automaticall
e.g. `bigchaindb --dev-start-rethinkdb start`. Note that this will also shut down rethinkdb when the bigchaindb process stops.
The option `--dev-allow-temp-keypair` will generate a keypair on the fly if no keypair is found; this is useful when you want to run a temporary instance of BigchainDB in a Docker container, for example.

### Options
The log level for the console can be set via the option `--log-level` or its
abbreviation `-l`. Example:

```bash
$ bigchaindb --log-level INFO start
```

The allowed levels are `DEBUG`, `INFO`, `WARNING`, `ERROR`, and `CRITICAL`.
For an explanation regarding these levels please consult the
[Logging Levels](https://docs.python.org/3.6/library/logging.html#levels)
section of Python's documentation.

For a more fine-grained control over the logging configuration you can use the
configuration file as documented under
[Configuration Settings](configuration.html).

## bigchaindb set-shards

|
@ -22,6 +22,15 @@ For convenience, here's a list of all the relevant environment variables (docume
|
|||||||
`BIGCHAINDB_CONFIG_PATH`<br>
|
`BIGCHAINDB_CONFIG_PATH`<br>
|
||||||
`BIGCHAINDB_BACKLOG_REASSIGN_DELAY`<br>
|
`BIGCHAINDB_BACKLOG_REASSIGN_DELAY`<br>
|
||||||
`BIGCHAINDB_CONSENSUS_PLUGIN`<br>
|
`BIGCHAINDB_CONSENSUS_PLUGIN`<br>
|
||||||
|
`BIGCHAINDB_LOG`<br>
|
||||||
|
`BIGCHAINDB_LOG_FILE`<br>
|
||||||
|
`BIGCHAINDB_LOG_LEVEL_CONSOLE`<br>
|
||||||
|
`BIGCHAINDB_LOG_LEVEL_LOGFILE`<br>
|
||||||
|
`BIGCHAINDB_LOG_DATEFMT_CONSOLE`<br>
|
||||||
|
`BIGCHAINDB_LOG_DATEFMT_LOGFILE`<br>
|
||||||
|
`BIGCHAINDB_LOG_FMT_CONSOLE`<br>
|
||||||
|
`BIGCHAINDB_LOG_FMT_LOGFILE`<br>
|
||||||
|
`BIGCHAINDB_LOG_GRANULAR_LEVELS`<br>
|
||||||
|
|
||||||
The local config file is `$HOME/.bigchaindb` by default (a file which might not even exist), but you can tell BigchainDB to use a different file by using the `-c` command-line option, e.g. `bigchaindb -c path/to/config_file.json start`
|
The local config file is `$HOME/.bigchaindb` by default (a file which might not even exist), but you can tell BigchainDB to use a different file by using the `-c` command-line option, e.g. `bigchaindb -c path/to/config_file.json start`
|
||||||
or using the `BIGCHAINDB_CONFIG_PATH` environment variable, e.g. `BIGHAINDB_CONFIG_PATH=.my_bigchaindb_config bigchaindb start`.
|
or using the `BIGCHAINDB_CONFIG_PATH` environment variable, e.g. `BIGHAINDB_CONFIG_PATH=.my_bigchaindb_config bigchaindb start`.
|
||||||
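
For example, assuming the usual mapping between `BIGCHAINDB_*` environment variables and config keys, you could raise the console log level for a single run like this:

```bash
$ BIGCHAINDB_LOG_LEVEL_CONSOLE=debug bigchaindb start
```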
@ -173,3 +182,211 @@ export BIGCHAINDB_CONSENSUS_PLUGIN=default

```js
"consensus_plugin": "default"
```

## log
The `log` key is expected to point to a mapping (set of key/value pairs)
holding the logging configuration.

**Example**:

```
{
    "log": {
        "file": "/var/log/bigchaindb.log",
        "level_console": "info",
        "level_logfile": "info",
        "datefmt_console": "%Y-%m-%d %H:%M:%S",
        "datefmt_logfile": "%Y-%m-%d %H:%M:%S",
        "fmt_console": "%(asctime)s [%(levelname)s] (%(name)s) %(message)s",
        "fmt_logfile": "%(asctime)s [%(levelname)s] (%(name)s) %(message)s",
        "granular_levels": {
            "bigchaindb.backend": "info",
            "bigchaindb.core": "info"
        }
    }
}
```

**Defaults to**: `"{}"`.

Please note that although the default is `"{}"` as per the configuration file,
internal defaults are used, such that the actual operational default is:

```
{
    "log": {
        "file": "~/bigchaindb.log",
        "level_console": "info",
        "level_logfile": "info",
        "datefmt_console": "%Y-%m-%d %H:%M:%S",
        "datefmt_logfile": "%Y-%m-%d %H:%M:%S",
        "fmt_console": "%(asctime)s [%(levelname)s] (%(name)s) %(message)s",
        "fmt_logfile": "%(asctime)s [%(levelname)s] (%(name)s) %(message)s",
        "granular_levels": {}
    }
}
```

The next subsections explain each field of the `log` configuration.


### log.file
The full path to the file where logs should be written.

**Example**:

```
{
    "log": {
        "file": "/var/log/bigchaindb/bigchaindb.log"
    }
}
```

**Defaults to**: `"~/bigchaindb.log"`.

Please note that the user running `bigchaindb` must have write access to the
location.


### log.level_console
The log level used to log to the console. Possible allowed values are the ones
defined by [Python](https://docs.python.org/3.6/library/logging.html#levels),
but case-insensitive for convenience's sake:

```
"critical", "error", "warning", "info", "debug", "notset"
```

**Example**:

```
{
    "log": {
        "level_console": "info"
    }
}
```

**Defaults to**: `"info"`.


### log.level_logfile
The log level used to log to the log file. Possible allowed values are the ones
defined by [Python](https://docs.python.org/3.6/library/logging.html#levels),
but case-insensitive for convenience's sake:

```
"critical", "error", "warning", "info", "debug", "notset"
```

**Example**:

```
{
    "log": {
        "level_logfile": "info"
    }
}
```

**Defaults to**: `"info"`.


### log.datefmt_console
The format string for the date/time portion of a message, when logged to the
console.

**Example**:

```
{
    "log": {
        "datefmt_console": "%x %X %Z"
    }
}
```

**Defaults to**: `"%Y-%m-%d %H:%M:%S"`.

For more information on how to construct the format string please consult the
table under Python's documentation of
[`time.strftime(format[, t])`](https://docs.python.org/3.6/library/time.html#time.strftime).


### log.datefmt_logfile
The format string for the date/time portion of a message, when logged to a log
file.

**Example**:

```
{
    "log": {
        "datefmt_logfile": "%c %z"
    }
}
```

**Defaults to**: `"%Y-%m-%d %H:%M:%S"`.

For more information on how to construct the format string please consult the
table under Python's documentation of
[`time.strftime(format[, t])`](https://docs.python.org/3.6/library/time.html#time.strftime).


### log.fmt_console
A string used to format the log messages when logged to the console.

**Example**:

```
{
    "log": {
        "fmt_console": "%(asctime)s [%(levelname)s] %(message)s %(process)d"
    }
}
```

**Defaults to**: `"[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s (%(processName)-10s - pid: %(process)d)"`

For more information on possible formatting options please consult Python's
documentation on
[LogRecord attributes](https://docs.python.org/3.6/library/logging.html#logrecord-attributes).


### log.fmt_logfile
A string used to format the log messages when logged to a log file.

**Example**:

```
{
    "log": {
        "fmt_logfile": "%(asctime)s [%(levelname)s] %(message)s %(process)d"
    }
}
```

**Defaults to**: `"[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s (%(processName)-10s - pid: %(process)d)"`

For more information on possible formatting options please consult Python's
documentation on
[LogRecord attributes](https://docs.python.org/3.6/library/logging.html#logrecord-attributes).


### log.granular_levels
Log levels for BigchainDB's modules. This can be useful to control the log
level of specific parts of the application. As an example, if you wanted the
logging of the `core.py` module to be more verbose, you would set the
configuration shown in the example below.

**Example**:

```
{
    "log": {
        "granular_levels": {
            "bigchaindb.core": "debug"
        }
    }
}
```

**Defaults to**: `"{}"`
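
To try any of the `log` examples above, you can save one to a JSON config file and point BigchainDB at it with the `-c` option described earlier (the path is a placeholder):

```bash
$ bigchaindb -c path/to/config_file.json start
```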
@ -1,44 +1,47 @@

###############################################################
# This config file runs bigchaindb:master as a k8s Deployment #
# and it connects to the mongodb backend running as a         #
# separate pod                                                #
###############################################################

apiVersion: v1
kind: Service
metadata:
  name: bdb-svc
  namespace: default
  labels:
    name: bdb-svc
spec:
  selector:
    app: bdb-dep
  ports:
  - port: 9984
    targetPort: 9984
    name: bdb-port
  type: ClusterIP
  clusterIP: None
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: bdb-dep
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: bdb-dep
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: bigchaindb
        image: bigchaindb/bigchaindb:master
        imagePullPolicy: IfNotPresent
        args:
        - start
        env:
        - name: BIGCHAINDB_DATABASE_HOST
          value: mdb-svc
        - name: BIGCHAINDB_DATABASE_PORT
          # TODO(Krish): remove hardcoded port
          value: "27017"
@ -58,7 +61,6 @@ spec:
|
|||||||
value: "120"
|
value: "120"
|
||||||
- name: BIGCHAINDB_KEYRING
|
- name: BIGCHAINDB_KEYRING
|
||||||
value: ""
|
value: ""
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
ports:
|
ports:
|
||||||
- containerPort: 9984
|
- containerPort: 9984
|
||||||
hostPort: 9984
|
hostPort: 9984
|
||||||
|
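The change from `type: LoadBalancer` to a headless `ClusterIP` Service (`clusterIP: None`) means the `bdb-svc` name now resolves directly to the pod IPs rather than to a cloud load balancer VIP. A quick in-cluster check (a sketch, run from any pod in the `default` namespace, assuming the standard `cluster.local` DNS domain):

```
import socket

# Headless services resolve to the backing pod IPs rather than a single VIP:
print(socket.gethostbyname_ex("bdb-svc.default.svc.cluster.local"))
```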
@ -1,89 +0,0 @@
###############################################################
# This config file runs bigchaindb:latest and connects to the #
# mongodb backend as a service                                #
###############################################################

apiVersion: v1
kind: Service
metadata:
  name: bdb-mdb-service
  namespace: default
  labels:
    name: bdb-mdb-service
spec:
  selector:
    app: bdb-mdb
  ports:
  - port: 9984
    targetPort: 9984
    name: bdb-api
  type: LoadBalancer
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: bdb-mdb
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: bdb-mdb
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: bdb-mdb
        image: bigchaindb/bigchaindb:latest
        args:
        - start
        env:
        - name: BIGCHAINDB_DATABASE_HOST
          value: mdb-service
        - name: BIGCHAINDB_DATABASE_PORT
          value: "27017"
        - name: BIGCHAINDB_DATABASE_REPLICASET
          value: bigchain-rs
        - name: BIGCHIANDB_DATABASE_BACKEND
          value: mongodb
        - name: BIGCHAINDB_DATABASE_NAME
          value: bigchain
        - name: BIGCHAINDB_SERVER_BIND
          value: 0.0.0.0:9984
        - name: BIGCHAINDB_KEYPAIR_PUBLIC
          value: EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ
        - name: BIGCHAINDB_KEYPAIR_PRIVATE
          value: 3CjmRhu718gT1Wkba3LfdqX5pfYuBdaMPLd7ENUga5dm
        - name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
          value: "120"
        - name: BIGCHAINDB_KEYRING
          value: ""
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9984
          hostPort: 9984
          name: bdb-port
          protocol: TCP
        volumeMounts:
        - name: bigchaindb-data
          mountPath: /data
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          httpGet:
            path: /
            port: 9984
          initialDelaySeconds: 15
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /
            port: 9984
          initialDelaySeconds: 15
          timeoutSeconds: 10
      restartPolicy: Always
      volumes:
      - name: bigchaindb-data
        hostPath:
          path: /disk/bigchaindb-data
@ -1,87 +0,0 @@
###############################################################
# This config file runs bigchaindb:latest and connects to the #
# rethinkdb backend as a service                              #
###############################################################

apiVersion: v1
kind: Service
metadata:
  name: bdb-rdb-service
  namespace: default
  labels:
    name: bdb-rdb-service
spec:
  selector:
    app: bdb-rdb
  ports:
  - port: 9984
    targetPort: 9984
    name: bdb-rdb-api
  type: LoadBalancer
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: bdb-rdb
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: bdb-rdb
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: bdb-rdb
        image: bigchaindb/bigchaindb:latest
        args:
        - start
        env:
        - name: BIGCHAINDB_DATABASE_HOST
          value: rdb-service
        - name: BIGCHAINDB_DATABASE_PORT
          value: "28015"
        - name: BIGCHIANDB_DATABASE_BACKEND
          value: rethinkdb
        - name: BIGCHAINDB_DATABASE_NAME
          value: bigchain
        - name: BIGCHAINDB_SERVER_BIND
          value: 0.0.0.0:9984
        - name: BIGCHAINDB_KEYPAIR_PUBLIC
          value: EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ
        - name: BIGCHAINDB_KEYPAIR_PRIVATE
          value: 3CjmRhu718gT1Wkba3LfdqX5pfYuBdaMPLd7ENUga5dm
        - name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
          value: "120"
        - name: BIGCHAINDB_KEYRING
          value: ""
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9984
          hostPort: 9984
          name: bdb-port
          protocol: TCP
        volumeMounts:
        - name: bigchaindb-data
          mountPath: /data
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          httpGet:
            path: /
            port: 9984
          initialDelaySeconds: 15
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /
            port: 9984
          initialDelaySeconds: 15
          timeoutSeconds: 10
      restartPolicy: Always
      volumes:
      - name: bigchaindb-data
        hostPath:
          path: /disk/bigchaindb-data
@ -1,114 +0,0 @@
#################################################################
# This YAML file describes a StatefulSet with two containers:   #
# bigchaindb/bigchaindb:latest and mongo:3.4.1                  #
# It also describes a Service to expose BigchainDB and MongoDB. #
#################################################################

apiVersion: v1
kind: Service
metadata:
  name: bdb-service
  namespace: default
  labels:
    name: bdb-service
spec:
  selector:
    app: bdb
  ports:
  - port: 9984
    targetPort: 9984
    name: bdb-http-api
  - port: 27017
    targetPort: 27017
    name: mongodb-port
  type: LoadBalancer
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: bdb
  namespace: default
spec:
  serviceName: bdb
  replicas: 1
  template:
    metadata:
      name: bdb
      labels:
        app: bdb
      #annotations:
      #pod.beta.kubernetes.io/init-containers: '[
      # TODO mongodb user and group; id = 999
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: bigchaindb
        image: bigchaindb/bigchaindb:master
        args:
        - start
        env:
        - name: BIGCHAINDB_KEYPAIR_PRIVATE
          value: 3CjmRhu718gT1Wkba3LfdqX5pfYuBdaMPLd7ENUga5dm
        - name: BIGCHAINDB_KEYPAIR_PUBLIC
          value: EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ
        - name: BIGCHAINDB_KEYRING
          value: ""
        - name: BIGCHAINDB_DATABASE_BACKEND
          value: mongodb
        - name: BIGCHAINDB_DATABASE_HOST
          value: localhost
        - name: BIGCHAINDB_DATABASE_PORT
          value: "27017"
        - name: BIGCHAINDB_SERVER_BIND
          value: "0.0.0.0:9984"
        - name: BIGCHAINDB_DATABASE_REPLICASET
          value: bigchain-rs
        - name: BIGCHAINDB_DATABASE_NAME
          value: bigchain
        - name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
          value: "120"
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9984
          hostPort: 9984
          name: bdb-port
          protocol: TCP
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          httpGet:
            path: /
            port: bdb-port
          initialDelaySeconds: 15
          timeoutSeconds: 10
      - name: mongodb
        image: mongo:3.4.1
        args:
        - --replSet=bigchain-rs
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 27017
          hostPort: 27017
          name: mdb-port
          protocol: TCP
        volumeMounts:
        - name: mdb-data
          mountPath: /data
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          tcpSocket:
            port: mdb-port
          successThreshold: 1
          failureThreshold: 3
          periodSeconds: 15
          timeoutSeconds: 1
      restartPolicy: Always
      volumes:
      - name: mdb-data
        persistentVolumeClaim:
          claimName: mongoclaim
@ -1,131 +0,0 @@
##############################################################
# This YAML file describes a StatefulSet with two containers: #
# bigchaindb/bigchaindb:latest and rethinkdb:2.3              #
# It also describes a Service to expose BigchainDB,           #
# the RethinkDB intracluster communications port, and         #
# the RethinkDB web interface port.                           #
##############################################################

apiVersion: v1
kind: Service
metadata:
  name: bdb-service
  namespace: default
  labels:
    name: bdb-service
spec:
  selector:
    app: bdb
  ports:
  - port: 9984
    targetPort: 9984
    name: bdb-http-api
  - port: 29015
    targetPort: 29015
    name: rdb-intracluster-comm-port
  - port: 8080
    targetPort: 8080
    name: rdb-web-interface-port
  type: LoadBalancer
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: bdb
  namespace: default
spec:
  serviceName: bdb
  replicas: 1
  template:
    metadata:
      name: bdb
      labels:
        app: bdb
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: bdb-server
        image: bigchaindb/bigchaindb:latest
        args:
        - start
        env:
        - name: BIGCHAINDB_KEYPAIR_PRIVATE
          value: 56mEvwwVxcYsFQ3Y8UTFB8DVBv38yoUhxzDW3DAdLVd2
        - name: BIGCHAINDB_KEYPAIR_PUBLIC
          value: 9DsHwiEtvk51UHmNM2eV66czFha69j3CdtNrCj1RcZWR
        - name: BIGCHAINDB_KEYRING
          value: ""
        - name: BIGCHAINDB_DATABASE_BACKEND
          value: rethinkdb
        - name: BIGCHAINDB_DATABASE_HOST
          value: localhost
        - name: BIGCHAINDB_DATABASE_PORT
          value: "28015"
        - name: BIGCHAINDB_SERVER_BIND
          value: "0.0.0.0:9984"
        - name: BIGCHAINDB_DATABASE_NAME
          value: bigchain
        - name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
          value: "120"
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9984
          hostPort: 9984
          name: bdb-port
          protocol: TCP
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          httpGet:
            path: /
            port: 9984
          initialDelaySeconds: 15
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /
            port: 9984
          initialDelaySeconds: 15
          timeoutSeconds: 10
      - name: rethinkdb
        image: rethinkdb:2.3
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
          hostPort: 8080
          name: rdb-web-interface-port
          protocol: TCP
        - containerPort: 29015
          hostPort: 29015
          name: rdb-intra-port
          protocol: TCP
        - containerPort: 28015
          hostPort: 28015
          name: rdb-client-port
          protocol: TCP
        volumeMounts:
        - name: rdb-data
          mountPath: /data
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 15
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 15
          timeoutSeconds: 10
      restartPolicy: Always
      volumes:
      - name: rdb-data
        persistentVolumeClaim:
          claimName: mongoclaim
@ -1,89 +0,0 @@
#####################################################
# This config file uses bdb v0.9.1 with bundled rdb #
#####################################################

apiVersion: v1
kind: Service
metadata:
  name: bdb-service
  namespace: default
  labels:
    name: bdb-service
spec:
  selector:
    app: bdb
  ports:
  - port: 9984
    targetPort: 9984
    name: bdb-http-api
  - port: 8080
    targetPort: 8080
    name: bdb-rethinkdb-api
  type: LoadBalancer
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: bdb
  namespace: default
spec:
  serviceName: bdb
  replicas: 1
  template:
    metadata:
      name: bdb
      labels:
        app: bdb
      annotations:
        pod.beta.kubernetes.io/init-containers: '[
          {
            "name": "bdb091-configure",
            "image": "bigchaindb/bigchaindb:0.9.1",
            "command": ["bigchaindb", "-y", "configure", "rethinkdb"],
            "volumeMounts": [
              {
                "name": "bigchaindb-data",
                "mountPath": "/data"
              }
            ]
          }
        ]'
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: bdb091-server
        image: bigchaindb/bigchaindb:0.9.1
        args:
        - -c
        - /data/.bigchaindb
        - start
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9984
          hostPort: 9984
          name: bdb-port
          protocol: TCP
        volumeMounts:
        - name: bigchaindb-data
          mountPath: /data
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          httpGet:
            path: /
            port: 9984
          initialDelaySeconds: 15
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /
            port: 9984
          initialDelaySeconds: 15
          timeoutSeconds: 10
      restartPolicy: Always
      volumes:
      - name: bigchaindb-data
        hostPath:
          path: /disk/bigchaindb-data
@ -1,75 +0,0 @@
####################################################
# This config file runs rethinkdb:2.3 as a service #
####################################################

apiVersion: v1
kind: Service
metadata:
  name: rdb-service
  namespace: default
  labels:
    name: rdb-service
spec:
  selector:
    app: rdb
  ports:
  - port: 8080
    targetPort: 8080
    name: rethinkdb-http-port
  - port: 28015
    targetPort: 28015
    name: rethinkdb-driver-port
  type: LoadBalancer
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: rdb
  namespace: default
spec:
  serviceName: rdb
  replicas: 1
  template:
    metadata:
      name: rdb
      labels:
        app: rdb
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: rethinkdb
        image: rethinkdb:2.3
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
          hostPort: 8080
          name: rdb-http-port
          protocol: TCP
        - containerPort: 28015
          hostPort: 28015
          name: rdb-client-port
          protocol: TCP
        volumeMounts:
        - name: rdb-data
          mountPath: /data
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 15
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 15
          timeoutSeconds: 10
      restartPolicy: Always
      volumes:
      - name: rdb-data
        hostPath:
          path: /disk/rdb-data
12  k8s/mongodb/container/Dockerfile  Normal file
@ -0,0 +1,12 @@
FROM mongo:3.4.2
LABEL maintainer "dev@bigchaindb.com"
WORKDIR /
RUN apt-get update \
    && apt-get -y upgrade \
    && apt-get autoremove \
    && apt-get clean
COPY mongod.conf.template /etc/mongod.conf.template
COPY mongod_entrypoint/mongod_entrypoint /
VOLUME /data/db /data/configdb
EXPOSE 27017
ENTRYPOINT ["/mongod_entrypoint"]
51  k8s/mongodb/container/Makefile  Normal file
@ -0,0 +1,51 @@
# Targets:
# all: Cleans, formats src files, builds the code, builds the docker image
# clean: Removes the binary and docker image
# format: Formats the src files
# build: Builds the code
# docker: Builds the code and docker image
# push: Push the docker image to Docker hub

GOCMD=go
GOVET=$(GOCMD) tool vet
GOINSTALL=$(GOCMD) install
GOFMT=gofmt -s -w

DOCKER_IMAGE_NAME?=bigchaindb/mongodb
DOCKER_IMAGE_TAG?=latest

PWD=$(shell pwd)
BINARY_PATH=$(PWD)/mongod_entrypoint/
BINARY_NAME=mongod_entrypoint
MAIN_FILE = $(BINARY_PATH)/mongod_entrypoint.go
SRC_FILES = $(BINARY_PATH)/mongod_entrypoint.go

.PHONY: all

all: clean build docker

clean:
	@echo "removing any pre-built binary";
	-@rm $(BINARY_PATH)/$(BINARY_NAME);
	@echo "remove any pre-built docker image";
	-@docker rmi $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG);

format:
	$(GOFMT) $(SRC_FILES)

build: format
	$(shell cd $(BINARY_PATH) && \
	export GOPATH="$(BINARY_PATH)" && \
	export GOBIN="$(BINARY_PATH)" && \
	CGO_ENABLED=0 GOOS=linux $(GOINSTALL) -ldflags "-s" -a -installsuffix cgo $(MAIN_FILE))

docker: build
	docker build \
		-t $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG) .;

vet:
	$(GOVET) .

push:
	docker push \
		$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG);
88  k8s/mongodb/container/README.md  Normal file
@ -0,0 +1,88 @@
## Custom MongoDB container for BigchainDB Backend

### Need

* MongoDB needs the hostname provided in the rs.initiate() command to be
  resolvable through the hosts file locally.
* In the future, with the introduction of TLS for inter-cluster MongoDB
  communications, we will need a way to specify detailed configuration.
* We also need a way to overwrite certain parameters to suit our use case.


### Step 1: Build the Latest Container

Run `make` from the root of this project.


### Step 2: Run the Container

```
docker run \
  --name=mdb1 \
  --publish=<mongo port number for external connections>:<corresponding host port> \
  --rm=true \
  bigchaindb/mongodb \
  --replica-set-name <replica set name> \
  --fqdn <fully qualified domain name of this instance> \
  --port <mongod port number for external connections>
```

#### Step 3: Initialize the Replica Set

Log in to one of the MongoDB containers, say mdb1:

`docker exec -it mdb1 bash`

Start the `mongo` shell:

`mongo --port 27017`

Run the rs.initiate() command:
```
rs.initiate({
  _id : "<replica-set-name>", members: [
    {
      _id : 0,
      host : "<fqdn of this instance>:<port number>"
    } ]
})
```

For example:

```
rs.initiate({ _id : "test-repl-set", members: [ { _id : 0, host :
"mdb-instance-0.westeurope.cloudapp.azure.com:27017" } ] })
```

You should also see changes in the mongo shell prompt from `>` to
`test-repl-set:OTHER>` to `test-repl-set:SECONDARY>` to finally
`test-repl-set:PRIMARY>`.
If this instance is not the primary, you can use the `rs.status()` command to
find out which instance is the primary.


#### Step 4: Add members to the Replica Set

We can only add members to a replica set from the PRIMARY instance.
Log in to the PRIMARY and open a `mongo` shell.

Run the rs.add() command with the FQDN and port number of the other
containers/instances:
```
rs.add("<fqdn>:<port>")
```

For example:

Add mdb2 to replica set from mdb1:
```
rs.add("bdb-cluster-1.northeurope.cloudapp.azure.com:27017")
```

Add mdb3 to replica set from mdb1:
```
rs.add("bdb-cluster-2.northeurope.cloudapp.azure.com:27017")
```
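As a cross-check from a client machine, the replica set state can also be inspected with `pymongo` (a sketch; the hostname and replica set name are the ones used in the examples above):

```
from pymongo import MongoClient

# Connect using the replica set name so the driver discovers all members:
client = MongoClient("mdb-instance-0.westeurope.cloudapp.azure.com", 27017,
                     replicaset="test-repl-set")
status = client.admin.command("replSetGetStatus")
for member in status["members"]:
    print(member["name"], member["stateStr"])
```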
89  k8s/mongodb/container/mongod.conf.template  Normal file
@ -0,0 +1,89 @@
# mongod.conf

# for documentation of all options, see:
# http://docs.mongodb.org/manual/reference/configuration-options/

# where to write logging data.
systemLog:
  verbosity: 0
  #TODO traceAllExceptions: true
  timeStampFormat: iso8601-utc
  component:
    accessControl:
      verbosity: 0
    command:
      verbosity: 0
    control:
      verbosity: 0
    ftdc:
      verbosity: 0
    geo:
      verbosity: 0
    index:
      verbosity: 0
    network:
      verbosity: 0
    query:
      verbosity: 0
    replication:
      verbosity: 0
    sharding:
      verbosity: 0
    storage:
      verbosity: 0
      journal:
        verbosity: 0
    write:
      verbosity: 0

processManagement:
  fork: false
  pidFilePath: /tmp/mongod.pid

net:
  port: PORT
  bindIp: 0.0.0.0
  maxIncomingConnections: 8192
  wireObjectCheck: false
  unixDomainSocket:
    enabled: false
    pathPrefix: /tmp
    filePermissions: 0700
  http:
    enabled: false
  compression:
    compressors: snappy
  #ssl: TODO

#security: TODO

#setParameter:
  #notablescan: 1 TODO
  #logUserIds: 1 TODO

storage:
  dbPath: /data/db
  indexBuildRetry: true
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      journalCompressor: snappy
    collectionConfig:
      blockCompressor: snappy
    indexConfig:
      prefixCompression: true # TODO false may affect performance?

operationProfiling:
  mode: slowOp
  slowOpThresholdMs: 100

replication:
  replSetName: REPLICA_SET_NAME
  enableMajorityReadConcern: true

#sharding:
154  k8s/mongodb/container/mongod_entrypoint/mongod_entrypoint.go  Normal file
@ -0,0 +1,154 @@
package main

import (
	"bytes"
	"errors"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"os"
	"regexp"
	"syscall"
)

const (
	mongoConfFilePath         string = "/etc/mongod.conf"
	mongoConfTemplateFilePath string = "/etc/mongod.conf.template"
	hostsFilePath             string = "/etc/hosts"
)

var (
	// Use the same entrypoint as the mongo:3.4.2 image; just supply it with
	// the mongod conf file with custom params
	mongoStartCmd []string = []string{"/entrypoint.sh", "mongod", "--config",
		mongoConfFilePath}
)

// context struct stores the user input and the constraints for the specified
// input. It also stores the keyword that needs to be replaced in the template
// files.
type context struct {
	cliInput        string
	templateKeyword string
	regex           string
}

// sanity function takes the pre-defined constraints and the user inputs as
// arguments and validates user input based on regex matching
func sanity(input map[string]*context, fqdn, ip string) error {
	var format *regexp.Regexp
	for _, ctx := range input {
		format = regexp.MustCompile(ctx.regex)
		if format.MatchString(ctx.cliInput) == false {
			return errors.New(fmt.Sprintf(
				"Invalid value: '%s' for '%s'. Can be '%s'",
				ctx.cliInput,
				ctx.templateKeyword,
				ctx.regex))
		}
	}

	format = regexp.MustCompile(`[a-z0-9-.]+`)
	if format.MatchString(fqdn) == false {
		return errors.New(fmt.Sprintf(
			"Invalid value: '%s' for FQDN. Can be '%s'",
			fqdn,
			format))
	}

	if net.ParseIP(ip) == nil {
		return errors.New(fmt.Sprintf(
			"Invalid value: '%s' for IPv4. Can be a.b.c.d",
			ip))
	}

	return nil
}

// createFile function takes the pre-defined keywords, user inputs, the
// template file path and the new file path location as parameters, and
// creates a new file at file path with all the keywords replaced by inputs.
func createFile(input map[string]*context,
	template string, conf string) error {
	// read the template
	contents, err := ioutil.ReadFile(template)
	if err != nil {
		return err
	}
	// replace
	for _, ctx := range input {
		contents = bytes.Replace(contents, []byte(ctx.templateKeyword),
			[]byte(ctx.cliInput), -1)
	}
	// write
	err = ioutil.WriteFile(conf, contents, 0644)
	if err != nil {
		return err
	}
	return nil
}

// updateHostsFile takes the FQDN supplied as input to the container and adds
// an entry to /etc/hosts
func updateHostsFile(ip, fqdn string) error {
	fileHandle, err := os.OpenFile(hostsFilePath, os.O_APPEND|os.O_WRONLY,
		os.ModeAppend)
	if err != nil {
		return err
	}
	defer fileHandle.Close()
	// append
	_, err = fileHandle.WriteString(fmt.Sprintf("\n%s %s\n", ip, fqdn))
	if err != nil {
		return err
	}
	return nil
}

func main() {
	var fqdn, ip string
	input := make(map[string]*context)

	input["replica-set-name"] = &context{}
	input["replica-set-name"].regex = `[a-z]+`
	input["replica-set-name"].templateKeyword = "REPLICA_SET_NAME"
	flag.StringVar(&input["replica-set-name"].cliInput,
		"replica-set-name",
		"",
		"replica set name")

	input["port"] = &context{}
	input["port"].regex = `[0-9]{4,5}`
	input["port"].templateKeyword = "PORT"
	flag.StringVar(&input["port"].cliInput,
		"port",
		"",
		"mongodb port number")

	flag.StringVar(&fqdn, "fqdn", "", "FQDN of the MongoDB instance")
	flag.StringVar(&ip, "ip", "", "IPv4 address of the container")

	flag.Parse()
	err := sanity(input, fqdn, ip)
	if err != nil {
		log.Fatal(err)
	}

	err = createFile(input, mongoConfTemplateFilePath, mongoConfFilePath)
	if err != nil {
		log.Fatal(err)
	}

	err = updateHostsFile(ip, fqdn)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("Starting Mongod....")
	err = syscall.Exec(mongoStartCmd[0], mongoStartCmd[0:], os.Environ())
	if err != nil {
		panic(err)
	}
}
13  k8s/mongodb/mongo-cm.yaml  Normal file
@ -0,0 +1,13 @@
#####################################################################
# This YAML file describes a ConfigMap with the FQDN of the mongo   #
# instance to be started. MongoDB instance uses the value from this #
# ConfigMap to bootstrap itself during startup.                     #
#####################################################################

apiVersion: v1
kind: ConfigMap
metadata:
  name: mdb-fqdn
  namespace: default
data:
  fqdn: mdb-instance-0.westeurope.cloudapp.azure.com
@ -1,18 +0,0 @@
###########################################################
# This YAML file describes a k8s pvc for mongodb configDB #
###########################################################

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mongo-configdb-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: slow-configdb
spec:
  accessModes:
    - ReadWriteOnce
  # FIXME(Uncomment when ACS supports this!)
  # persistentVolumeReclaimPolicy: Retain
  resources:
    requests:
      storage: 20Gi
@ -1,12 +0,0 @@
####################################################################
# This YAML file describes a StorageClass for the mongodb configDB #
####################################################################

kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: slow-configdb
provisioner: kubernetes.io/azure-disk
parameters:
  skuName: Standard_LRS
  location: westeurope
@ -1,18 +0,0 @@
#########################################################
# This YAML file describes a k8s pvc for mongodb dbPath #
#########################################################

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mongo-db-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: slow-db
spec:
  accessModes:
    - ReadWriteOnce
  # FIXME(Uncomment when ACS supports this!)
  # persistentVolumeReclaimPolicy: Retain
  resources:
    requests:
      storage: 20Gi
@ -1,12 +0,0 @@
##################################################################
# This YAML file describes a StorageClass for the mongodb dbPath #
##################################################################

kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: slow-db
provisioner: kubernetes.io/azure-disk
parameters:
  skuName: Standard_LRS
  location: westeurope
35  k8s/mongodb/mongo-pvc.yaml  Normal file
@ -0,0 +1,35 @@
############################################################
# This YAML section describes a k8s pvc for mongodb dbPath #
############################################################
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mongo-db-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: slow-db
spec:
  accessModes:
    - ReadWriteOnce
  # FIXME(Uncomment when ACS supports this!)
  # persistentVolumeReclaimPolicy: Retain
  resources:
    requests:
      storage: 20Gi
---
##############################################################
# This YAML section describes a k8s pvc for mongodb configDB #
##############################################################
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mongo-configdb-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: slow-configdb
spec:
  accessModes:
    - ReadWriteOnce
  # FIXME(Uncomment when ACS supports this!)
  # persistentVolumeReclaimPolicy: Retain
  resources:
    requests:
      storage: 1Gi
23  k8s/mongodb/mongo-sc.yaml  Normal file
@ -0,0 +1,23 @@
#####################################################################
# This YAML section describes a StorageClass for the mongodb dbPath #
#####################################################################
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: slow-db
provisioner: kubernetes.io/azure-disk
parameters:
  skuName: Standard_LRS
  location: westeurope
---
#######################################################################
# This YAML section describes a StorageClass for the mongodb configDB #
#######################################################################
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: slow-configdb
provisioner: kubernetes.io/azure-disk
parameters:
  skuName: Standard_LRS
  location: westeurope
@ -1,50 +1,67 @@
 #########################################################################
 # This YAML file describes a StatefulSet with a service for running and #
-# exposing a MongoDB service.                                           #
+# exposing a MongoDB instance.                                          #
 # It depends on the configdb and db k8s pvc.                            #
 #########################################################################

 apiVersion: v1
 kind: Service
 metadata:
-  name: mdb
+  name: mdb-svc
   namespace: default
   labels:
-    name: mdb
+    name: mdb-svc
 spec:
   selector:
-    app: mdb
+    app: mdb-ss
   ports:
   - port: 27017
     targetPort: 27017
     name: mdb-port
-  type: LoadBalancer
+  type: ClusterIP
+  clusterIP: None
 ---
 apiVersion: apps/v1beta1
 kind: StatefulSet
 metadata:
-  name: mdb
+  name: mdb-ss
   namespace: default
 spec:
-  serviceName: mdb
+  serviceName: mdb-svc
   replicas: 1
   template:
     metadata:
-      name: mdb
+      name: mdb-ss
       labels:
-        app: mdb
+        app: mdb-ss
     spec:
       terminationGracePeriodSeconds: 10
       containers:
       - name: mongodb
-        image: mongo:3.4.1
+        # TODO(FIXME): Do not use latest in production as it is harder to track
+        # versions during updates and rollbacks. Also, once fixed, change the
+        # imagePullPolicy to IfNotPresent for faster bootup
+        image: bigchaindb/mongodb:latest
+        imagePullPolicy: Always
+        env:
+        - name: MONGODB_FQDN
+          valueFrom:
+            configMapKeyRef:
+              name: mdb-fqdn
+              key: fqdn
+        - name: MONGODB_POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
         args:
-        - --replSet=bigchain-rs
+        - --replica-set-name=bigchain-rs
+        - --fqdn=$(MONGODB_FQDN)
+        - --port=27017
+        - --ip=$(MONGODB_POD_IP)
         securityContext:
           capabilities:
             add:
             - FOWNER
-        imagePullPolicy: IfNotPresent
         ports:
         - containerPort: 27017
           hostPort: 27017
11  k8s/nginx/container/Dockerfile  Normal file
@ -0,0 +1,11 @@
FROM nginx:1.11.10
LABEL maintainer "dev@bigchaindb.com"
WORKDIR /
RUN apt-get update \
    && apt-get -y upgrade \
    && apt-get autoremove \
    && apt-get clean
COPY nginx.conf.template /etc/nginx/nginx.conf
COPY nginx_entrypoint.bash /
EXPOSE 80 443 27017
ENTRYPOINT ["/nginx_entrypoint.bash"]
70  k8s/nginx/container/README.md  Normal file
@ -0,0 +1,70 @@
## Custom Nginx container for a Node

### Need

* Since BigchainDB and MongoDB both need to expose ports to the outside
  world (inter and intra cluster), we need a basic DDoS mitigation
  strategy to ensure that we can provide proper uptime and security for these
  core services.
* We can have a proxy like nginx/haproxy in every node that listens to
  global connections and applies cluster level entry policy.

### Implementation
* For MongoDB cluster communication, we will use nginx with an environment
  variable specifying a ":" separated list of IPs in the whitelist. This list
  contains the IPs of existing instances in the MongoDB replica set so as to
  allow connections from the whitelist and avoid a DDoS.

* For BigchainDB connections, nginx needs to have rules to throttle
  connections that are using resources over a threshold.


### Step 1: Build the Latest Container

Run `docker build -t bigchaindb/nginx .` from this folder.

Optional: Upload container to Docker Hub:
`docker push bigchaindb/nginx:<tag>`

### Step 2: Run the Container

Note that the whitelist IPs must be specified with the subnet in the CIDR
format, e.g. `1.2.3.4/16`

```
docker run \
  --env "MONGODB_FRONTEND_PORT=<port where nginx listens for MongoDB connections>" \
  --env "MONGODB_BACKEND_HOST=<ip/hostname of instance where MongoDB is running>" \
  --env "MONGODB_BACKEND_PORT=<port where MongoDB is listening for connections>" \
  --env "BIGCHAINDB_FRONTEND_PORT=<port where nginx listens for BigchainDB connections>" \
  --env "BIGCHAINDB_BACKEND_HOST=<ip/hostname of instance where BigchainDB is running>" \
  --env "BIGCHAINDB_BACKEND_PORT=<port where BigchainDB is listening for connections>" \
  --env "MONGODB_WHITELIST=<a ':' separated list of IPs that can connect to MongoDB>" \
  --name=ngx \
  --publish=<port where nginx listens for MongoDB connections as specified above>:<corresponding host port> \
  --publish=<port where nginx listens for BigchainDB connections as specified above>:<corresponding host port> \
  --rm=true \
  bigchaindb/nginx
```

For example:
```
docker run \
  --env "MONGODB_FRONTEND_PORT=17017" \
  --env "MONGODB_BACKEND_HOST=localhost" \
  --env "MONGODB_BACKEND_PORT=27017" \
  --env "BIGCHAINDB_FRONTEND_PORT=80" \
  --env "BIGCHAINDB_BACKEND_HOST=localhost" \
  --env "BIGCHAINDB_BACKEND_PORT=9984" \
  --env "MONGODB_WHITELIST=192.168.0.0/16:10.0.2.0/24" \
  --name=ngx \
  --publish=80:80 \
  --publish=17017:17017 \
  --rm=true \
  bigchaindb/nginx
```
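Once the example container above is running, the HTTP throttling described under "Implementation" can be observed from the client side with a short script (a sketch; it assumes the `requests` package is installed and nginx is listening on localhost:80 as in the example):

```
import requests

# With the 10 req/s zone and burst=20 configured in nginx.conf.template,
# a rapid burst of requests should see some 429 (Too Many Requests) replies.
codes = [requests.get("http://localhost:80/").status_code for _ in range(100)]
print(sorted(set(codes)))  # expect something like [200, 429]
```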
108  k8s/nginx/container/nginx.conf.template  Normal file
@ -0,0 +1,108 @@
worker_processes 2;
daemon off;
user nobody nogroup;
pid /tmp/nginx.pid;
error_log /etc/nginx/nginx.error.log;

events {
    worker_connections 256;
    accept_mutex on;
    use epoll;
}

http {
    server_names_hash_bucket_size 128;
    resolver 8.8.8.8 8.8.4.4;
    access_log /etc/nginx/nginx.access.log combined buffer=16k flush=5s;

    # allow 10 req/sec from the same IP address, and store the counters in a
    # `zone` or shared memory location tagged as 'one'.
    limit_req_zone $binary_remote_addr zone=one:10m rate=10r/s;

    # enable logging when requests are being throttled
    limit_req_log_level notice;

    # the http status code to return to the client when throttling;
    # 429 is for TooManyRequests,
    # ref. RFC 6585
    limit_req_status 429;

    upstream bdb_backend {
        server BIGCHAINDB_BACKEND_HOST:BIGCHAINDB_BACKEND_PORT max_fails=5 fail_timeout=30;
    }

    server {
        listen BIGCHAINDB_FRONTEND_PORT;
        # server_name "FRONTEND_DNS_NAME";
        underscores_in_headers on;

        # max client request body size: avg transaction size
        client_max_body_size 15k;

        # keepalive connection settings
        keepalive_timeout 20s;

        # `slowloris` attack mitigation settings
        client_body_timeout 10s;
        client_header_timeout 10s;

        location / {
            proxy_ignore_client_abort on;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header Host $http_host;
            proxy_redirect off;

            # TODO proxy_set_header X-Forwarded-Proto https;

            # limit requests from the same client, allow `burst` to 20 r/s,
            # `nodelay` or drop connection immediately in case it exceeds this
            # threshold.
            limit_req zone=one burst=20 nodelay;

            proxy_pass http://bdb_backend;
        }

        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root /etc/nginx/50x.html;
        }
    }
}

# NGINX stream block for TCP and UDP proxies
stream {
    log_format mdb_log '[$time_iso8601] $realip_remote_addr $remote_addr '
                       '$proxy_protocol_addr $proxy_protocol_port '
                       '$protocol $status $session_time $bytes_sent '
                       '$bytes_received "$upstream_addr" "$upstream_bytes_sent" '
                       '"$upstream_bytes_received" "$upstream_connect_time" ';

    access_log /etc/nginx/nginx.stream.access.log mdb_log buffer=16k flush=5s;

    # define a zone 'two' of size 10 megabytes to store the counters
    # that hold number of TCP connections from a specific IP address
    limit_conn_zone $binary_remote_addr zone=two:10m;

    # enable logging when connections are being throttled
    limit_conn_log_level notice;

    upstream mdb_backend {
        server MONGODB_BACKEND_HOST:MONGODB_BACKEND_PORT max_fails=5 fail_timeout=30 max_conns=1024;
    }

    server {
        listen MONGODB_FRONTEND_PORT so_keepalive=10m:1m:5;
        preread_timeout 30s;
        tcp_nodelay on;

        # whitelist
        MONGODB_WHITELIST
        # deny access to everyone else
        deny all;

        # allow 512 connections from the same IP address
        limit_conn two 512;

        proxy_pass mdb_backend;
    }
}
44  k8s/nginx/container/nginx_entrypoint.bash  Executable file
@ -0,0 +1,44 @@
#!/bin/bash
set -euo pipefail

mongo_frontend_port=`printenv MONGODB_FRONTEND_PORT`
mongo_backend_host=`printenv MONGODB_BACKEND_HOST`
mongo_backend_port=`printenv MONGODB_BACKEND_PORT`
bdb_frontend_port=`printenv BIGCHAINDB_FRONTEND_PORT`
bdb_backend_host=`printenv BIGCHAINDB_BACKEND_HOST`
bdb_backend_port=`printenv BIGCHAINDB_BACKEND_PORT`
mongo_whitelist=`printenv MONGODB_WHITELIST`

# sanity checks
if [[ -z "${mongo_frontend_port}" || \
    -z "${mongo_backend_host}" || \
    -z "${mongo_backend_port}" || \
    -z "${bdb_frontend_port}" || \
    -z "${bdb_backend_host}" || \
    -z "${bdb_backend_port}" ]] ; then
  echo "Invalid environment settings detected. Exiting!"
  exit 1
fi

NGINX_CONF_FILE=/etc/nginx/nginx.conf

# configure the nginx.conf file with env variables
sed -i "s|MONGODB_FRONTEND_PORT|${mongo_frontend_port}|g" $NGINX_CONF_FILE
sed -i "s|MONGODB_BACKEND_HOST|${mongo_backend_host}|g" $NGINX_CONF_FILE
sed -i "s|MONGODB_BACKEND_PORT|${mongo_backend_port}|g" $NGINX_CONF_FILE
sed -i "s|BIGCHAINDB_FRONTEND_PORT|${bdb_frontend_port}|g" $NGINX_CONF_FILE
sed -i "s|BIGCHAINDB_BACKEND_HOST|${bdb_backend_host}|g" $NGINX_CONF_FILE
sed -i "s|BIGCHAINDB_BACKEND_PORT|${bdb_backend_port}|g" $NGINX_CONF_FILE

# populate the whitelist in the conf file as per MONGODB_WHITELIST env var
hosts=$(echo ${mongo_whitelist} | tr ":" "\n")
for host in $hosts; do
  sed -i "s|MONGODB_WHITELIST|allow ${host};\n        MONGODB_WHITELIST|g" $NGINX_CONF_FILE
done

# remove the MONGODB_WHITELIST marker string from template
sed -i "s|MONGODB_WHITELIST||g" $NGINX_CONF_FILE

# start nginx
echo "INFO: starting nginx..."
exec nginx -c /etc/nginx/nginx.conf
13  k8s/nginx/nginx-cm.yaml  Normal file
@ -0,0 +1,13 @@
##########################################################################
# This YAML file describes a ConfigMap with a valid list of IP addresses #
# that can connect to the MongoDB instance.                              #
##########################################################################

apiVersion: v1
kind: ConfigMap
metadata:
  name: mongodb-whitelist
  namespace: default
data:
  # ':' separated list of allowed hosts
  allowed-hosts: 192.168.0.0/16:10.0.2.0/24
82  k8s/nginx/nginx-dep.yaml  Normal file
@ -0,0 +1,82 @@
###############################################################
# This config file runs nginx as a k8s deployment and exposes #
# it using an external load balancer.                         #
# This deployment is used as a front end to both BigchainDB   #
# and MongoDB.                                                #
###############################################################

apiVersion: v1
kind: Service
metadata:
  name: ngx-svc
  namespace: default
  labels:
    name: ngx-svc
  annotations:
    # NOTE: the following annotation is a beta feature and
    # only available in GCE/GKE and Azure as of now
    service.beta.kubernetes.io/external-traffic: OnlyLocal
spec:
  selector:
    app: ngx-dep
  ports:
  - port: 27017
    targetPort: 27017
    name: ngx-public-mdb-port
    protocol: TCP
  - port: 80
    targetPort: 80
    name: ngx-public-bdb-port
    protocol: TCP
  type: LoadBalancer
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: ngx-dep
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: ngx-dep
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: bigchaindb/nginx:latest
        imagePullPolicy: Always
        env:
        - name: MONGODB_FRONTEND_PORT
          value: "27017"
        - name: MONGODB_BACKEND_HOST
          value: mdb-svc
        - name: MONGODB_BACKEND_PORT
          value: "27017"
        - name: BIGCHAINDB_FRONTEND_PORT
          value: "80"
        - name: BIGCHAINDB_BACKEND_HOST
          value: bdb-svc
        - name: BIGCHAINDB_BACKEND_PORT
          value: "9984"
        - name: MONGODB_WHITELIST
          valueFrom:
            configMapKeyRef:
              name: mongodb-whitelist
              key: allowed-hosts
        ports:
        - containerPort: 27017
          hostPort: 27017
          name: public-mdb-port
          protocol: TCP
        - containerPort: 80
          hostPort: 80
          name: public-bdb-port
          protocol: TCP
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        #livenessProbe: TODO(Krish)
        #readinessProbe: TODO(Krish)
      restartPolicy: Always
5  setup.py
@ -31,6 +31,7 @@ dev_require = [
     'ipdb',
     'ipython',
     'watchdog',
+    'logging_tree',
 ]

 docs_require = [
@ -66,7 +67,7 @@ install_requires = [
     'pymongo~=3.4',
     'pysha3~=1.0.2',
     'cryptoconditions>=0.5.0',
-    'python-rapidjson==0.0.8',
+    'python-rapidjson==0.0.11',
     'logstats>=0.2.1',
     'flask>=0.10.1',
     'flask-restful~=0.3.0',
@ -116,7 +117,7 @@ setup(

     entry_points={
         'console_scripts': [
-            'bigchaindb=bigchaindb.commands.bigchain:main'
+            'bigchaindb=bigchaindb.commands.bigchaindb:main'
         ],
     },
     install_requires=install_requires,
@ -1,3 +0,0 @@
# Speed Tests

This folder contains tests related to the code performance of a single node.
@@ -1,97 +0,0 @@
-import json
-import time
-
-import rapidjson
-from line_profiler import LineProfiler
-
-import bigchaindb
-
-# BIG TODO: Adjust for new transaction model
-
-
-def speedtest_validate_transaction():
-    # create a transaction
-    b = bigchaindb.Bigchain()
-    tx = b.create_transaction(b.me, b.me, None, 'CREATE')
-    tx_signed = b.sign_transaction(tx, b.me_private)
-
-    # setup the profiler
-    profiler = LineProfiler()
-    profiler.enable_by_count()
-    profiler.add_function(bigchaindb.Bigchain.validate_transaction)
-
-    # validate_transaction 1000 times
-    for i in range(1000):
-        b.validate_transaction(tx_signed)
-
-    profiler.print_stats()
-
-
-def speedtest_serialize_block_json():
-    # create a block
-    b = bigchaindb.Bigchain()
-    tx = b.create_transaction(b.me, b.me, None, 'CREATE')
-    tx_signed = b.sign_transaction(tx, b.me_private)
-    block = b.create_block([tx_signed] * 1000)
-
-    time_start = time.time()
-    for _ in range(1000):
-        _ = json.dumps(block, skipkeys=False, ensure_ascii=False, sort_keys=True)
-    time_elapsed = time.time() - time_start
-
-    print('speedtest_serialize_block_json: {} s'.format(time_elapsed))
-
-
-def speedtest_serialize_block_rapidjson():
-    # create a block
-    b = bigchaindb.Bigchain()
-    tx = b.create_transaction(b.me, b.me, None, 'CREATE')
-    tx_signed = b.sign_transaction(tx, b.me_private)
-    block = b.create_block([tx_signed] * 1000)
-
-    time_start = time.time()
-    for _ in range(1000):
-        _ = rapidjson.dumps(block, skipkeys=False, ensure_ascii=False, sort_keys=True)
-    time_elapsed = time.time() - time_start
-
-    print('speedtest_serialize_block_rapidjson: {} s'.format(time_elapsed))
-
-
-def speedtest_deserialize_block_json():
-    # create a block
-    b = bigchaindb.Bigchain()
-    tx = b.create_transaction(b.me, b.me, None, 'CREATE')
-    tx_signed = b.sign_transaction(tx, b.me_private)
-    block = b.create_block([tx_signed] * 1000)
-    block_serialized = json.dumps(block, skipkeys=False, ensure_ascii=False, sort_keys=True)
-
-    time_start = time.time()
-    for _ in range(1000):
-        _ = json.loads(block_serialized)
-    time_elapsed = time.time() - time_start
-
-    print('speedtest_deserialize_block_json: {} s'.format(time_elapsed))
-
-
-def speedtest_deserialize_block_rapidjson():
-    # create a block
-    b = bigchaindb.Bigchain()
-    tx = b.create_transaction(b.me, b.me, None, 'CREATE')
-    tx_signed = b.sign_transaction(tx, b.me_private)
-    block = b.create_block([tx_signed] * 1000)
-    block_serialized = rapidjson.dumps(block, skipkeys=False, ensure_ascii=False, sort_keys=True)
-
-    time_start = time.time()
-    for _ in range(1000):
-        _ = rapidjson.loads(block_serialized)
-    time_elapsed = time.time() - time_start
-
-    print('speedtest_deserialize_block_rapidjson: {} s'.format(time_elapsed))
-
-
-if __name__ == '__main__':
-    speedtest_validate_transaction()
-    speedtest_serialize_block_json()
-    speedtest_serialize_block_rapidjson()
-    speedtest_deserialize_block_json()
-    speedtest_deserialize_block_rapidjson()
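The deleted module depended on the old create_transaction API, so it could no longer run anyway. Its json-versus-rapidjson comparison is easy to reproduce standalone; a minimal sketch, assuming python-rapidjson is installed and substituting a plain dict for a block:

    import json
    import time

    import rapidjson

    doc = {'id': 'abc', 'payload': list(range(1000))}

    for name, dumps in (('json', json.dumps), ('rapidjson', rapidjson.dumps)):
        start = time.time()
        for _ in range(1000):
            dumps(doc, skipkeys=False, ensure_ascii=False, sort_keys=True)
        print('{}: {:.3f} s'.format(name, time.time() - start))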
@@ -99,6 +99,18 @@ def test_connection_run_errors(mock_client, mock_init_repl_set):
     assert query.run.call_count == 1


+@mock.patch('pymongo.database.Database.authenticate')
+def test_connection_with_credentials(mock_authenticate):
+    import bigchaindb
+    from bigchaindb.backend.mongodb.connection import MongoDBConnection
+    conn = MongoDBConnection(host=bigchaindb.config['database']['host'],
+                             port=bigchaindb.config['database']['port'],
+                             login='theplague',
+                             password='secret')
+    conn.connect()
+    assert mock_authenticate.call_count == 2
+
+
 def test_check_replica_set_not_enabled(mongodb_connection):
     from bigchaindb.backend.mongodb.connection import _check_replica_set
     from bigchaindb.common.exceptions import ConfigurationError
@@ -168,7 +180,7 @@ def test_initialize_replica_set(mock_cmd_line_opts):
     ]

     # check that it returns
-    assert initialize_replica_set('host', 1337, 1000) is None
+    assert initialize_replica_set('host', 1337, 1000, 'dbname', False, None, None) is None

     # test it raises OperationError if anything wrong
     with mock.patch.object(Database, 'command') as mock_command:
@@ -178,4 +190,4 @@ def test_initialize_replica_set(mock_cmd_line_opts):
     ]

     with pytest.raises(pymongo.errors.OperationFailure):
-        initialize_replica_set('host', 1337, 1000)
+        initialize_replica_set('host', 1337, 1000, 'dbname', False, None, None)
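The initialize_replica_set call now takes seven positional arguments instead of three. Judging only from the values the updated test passes, ('host', 1337, 1000, 'dbname', False, None, None), a plausible reading is host, port, connection timeout, database name, an SSL flag, and optional login/password. A sketch under that assumption (parameter names inferred from the call site, not copied from the source):

    def initialize_replica_set(host, port, connection_timeout, dbname,
                               ssl, login, password):
        # inferred shape: connect to host:port (optionally over SSL),
        # authenticate with login/password when given, then issue the
        # replica-set initialization command
        ...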
@@ -159,6 +159,43 @@ def test_get_spent(signed_create_tx, signed_transfer_tx):
     assert spents[0] == signed_transfer_tx.to_dict()


+def test_get_spent_for_tx_with_multiple_inputs(carol):
+    from bigchaindb.backend import connect, query
+    from bigchaindb.models import Block, Transaction
+    conn = connect()
+    tx_0 = Transaction.create(
+        [carol.public_key],
+        [([carol.public_key], 1),
+         ([carol.public_key], 1),
+         ([carol.public_key], 2)],
+    ).sign([carol.private_key])
+    block = Block(transactions=[tx_0])
+    conn.db.bigchain.insert_one(block.to_dict())
+    spents = list(query.get_spent(conn, tx_0.id, 0))
+    assert not spents
+
+    tx_1 = Transaction.transfer(
+        tx_0.to_inputs()[2:3],
+        [([carol.public_key], 1),
+         ([carol.public_key], 1)],
+        asset_id=tx_0.id,
+    ).sign([carol.private_key])
+    block = Block(transactions=[tx_1])
+    conn.db.bigchain.insert_one(block.to_dict())
+    spents = list(query.get_spent(conn, tx_0.id, 0))
+    assert not spents
+
+    tx_2 = Transaction.transfer(
+        tx_0.to_inputs()[0:1] + tx_1.to_inputs()[1:2],
+        [([carol.public_key], 2)],
+        asset_id=tx_0.id,
+    ).sign([carol.private_key])
+    block = Block(transactions=[tx_2])
+    conn.db.bigchain.insert_one(block.to_dict())
+    spents = list(query.get_spent(conn, tx_0.id, 1))
+    assert not spents
+
+
 def test_get_owned_ids(signed_create_tx, user_pk):
     from bigchaindb.backend import connect, query
     from bigchaindb.models import Block
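The point of the new test: with divisible assets, spends are tracked per (transaction id, output index) pair, so consuming outputs 2 and 0 of tx_0 must leave output 1 unspent. A backend-independent sketch of that bookkeeping, assuming the 0.9-style input layout in which each input's fulfills field names the txid and output index it consumes:

    def is_spent(txid, output_index, transactions):
        # scan every input of every known transaction for a reference
        # to this exact (txid, output index) pair
        for tx in transactions:
            for inp in tx.get('inputs', []):
                fulfills = inp.get('fulfills') or {}
                if fulfills.get('txid') == txid and fulfills.get('output') == output_index:
                    return True
        return False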
@@ -175,6 +212,7 @@ def test_get_owned_ids(signed_create_tx, user_pk):


 def test_get_votes_by_block_id(signed_create_tx, structurally_valid_vote):
+    from bigchaindb.common.crypto import generate_key_pair
     from bigchaindb.backend import connect, query
     from bigchaindb.models import Block
     conn = connect()
@@ -182,10 +220,14 @@ def test_get_votes_by_block_id(signed_create_tx, structurally_valid_vote):
     # create and insert a block
     block = Block(transactions=[signed_create_tx])
     conn.db.bigchain.insert_one(block.to_dict())

     # create and insert some votes
     structurally_valid_vote['vote']['voting_for_block'] = block.id
     conn.db.votes.insert_one(structurally_valid_vote)
+    # create a second vote under a different key
+    _, pk = generate_key_pair()
     structurally_valid_vote['vote']['voting_for_block'] = block.id
+    structurally_valid_vote['node_pubkey'] = pk
     structurally_valid_vote.pop('_id')
     conn.db.votes.insert_one(structurally_valid_vote)

@@ -288,6 +330,19 @@ def test_write_vote(structurally_valid_vote):
     assert vote_db == structurally_valid_vote


+def test_duplicate_vote_raises_duplicate_key(structurally_valid_vote):
+    from bigchaindb.backend import connect, query
+    from bigchaindb.backend.exceptions import DuplicateKeyError
+    conn = connect()
+
+    # write a vote
+    query.write_vote(conn, structurally_valid_vote)
+
+    # write the same vote a second time
+    with pytest.raises(DuplicateKeyError):
+        query.write_vote(conn, structurally_valid_vote)
+
+
 def test_get_genesis_block(genesis_block):
     from bigchaindb.backend import connect, query
     conn = connect()
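Why the second write fails: the votes collection is expected to carry a unique index over the block/voter pair, so MongoDB itself rejects the repeat. A pymongo-level sketch of that mechanism; the index name and key layout are assumptions for illustration, not copied from the BigchainDB schema code:

    from pymongo import ASCENDING, MongoClient
    from pymongo.errors import DuplicateKeyError

    votes = MongoClient().bigchain_test.votes  # assumes a local mongod, as the test suite does
    votes.create_index([('vote.voting_for_block', ASCENDING),
                        ('node_pubkey', ASCENDING)],
                       name='block_and_voter', unique=True)

    vote = {'vote': {'voting_for_block': 'block-id'}, 'node_pubkey': 'pubkey'}
    votes.insert_one(dict(vote))      # first write succeeds
    try:
        votes.insert_one(dict(vote))  # same block/voter pair: rejected
    except DuplicateKeyError:
        print('second insert rejected, as the test expects')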
@@ -5,8 +5,8 @@ import pytest

 @pytest.fixture
 def mock_run_configure(monkeypatch):
-    from bigchaindb.commands import bigchain
-    monkeypatch.setattr(bigchain, 'run_configure', lambda *args, **kwargs: None)
+    from bigchaindb.commands import bigchaindb
+    monkeypatch.setattr(bigchaindb, 'run_configure', lambda *args, **kwargs: None)


 @pytest.fixture
@@ -17,8 +17,8 @@ def mock_write_config(monkeypatch):

 @pytest.fixture
 def mock_db_init_with_existing_db(monkeypatch):
-    from bigchaindb.commands import bigchain
-    monkeypatch.setattr(bigchain, '_run_init', lambda: None)
+    from bigchaindb.commands import bigchaindb
+    monkeypatch.setattr(bigchaindb, '_run_init', lambda: None)


 @pytest.fixture
@@ -50,3 +50,12 @@ def run_start_args(request):
         start_rethinkdb=param.get('start_rethinkdb', False),
         allow_temp_keypair=param.get('allow_temp_keypair', False),
     )
+
+
+@pytest.fixture
+def mocked_setup_logging(mocker):
+    return mocker.patch(
+        'bigchaindb.commands.utils.setup_logging',
+        autospec=True,
+        spec_set=True,
+    )
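A sketch of how a test consumes the new fixture (the pattern matches the assertions added throughout the command tests below): autospec=True makes the mock enforce the real setup_logging signature, and spec_set=True rejects misspelled attributes.

    def test_run_start_configures_logging(mocked_setup_logging):
        # ... invoke run_start(...) here ...
        mocked_setup_logging.assert_called_once_with(user_log_config={})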
@@ -9,12 +9,14 @@ from argparse import Namespace
 def test_bigchain_run_start_with_rethinkdb(mock_start_rethinkdb,
                                            mock_run_configure,
                                            mock_processes_start,
-                                           mock_db_init_with_existing_db):
-    from bigchaindb.commands.bigchain import run_start
+                                           mock_db_init_with_existing_db,
+                                           mocked_setup_logging):
+    from bigchaindb.commands.bigchaindb import run_start
     args = Namespace(start_rethinkdb=True, allow_temp_keypair=False, config=None, yes=True)
     run_start(args)

     mock_start_rethinkdb.assert_called_with()
+    mocked_setup_logging.assert_called_once_with(user_log_config={})


 @patch('subprocess.Popen')
@@ -37,7 +39,7 @@ def test_start_rethinkdb_exits_when_cannot_start(mock_popen):

 @patch('rethinkdb.ast.Table.reconfigure')
 def test_set_shards(mock_reconfigure, monkeypatch, b):
-    from bigchaindb.commands.bigchain import run_set_shards
+    from bigchaindb.commands.bigchaindb import run_set_shards

     # this will mock the call to retrieve the database config
     # we will set it to return one replica
@@ -60,7 +62,7 @@ def test_set_shards(mock_reconfigure, monkeypatch, b):


 def test_set_shards_raises_exception(monkeypatch, b):
-    from bigchaindb.commands.bigchain import run_set_shards
+    from bigchaindb.commands.bigchaindb import run_set_shards

     # test that we are correctly catching the exception
     def mock_raise(*args, **kwargs):
@@ -80,7 +82,7 @@ def test_set_shards_raises_exception(monkeypatch, b):

 @patch('rethinkdb.ast.Table.reconfigure')
 def test_set_replicas(mock_reconfigure, monkeypatch, b):
-    from bigchaindb.commands.bigchain import run_set_replicas
+    from bigchaindb.commands.bigchaindb import run_set_replicas

     # this will mock the call to retrieve the database config
     # we will set it to return two shards
@@ -103,7 +105,7 @@ def test_set_replicas(mock_reconfigure, monkeypatch, b):


 def test_set_replicas_raises_exception(monkeypatch, b):
-    from bigchaindb.commands.bigchain import run_set_replicas
+    from bigchaindb.commands.bigchaindb import run_set_replicas

     # test that we are correctly catching the exception
     def mock_raise(*args, **kwargs):
@@ -8,7 +8,7 @@ import pytest

 def test_make_sure_we_dont_remove_any_command():
     # thanks to: http://stackoverflow.com/a/18161115/597097
-    from bigchaindb.commands.bigchain import create_parser
+    from bigchaindb.commands.bigchaindb import create_parser

     parser = create_parser()

@@ -27,23 +27,28 @@ def test_make_sure_we_dont_remove_any_command():

 @patch('bigchaindb.commands.utils.start')
 def test_main_entrypoint(mock_start):
-    from bigchaindb.commands.bigchain import main
+    from bigchaindb.commands.bigchaindb import main
     main()

     assert mock_start.called


-def test_bigchain_run_start(mock_run_configure, mock_processes_start, mock_db_init_with_existing_db):
-    from bigchaindb.commands.bigchain import run_start
+def test_bigchain_run_start(mock_run_configure,
+                            mock_processes_start,
+                            mock_db_init_with_existing_db,
+                            mocked_setup_logging):
+    from bigchaindb.commands.bigchaindb import run_start
     args = Namespace(start_rethinkdb=False, allow_temp_keypair=False, config=None, yes=True)
     run_start(args)
+    mocked_setup_logging.assert_called_once_with(user_log_config={})


 @pytest.mark.skipif(reason="BigchainDB doesn't support the automatic creation of a config file anymore")
-def test_bigchain_run_start_assume_yes_create_default_config(monkeypatch, mock_processes_start,
-        mock_generate_key_pair, mock_db_init_with_existing_db):
+def test_bigchain_run_start_assume_yes_create_default_config(
+        monkeypatch, mock_processes_start, mock_generate_key_pair,
+        mock_db_init_with_existing_db, mocked_setup_logging):
     import bigchaindb
-    from bigchaindb.commands.bigchain import run_start
+    from bigchaindb.commands.bigchaindb import run_start
     from bigchaindb import config_utils

     value = {}
@@ -61,6 +66,7 @@ def test_bigchain_run_start_assume_yes_create_default_config(
     args = Namespace(config=None, yes=True)
     run_start(args)

+    mocked_setup_logging.assert_called_once_with()
     assert value['return'] == expected_config


@@ -70,7 +76,7 @@ def test_bigchain_run_start_assume_yes_create_default_config(
 @pytest.mark.usefixtures('ignore_local_config_file')
 def test_bigchain_show_config(capsys):
     from bigchaindb import config
-    from bigchaindb.commands.bigchain import run_show_config
+    from bigchaindb.commands.bigchaindb import run_show_config

     args = Namespace(config=None)
     _, _ = capsys.readouterr()
@@ -83,7 +89,7 @@ def test_bigchain_show_config(capsys):

 def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch):
     from bigchaindb import config
-    from bigchaindb.commands.bigchain import run_export_my_pubkey
+    from bigchaindb.commands.bigchaindb import run_export_my_pubkey

     args = Namespace(config='dummy')
     # so in run_export_my_pubkey(args) below,
@@ -102,7 +108,7 @@ def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch):

 def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch):
     from bigchaindb import config
-    from bigchaindb.commands.bigchain import run_export_my_pubkey
+    from bigchaindb.commands.bigchaindb import run_export_my_pubkey

     args = Namespace(config='dummy')
     monkeypatch.setitem(config['keypair'], 'public', None)
@@ -119,14 +125,14 @@ def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch):


 def test_bigchain_run_init_when_db_exists(mock_db_init_with_existing_db):
-    from bigchaindb.commands.bigchain import run_init
+    from bigchaindb.commands.bigchaindb import run_init
     args = Namespace(config=None)
     run_init(args)


 @patch('bigchaindb.backend.schema.drop_database')
 def test_drop_db_when_assumed_yes(mock_db_drop):
-    from bigchaindb.commands.bigchain import run_drop
+    from bigchaindb.commands.bigchaindb import run_drop
     args = Namespace(config=None, yes=True)

     run_drop(args)
@@ -135,26 +141,40 @@ def test_drop_db_when_assumed_yes(mock_db_drop):

 @patch('bigchaindb.backend.schema.drop_database')
 def test_drop_db_when_interactive_yes(mock_db_drop, monkeypatch):
-    from bigchaindb.commands.bigchain import run_drop
+    from bigchaindb.commands.bigchaindb import run_drop
     args = Namespace(config=None, yes=False)
-    monkeypatch.setattr('bigchaindb.commands.bigchain.input_on_stderr', lambda x: 'y')
+    monkeypatch.setattr('bigchaindb.commands.bigchaindb.input_on_stderr', lambda x: 'y')

     run_drop(args)
     assert mock_db_drop.called


+@patch('bigchaindb.backend.schema.drop_database')
+def test_drop_db_when_db_does_not_exist(mock_db_drop, capsys):
+    from bigchaindb import config
+    from bigchaindb.commands.bigchaindb import run_drop
+    from bigchaindb.common.exceptions import DatabaseDoesNotExist
+    args = Namespace(config=None, yes=True)
+    mock_db_drop.side_effect = DatabaseDoesNotExist
+
+    run_drop(args)
+    output_message = capsys.readouterr()[1]
+    assert output_message == "Cannot drop '{name}'. The database does not exist.\n".format(
+        name=config['database']['name'])
+
+
 @patch('bigchaindb.backend.schema.drop_database')
 def test_drop_db_does_not_drop_when_interactive_no(mock_db_drop, monkeypatch):
-    from bigchaindb.commands.bigchain import run_drop
+    from bigchaindb.commands.bigchaindb import run_drop
     args = Namespace(config=None, yes=False)
-    monkeypatch.setattr('bigchaindb.commands.bigchain.input_on_stderr', lambda x: 'n')
+    monkeypatch.setattr('bigchaindb.commands.bigchaindb.input_on_stderr', lambda x: 'n')

     run_drop(args)
     assert not mock_db_drop.called


 def test_run_configure_when_config_exists_and_skipping(monkeypatch):
-    from bigchaindb.commands.bigchain import run_configure
+    from bigchaindb.commands.bigchaindb import run_configure
     monkeypatch.setattr('os.path.exists', lambda path: True)
     args = Namespace(config='foo', yes=True)
     return_value = run_configure(args, skip_if_exists=True)
@@ -168,7 +188,7 @@ def test_run_configure_when_config_does_not_exist(monkeypatch,
                                                   mock_write_config,
                                                   mock_generate_key_pair,
                                                   mock_bigchaindb_backup_config):
-    from bigchaindb.commands.bigchain import run_configure
+    from bigchaindb.commands.bigchaindb import run_configure
     monkeypatch.setattr('os.path.exists', lambda path: False)
     monkeypatch.setattr('builtins.input', lambda: '\n')
     args = Namespace(config='foo', backend='rethinkdb', yes=True)
@@ -185,7 +205,7 @@ def test_run_configure_when_config_does_exist(monkeypatch,
     def mock_write_config(newconfig, filename=None):
         value['return'] = newconfig

-    from bigchaindb.commands.bigchain import run_configure
+    from bigchaindb.commands.bigchaindb import run_configure
     monkeypatch.setattr('os.path.exists', lambda path: True)
     monkeypatch.setattr('builtins.input', lambda: '\n')
     monkeypatch.setattr('bigchaindb.config_utils.write_config', mock_write_config)
@@ -201,7 +221,7 @@ def test_run_configure_when_config_does_exist(monkeypatch,
 ))
 def test_run_configure_with_backend(backend, monkeypatch, mock_write_config):
     import bigchaindb
-    from bigchaindb.commands.bigchain import run_configure
+    from bigchaindb.commands.bigchaindb import run_configure

     value = {}

@@ -228,17 +248,18 @@ def test_run_configure_with_backend(backend, monkeypatch, mock_write_config):
 @patch('bigchaindb.common.crypto.generate_key_pair',
        return_value=('private_key', 'public_key'))
 @pytest.mark.usefixtures('ignore_local_config_file')
-def test_allow_temp_keypair_generates_one_on_the_fly(mock_gen_keypair,
-                                                     mock_processes_start,
-                                                     mock_db_init_with_existing_db):
+def test_allow_temp_keypair_generates_one_on_the_fly(
+        mock_gen_keypair, mock_processes_start,
+        mock_db_init_with_existing_db, mocked_setup_logging):
     import bigchaindb
-    from bigchaindb.commands.bigchain import run_start
+    from bigchaindb.commands.bigchaindb import run_start

     bigchaindb.config['keypair'] = {'private': None, 'public': None}

     args = Namespace(allow_temp_keypair=True, start_rethinkdb=False, config=None, yes=True)
     run_start(args)

+    mocked_setup_logging.assert_called_once_with(user_log_config={})
     assert bigchaindb.config['keypair']['private'] == 'private_key'
     assert bigchaindb.config['keypair']['public'] == 'public_key'

@@ -248,9 +269,10 @@ def test_allow_temp_keypair_generates_one_on_the_fly(
 @pytest.mark.usefixtures('ignore_local_config_file')
 def test_allow_temp_keypair_doesnt_override_if_keypair_found(mock_gen_keypair,
                                                              mock_processes_start,
-                                                             mock_db_init_with_existing_db):
+                                                             mock_db_init_with_existing_db,
+                                                             mocked_setup_logging):
     import bigchaindb
-    from bigchaindb.commands.bigchain import run_start
+    from bigchaindb.commands.bigchaindb import run_start

     # Preconditions for the test
     original_private_key = bigchaindb.config['keypair']['private']
@@ -262,12 +284,16 @@ def test_allow_temp_keypair_doesnt_override_if_keypair_found(mock_gen_keypair,
     args = Namespace(allow_temp_keypair=True, start_rethinkdb=False, config=None, yes=True)
     run_start(args)

+    mocked_setup_logging.assert_called_once_with(user_log_config={})
     assert bigchaindb.config['keypair']['private'] == original_private_key
     assert bigchaindb.config['keypair']['public'] == original_public_key


-def test_run_start_when_db_already_exists(mocker, monkeypatch, run_start_args):
-    from bigchaindb.commands.bigchain import run_start
+def test_run_start_when_db_already_exists(mocker,
+                                          monkeypatch,
+                                          run_start_args,
+                                          mocked_setup_logging):
+    from bigchaindb.commands.bigchaindb import run_start
     from bigchaindb.common.exceptions import DatabaseAlreadyExists
     mocked_start = mocker.patch('bigchaindb.processes.start')

@@ -275,13 +301,17 @@ def test_run_start_when_db_already_exists(mocker,
         raise DatabaseAlreadyExists()

     monkeypatch.setattr(
-        'bigchaindb.commands.bigchain._run_init', mock_run_init)
+        'bigchaindb.commands.bigchaindb._run_init', mock_run_init)
     run_start(run_start_args)
+    mocked_setup_logging.assert_called_once_with(user_log_config={})
     assert mocked_start.called


-def test_run_start_when_keypair_not_found(mocker, monkeypatch, run_start_args):
-    from bigchaindb.commands.bigchain import run_start
+def test_run_start_when_keypair_not_found(mocker,
+                                          monkeypatch,
+                                          run_start_args,
+                                          mocked_setup_logging):
+    from bigchaindb.commands.bigchaindb import run_start
     from bigchaindb.commands.messages import CANNOT_START_KEYPAIR_NOT_FOUND
     from bigchaindb.common.exceptions import KeypairNotFoundException
     mocked_start = mocker.patch('bigchaindb.processes.start')
@@ -290,11 +320,12 @@ def test_run_start_when_keypair_not_found(mocker,
         raise KeypairNotFoundException()

     monkeypatch.setattr(
-        'bigchaindb.commands.bigchain._run_init', mock_run_init)
+        'bigchaindb.commands.bigchaindb._run_init', mock_run_init)

     with pytest.raises(SystemExit) as exc:
         run_start(run_start_args)

+    mocked_setup_logging.assert_called_once_with(user_log_config={})
     assert len(exc.value.args) == 1
     assert exc.value.args[0] == CANNOT_START_KEYPAIR_NOT_FOUND
     assert not mocked_start.called
@@ -302,8 +333,9 @@ def test_run_start_when_keypair_not_found(mocker,

 def test_run_start_when_start_rethinkdb_fails(mocker,
                                               monkeypatch,
-                                              run_start_args):
-    from bigchaindb.commands.bigchain import run_start
+                                              run_start_args,
+                                              mocked_setup_logging):
+    from bigchaindb.commands.bigchaindb import run_start
     from bigchaindb.commands.messages import RETHINKDB_STARTUP_ERROR
     from bigchaindb.common.exceptions import StartupError
     run_start_args.start_rethinkdb = True
@@ -319,6 +351,7 @@ def test_run_start_when_start_rethinkdb_fails(mocker,
     with pytest.raises(SystemExit) as exc:
         run_start(run_start_args)

+    mocked_setup_logging.assert_called_once_with(user_log_config={})
     assert len(exc.value.args) == 1
     assert exc.value.args[0] == RETHINKDB_STARTUP_ERROR.format(err_msg)
     assert not mocked_start.called
@@ -329,7 +362,7 @@ def test_run_start_when_start_rethinkdb_fails(mocker,
 @patch('bigchaindb.commands.utils.start')
 def test_calling_main(start_mock, base_parser_mock, parse_args_mock,
                       monkeypatch):
-    from bigchaindb.commands.bigchain import main
+    from bigchaindb.commands.bigchaindb import main

     argparser_mock = Mock()
     parser = Mock()
@@ -385,9 +418,9 @@ def test_calling_main(start_mock, base_parser_mock, parse_args_mock,


 @pytest.mark.usefixtures('ignore_local_config_file')
-@patch('bigchaindb.commands.bigchain.add_replicas')
+@patch('bigchaindb.commands.bigchaindb.add_replicas')
 def test_run_add_replicas(mock_add_replicas):
-    from bigchaindb.commands.bigchain import run_add_replicas
+    from bigchaindb.commands.bigchaindb import run_add_replicas
     from bigchaindb.backend.exceptions import OperationError

     args = Namespace(config=None, replicas=['localhost:27017'])
@@ -416,9 +449,9 @@ def test_run_add_replicas(mock_add_replicas):


 @pytest.mark.usefixtures('ignore_local_config_file')
-@patch('bigchaindb.commands.bigchain.remove_replicas')
+@patch('bigchaindb.commands.bigchaindb.remove_replicas')
 def test_run_remove_replicas(mock_remove_replicas):
-    from bigchaindb.commands.bigchain import run_remove_replicas
+    from bigchaindb.commands.bigchaindb import run_remove_replicas
     from bigchaindb.backend.exceptions import OperationError

     args = Namespace(config=None, replicas=['localhost:27017'])
@@ -1,12 +1,87 @@
 import argparse
+from argparse import ArgumentTypeError, Namespace
+import logging

 import pytest

 from unittest.mock import patch


+@pytest.fixture
+def reset_bigchaindb_config(monkeypatch):
+    import bigchaindb
+    monkeypatch.setattr('bigchaindb.config', bigchaindb._config)
+
+
+def test_input_on_stderr():
+    from bigchaindb.commands.utils import input_on_stderr, _convert
+
+    with patch('builtins.input', return_value='I love cats'):
+        assert input_on_stderr() == 'I love cats'
+
+    # input_on_stderr uses `_convert` internally, from now on we will
+    # just use that function
+
+    assert _convert('hack the planet') == 'hack the planet'
+    assert _convert('42') == '42'
+    assert _convert('42', default=10) == 42
+    assert _convert('', default=10) == 10
+    assert _convert('42', convert=int) == 42
+    assert _convert('True', convert=bool) is True
+    assert _convert('False', convert=bool) is False
+    assert _convert('t', convert=bool) is True
+    assert _convert('3.14', default=1.0) == 3.14
+    assert _convert('TrUe', default=False) is True
+
+    with pytest.raises(ValueError):
+        assert _convert('TRVE', default=False)
+
+    with pytest.raises(ValueError):
+        assert _convert('ಠ_ಠ', convert=int)
+
+
+@pytest.mark.usefixtures('ignore_local_config_file', 'reset_bigchaindb_config')
+def test_configure_bigchaindb_configures_bigchaindb():
+    from bigchaindb.commands.utils import configure_bigchaindb
+    from bigchaindb.config_utils import is_configured
+    assert not is_configured()
+
+    @configure_bigchaindb
+    def test_configure(args):
+        assert is_configured()
+
+    args = Namespace(config=None)
+    test_configure(args)
+
+
+@pytest.mark.usefixtures('ignore_local_config_file',
+                         'reset_bigchaindb_config',
+                         'reset_logging_config')
+@pytest.mark.parametrize('log_level', (
+    logging.DEBUG,
+    logging.INFO,
+    logging.WARNING,
+    logging.ERROR,
+    logging.CRITICAL,
+))
+def test_configure_bigchaindb_logging(log_level):
+    from bigchaindb.commands.utils import configure_bigchaindb
+    from bigchaindb import config
+    assert not config['log']
+
+    @configure_bigchaindb
+    def test_configure_logger(args):
+        pass
+
+    args = Namespace(config=None, log_level=log_level)
+    test_configure_logger(args)
+    from bigchaindb import config
+    assert config['log'] == {'level_console': log_level}
+
+
 def test_start_raises_if_command_not_implemented():
     from bigchaindb.commands import utils
-    from bigchaindb.commands.bigchain import create_parser
+    from bigchaindb.commands.bigchaindb import create_parser

     parser = create_parser()

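For orientation, here is a _convert consistent with every assertion above; this is a sketch, the real implementation lives in bigchaindb/commands/utils.py and may differ in detail:

    def _convert(value, default=None, convert=None):
        def convert_bool(value):
            if value.lower() in ('true', 't', 'yes', 'y'):
                return True
            if value.lower() in ('false', 'f', 'no', 'n'):
                return False
            raise ValueError('{} cannot be converted to bool'.format(value))

        # empty input means "use the default"
        if value == '':
            return default

        # with no explicit converter, infer one from the default's type
        if convert is None and default is not None:
            convert = type(default)

        if convert is bool:
            convert = convert_bool

        return convert(value) if convert is not None else value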
@@ -18,7 +93,7 @@ def test_start_raises_if_command_not_implemented():

 def test_start_raises_if_no_arguments_given():
     from bigchaindb.commands import utils
-    from bigchaindb.commands.bigchain import create_parser
+    from bigchaindb.commands.bigchaindb import create_parser

     parser = create_parser()

@@ -51,13 +126,13 @@ def test_mongodb_host_type():
     from bigchaindb.commands.utils import mongodb_host

     # bad port provided
-    with pytest.raises(argparse.ArgumentTypeError):
+    with pytest.raises(ArgumentTypeError):
         mongodb_host('localhost:11111111111')

     # no port information provided
-    with pytest.raises(argparse.ArgumentTypeError):
+    with pytest.raises(ArgumentTypeError):
         mongodb_host('localhost')

     # bad host provided
-    with pytest.raises(argparse.ArgumentTypeError):
+    with pytest.raises(ArgumentTypeError):
         mongodb_host(':27017')
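And a validator consistent with the three failing cases the test checks; again a sketch only, the real mongodb_host in bigchaindb/commands/utils.py may differ:

    from argparse import ArgumentTypeError

    def mongodb_host(host):
        # expect "hostname:port" with a sane port number
        hostname, _, port = host.rpartition(':')
        if not hostname or not port:
            raise ArgumentTypeError('expected host:port, got {!r}'.format(host))
        try:
            if not 1 <= int(port) <= 65535:
                raise ValueError
        except ValueError:
            raise ArgumentTypeError('invalid port in {!r}'.format(host))
        return host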
@@ -573,6 +573,42 @@ def test_validate_tx_threshold_create_signature(user_user2_threshold_input,
     validate_transaction_model(tx)


+def test_validate_tx_threshold_duplicated_pk(user_pub, user_priv,
+                                             asset_definition):
+    from copy import deepcopy
+    from cryptoconditions import Ed25519Fulfillment, ThresholdSha256Fulfillment
+    from bigchaindb.common.transaction import Input, Output, Transaction
+    from bigchaindb.common.crypto import PrivateKey
+
+    threshold = ThresholdSha256Fulfillment(threshold=2)
+    threshold.add_subfulfillment(Ed25519Fulfillment(public_key=user_pub))
+    threshold.add_subfulfillment(Ed25519Fulfillment(public_key=user_pub))
+
+    threshold_input = Input(threshold, [user_pub, user_pub])
+    threshold_output = Output(threshold, [user_pub, user_pub])
+
+    tx = Transaction(Transaction.CREATE, asset_definition,
+                     [threshold_input], [threshold_output])
+    expected = deepcopy(threshold_input)
+    expected.fulfillment.subconditions[0]['body'].sign(str(tx).encode(),
+                                                       PrivateKey(user_priv))
+    expected.fulfillment.subconditions[1]['body'].sign(str(tx).encode(),
+                                                       PrivateKey(user_priv))
+
+    tx.sign([user_priv, user_priv])
+
+    subconditions = tx.inputs[0].fulfillment.subconditions
+    expected_subconditions = expected.fulfillment.subconditions
+    assert subconditions[0]['body'].to_dict()['signature'] == \
+        expected_subconditions[0]['body'].to_dict()['signature']
+    assert subconditions[1]['body'].to_dict()['signature'] == \
+        expected_subconditions[1]['body'].to_dict()['signature']
+
+    assert tx.inputs[0].to_dict()['fulfillment'] == \
+        expected.fulfillment.serialize_uri()
+    assert tx.inputs_valid() is True
+
+
 def test_multiple_input_validation_of_transfer_tx(user_input, user_output,
                                                   user_priv, user2_pub,
                                                   user2_priv, user3_pub,
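The regression this test guards: in a 2-of-2 threshold condition whose subconditions carry the same public key, signing must fulfill both subfulfillments rather than stopping at the first match. A standalone sketch using only calls that appear in the test itself (the message stands in for the serialized transaction):

    from bigchaindb.common.crypto import generate_key_pair, PrivateKey
    from cryptoconditions import Ed25519Fulfillment, ThresholdSha256Fulfillment

    priv, pub = generate_key_pair()
    threshold = ThresholdSha256Fulfillment(threshold=2)
    threshold.add_subfulfillment(Ed25519Fulfillment(public_key=pub))
    threshold.add_subfulfillment(Ed25519Fulfillment(public_key=pub))

    message = b'serialized transaction body'
    for cond in threshold.subconditions:
        cond['body'].sign(message, PrivateKey(priv))  # sign *each* duplicate

    uri = threshold.serialize_uri()  # fulfillment URI embedding both signatures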
@@ -12,6 +12,8 @@ import random

 import pytest

+from logging import getLogger
+from logging.config import dictConfig
 from bigchaindb.common import crypto

 TEST_DB_NAME = 'bigchain_test'
@@ -203,6 +205,15 @@ def ignore_local_config_file(monkeypatch):
         mock_file_config)


+@pytest.fixture
+def reset_logging_config():
+    # root_logger_level = getLogger().level
+    root_logger_level = 'DEBUG'
+    dictConfig({'version': 1, 'root': {'level': 'NOTSET'}})
+    yield
+    getLogger().setLevel(root_logger_level)
+
+
 @pytest.fixture
 def user_sk():
     return USER_PRIVATE_KEY
@@ -223,6 +234,54 @@ def user2_pk():
     return USER2_PK


+@pytest.fixture
+def alice():
+    from bigchaindb.common.crypto import generate_key_pair
+    return generate_key_pair()
+
+
+@pytest.fixture
+def alice_privkey(alice):
+    return alice.private_key
+
+
+@pytest.fixture
+def alice_pubkey(alice):
+    return alice.public_key
+
+
+@pytest.fixture
+def bob():
+    from bigchaindb.common.crypto import generate_key_pair
+    return generate_key_pair()
+
+
+@pytest.fixture
+def bob_privkey(bob):
+    return bob.private_key
+
+
+@pytest.fixture
+def bob_pubkey(bob):
+    return bob.public_key
+
+
+@pytest.fixture
+def carol():
+    from bigchaindb.common.crypto import generate_key_pair
+    return generate_key_pair()
+
+
+@pytest.fixture
+def carol_privkey(carol):
+    return carol.private_key
+
+
+@pytest.fixture
+def carol_pubkey(carol):
+    return carol.public_key
+
+
 @pytest.fixture
 def b():
     from bigchaindb import Bigchain
@@ -383,3 +442,15 @@ def db_name(db_config):
 def db_conn():
     from bigchaindb.backend import connect
     return connect()
+
+
+@pytest.fixture
+def mocked_setup_pub_logger(mocker):
+    return mocker.patch(
+        'bigchaindb.log.setup.setup_pub_logger', autospec=True, spec_set=True)
+
+
+@pytest.fixture
+def mocked_setup_sub_logger(mocker):
+    return mocker.patch(
+        'bigchaindb.log.setup.setup_sub_logger', autospec=True, spec_set=True)
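The alice/bob/carol fixtures hand tests fresh key pairs exposing .private_key and .public_key. (Note: the bob_pubkey fixture originally requested the carol fixture while returning bob.public_key; it must depend on bob, as written above, to avoid a name error.) A sketch of a test consuming the fixtures, reusing only API calls that appear in the diffs above:

    def test_create_and_transfer(alice, bob):
        from bigchaindb.models import Transaction

        tx = Transaction.create([alice.public_key],
                                [([bob.public_key], 1)]).sign([alice.private_key])
        assert tx.inputs_valid()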
Some files were not shown because too many files have changed in this diff.