From 3f02329cdf8419ca0c20318b98a6277c9a49aaba Mon Sep 17 00:00:00 2001 From: troymc Date: Mon, 25 Apr 2016 16:51:38 +0200 Subject: [PATCH 01/24] Easier way to make Fabric init BigchainDB on only 1 node --- deploy-cluster-aws/fabfile.py | 30 ++++-------------------------- deploy-cluster-aws/startup.sh | 10 +++++----- 2 files changed, 9 insertions(+), 31 deletions(-) diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index 82198642..c6c0019c 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -5,7 +5,7 @@ BigchainDB, including its storage backend (RethinkDB). from __future__ import with_statement, unicode_literals -from fabric.api import sudo, env +from fabric.api import sudo, env, hosts from fabric.api import task, parallel from fabric.contrib.files import sed from fabric.operations import run, put @@ -30,28 +30,6 @@ newrelic_license_key = 'you_need_a_real_license_key' ###################################################################### -# DON'T PUT @parallel -@task -def set_hosts(hosts): - """A helper function to change env.hosts from the - command line. - - Args: - hosts (str): 'one_node' or 'two_nodes' - - Example: - fab set_hosts:one_node init_bigchaindb - """ - if hosts == 'one_node': - env.hosts = public_dns_names[:1] - elif hosts == 'two_nodes': - env.hosts = public_dns_names[:2] - else: - raise ValueError('Invalid input to set_hosts.' - ' Expected one_node or two_nodes.' - ' Got {}'.format(hosts)) - - # Install base software @task @parallel @@ -141,10 +119,10 @@ def configure_bigchaindb(): # Initialize BigchainDB # i.e. create the database, the tables, # the indexes, and the genesis block. -# (This only needs to be run on one node.) -# Call using: -# fab set_hosts:one_node init_bigchaindb +# (The @hosts decorator is used to make this +# task run on only one node. See http://tinyurl.com/h9qqf3t ) @task +@hosts(public_dns_names[0]) def init_bigchaindb(): run('bigchaindb init', pty=False) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 58fca6d5..1e25b85d 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -94,11 +94,11 @@ fab configure_bigchaindb # TODO: Add list of public keys to keyring of all nodes -# Send a "bigchaindb init" command to one node -# to initialize the BigchainDB database -# i.e. create the database, the tables, -# the indexes, and the genesis block. -fab set_hosts:one_node init_bigchaindb +# Initialize BigchainDB (i.e. Create the RethinkDB database, +# the tables, the indexes, and genesis glock). Note that +# this will only be sent to one of the nodes, see the +# definition of init_bigchaindb() in fabfile.py to see why. 
+fab init_bigchaindb # Start BigchainDB on all the nodes using "screen" fab start_bigchaindb From 2d6c502ee3f41895881270dfbce4a4db8ab45336 Mon Sep 17 00:00:00 2001 From: vrde Date: Tue, 26 Apr 2016 00:37:35 +0200 Subject: [PATCH 02/24] Add exp command to start RethinkDB --- bigchaindb/commands/bigchain.py | 17 +++++++++++++---- bigchaindb/commands/utils.py | 18 ++++++++++++++++++ tests/test_commands.py | 31 ++++++++++++++++++++++++++++++- 3 files changed, 61 insertions(+), 5 deletions(-) diff --git a/bigchaindb/commands/bigchain.py b/bigchaindb/commands/bigchain.py index 1907c185..c9062895 100644 --- a/bigchaindb/commands/bigchain.py +++ b/bigchaindb/commands/bigchain.py @@ -14,7 +14,7 @@ import bigchaindb import bigchaindb.config_utils from bigchaindb import db from bigchaindb.exceptions import DatabaseAlreadyExists, KeypairNotFoundException -from bigchaindb.commands.utils import base_parser, start +from bigchaindb.commands import utils from bigchaindb.processes import Processes from bigchaindb import crypto @@ -122,8 +122,12 @@ def run_drop(args): def run_start(args): """Start the processes to run the node""" - # run_configure(args, skip_if_exists=True) bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) + + if args.start_rethinkdb: + proc = utils.start_rethinkdb() + logger.info('RethinkDB started with PID %s' % proc.pid) + try: db.init() except DatabaseAlreadyExists: @@ -140,7 +144,12 @@ def run_start(args): def main(): parser = argparse.ArgumentParser( description='Control your BigchainDB node.', - parents=[base_parser]) + parents=[utils.base_parser]) + + parser.add_argument('--experimental-start-rethinkdb', + dest='start_rethinkdb', + action='store_true', + help='Run RethinkDB on start') # all the commands are contained in the subparsers object, # the command selected by the user will be stored in `args.command` @@ -172,7 +181,7 @@ def main(): subparsers.add_parser('start', help='Start BigchainDB') - start(parser, globals()) + utils.start(parser, globals()) if __name__ == '__main__': diff --git a/bigchaindb/commands/utils.py b/bigchaindb/commands/utils.py index c23b25ec..66ee9f94 100644 --- a/bigchaindb/commands/utils.py +++ b/bigchaindb/commands/utils.py @@ -2,12 +2,30 @@ for ``argparse.ArgumentParser``. """ +import os import argparse import multiprocessing as mp +import subprocess from bigchaindb.version import __version__ +def start_rethinkdb(): + proc = subprocess.Popen(['rethinkdb', '--bind', 'all'], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True) + + line = '' + + for line in proc.stdout: + if line.startswith('Server ready'): + return proc + + exit('Error starting RethinkDB, reason is: {}'.format(line)) + proc.kill() + + def start(parser, scope): """Utility function to execute a subcommand. 
diff --git a/tests/test_commands.py b/tests/test_commands.py index b7814206..ba0b9f24 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1,4 +1,5 @@ import json +from unittest.mock import Mock, patch from argparse import Namespace from pprint import pprint import copy @@ -62,10 +63,22 @@ def mock_bigchaindb_backup_config(monkeypatch): def test_bigchain_run_start(mock_run_configure, mock_processes_start, mock_db_init_with_existing_db): from bigchaindb.commands.bigchain import run_start - args = Namespace(config=None, yes=True) + args = Namespace(start_rethinkdb=False, config=None, yes=True) run_start(args) +@patch('bigchaindb.commands.utils.start_rethinkdb') +def test_bigchain_run_start_with_rethinkdb(mock_start_rethinkdb, + mock_run_configure, + mock_processes_start, + mock_db_init_with_existing_db): + from bigchaindb.commands.bigchain import run_start + args = Namespace(start_rethinkdb=True, config=None, yes=True) + run_start(args) + + mock_start_rethinkdb.assert_called_with() + + @pytest.mark.skipif(reason="BigchainDB doesn't support the automatic creation of a config file anymore") def test_bigchain_run_start_assume_yes_create_default_config(monkeypatch, mock_processes_start, mock_generate_key_pair, mock_db_init_with_existing_db): @@ -195,3 +208,19 @@ def test_run_configure_when_config_does_exist(monkeypatch, args = Namespace(config='foo', yes=None) run_configure(args) assert value == {} + + +@patch('subprocess.Popen') +def test_start_rethinkdb_returns_a_process_when_successful(mock_popen): + from bigchaindb.commands import utils + mock_popen.return_value = Mock(stdout=['Server ready']) + assert utils.start_rethinkdb() is mock_popen.return_value + + +@patch('subprocess.Popen') +def test_start_rethinkdb_exists_when_cannot_start(mock_popen): + from bigchaindb.commands import utils + mock_popen.return_value = Mock(stdout=['Nopety nope']) + with pytest.raises(SystemExit): + utils.start_rethinkdb() + From c3ad343e776332d6be39e59ca8322ada01fa99af Mon Sep 17 00:00:00 2001 From: vrde Date: Tue, 26 Apr 2016 02:09:53 +0200 Subject: [PATCH 03/24] Respect "-" as stdout --- bigchaindb/commands/bigchain.py | 30 +++++++++++++++++++++++------- bigchaindb/commands/utils.py | 3 ++- 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/bigchaindb/commands/bigchain.py b/bigchaindb/commands/bigchain.py index c9062895..d6c6a0c6 100644 --- a/bigchaindb/commands/bigchain.py +++ b/bigchaindb/commands/bigchain.py @@ -9,6 +9,7 @@ import logging import argparse import copy import json +import builtins import bigchaindb import bigchaindb.config_utils @@ -23,6 +24,14 @@ logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) +# We need this because `input` always prints on stdout, while it should print +# to stderr. It's a very old bug, check it out here: +# - https://bugs.python.org/issue1927 +def input(prompt): + print(prompt, end='', file=sys.stderr) + return builtins.input() + + def run_show_config(args): """Show the current configuration""" # TODO Proposal: remove the "hidden" configuration. Only show config. 
If @@ -43,7 +52,11 @@ def run_configure(args, skip_if_exists=False): skip_if_exists (bool): skip the function if a config file already exists """ config_path = args.config or bigchaindb.config_utils.CONFIG_DEFAULT_PATH - config_file_exists = os.path.exists(config_path) + + config_file_exists = False + # if the config path is `-` then it's stdout + if config_path != '-': + config_file_exists = os.path.exists(config_path) if config_file_exists and skip_if_exists: return @@ -57,7 +70,7 @@ def run_configure(args, skip_if_exists=False): # Patch the default configuration with the new values conf = copy.deepcopy(bigchaindb._config) - print('Generating keypair') + print('Generating keypair', file=sys.stderr) conf['keypair']['private'], conf['keypair']['public'] = \ crypto.generate_key_pair() @@ -80,9 +93,12 @@ def run_configure(args, skip_if_exists=False): input('Statsd {}? (default `{}`): '.format(key, val)) \ or val - bigchaindb.config_utils.write_config(conf, config_path) - print('Configuration written to {}'.format(config_path)) - print('Ready to go!') + if config_path != '-': + bigchaindb.config_utils.write_config(conf, config_path) + else: + print(json.dumps(conf, indent=4, sort_keys=True)) + print('Configuration written to {}'.format(config_path), file=sys.stderr) + print('Ready to go!', file=sys.stderr) def run_export_my_pubkey(args): @@ -110,8 +126,8 @@ def run_init(args): try: db.init() except DatabaseAlreadyExists: - print('The database already exists.') - print('If you wish to re-initialize it, first drop it.') + print('The database already exists.', file=sys.stderr) + print('If you wish to re-initialize it, first drop it.', file=sys.stderr) def run_drop(args): diff --git a/bigchaindb/commands/utils.py b/bigchaindb/commands/utils.py index 66ee9f94..4923a713 100644 --- a/bigchaindb/commands/utils.py +++ b/bigchaindb/commands/utils.py @@ -69,7 +69,8 @@ def start(parser, scope): base_parser = argparse.ArgumentParser(add_help=False, prog='bigchaindb') base_parser.add_argument('-c', '--config', - help='Specify the location of the configuration file') + help='Specify the location of the configuration file ' + '(use "-" for stdout)') base_parser.add_argument('-y', '--yes', '--yes-please', action='store_true', From f9c609ff0d34cd21ab40e24acb1fe70c833ff062 Mon Sep 17 00:00:00 2001 From: vrde Date: Tue, 26 Apr 2016 03:24:56 +0200 Subject: [PATCH 04/24] Add env variables to default config --- bigchaindb/commands/bigchain.py | 5 ++++- bigchaindb/config_utils.py | 6 +++++- setup.py | 2 +- tests/test_commands.py | 4 ++-- 4 files changed, 12 insertions(+), 5 deletions(-) diff --git a/bigchaindb/commands/bigchain.py b/bigchaindb/commands/bigchain.py index d6c6a0c6..f4e25f5e 100644 --- a/bigchaindb/commands/bigchain.py +++ b/bigchaindb/commands/bigchain.py @@ -52,6 +52,8 @@ def run_configure(args, skip_if_exists=False): skip_if_exists (bool): skip the function if a config file already exists """ config_path = args.config or bigchaindb.config_utils.CONFIG_DEFAULT_PATH + bigchaindb.config_utils.autoconfigure(filename=False, force=True) + config_file_exists = False # if the config path is `-` then it's stdout @@ -68,7 +70,8 @@ def run_configure(args, skip_if_exists=False): return # Patch the default configuration with the new values - conf = copy.deepcopy(bigchaindb._config) + conf = copy.deepcopy(bigchaindb.config) + del conf['CONFIGURED'] print('Generating keypair', file=sys.stderr) conf['keypair']['private'], conf['keypair']['public'] = \ diff --git a/bigchaindb/config_utils.py 
b/bigchaindb/config_utils.py index 3ec17656..4256f580 100644 --- a/bigchaindb/config_utils.py +++ b/bigchaindb/config_utils.py @@ -91,7 +91,11 @@ def file_config(filename=None): file at CONFIG_DEFAULT_PATH, if filename == None) """ logger.debug('On entry into file_config(), filename = {}'.format(filename)) - if not filename: + + if filename is False: + return {} + + if filename is None: filename = CONFIG_DEFAULT_PATH logger.debug('file_config() will try to open `{}`'.format(filename)) diff --git a/setup.py b/setup.py index 77e3a02d..2cbc2d35 100644 --- a/setup.py +++ b/setup.py @@ -73,7 +73,7 @@ setup( ] }, install_requires=[ - 'rethinkdb==2.2.0.post4', + 'rethinkdb==2.3.0', 'pysha3==0.3', 'pytz==2015.7', 'cryptoconditions==0.1.6', diff --git a/tests/test_commands.py b/tests/test_commands.py index ba0b9f24..f7e27a2d 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -186,7 +186,7 @@ def test_run_configure_when_config_does_not_exist(monkeypatch, mock_bigchaindb_backup_config): from bigchaindb.commands.bigchain import run_configure monkeypatch.setattr('os.path.exists', lambda path: False) - monkeypatch.setattr('builtins.input', lambda question: '\n') + monkeypatch.setattr('builtins.input', lambda: '\n') args = Namespace(config='foo', yes=True) return_value = run_configure(args) assert return_value is None @@ -202,7 +202,7 @@ def test_run_configure_when_config_does_exist(monkeypatch, from bigchaindb.commands.bigchain import run_configure monkeypatch.setattr('os.path.exists', lambda path: True) - monkeypatch.setattr('builtins.input', lambda question: '\n') + monkeypatch.setattr('builtins.input', lambda: '\n') monkeypatch.setattr('bigchaindb.config_utils.write_config', mock_write_config) args = Namespace(config='foo', yes=None) From 56af6e5b1273a7115dd2c50eedd0c6f4b4212ecc Mon Sep 17 00:00:00 2001 From: vrde Date: Tue, 26 Apr 2016 03:43:48 +0200 Subject: [PATCH 05/24] Move load from bigchain-benchmark to bigchain --- bigchaindb/commands/bigchain.py | 55 +++++++++++++++ bigchaindb/commands/bigchain_benchmark.py | 84 ----------------------- setup.py | 3 +- 3 files changed, 56 insertions(+), 86 deletions(-) delete mode 100644 bigchaindb/commands/bigchain_benchmark.py diff --git a/bigchaindb/commands/bigchain.py b/bigchaindb/commands/bigchain.py index f4e25f5e..8065834a 100644 --- a/bigchaindb/commands/bigchain.py +++ b/bigchaindb/commands/bigchain.py @@ -11,8 +11,13 @@ import copy import json import builtins +import logstats + + import bigchaindb import bigchaindb.config_utils +from bigchaindb.util import ProcessGroup +from bigchaindb.client import temp_client from bigchaindb import db from bigchaindb.exceptions import DatabaseAlreadyExists, KeypairNotFoundException from bigchaindb.commands import utils @@ -160,6 +165,38 @@ def run_start(args): processes.start() +def _run_load(tx_left, stats): + logstats.thread.start(stats) + client = temp_client() + # b = bigchaindb.Bigchain() + + while True: + tx = client.create() + + stats['transactions'] += 1 + + if tx_left is not None: + tx_left -= 1 + if tx_left == 0: + break + + +def run_load(args): + bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) + logger.info('Starting %s processes', args.multiprocess) + stats = logstats.Logstats() + logstats.thread.start(stats) + + tx_left = None + if args.count > 0: + tx_left = int(args.count / args.multiprocess) + + workers = ProcessGroup(concurrency=args.multiprocess, + target=_run_load, + args=(tx_left, stats.get_child())) + workers.start() + + def main(): parser = 
argparse.ArgumentParser( description='Control your BigchainDB node.', @@ -200,6 +237,24 @@ def main(): subparsers.add_parser('start', help='Start BigchainDB') + load_parser = subparsers.add_parser('load', + help='Write transactions to the backlog') + + load_parser.add_argument('-m', '--multiprocess', + nargs='?', + type=int, + default=False, + help='Spawn multiple processes to run the command, ' + 'if no value is provided, the number of processes ' + 'is equal to the number of cores of the host machine') + + load_parser.add_argument('-c', '--count', + default=0, + type=int, + help='Number of transactions to push. If the parameter -m ' + 'is set, the count is distributed equally to all the ' + 'processes') + utils.start(parser, globals()) diff --git a/bigchaindb/commands/bigchain_benchmark.py b/bigchaindb/commands/bigchain_benchmark.py deleted file mode 100644 index 9c0edc63..00000000 --- a/bigchaindb/commands/bigchain_benchmark.py +++ /dev/null @@ -1,84 +0,0 @@ -"""Command line interface for the `bigchaindb-benchmark` command.""" - -import logging -import argparse - -import logstats - -import bigchaindb -import bigchaindb.config_utils -from bigchaindb.util import ProcessGroup -from bigchaindb.client import temp_client -from bigchaindb.commands.utils import base_parser, start - - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -def _run_load(tx_left, stats): - logstats.thread.start(stats) - client = temp_client() - # b = bigchaindb.Bigchain() - - while True: - tx = client.create() - - stats['transactions'] += 1 - - if tx_left is not None: - tx_left -= 1 - if tx_left == 0: - break - - -def run_load(args): - bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) - logger.info('Starting %s processes', args.multiprocess) - stats = logstats.Logstats() - logstats.thread.start(stats) - - tx_left = None - if args.count > 0: - tx_left = int(args.count / args.multiprocess) - - workers = ProcessGroup(concurrency=args.multiprocess, - target=_run_load, - args=(tx_left, stats.get_child())) - workers.start() - - -def main(): - parser = argparse.ArgumentParser(description='Benchmark your bigchain federation.', - parents=[base_parser]) - - # all the commands are contained in the subparsers object, - # the command selected by the user will be stored in `args.command` - # that is used by the `main` function to select which other - # function to call. - subparsers = parser.add_subparsers(title='Commands', - dest='command') - - # parser for database level commands - load_parser = subparsers.add_parser('load', - help='Write transactions to the backlog') - - load_parser.add_argument('-m', '--multiprocess', - nargs='?', - type=int, - default=False, - help='Spawn multiple processes to run the command, ' - 'if no value is provided, the number of processes ' - 'is equal to the number of cores of the host machine') - - load_parser.add_argument('-c', '--count', - default=0, - type=int, - help='Number of transactions to push. 
If the parameter -m ' - 'is set, the count is distributed equally to all the ' - 'processes') - - start(parser, globals()) - -if __name__ == '__main__': - main() diff --git a/setup.py b/setup.py index 2cbc2d35..a4294176 100644 --- a/setup.py +++ b/setup.py @@ -65,8 +65,7 @@ setup( entry_points={ 'console_scripts': [ - 'bigchaindb=bigchaindb.commands.bigchain:main', - 'bigchaindb-benchmark=bigchaindb.commands.bigchain_benchmark:main' + 'bigchaindb=bigchaindb.commands.bigchain:main' ], 'bigchaindb.consensus': [ 'default=bigchaindb.consensus:BaseConsensusRules' From 2f4c2609a102d9b6618a63db82dd8a85b615e50d Mon Sep 17 00:00:00 2001 From: vrde Date: Tue, 26 Apr 2016 04:28:59 +0200 Subject: [PATCH 06/24] Update Dockerfile with new env variables --- Dockerfile | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index a955dd5d..15a3cc17 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,12 +1,26 @@ -FROM python:3.5 +FROM rethinkdb:2.3 RUN apt-get update +RUN apt-get -y install python3 python3-pip +RUN pip3 install --upgrade pip +RUN pip3 install --upgrade setuptools RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -RUN pip install --upgrade pip COPY . /usr/src/app/ -RUN pip install --no-cache-dir -e .[dev] +WORKDIR /usr/src/app + +RUN pip3 install --no-cache-dir -e . + +WORKDIR /data + +ENV BIGCHAINDB_CONFIG_PATH /data/.bigchaindb +ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984 +ENV BIGCHAINDB_API_ENDPOINT http://0.0.0.0:9984/api/v1 + +ENTRYPOINT ["bigchaindb", "--experimental-start-rethinkdb"] + +CMD ["start"] + +EXPOSE 8080 9984 28015 29015 From 9d040185335509633f0a7ecadabce1bb3ecf97b8 Mon Sep 17 00:00:00 2001 From: vrde Date: Tue, 26 Apr 2016 04:29:32 +0200 Subject: [PATCH 07/24] Update docs --- docs/source/bigchaindb-cli.md | 9 +++--- docs/source/installing-server.md | 46 +++++++++++++++---------------- docs/source/monitoring.md | 4 +-- docs/source/running-unit-tests.md | 13 --------- 4 files changed, 29 insertions(+), 43 deletions(-) diff --git a/docs/source/bigchaindb-cli.md b/docs/source/bigchaindb-cli.md index 37822717..d154e118 100644 --- a/docs/source/bigchaindb-cli.md +++ b/docs/source/bigchaindb-cli.md @@ -1,6 +1,6 @@ # The BigchainDB Command Line Interface (CLI) -There are some command-line commands for working with BigchainDB: `bigchaindb` and `bigchaindb-benchmark`. This section provides an overview of those commands. +The command to interact with BigchainDB is `bigchaindb`. This section provides an overview of the command. ## bigchaindb @@ -37,10 +37,9 @@ This command drops (erases) the RethinkDB database. You will be prompted to make This command starts BigchainDB. It always begins by trying a `bigchaindb init` first. See the note in the documentation for `bigchaindb init`. -## bigchaindb-benchmark +## bigchaindb load -The `bigchaindb-benchmark` command is used to run benchmarking tests. You can learn more about it using: +The command is used to run benchmarking tests. 
You can learn more about it using: ```text -$ bigchaindb-benchmark -h -$ bigchaindb-benchmark load -h +$ bigchaindb load -h ``` diff --git a/docs/source/installing-server.md b/docs/source/installing-server.md index e1465b98..09967238 100644 --- a/docs/source/installing-server.md +++ b/docs/source/installing-server.md @@ -111,9 +111,9 @@ If it's the first time you've run `bigchaindb start`, then it creates the databa **NOT for Production Use** -For those who like using Docker and wish to experiment with BigchainDB in non-production environments, we currently maintain a `dockerfile` that can be used to build an image for `bigchaindb`, along with a `docker-compose.yml` file to manage a "standalone node", consisting mainly of two containers: one for RethinkDB, and another for BigchainDB. +For those who like using Docker and wish to experiment with BigchainDB in non-production environments, we currently maintain a `dockerfile` that can be used to build an image for `bigchaindb`. -Assuming you have `docker` and `docker-compose` installed, you would proceed as follows. +Assuming you have `docker` installed, you would proceed as follows. In a terminal shell: ```text @@ -122,41 +122,41 @@ $ git clone git@github.com:bigchaindb/bigchaindb.git Build the Docker image: ```text -$ docker-compose build +$ docker build --tag local-bigchaindb . ``` -then do a one-time configuration step to create the config file; it will be -stored on your host machine under ` ~/.bigchaindb_docker/config`: +then do a one-time configuration step to create the config file; we will use +the `-y` option to accept all the default values. The configuration file will +be stored on your host machine under ` ~/bigchaindb_docker/.bigchaindb`: ```text -$ docker-compose run --rm bigchaindb bigchaindb configure -Starting bigchaindb_rethinkdb-data_1 +$ docker run --rm -v "$HOME/bigchaindb_docker:/data" -ti local-bigchaindb -y configure Generating keypair -API Server bind? (default `localhost:9984`): -Database host? (default `localhost`): rethinkdb -Database port? (default `28015`): -Database name? (default `bigchain`): -Statsd host? (default `localhost`): statsd -Statsd port? (default `8125`): -Statsd rate? (default `0.01`): +Configuration written to /data/.bigchaindb Ready to go! ``` -As shown above, make sure that you set the database and statsd hosts to their -corresponding service names (`rethinkdb`, `statsd`), defined in`docker-compose.yml` -and `docker-compose-monitor.yml`. - You can then start it up (in the background, as a daemon) using: ```text -$ docker-compose up -d +$ BIGCHAIN_CID=$(docker run -d -v "$HOME/bigchaindb_docker:/data" local-bigchaindb) ``` -then you can load test transactions via: +then you can load test transactions (using the `-m` option to enable using all +the cores in the host machine) via: ```text -$ docker-compose run --rm bigchaindb bigchaindb-benchmark load +$ docker exec -d $BIGCHAIN_CID load -m ``` +You can see BigchainDB processing your transactions running: +```text +$ docker logs -f $BIGCHAIN_CID +``` + + If you're on Linux, you can probably view the RethinkDB dashboard at: -[http://localhost:58080/](http://localhost:58080/) +[http://localhost:8080/](http://localhost:8080/) -If that doesn't work, then replace `localhost` with the IP or hostname of the machine running the Docker engine. If you are running docker-machine (e.g.: on Mac OS X) this will be the IP of the Docker machine (`docker-machine ip machine_name`). 
+If that doesn't work, then replace `localhost` with the IP or hostname of the +machine running the Docker engine. If you are running docker-machine (e.g.: on +Mac OS X) this will be the IP of the Docker machine (`docker-machine ip +machine_name`). diff --git a/docs/source/monitoring.md b/docs/source/monitoring.md index b5b9ca58..4fb53072 100644 --- a/docs/source/monitoring.md +++ b/docs/source/monitoring.md @@ -22,11 +22,11 @@ then point a browser tab to: The login and password are `admin` by default. If BigchainDB is running and processing transactions, you should see analytics—if not, [start BigchainDB](installing-server.html#run-bigchaindb) and load some test transactions: ```text -$ bigchaindb-benchmark load +$ bigchaindb load ``` then refresh the page after a few seconds. If you're not interested in monitoring, don't worry: BigchainDB will function just fine without any monitoring setup. -Feel free to modify the [custom Grafana dashboard](https://github.com/rhsimplex/grafana-bigchaindb-docker/blob/master/bigchaindb_dashboard.js) to your liking! \ No newline at end of file +Feel free to modify the [custom Grafana dashboard](https://github.com/rhsimplex/grafana-bigchaindb-docker/blob/master/bigchaindb_dashboard.js) to your liking! diff --git a/docs/source/running-unit-tests.md b/docs/source/running-unit-tests.md index e20a0fc2..9b0ea505 100644 --- a/docs/source/running-unit-tests.md +++ b/docs/source/running-unit-tests.md @@ -18,16 +18,3 @@ $ python setup.py test (Aside: How does the above command work? The documentation for [pytest-runner](https://pypi.python.org/pypi/pytest-runner) explains. We use [pytest](http://pytest.org/latest/) to write all unit tests.) -### Using docker-compose to Run the Tests - -You can also use `docker-compose` to run the unit tests. (You don't have to start RethinkDB first: `docker-compose` does that on its own, when it reads the `docker-compose.yml` file.) - -First, build the images (~once), using: -```text -$ docker-compose build -``` - -then run the unit tests using: -```text -$ docker-compose run --rm bigchaindb py.test -v -``` From fc61a0993411172c44c0bd929c3e4419e469f4fa Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 26 Apr 2016 14:56:50 +0200 Subject: [PATCH 08/24] Add set_host() and send_confile() to fabfile.py --- deploy-cluster-aws/fabfile.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index c6c0019c..6037d95f 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -30,6 +30,24 @@ newrelic_license_key = 'you_need_a_real_license_key' ###################################################################### +# DON'T PUT @parallel +@task +def set_host(host_index): + """A helper task to change env.hosts from the + command line. It will only "stick" for the duration + of the fab command that called it. + + Args: + host_index (int): 1, 2, 3, etc. 
+ Example: + fab set_host:4 fab_task_A fab_task_B + will set env.hosts = [public_dns_names[3]] + but only for doing fab_task_A and fab_task_B + """ + env.hosts = [public_dns_names[int(host_index) - 1]] + print('Set env.hosts = {}'.format(env.hosts)) + + # Install base software @task @parallel @@ -116,6 +134,21 @@ def configure_bigchaindb(): run('bigchaindb -y configure', pty=False) +# Send the specified configuration file to +# the remote host and save it there in +# ~/.bigchaindb +# Use in conjunction with set_host() +# No @parallel +@task +def send_confile(confile): + put('confiles/' + confile, 'tempfile') + sudo('mv tempfile ~/.bigchaindb') + print('When confile = {} '.format(confile)) + print('bigchaindb show-config output is:') + run('bigchaindb show-config') + print(' ') + + # Initialize BigchainDB # i.e. create the database, the tables, # the indexes, and the genesis block. From a7fb8c66a278c2a616285cbcbd25365f67385b03 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 26 Apr 2016 14:58:56 +0200 Subject: [PATCH 09/24] Call clusterize_confiles.py & send_confile from startup.sh --- deploy-cluster-aws/startup.sh | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 1e25b85d..923eb7af 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -21,7 +21,7 @@ if [ -z "$2" ]; then fi TAG=$1 -NODES=$2 +NUM_NODES=$2 # If they don't include a third argument () # then assume BRANCH = "pypi" by default @@ -38,6 +38,13 @@ if [ ! -f "pem/bigchaindb.pem" ]; then exit 1 fi +# Check for the confiles directory +if [ ! -d "confiles" ]; then + echo "Directory confiles is needed but does not exist" + echo "See make_confiles.sh to find out how to make it" + exit 1 +fi + # Change the file permissions on pem/bigchaindb.pem # so that the owner can read it, but that's all chmod 0400 pem/bigchaindb.pem @@ -52,7 +59,7 @@ chmod 0400 pem/bigchaindb.pem # 5. writes the shellscript add2known_hosts.sh # 6. (over)writes a file named hostlist.py # containing a list of all public DNS names. -python launch_ec2_nodes.py --tag $TAG --nodes $NODES +python launch_ec2_nodes.py --tag $TAG --nodes $NUM_NODES # Make add2known_hosts.sh executable then execute it. # This adds remote keys to ~/.ssh/known_hosts @@ -86,13 +93,28 @@ else fi # Configure BigchainDB on all nodes -fab configure_bigchaindb -# TODO: Get public keys from all nodes +# The idea is to send a bunch of locally-created configuration +# files out to each of the instances / nodes. +# Assume a set of $NUM_NODES BigchaindB config files +# already exists in the confiles directory. +# One can create a set using a command like +# ./make_confiles.sh confiles $NUM_NODES +# (We can't do that here now because this virtual environment +# is a Python 2 environment that may not even have +# bigchaindb installed, so bigchaindb configure can't be called) -# TODO: Add list of public keys to keyring of all nodes +# Transform the config files in the confiles directory +# to have proper keyrings, api_endpoint values, etc. +python clusterize_confiles.py confiles $NUM_NODES +# Send one of the config files to each instance +for (( HOST=1 ; HOST<=$NUM_NODES ; HOST++ )); do + CONFILE="bcdb_conf"$HOST + echo "Sending "$CONFILE + fab set_host:$HOST send_confile:$CONFILE +done # Initialize BigchainDB (i.e. Create the RethinkDB database, # the tables, the indexes, and genesis glock). 
Note that @@ -105,3 +127,4 @@ fab start_bigchaindb # cleanup rm add2known_hosts.sh +# rm -rf temp_confs \ No newline at end of file From 1c5c4223c77a2df8549bef889d4dd37338eb35ca Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 26 Apr 2016 15:00:35 +0200 Subject: [PATCH 10/24] Add deploy-cluster-aws/confiles/ to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index d94467b1..27b4f797 100644 --- a/.gitignore +++ b/.gitignore @@ -71,3 +71,4 @@ target/ # Some files created when deploying a cluster on AWS deploy-cluster-aws/conf/rethinkdb.conf deploy-cluster-aws/hostlist.py +deploy-cluster-aws/confiles/ From c3ce5ef4f7daa00c6cbc964929dcc711a03ad1d3 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 26 Apr 2016 15:01:23 +0200 Subject: [PATCH 11/24] Created make_confiles.sh and clusterize_confiles.py --- deploy-cluster-aws/clusterize_confiles.py | 72 +++++++++++++++++++++++ deploy-cluster-aws/make_confiles.sh | 40 +++++++++++++ 2 files changed, 112 insertions(+) create mode 100644 deploy-cluster-aws/clusterize_confiles.py create mode 100755 deploy-cluster-aws/make_confiles.sh diff --git a/deploy-cluster-aws/clusterize_confiles.py b/deploy-cluster-aws/clusterize_confiles.py new file mode 100644 index 00000000..498953e5 --- /dev/null +++ b/deploy-cluster-aws/clusterize_confiles.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +"""Given a directory full of default BigchainDB config files, +transform them into config files for a cluster with proper +keyrings, API endpoint values, etc. + +Note: This script assumes that there is a file named hostlist.py +containing public_dns_names = a list of the public DNS names of +all the hosts in the cluster. + +Usage: + python clusterize_confiles.py +""" + +from __future__ import unicode_literals +import os +import json +import argparse + +from hostlist import public_dns_names + + +# Parse the command-line arguments +parser = argparse.ArgumentParser() +parser.add_argument('dir', + help='Directory containing the config files') +parser.add_argument('number_of_files', + help='Number of config files expected in dir', + type=int) +args = parser.parse_args() + +conf_dir = args.dir +numfiles_expected = int(args.number_of_files) + +# Check if the number of files in conf_dir is what was expected +conf_files = os.listdir(conf_dir) +numfiles = len(conf_files) +if numfiles != numfiles_expected: + raise ValueError('There are {} files in {} but {} were expected'. 
+ format(numfiles, conf_dir, numfiles_expected)) + +# Make a list containing all the public keys from +# all the config files +pubkeys = [] +for filename in conf_files: + file_path = os.path.join(conf_dir, filename) + with open(file_path, 'r') as f: + conf_dict = json.load(f) + pubkey = conf_dict['keypair']['public'] + pubkeys.append(pubkey) + +# Rewrite each config file, one at a time +for i, filename in enumerate(conf_files): + file_path = os.path.join(conf_dir, filename) + with open(file_path, 'r') as f: + conf_dict = json.load(f) + # The keyring is the list of *all* public keys + # minus the config file's own public key + keyring = list(pubkeys) + keyring.remove(conf_dict['keypair']['public']) + conf_dict['keyring'] = keyring + # Allow incoming server traffic from any IP address + # to port 9984 + conf_dict['server']['bind'] = '0.0.0.0:9984' + # Set the api_endpoint + conf_dict['api_endpoint'] = 'http://' + public_dns_names[i] + \ + ':9984/api/v1' + # Delete the config file + os.remove(file_path) + # Write new config file with the same filename + print('Rewriting {}'.format(file_path)) + with open(file_path, 'w') as f2: + json.dump(conf_dict, f2) diff --git a/deploy-cluster-aws/make_confiles.sh b/deploy-cluster-aws/make_confiles.sh new file mode 100755 index 00000000..91f4d1fa --- /dev/null +++ b/deploy-cluster-aws/make_confiles.sh @@ -0,0 +1,40 @@ +#! /bin/bash + +# The set -e option instructs bash to immediately exit +# if any command has a non-zero exit status +set -e + +function printErr() + { + echo "usage: ./make_confiles.sh " + echo "No argument $1 supplied" + } + +if [ -z "$1" ]; then + printErr "" + exit 1 +fi + +if [ -z "$2" ]; then + printErr "" + exit 1 +fi + +CONFDIR=$1 +NUMFILES=$2 + +# If $CONFDIR exists, remove it +if [ -d "$CONFDIR" ]; then + rm -rf $CONFDIR +fi + +# Create $CONFDIR +mkdir $CONFDIR + +# Use the bigchaindb configure command to create +# $NUMFILES BigchainDB config files in $CONFDIR +for (( i=1; i<=$NUMFILES; i++ )); do + CONPATH=$CONFDIR"/bcdb_conf"$i + echo "Writing "$CONPATH + bigchaindb -y -c $CONPATH configure +done From 2ef9bedfefb9a2bcb800db5faf7d0ac03ba44040 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 26 Apr 2016 15:25:33 +0200 Subject: [PATCH 12/24] Small changes to output of send_confile in fabfile.py --- deploy-cluster-aws/fabfile.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index 6037d95f..c0c2181c 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -143,10 +143,8 @@ def configure_bigchaindb(): def send_confile(confile): put('confiles/' + confile, 'tempfile') sudo('mv tempfile ~/.bigchaindb') - print('When confile = {} '.format(confile)) - print('bigchaindb show-config output is:') + print('For this node, bigchaindb show-config says:') run('bigchaindb show-config') - print(' ') # Initialize BigchainDB From 653a6a231c5077250e5a5dec59a630bf3986dfdd Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 26 Apr 2016 16:09:06 +0200 Subject: [PATCH 13/24] Instead of waiting 45s, test port 22 conns. 
until all work --- deploy-cluster-aws/launch_ec2_nodes.py | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/deploy-cluster-aws/launch_ec2_nodes.py b/deploy-cluster-aws/launch_ec2_nodes.py index 9ebf3026..7db71adc 100644 --- a/deploy-cluster-aws/launch_ec2_nodes.py +++ b/deploy-cluster-aws/launch_ec2_nodes.py @@ -14,6 +14,7 @@ from __future__ import unicode_literals import sys import time +import socket import argparse import botocore import boto3 @@ -192,8 +193,23 @@ with open('hostlist.py', 'w') as f: f.write('\n') f.write('public_dns_names = {}\n'.format(public_dns_names)) -# Wait -wait_time = 45 -print('Waiting {} seconds to make sure all instances are ready...'. - format(wait_time)) -time.sleep(wait_time) + +# For each node in the cluster, check port 22 (ssh) until it's reachable +for public_dns_name in public_dns_names: + # Create an INET, STREAMing socket + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + print('Attempting to connect to {} on port 22 (ssh)...'. + format(public_dns_name)) + unreachable = True + while unreachable: + try: + # Open a TCP connection to the remote node on port 22 + s.connect((public_dns_name, 22)) + print(' Port 22 is reachable!') + s.shutdown(socket.SHUT_WR) + s.close() + unreachable = False + except socket.error as e: + print(' Socket error: {}'.format(e)) + print(' Trying again in 3 seconds') + time.sleep(3.0) From 4d965a8b3451a17d304603f150ae2f50579c2eba Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 26 Apr 2016 16:49:50 +0200 Subject: [PATCH 14/24] Renumbered confiles to start at 0 --- deploy-cluster-aws/fabfile.py | 7 +++---- deploy-cluster-aws/make_confiles.sh | 2 +- deploy-cluster-aws/startup.sh | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index c0c2181c..e2e99e8d 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -38,14 +38,13 @@ def set_host(host_index): of the fab command that called it. Args: - host_index (int): 1, 2, 3, etc. + host_index (int): 0, 1, 2, 3, etc. 
Example: fab set_host:4 fab_task_A fab_task_B - will set env.hosts = [public_dns_names[3]] + will set env.hosts = [public_dns_names[4]] but only for doing fab_task_A and fab_task_B """ - env.hosts = [public_dns_names[int(host_index) - 1]] - print('Set env.hosts = {}'.format(env.hosts)) + env.hosts = [public_dns_names[int(host_index)]] # Install base software diff --git a/deploy-cluster-aws/make_confiles.sh b/deploy-cluster-aws/make_confiles.sh index 91f4d1fa..72735cb3 100755 --- a/deploy-cluster-aws/make_confiles.sh +++ b/deploy-cluster-aws/make_confiles.sh @@ -33,7 +33,7 @@ mkdir $CONFDIR # Use the bigchaindb configure command to create # $NUMFILES BigchainDB config files in $CONFDIR -for (( i=1; i<=$NUMFILES; i++ )); do +for (( i=0; i<$NUMFILES; i++ )); do CONPATH=$CONFDIR"/bcdb_conf"$i echo "Writing "$CONPATH bigchaindb -y -c $CONPATH configure diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 923eb7af..f9ccbbe7 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -110,7 +110,7 @@ fi python clusterize_confiles.py confiles $NUM_NODES # Send one of the config files to each instance -for (( HOST=1 ; HOST<=$NUM_NODES ; HOST++ )); do +for (( HOST=0 ; HOST<$NUM_NODES ; HOST++ )); do CONFILE="bcdb_conf"$HOST echo "Sending "$CONFILE fab set_host:$HOST send_confile:$CONFILE From 7fa8f7e9f091585f0bd7668ce11d5abd08ef1865 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 26 Apr 2016 16:50:55 +0200 Subject: [PATCH 15/24] Updated AWS deployment docs --- docs/source/deploy-on-aws.md | 46 +++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/docs/source/deploy-on-aws.md b/docs/source/deploy-on-aws.md index 4a373a0d..0bd4ed62 100644 --- a/docs/source/deploy-on-aws.md +++ b/docs/source/deploy-on-aws.md @@ -83,16 +83,37 @@ Add some rules for Inbound traffic: **Note: These rules are extremely lax! They're meant to make testing easy.** You'll want to tighten them up if you intend to have a secure cluster. For example, Source = 0.0.0.0/0 is [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) for "allow this traffic to come from _any_ IP address." -## Deployment +## AWS Deployment -Here's an example of how one could launch a BigchainDB cluster of four nodes tagged `wrigley` on AWS: +### AWS Deployment Step 1 + +Step 1 is to create a set of BigchainDB configuration files in a directory named `confiles`. They can be the default configuration files (i.e. what gets created when you use the `bigchaindb -y configure` command). An easy way to create a set of config files is to use the `make_confiles.sh` script. For example, if you **go into a virtual environment where `bigchaindb` is installed (i.e. probably a Python 3 virtual environment)** and enter: ```text +# in a Python 3 virtual environment where bigchaindb is installed cd bigchaindb -cd deploy-cluster-aws -./startup.sh wrigley 4 pypi +cd deploy_cluster_aws +./make_confiles.sh confiles 3 ``` -The `pypi` on the end means that it will install the latest (stable) `bigchaindb` package from the [Python Package Index (PyPI)](https://pypi.python.org/pypi). That is, on each instance, BigchainDB is installed using `pip install bigchaindb`. +then three (3) default BigchainDB configuration files will be created in a directory named `confiles`, namely: + +* `confiles/bcdb_conf0` +* `confiles/bcdb_conf1` +* `confiles/bcdb_conf2` + +You can look inside those files if you're curious. In step 2, they'll be modified. 
For example, the default keyring is an empty list. In step 2, the deployment script automatically change the keyring of each node to be a list of the public keys of all other nodes. + +### AWS Deployment Step 2 + +Step 2 is to launch the nodes ("instances") on AWS, to install all the necessary software on them, to configure the software, and to run the software. + +Here's an example of how one could launch a BigchainDB cluster of three (3) nodes tagged `wrigley` on AWS: +```text +cd deploy-cluster-aws +./startup.sh wrigley 3 pypi +``` + +The `pypi` on the end means that it will install the latest (stable) `bigchaindb` package from the [Python Package Index (PyPI)](https://pypi.python.org/pypi). That is, on each node, BigchainDB is installed using `pip install bigchaindb`. `startup.sh` is a Bash script which calls some Python and Fabric scripts. The usage is: ```text @@ -101,20 +122,7 @@ The `pypi` on the end means that it will install the latest (stable) `bigchaindb The first two arguments are self-explanatory. The third argument can be `pypi` or the name of a local Git branch (e.g. `master` or `feat/3752/quote-asimov-on-tuesdays`). If you don't include a third argument, then `pypi` will be assumed by default. -Here's what the `startup.sh` script does; it: - -0. allocates more elastic IP addresses if necessary, -1. launches the specified number of nodes (instances) on Amazon EC2, -2. tags them with the specified tag, -3. waits until those instances exist and are running, -4. for each instance, it associates an elastic IP address with that instance, -5. adds remote keys to `~/.ssh/known_hosts`, -6. (re)creates the RethinkDB configuration file `conf/rethinkdb.conf`, -7. installs base (prerequisite) software on all instances, -8. installs RethinkDB on all instances, -9. installs BigchainDB on all instances, -10. initializes the BigchainDB database, -11. starts BigchainDB on all instances. +If you're curious what the `startup.sh` script does, the source code has lots of explanatory comments, so it's quite easy to read. Here's a link to the latest version on GitHub: [`startup.sh`](https://github.com/bigchaindb/bigchaindb/blob/master/deploy-cluster-aws/startup.sh) It should take a few minutes for the deployment to finish. If you run into problems, see the section on Known Deployment Issues below. From 70b86ce49b6ebbecb120f5e097ec1ba3767d4fbc Mon Sep 17 00:00:00 2001 From: vrde Date: Tue, 26 Apr 2016 17:01:37 +0200 Subject: [PATCH 16/24] Add artificial delay to wait for tables init --- bigchaindb/commands/utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bigchaindb/commands/utils.py b/bigchaindb/commands/utils.py index 4923a713..99b6b4cb 100644 --- a/bigchaindb/commands/utils.py +++ b/bigchaindb/commands/utils.py @@ -2,7 +2,7 @@ for ``argparse.ArgumentParser``. """ -import os +import time import argparse import multiprocessing as mp import subprocess @@ -20,6 +20,10 @@ def start_rethinkdb(): for line in proc.stdout: if line.startswith('Server ready'): + # FIXME: seems like tables are not ready when the server is ready, + # that's why we need to sleep a bit before returning. + # Not the optimal solution. 
Happy to see the right one :) + time.sleep(1) return proc exit('Error starting RethinkDB, reason is: {}'.format(line)) From 774454e4755a4a1e05fa6295ecd584721f611631 Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 27 Apr 2016 09:28:31 +0200 Subject: [PATCH 17/24] Clarified some points in the AWS deployment docs --- docs/source/deploy-on-aws.md | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/docs/source/deploy-on-aws.md b/docs/source/deploy-on-aws.md index 0bd4ed62..49c83021 100644 --- a/docs/source/deploy-on-aws.md +++ b/docs/source/deploy-on-aws.md @@ -87,28 +87,26 @@ Add some rules for Inbound traffic: ### AWS Deployment Step 1 -Step 1 is to create a set of BigchainDB configuration files in a directory named `confiles`. They can be the default configuration files (i.e. what gets created when you use the `bigchaindb -y configure` command). An easy way to create a set of config files is to use the `make_confiles.sh` script. For example, if you **go into a virtual environment where `bigchaindb` is installed (i.e. probably a Python 3 virtual environment)** and enter: +Suppose _N_ is the number of nodes you want in your BigchainDB cluster. If you already have a set of _N_ BigchainDB configuration files in the `deploy-cluster-aws/confiles` directory, then you can jump to step 2. To create such a set, you can do something like: ```text # in a Python 3 virtual environment where bigchaindb is installed cd bigchaindb -cd deploy_cluster_aws +cd deploy-cluster-aws ./make_confiles.sh confiles 3 ``` -then three (3) default BigchainDB configuration files will be created in a directory named `confiles`, namely: +That will create three (3) _default_ BigchainDB configuration files in the `deploy-cluster-aws/confiles` directory (which will be created if it doesn't already exist). The three files will be named `bcdb_conf0`, `bcdb_conf1`, and `bcdb_conf2`. -* `confiles/bcdb_conf0` -* `confiles/bcdb_conf1` -* `confiles/bcdb_conf2` - -You can look inside those files if you're curious. In step 2, they'll be modified. For example, the default keyring is an empty list. In step 2, the deployment script automatically change the keyring of each node to be a list of the public keys of all other nodes. +You can look inside those files if you're curious. In step 2, they'll be modified. For example, the default keyring is an empty list. In step 2, the deployment script automatically changes the keyring of each node to be a list of the public keys of all other nodes. Other changes are also made. ### AWS Deployment Step 2 -Step 2 is to launch the nodes ("instances") on AWS, to install all the necessary software on them, to configure the software, and to run the software. +Step 2 is to launch the nodes ("instances") on AWS, to install all the necessary software on them, configure the software, run the software, and more. Here's an example of how one could launch a BigchainDB cluster of three (3) nodes tagged `wrigley` on AWS: ```text +# in a Python 2.5-2.7 virtual environment where fabric, boto3, etc. 
are installed +cd bigchaindb cd deploy-cluster-aws ./startup.sh wrigley 3 pypi ``` From 344246096d270d6cea555d6213d7de7fa2c2a498 Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 27 Apr 2016 11:57:19 +0200 Subject: [PATCH 18/24] Give IP address to socket.connect, rather than DNS name --- deploy-cluster-aws/launch_ec2_nodes.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/deploy-cluster-aws/launch_ec2_nodes.py b/deploy-cluster-aws/launch_ec2_nodes.py index 7db71adc..868a732c 100644 --- a/deploy-cluster-aws/launch_ec2_nodes.py +++ b/deploy-cluster-aws/launch_ec2_nodes.py @@ -195,21 +195,25 @@ with open('hostlist.py', 'w') as f: # For each node in the cluster, check port 22 (ssh) until it's reachable -for public_dns_name in public_dns_names: - # Create an INET, STREAMing socket +for instance in instances_with_tag: + ip_address = instance.public_ip_address + # Create a socket + # Address Family: AF_INET (means IPv4) + # Type: SOCK_STREAM (means connection-oriented TCP protocol) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) print('Attempting to connect to {} on port 22 (ssh)...'. - format(public_dns_name)) + format(ip_address)) unreachable = True while unreachable: try: - # Open a TCP connection to the remote node on port 22 - s.connect((public_dns_name, 22)) - print(' Port 22 is reachable!') - s.shutdown(socket.SHUT_WR) - s.close() - unreachable = False + # Open a connection to the remote node on port 22 + s.connect((ip_address, 22)) except socket.error as e: print(' Socket error: {}'.format(e)) print(' Trying again in 3 seconds') time.sleep(3.0) + else: + print(' Port 22 is reachable!') + s.shutdown(socket.SHUT_WR) + s.close() + unreachable = False From 12a538a93364d281258ce1eb37cee1cb92df653e Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 27 Apr 2016 13:43:40 +0200 Subject: [PATCH 19/24] Use run() rather than sudo() to move conf file to ~ --- deploy-cluster-aws/fabfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index e2e99e8d..807a3536 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -141,7 +141,7 @@ def configure_bigchaindb(): @task def send_confile(confile): put('confiles/' + confile, 'tempfile') - sudo('mv tempfile ~/.bigchaindb') + run('mv tempfile ~/.bigchaindb') print('For this node, bigchaindb show-config says:') run('bigchaindb show-config') From 0d6e74cfc7041a55b08e0b5f7a73769af10ce632 Mon Sep 17 00:00:00 2001 From: vrde Date: Wed, 27 Apr 2016 17:00:21 +0200 Subject: [PATCH 20/24] Use .wait to check when the database is ready --- bigchaindb/commands/bigchain.py | 9 ++++++-- bigchaindb/commands/utils.py | 37 ++++++++++++++++++++++++++++----- bigchaindb/exceptions.py | 3 ++- 3 files changed, 41 insertions(+), 8 deletions(-) diff --git a/bigchaindb/commands/bigchain.py b/bigchaindb/commands/bigchain.py index 8065834a..d23f99fc 100644 --- a/bigchaindb/commands/bigchain.py +++ b/bigchaindb/commands/bigchain.py @@ -19,7 +19,9 @@ import bigchaindb.config_utils from bigchaindb.util import ProcessGroup from bigchaindb.client import temp_client from bigchaindb import db -from bigchaindb.exceptions import DatabaseAlreadyExists, KeypairNotFoundException +from bigchaindb.exceptions import (StartupError, + DatabaseAlreadyExists, + KeypairNotFoundException) from bigchaindb.commands import utils from bigchaindb.processes import Processes from bigchaindb import crypto @@ -149,7 +151,10 @@ def run_start(args): 
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) if args.start_rethinkdb: - proc = utils.start_rethinkdb() + try: + proc = utils.start_rethinkdb() + except StartupError as e: + sys.exit('Error starting RethinkDB, reason is: {}'.format(e)) logger.info('RethinkDB started with PID %s' % proc.pid) try: diff --git a/bigchaindb/commands/utils.py b/bigchaindb/commands/utils.py index 99b6b4cb..b3cc356c 100644 --- a/bigchaindb/commands/utils.py +++ b/bigchaindb/commands/utils.py @@ -2,31 +2,58 @@ for ``argparse.ArgumentParser``. """ -import time import argparse import multiprocessing as mp import subprocess +import rethinkdb as r + +import bigchaindb +from bigchaindb.exceptions import StartupError +from bigchaindb import db from bigchaindb.version import __version__ def start_rethinkdb(): + """Start RethinkDB as a child process and wait for it to be + available. + + Raises: + ``bigchaindb.exceptions.StartupError`` if RethinkDB cannot + be started. + """ + proc = subprocess.Popen(['rethinkdb', '--bind', 'all'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) + dbname = bigchaindb.config['database']['name'] line = '' for line in proc.stdout: if line.startswith('Server ready'): # FIXME: seems like tables are not ready when the server is ready, - # that's why we need to sleep a bit before returning. - # Not the optimal solution. Happy to see the right one :) - time.sleep(1) + # that's why we need to query RethinkDB to know the state + # of the database. This code assumes the tables are ready + # when the database is ready. This seems a valid assumption. + + try: + conn = db.get_conn() + # Before checking if the db is ready, we need to query + # the server to check if it contains that db + if r.db_list().contains(dbname).run(conn): + r.db(dbname).wait().run(conn) + except (r.ReqlOpFailedError, r.ReqlDriverError) as exc: + raise StartupError('Error waiting for the database `{}` ' + 'to be ready'.format(dbname)) from exc + return proc - exit('Error starting RethinkDB, reason is: {}'.format(line)) + # We are here when we exhaust the stdout of the process. + # The last `line` contains info about the error. 
+ raise StartupError(line) + proc.kill() diff --git a/bigchaindb/exceptions.py b/bigchaindb/exceptions.py index 0baa4ad2..8abfaaf7 100644 --- a/bigchaindb/exceptions.py +++ b/bigchaindb/exceptions.py @@ -28,4 +28,5 @@ class DatabaseDoesNotExist(Exception): class KeypairNotFoundException(Exception): """Raised if operation cannot proceed because the keypair was not given""" - +class StartupError(Exception): + """Raised when there is an error starting up the system""" From 08aaf784e5084384c330ff19c942fc4807afe270 Mon Sep 17 00:00:00 2001 From: vrde Date: Wed, 27 Apr 2016 17:14:33 +0200 Subject: [PATCH 21/24] Update documentation --- Dockerfile | 2 +- docs/source/installing-server.md | 134 +++++++++++++++++++++++-------- 2 files changed, 100 insertions(+), 36 deletions(-) diff --git a/Dockerfile b/Dockerfile index 15a3cc17..0fcac07f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,7 +17,7 @@ WORKDIR /data ENV BIGCHAINDB_CONFIG_PATH /data/.bigchaindb ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984 -ENV BIGCHAINDB_API_ENDPOINT http://0.0.0.0:9984/api/v1 +ENV BIGCHAINDB_API_ENDPOINT http://bigchaindb:9984/api/v1 ENTRYPOINT ["bigchaindb", "--experimental-start-rethinkdb"] diff --git a/docs/source/installing-server.md b/docs/source/installing-server.md index 09967238..60fd977d 100644 --- a/docs/source/installing-server.md +++ b/docs/source/installing-server.md @@ -111,7 +111,104 @@ If it's the first time you've run `bigchaindb start`, then it creates the databa **NOT for Production Use** -For those who like using Docker and wish to experiment with BigchainDB in non-production environments, we currently maintain a `dockerfile` that can be used to build an image for `bigchaindb`. +For those who like using Docker and wish to experiment with BigchainDB in +non-production environments, we currently maintain a Docker image and a +`dockerfile` that can be used to build an image for `bigchaindb`. + +### Pull and run the image from Docker Hub + +Assuming you have `docker` installed, you would proceed as follows. + +In a terminal shell, pull the latest version of the BigchainDB Docker image +```text +docker pull bigchaindb/bigchaindb:latest +``` + +then do a one-time configuration step to create the config file; we will use +the `-y` option to accept all the default values. The configuration file will +be stored on your host machine under ` ~/bigchaindb_docker/.bigchaindb`: + +```text +$ docker run --rm -v "$HOME/bigchaindb_docker:/data" -ti \ + bigchaindb/bigchaindb:latest configure +Generating keypair +Configuration written to /data/.bigchaindb +Ready to go! 
+``` + +Let's analyze the command: + - `docker run` tell docker to run some image + - `--rm` remove the container once we are done + - `-v "$HOME/bigchaindb_docker:/data` map the host directory + `$HOME/bigchaindb_docker` to the container directory `/data`, + this allow us to have the data persisted on the host machine, + you can read more in the [official Docker + documentation](https://docs.docker.com/engine/userguide/containers/dockervolumes/#mount-a-host-directory-as-a-data-volume) + - `-t` allocate a pseudo-tty + - `-i` keep STDIN open even if not attached + - `bigchaindb/bigchaindb:latest` the image to use + - `configure` the sub-command to execute + + +After configuring the system, you can run BigchainDB with the following +command: + +```text +$ docker run -v "$HOME/bigchaindb_docker:/data" -d \ + --name bigchaindb + -p "58080:8080" -p "59984:9984" \ + bigchaindb/bigchaindb:latest start +``` + +The command is slightly different from the previous one, the differences are: + - `--name bigchaindb` give a nice name to the container, so it's easier to + refer to it later + - `-p "58080:8080"` map the host port `58080` to the container port `8080` + (the RethinkDB admin interface) + - `-p "59984:9984"` map the host port `59984` to the container port `9984` + (the BigchainDB API server) + - `start` start the BigchainDB service + +Another way to publish the ports exposed by the containeris to use the `-P` (or +`--publish-all`). This will publish all exposed ports to random ports. You can +always run `docker ps` to check the random mapping. + +You can also access the RethinkDB dashboard at: +[http://localhost:58080/](http://localhost:58080/) + +If that doesn't work, then replace `localhost` with the IP or hostname of the +machine running the Docker engine. If you are running docker-machine (e.g.: on +Mac OS X) this will be the IP of the Docker machine (`docker-machine ip +machine_name`). + +#### Load test with Docker + +After BigchainDB is running in the docker container named `bigchaindb`, we can +start another container and run the load test against the first one. + +First, make sure the container `bigchaindb` is running, you can check this by running: +```text +docker ps +``` + +You should see a container named `bigchaindb` running. + +Now, run a new container and link it to `bigchaindb` to run the load test. +```text +$ docker run --rm -v "$HOME/bigchaindb_docker:/data" -ti \ + --link bigchaindb \ + bigchaindb/bigchaindb:test load +``` + +Note that load test transactions accepts also a `-m` option to enable +multiprocessing and make use of all the cores in the host machine. + +You can see BigchainDB processing your transactions running: +```text +$ docker logs -f bigchaindb +``` + +### Building your own image Assuming you have `docker` installed, you would proceed as follows. @@ -125,38 +222,5 @@ Build the Docker image: $ docker build --tag local-bigchaindb . ``` -then do a one-time configuration step to create the config file; we will use -the `-y` option to accept all the default values. The configuration file will -be stored on your host machine under ` ~/bigchaindb_docker/.bigchaindb`: -```text -$ docker run --rm -v "$HOME/bigchaindb_docker:/data" -ti local-bigchaindb -y configure -Generating keypair -Configuration written to /data/.bigchaindb -Ready to go! -``` +Now you can use your own image to run BigchainDB containers. 
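If you want to script a quick check that a freshly started container is actually answering on the mapped BigchainDB port, a minimal Python poll along the following lines can help. It is only a sketch: it assumes the `-p "59984:9984"` host-port mapping used earlier in this section, and it only checks that *something* answers HTTP on that port, not what the API returns.

```python
import time
import urllib.error
import urllib.request

API_URL = 'http://localhost:59984'  # host port mapped to the container's 9984


def wait_for_api(url=API_URL, timeout=60):
    """Poll the mapped BigchainDB port until it answers or `timeout` expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            # Any HTTP response at all means the server process is up.
            urllib.request.urlopen(url, timeout=5)
            return True
        except urllib.error.HTTPError:
            # An HTTP error status still proves the server is listening.
            return True
        except (urllib.error.URLError, OSError):
            time.sleep(1)
    return False


if __name__ == '__main__':
    print('API reachable:', wait_for_api())
```

Adjust `API_URL` if you chose a different host port or started the container with `-P`.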
-You can then start it up (in the background, as a daemon) using: -```text -$ BIGCHAIN_CID=$(docker run -d -v "$HOME/bigchaindb_docker:/data" local-bigchaindb) -``` - -then you can load test transactions (using the `-m` option to enable using all -the cores in the host machine) via: -```text -$ docker exec -d $BIGCHAIN_CID load -m -``` - -You can see BigchainDB processing your transactions running: -```text -$ docker logs -f $BIGCHAIN_CID -``` - - -If you're on Linux, you can probably view the RethinkDB dashboard at: - -[http://localhost:8080/](http://localhost:8080/) - -If that doesn't work, then replace `localhost` with the IP or hostname of the -machine running the Docker engine. If you are running docker-machine (e.g.: on -Mac OS X) this will be the IP of the Docker machine (`docker-machine ip -machine_name`). From 66cd3bb77b554809d88274ffc1298118e2a17032 Mon Sep 17 00:00:00 2001 From: vrde Date: Wed, 27 Apr 2016 17:17:11 +0200 Subject: [PATCH 22/24] Fix exception in test --- tests/test_commands.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index f7e27a2d..99a4f466 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -218,9 +218,10 @@ def test_start_rethinkdb_returns_a_process_when_successful(mock_popen): @patch('subprocess.Popen') -def test_start_rethinkdb_exists_when_cannot_start(mock_popen): +def test_start_rethinkdb_exits_when_cannot_start(mock_popen): + from bigchaindb import exceptions from bigchaindb.commands import utils mock_popen.return_value = Mock(stdout=['Nopety nope']) - with pytest.raises(SystemExit): + with pytest.raises(exceptions.StartupError): utils.start_rethinkdb() From cd6bb18f1807450e56d0fa04984d705cf2ffc822 Mon Sep 17 00:00:00 2001 From: troymc Date: Thu, 28 Apr 2016 13:32:25 +0200 Subject: [PATCH 23/24] Minor edits to the Docker documentation --- docs/source/bigchaindb-cli.md | 4 +- docs/source/installing-server.md | 93 ++++++++++++++++---------------- 2 files changed, 50 insertions(+), 47 deletions(-) diff --git a/docs/source/bigchaindb-cli.md b/docs/source/bigchaindb-cli.md index d154e118..229d5b4c 100644 --- a/docs/source/bigchaindb-cli.md +++ b/docs/source/bigchaindb-cli.md @@ -37,9 +37,9 @@ This command drops (erases) the RethinkDB database. You will be prompted to make This command starts BigchainDB. It always begins by trying a `bigchaindb init` first. See the note in the documentation for `bigchaindb init`. -## bigchaindb load +### bigchaindb load -The command is used to run benchmarking tests. You can learn more about it using: +This command is used to run benchmarking tests. You can learn more about it using: ```text $ bigchaindb load -h ``` diff --git a/docs/source/installing-server.md b/docs/source/installing-server.md index 60fd977d..1b830811 100644 --- a/docs/source/installing-server.md +++ b/docs/source/installing-server.md @@ -113,104 +113,108 @@ If it's the first time you've run `bigchaindb start`, then it creates the databa For those who like using Docker and wish to experiment with BigchainDB in non-production environments, we currently maintain a Docker image and a -`dockerfile` that can be used to build an image for `bigchaindb`. +`Dockerfile` that can be used to build an image for `bigchaindb`. -### Pull and run the image from Docker Hub +### Pull and Run the Image from Docker Hub -Assuming you have `docker` installed, you would proceed as follows. +Assuming you have Docker installed, you would proceed as follows. 
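The test fix in patch 22 above makes the failure path raise `bigchaindb.exceptions.StartupError` instead of calling `exit()`. For comparison, a success-path test has to fake both the `Server ready` banner and the RethinkDB readiness query that `start_rethinkdb()` now performs. The sketch below is hypothetical — the real `test_start_rethinkdb_returns_a_process_when_successful` is only named in the hunk header and its body is not shown here — and simply illustrates which call sites would need patching:

```python
from unittest.mock import Mock, patch


@patch('bigchaindb.db.get_conn')
@patch('subprocess.Popen')
def test_start_rethinkdb_success_sketch(mock_popen, mock_get_conn):
    # Hypothetical sketch, not a test from the patch series.
    from bigchaindb.commands import utils

    # Pretend RethinkDB printed its ready banner on stdout.
    mock_popen.return_value = Mock(stdout=['Server ready'])

    # `mock_get_conn` replaces db.get_conn(), so no real connection is opened.
    # Stub the post-banner readiness check so no real driver call is made.
    with patch('rethinkdb.db_list') as mock_db_list:
        # Pretend the database does not exist yet, so no wait() is issued.
        mock_db_list.return_value.contains.return_value.run.return_value = False
        proc = utils.start_rethinkdb()

    assert proc is mock_popen.return_value
```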
-In a terminal shell, pull the latest version of the BigchainDB Docker image +In a terminal shell, pull the latest version of the BigchainDB Docker image using: ```text docker pull bigchaindb/bigchaindb:latest ``` then do a one-time configuration step to create the config file; we will use the `-y` option to accept all the default values. The configuration file will -be stored on your host machine under ` ~/bigchaindb_docker/.bigchaindb`: +be stored in a file on your host machine at `~/bigchaindb_docker/.bigchaindb`: ```text $ docker run --rm -v "$HOME/bigchaindb_docker:/data" -ti \ - bigchaindb/bigchaindb:latest configure + bigchaindb/bigchaindb:latest -y configure Generating keypair Configuration written to /data/.bigchaindb Ready to go! ``` -Let's analyze the command: - - `docker run` tell docker to run some image - - `--rm` remove the container once we are done - - `-v "$HOME/bigchaindb_docker:/data` map the host directory - `$HOME/bigchaindb_docker` to the container directory `/data`, - this allow us to have the data persisted on the host machine, - you can read more in the [official Docker - documentation](https://docs.docker.com/engine/userguide/containers/dockervolumes/#mount-a-host-directory-as-a-data-volume) - - `-t` allocate a pseudo-tty - - `-i` keep STDIN open even if not attached - - `bigchaindb/bigchaindb:latest` the image to use - - `configure` the sub-command to execute +Let's analyze that command: + +* `docker run` tells Docker to run some image +* `--rm` remove the container once we are done +* `-v "$HOME/bigchaindb_docker:/data"` map the host directory + `$HOME/bigchaindb_docker` to the container directory `/data`; + this allows us to have the data persisted on the host machine, + you can read more in the [official Docker + documentation](https://docs.docker.com/engine/userguide/containers/dockervolumes/#mount-a-host-directory-as-a-data-volume) +* `-t` allocate a pseudo-TTY +* `-i` keep STDIN open even if not attached +* `bigchaindb/bigchaindb:latest` the image to use +* `-y configure` execute the `configure` sub-command (of the `bigchaindb` command) inside the container, with the `-y` option to automatically use all the default config values -After configuring the system, you can run BigchainDB with the following -command: +After configuring the system, you can run BigchainDB with the following command: ```text $ docker run -v "$HOME/bigchaindb_docker:/data" -d \ - --name bigchaindb + --name bigchaindb \ -p "58080:8080" -p "59984:9984" \ bigchaindb/bigchaindb:latest start ``` The command is slightly different from the previous one, the differences are: - - `--name bigchaindb` give a nice name to the container, so it's easier to - refer to it later - - `-p "58080:8080"` map the host port `58080` to the container port `8080` - (the RethinkDB admin interface) - - `-p "59984:9984"` map the host port `59984` to the container port `9984` - (the BigchainDB API server) - - `start` start the BigchainDB service -Another way to publish the ports exposed by the containeris to use the `-P` (or -`--publish-all`). This will publish all exposed ports to random ports. 
You can +* `-d` run the container in the background +* `--name bigchaindb` give a nice name to the container so it's easier to + refer to it later +* `-p "58080:8080"` map the host port `58080` to the container port `8080` + (the RethinkDB admin interface) +* `-p "59984:9984"` map the host port `59984` to the container port `9984` + (the BigchainDB API server) +* `start` start the BigchainDB service + +Another way to publish the ports exposed by the container is to use the `-P` (or +`--publish-all`) option. This will publish all exposed ports to random ports. You can always run `docker ps` to check the random mapping. -You can also access the RethinkDB dashboard at: +You can also access the RethinkDB dashboard at [http://localhost:58080/](http://localhost:58080/) If that doesn't work, then replace `localhost` with the IP or hostname of the -machine running the Docker engine. If you are running docker-machine (e.g.: on +machine running the Docker engine. If you are running docker-machine (e.g. on Mac OS X) this will be the IP of the Docker machine (`docker-machine ip machine_name`). -#### Load test with Docker +#### Load Testing with Docker -After BigchainDB is running in the docker container named `bigchaindb`, we can -start another container and run the load test against the first one. +Now that we have BigchainDB running in the Docker container named `bigchaindb`, we can +start another BigchainDB container to generate a load test for it. -First, make sure the container `bigchaindb` is running, you can check this by running: +First, make sure the container named `bigchaindb` is still running. You can check that using: ```text docker ps ``` -You should see a container named `bigchaindb` running. +You should see a container named `bigchaindb` in the list. + +You can load test the BigchainDB running in that container by running the `bigchaindb load` command in a second container: -Now, run a new container and link it to `bigchaindb` to run the load test. ```text $ docker run --rm -v "$HOME/bigchaindb_docker:/data" -ti \ --link bigchaindb \ - bigchaindb/bigchaindb:test load + bigchaindb/bigchaindb:latest load ``` -Note that load test transactions accepts also a `-m` option to enable -multiprocessing and make use of all the cores in the host machine. +Note the `--link` option to link to the first container (named `bigchaindb`). -You can see BigchainDB processing your transactions running: +Aside: The `bigchaindb load` command has several options (e.g. `-m`). You can read more about it in [the documentation about the BigchainDB command line interface](bigchaindb-cli.html). + +If you look at the RethinkDB dashboard (in your web browser), you should see the effects of the load test. You can also see some effects in the Docker logs using: ```text $ docker logs -f bigchaindb ``` -### Building your own image +### Building Your Own Image -Assuming you have `docker` installed, you would proceed as follows. +Assuming you have Docker installed, you would proceed as follows. In a terminal shell: ```text @@ -223,4 +227,3 @@ $ docker build --tag local-bigchaindb . ``` Now you can use your own image to run BigchainDB containers. 
- From f5da2af872a16bfcc9f3e06e64a73ad1b3daba92 Mon Sep 17 00:00:00 2001 From: vrde Date: Thu, 28 Apr 2016 15:46:33 +0200 Subject: [PATCH 24/24] Remove smelly code --- bigchaindb/commands/bigchain.py | 11 ++++++----- bigchaindb/commands/utils.py | 2 -- bigchaindb/config_utils.py | 3 --- 3 files changed, 6 insertions(+), 10 deletions(-) diff --git a/bigchaindb/commands/bigchain.py b/bigchaindb/commands/bigchain.py index d23f99fc..b37a3812 100644 --- a/bigchaindb/commands/bigchain.py +++ b/bigchaindb/commands/bigchain.py @@ -59,8 +59,6 @@ def run_configure(args, skip_if_exists=False): skip_if_exists (bool): skip the function if a config file already exists """ config_path = args.config or bigchaindb.config_utils.CONFIG_DEFAULT_PATH - bigchaindb.config_utils.autoconfigure(filename=False, force=True) - config_file_exists = False # if the config path is `-` then it's stdout @@ -76,9 +74,13 @@ def run_configure(args, skip_if_exists=False): if want != 'y': return - # Patch the default configuration with the new values conf = copy.deepcopy(bigchaindb.config) - del conf['CONFIGURED'] + + # Patch the default configuration with the new values + conf = bigchaindb.config_utils.update( + conf, + bigchaindb.config_utils.env_config(bigchaindb.config)) + print('Generating keypair', file=sys.stderr) conf['keypair']['private'], conf['keypair']['public'] = \ @@ -173,7 +175,6 @@ def run_start(args): def _run_load(tx_left, stats): logstats.thread.start(stats) client = temp_client() - # b = bigchaindb.Bigchain() while True: tx = client.create() diff --git a/bigchaindb/commands/utils.py b/bigchaindb/commands/utils.py index b3cc356c..dc035de6 100644 --- a/bigchaindb/commands/utils.py +++ b/bigchaindb/commands/utils.py @@ -54,8 +54,6 @@ def start_rethinkdb(): # The last `line` contains info about the error. raise StartupError(line) - proc.kill() - def start(parser, scope): """Utility function to execute a subcommand. diff --git a/bigchaindb/config_utils.py b/bigchaindb/config_utils.py index 4256f580..dc396522 100644 --- a/bigchaindb/config_utils.py +++ b/bigchaindb/config_utils.py @@ -92,9 +92,6 @@ def file_config(filename=None): """ logger.debug('On entry into file_config(), filename = {}'.format(filename)) - if filename is False: - return {} - if filename is None: filename = CONFIG_DEFAULT_PATH
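The `run_configure()` change in the last patch layers environment-derived settings on top of a deep copy of the default configuration via `config_utils.update()` and `config_utils.env_config()`. Neither function's body appears in these patches, but the general "defaults patched by overrides" merge they describe can be illustrated with a small recursive dict merge; the keys below are hypothetical example values, not the full BigchainDB config.

```python
import copy


def deep_update(base, overrides):
    """Recursively merge `overrides` into a copy of `base`.

    Illustration only -- not the actual bigchaindb.config_utils.update().
    """
    merged = copy.deepcopy(base)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_update(merged[key], value)
        else:
            merged[key] = value
    return merged


# Hypothetical defaults and environment overrides, for illustration.
defaults = {'database': {'host': 'localhost', 'port': 28015},
            'keypair': {'public': None, 'private': None}}
env_overrides = {'database': {'host': 'rethinkdb-node-1'}}

print(deep_update(defaults, env_overrides))
# -> {'database': {'host': 'rethinkdb-node-1', 'port': 28015},
#     'keypair': {'public': None, 'private': None}}
```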