mirror of
https://github.com/bigchaindb/bigchaindb.git
synced 2024-10-13 13:34:05 +00:00
Merge remote-tracking branch 'remotes/origin/master' into feat/127/crypto-conditions-ilp-bigchain-integration-hashlock
This commit is contained in:
commit
f5f4c17839
1
.gitignore
vendored
1
.gitignore
vendored
@ -71,3 +71,4 @@ target/
|
||||
# Some files created when deploying a cluster on AWS
|
||||
deploy-cluster-aws/conf/rethinkdb.conf
|
||||
deploy-cluster-aws/hostlist.py
|
||||
deploy-cluster-aws/confiles/
|
||||
|
24
Dockerfile
24
Dockerfile
@ -1,12 +1,26 @@
|
||||
FROM python:3.5
|
||||
FROM rethinkdb:2.3
|
||||
|
||||
RUN apt-get update
|
||||
RUN apt-get -y install python3 python3-pip
|
||||
RUN pip3 install --upgrade pip
|
||||
RUN pip3 install --upgrade setuptools
|
||||
|
||||
RUN mkdir -p /usr/src/app
|
||||
WORKDIR /usr/src/app
|
||||
|
||||
RUN pip install --upgrade pip
|
||||
|
||||
COPY . /usr/src/app/
|
||||
|
||||
RUN pip install --no-cache-dir -e .[dev]
|
||||
WORKDIR /usr/src/app
|
||||
|
||||
RUN pip3 install --no-cache-dir -e .
|
||||
|
||||
WORKDIR /data
|
||||
|
||||
ENV BIGCHAINDB_CONFIG_PATH /data/.bigchaindb
|
||||
ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984
|
||||
ENV BIGCHAINDB_API_ENDPOINT http://bigchaindb:9984/api/v1
|
||||
|
||||
ENTRYPOINT ["bigchaindb", "--experimental-start-rethinkdb"]
|
||||
|
||||
CMD ["start"]
|
||||
|
||||
EXPOSE 8080 9984 28015 29015
|
||||
|
@ -9,12 +9,20 @@ import logging
|
||||
import argparse
|
||||
import copy
|
||||
import json
|
||||
import builtins
|
||||
|
||||
import logstats
|
||||
|
||||
|
||||
import bigchaindb
|
||||
import bigchaindb.config_utils
|
||||
from bigchaindb.util import ProcessGroup
|
||||
from bigchaindb.client import temp_client
|
||||
from bigchaindb import db
|
||||
from bigchaindb.exceptions import DatabaseAlreadyExists, KeypairNotFoundException
|
||||
from bigchaindb.commands.utils import base_parser, start
|
||||
from bigchaindb.exceptions import (StartupError,
|
||||
DatabaseAlreadyExists,
|
||||
KeypairNotFoundException)
|
||||
from bigchaindb.commands import utils
|
||||
from bigchaindb.processes import Processes
|
||||
from bigchaindb import crypto
|
||||
|
||||
@ -23,6 +31,14 @@ logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# We need this because `input` always prints on stdout, while it should print
|
||||
# to stderr. It's a very old bug, check it out here:
|
||||
# - https://bugs.python.org/issue1927
|
||||
def input(prompt):
|
||||
print(prompt, end='', file=sys.stderr)
|
||||
return builtins.input()
|
||||
|
||||
|
||||
def run_show_config(args):
|
||||
"""Show the current configuration"""
|
||||
# TODO Proposal: remove the "hidden" configuration. Only show config. If
|
||||
@ -43,7 +59,11 @@ def run_configure(args, skip_if_exists=False):
|
||||
skip_if_exists (bool): skip the function if a config file already exists
|
||||
"""
|
||||
config_path = args.config or bigchaindb.config_utils.CONFIG_DEFAULT_PATH
|
||||
config_file_exists = os.path.exists(config_path)
|
||||
|
||||
config_file_exists = False
|
||||
# if the config path is `-` then it's stdout
|
||||
if config_path != '-':
|
||||
config_file_exists = os.path.exists(config_path)
|
||||
|
||||
if config_file_exists and skip_if_exists:
|
||||
return
|
||||
@ -54,10 +74,15 @@ def run_configure(args, skip_if_exists=False):
|
||||
if want != 'y':
|
||||
return
|
||||
|
||||
# Patch the default configuration with the new values
|
||||
conf = copy.deepcopy(bigchaindb._config)
|
||||
conf = copy.deepcopy(bigchaindb.config)
|
||||
|
||||
print('Generating keypair')
|
||||
# Patch the default configuration with the new values
|
||||
conf = bigchaindb.config_utils.update(
|
||||
conf,
|
||||
bigchaindb.config_utils.env_config(bigchaindb.config))
|
||||
|
||||
|
||||
print('Generating keypair', file=sys.stderr)
|
||||
conf['keypair']['private'], conf['keypair']['public'] = \
|
||||
crypto.generate_key_pair()
|
||||
|
||||
@ -80,9 +105,12 @@ def run_configure(args, skip_if_exists=False):
|
||||
input('Statsd {}? (default `{}`): '.format(key, val)) \
|
||||
or val
|
||||
|
||||
bigchaindb.config_utils.write_config(conf, config_path)
|
||||
print('Configuration written to {}'.format(config_path))
|
||||
print('Ready to go!')
|
||||
if config_path != '-':
|
||||
bigchaindb.config_utils.write_config(conf, config_path)
|
||||
else:
|
||||
print(json.dumps(conf, indent=4, sort_keys=True))
|
||||
print('Configuration written to {}'.format(config_path), file=sys.stderr)
|
||||
print('Ready to go!', file=sys.stderr)
|
||||
|
||||
|
||||
def run_export_my_pubkey(args):
|
||||
@ -110,8 +138,8 @@ def run_init(args):
|
||||
try:
|
||||
db.init()
|
||||
except DatabaseAlreadyExists:
|
||||
print('The database already exists.')
|
||||
print('If you wish to re-initialize it, first drop it.')
|
||||
print('The database already exists.', file=sys.stderr)
|
||||
print('If you wish to re-initialize it, first drop it.', file=sys.stderr)
|
||||
|
||||
|
||||
def run_drop(args):
|
||||
@ -122,8 +150,15 @@ def run_drop(args):
|
||||
|
||||
def run_start(args):
|
||||
"""Start the processes to run the node"""
|
||||
# run_configure(args, skip_if_exists=True)
|
||||
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
|
||||
|
||||
if args.start_rethinkdb:
|
||||
try:
|
||||
proc = utils.start_rethinkdb()
|
||||
except StartupError as e:
|
||||
sys.exit('Error starting RethinkDB, reason is: {}'.format(e))
|
||||
logger.info('RethinkDB started with PID %s' % proc.pid)
|
||||
|
||||
try:
|
||||
db.init()
|
||||
except DatabaseAlreadyExists:
|
||||
@ -137,10 +172,46 @@ def run_start(args):
|
||||
processes.start()
|
||||
|
||||
|
||||
def _run_load(tx_left, stats):
|
||||
logstats.thread.start(stats)
|
||||
client = temp_client()
|
||||
|
||||
while True:
|
||||
tx = client.create()
|
||||
|
||||
stats['transactions'] += 1
|
||||
|
||||
if tx_left is not None:
|
||||
tx_left -= 1
|
||||
if tx_left == 0:
|
||||
break
|
||||
|
||||
|
||||
def run_load(args):
|
||||
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
|
||||
logger.info('Starting %s processes', args.multiprocess)
|
||||
stats = logstats.Logstats()
|
||||
logstats.thread.start(stats)
|
||||
|
||||
tx_left = None
|
||||
if args.count > 0:
|
||||
tx_left = int(args.count / args.multiprocess)
|
||||
|
||||
workers = ProcessGroup(concurrency=args.multiprocess,
|
||||
target=_run_load,
|
||||
args=(tx_left, stats.get_child()))
|
||||
workers.start()
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Control your BigchainDB node.',
|
||||
parents=[base_parser])
|
||||
parents=[utils.base_parser])
|
||||
|
||||
parser.add_argument('--experimental-start-rethinkdb',
|
||||
dest='start_rethinkdb',
|
||||
action='store_true',
|
||||
help='Run RethinkDB on start')
|
||||
|
||||
# all the commands are contained in the subparsers object,
|
||||
# the command selected by the user will be stored in `args.command`
|
||||
@ -172,7 +243,25 @@ def main():
|
||||
subparsers.add_parser('start',
|
||||
help='Start BigchainDB')
|
||||
|
||||
start(parser, globals())
|
||||
load_parser = subparsers.add_parser('load',
|
||||
help='Write transactions to the backlog')
|
||||
|
||||
load_parser.add_argument('-m', '--multiprocess',
|
||||
nargs='?',
|
||||
type=int,
|
||||
default=False,
|
||||
help='Spawn multiple processes to run the command, '
|
||||
'if no value is provided, the number of processes '
|
||||
'is equal to the number of cores of the host machine')
|
||||
|
||||
load_parser.add_argument('-c', '--count',
|
||||
default=0,
|
||||
type=int,
|
||||
help='Number of transactions to push. If the parameter -m '
|
||||
'is set, the count is distributed equally to all the '
|
||||
'processes')
|
||||
|
||||
utils.start(parser, globals())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
@ -1,84 +0,0 @@
|
||||
"""Command line interface for the `bigchaindb-benchmark` command."""
|
||||
|
||||
import logging
|
||||
import argparse
|
||||
|
||||
import logstats
|
||||
|
||||
import bigchaindb
|
||||
import bigchaindb.config_utils
|
||||
from bigchaindb.util import ProcessGroup
|
||||
from bigchaindb.client import temp_client
|
||||
from bigchaindb.commands.utils import base_parser, start
|
||||
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _run_load(tx_left, stats):
|
||||
logstats.thread.start(stats)
|
||||
client = temp_client()
|
||||
# b = bigchaindb.Bigchain()
|
||||
|
||||
while True:
|
||||
tx = client.create()
|
||||
|
||||
stats['transactions'] += 1
|
||||
|
||||
if tx_left is not None:
|
||||
tx_left -= 1
|
||||
if tx_left == 0:
|
||||
break
|
||||
|
||||
|
||||
def run_load(args):
|
||||
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
|
||||
logger.info('Starting %s processes', args.multiprocess)
|
||||
stats = logstats.Logstats()
|
||||
logstats.thread.start(stats)
|
||||
|
||||
tx_left = None
|
||||
if args.count > 0:
|
||||
tx_left = int(args.count / args.multiprocess)
|
||||
|
||||
workers = ProcessGroup(concurrency=args.multiprocess,
|
||||
target=_run_load,
|
||||
args=(tx_left, stats.get_child()))
|
||||
workers.start()
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description='Benchmark your bigchain federation.',
|
||||
parents=[base_parser])
|
||||
|
||||
# all the commands are contained in the subparsers object,
|
||||
# the command selected by the user will be stored in `args.command`
|
||||
# that is used by the `main` function to select which other
|
||||
# function to call.
|
||||
subparsers = parser.add_subparsers(title='Commands',
|
||||
dest='command')
|
||||
|
||||
# parser for database level commands
|
||||
load_parser = subparsers.add_parser('load',
|
||||
help='Write transactions to the backlog')
|
||||
|
||||
load_parser.add_argument('-m', '--multiprocess',
|
||||
nargs='?',
|
||||
type=int,
|
||||
default=False,
|
||||
help='Spawn multiple processes to run the command, '
|
||||
'if no value is provided, the number of processes '
|
||||
'is equal to the number of cores of the host machine')
|
||||
|
||||
load_parser.add_argument('-c', '--count',
|
||||
default=0,
|
||||
type=int,
|
||||
help='Number of transactions to push. If the parameter -m '
|
||||
'is set, the count is distributed equally to all the '
|
||||
'processes')
|
||||
|
||||
start(parser, globals())
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -4,10 +4,57 @@ for ``argparse.ArgumentParser``.
|
||||
|
||||
import argparse
|
||||
import multiprocessing as mp
|
||||
import subprocess
|
||||
|
||||
import rethinkdb as r
|
||||
|
||||
import bigchaindb
|
||||
from bigchaindb.exceptions import StartupError
|
||||
from bigchaindb import db
|
||||
from bigchaindb.version import __version__
|
||||
|
||||
|
||||
def start_rethinkdb():
|
||||
"""Start RethinkDB as a child process and wait for it to be
|
||||
available.
|
||||
|
||||
Raises:
|
||||
``bigchaindb.exceptions.StartupError`` if RethinkDB cannot
|
||||
be started.
|
||||
"""
|
||||
|
||||
proc = subprocess.Popen(['rethinkdb', '--bind', 'all'],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
universal_newlines=True)
|
||||
|
||||
dbname = bigchaindb.config['database']['name']
|
||||
line = ''
|
||||
|
||||
for line in proc.stdout:
|
||||
if line.startswith('Server ready'):
|
||||
# FIXME: seems like tables are not ready when the server is ready,
|
||||
# that's why we need to query RethinkDB to know the state
|
||||
# of the database. This code assumes the tables are ready
|
||||
# when the database is ready. This seems a valid assumption.
|
||||
|
||||
try:
|
||||
conn = db.get_conn()
|
||||
# Before checking if the db is ready, we need to query
|
||||
# the server to check if it contains that db
|
||||
if r.db_list().contains(dbname).run(conn):
|
||||
r.db(dbname).wait().run(conn)
|
||||
except (r.ReqlOpFailedError, r.ReqlDriverError) as exc:
|
||||
raise StartupError('Error waiting for the database `{}` '
|
||||
'to be ready'.format(dbname)) from exc
|
||||
|
||||
return proc
|
||||
|
||||
# We are here when we exhaust the stdout of the process.
|
||||
# The last `line` contains info about the error.
|
||||
raise StartupError(line)
|
||||
|
||||
|
||||
def start(parser, scope):
|
||||
"""Utility function to execute a subcommand.
|
||||
|
||||
@ -51,7 +98,8 @@ def start(parser, scope):
|
||||
base_parser = argparse.ArgumentParser(add_help=False, prog='bigchaindb')
|
||||
|
||||
base_parser.add_argument('-c', '--config',
|
||||
help='Specify the location of the configuration file')
|
||||
help='Specify the location of the configuration file '
|
||||
'(use "-" for stdout)')
|
||||
|
||||
base_parser.add_argument('-y', '--yes', '--yes-please',
|
||||
action='store_true',
|
||||
|
@ -91,7 +91,8 @@ def file_config(filename=None):
|
||||
file at CONFIG_DEFAULT_PATH, if filename == None)
|
||||
"""
|
||||
logger.debug('On entry into file_config(), filename = {}'.format(filename))
|
||||
if not filename:
|
||||
|
||||
if filename is None:
|
||||
filename = CONFIG_DEFAULT_PATH
|
||||
|
||||
logger.debug('file_config() will try to open `{}`'.format(filename))
|
||||
|
@ -30,3 +30,7 @@ class KeypairNotFoundException(Exception):
|
||||
|
||||
class KeypairMismatchException(Exception):
|
||||
"""Raised if the private key(s) provided for signing don't match any of the curret owner(s)"""
|
||||
|
||||
class StartupError(Exception):
|
||||
"""Raised when there is an error starting up the system"""
|
||||
|
||||
|
72
deploy-cluster-aws/clusterize_confiles.py
Normal file
72
deploy-cluster-aws/clusterize_confiles.py
Normal file
@ -0,0 +1,72 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Given a directory full of default BigchainDB config files,
|
||||
transform them into config files for a cluster with proper
|
||||
keyrings, API endpoint values, etc.
|
||||
|
||||
Note: This script assumes that there is a file named hostlist.py
|
||||
containing public_dns_names = a list of the public DNS names of
|
||||
all the hosts in the cluster.
|
||||
|
||||
Usage:
|
||||
python clusterize_confiles.py <dir> <number_of_files>
|
||||
"""
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import os
|
||||
import json
|
||||
import argparse
|
||||
|
||||
from hostlist import public_dns_names
|
||||
|
||||
|
||||
# Parse the command-line arguments
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('dir',
|
||||
help='Directory containing the config files')
|
||||
parser.add_argument('number_of_files',
|
||||
help='Number of config files expected in dir',
|
||||
type=int)
|
||||
args = parser.parse_args()
|
||||
|
||||
conf_dir = args.dir
|
||||
numfiles_expected = int(args.number_of_files)
|
||||
|
||||
# Check if the number of files in conf_dir is what was expected
|
||||
conf_files = os.listdir(conf_dir)
|
||||
numfiles = len(conf_files)
|
||||
if numfiles != numfiles_expected:
|
||||
raise ValueError('There are {} files in {} but {} were expected'.
|
||||
format(numfiles, conf_dir, numfiles_expected))
|
||||
|
||||
# Make a list containing all the public keys from
|
||||
# all the config files
|
||||
pubkeys = []
|
||||
for filename in conf_files:
|
||||
file_path = os.path.join(conf_dir, filename)
|
||||
with open(file_path, 'r') as f:
|
||||
conf_dict = json.load(f)
|
||||
pubkey = conf_dict['keypair']['public']
|
||||
pubkeys.append(pubkey)
|
||||
|
||||
# Rewrite each config file, one at a time
|
||||
for i, filename in enumerate(conf_files):
|
||||
file_path = os.path.join(conf_dir, filename)
|
||||
with open(file_path, 'r') as f:
|
||||
conf_dict = json.load(f)
|
||||
# The keyring is the list of *all* public keys
|
||||
# minus the config file's own public key
|
||||
keyring = list(pubkeys)
|
||||
keyring.remove(conf_dict['keypair']['public'])
|
||||
conf_dict['keyring'] = keyring
|
||||
# Allow incoming server traffic from any IP address
|
||||
# to port 9984
|
||||
conf_dict['server']['bind'] = '0.0.0.0:9984'
|
||||
# Set the api_endpoint
|
||||
conf_dict['api_endpoint'] = 'http://' + public_dns_names[i] + \
|
||||
':9984/api/v1'
|
||||
# Delete the config file
|
||||
os.remove(file_path)
|
||||
# Write new config file with the same filename
|
||||
print('Rewriting {}'.format(file_path))
|
||||
with open(file_path, 'w') as f2:
|
||||
json.dump(conf_dict, f2)
|
44
deploy-cluster-aws/fabfile.py
vendored
44
deploy-cluster-aws/fabfile.py
vendored
@ -5,7 +5,7 @@ BigchainDB, including its storage backend (RethinkDB).
|
||||
|
||||
from __future__ import with_statement, unicode_literals
|
||||
|
||||
from fabric.api import sudo, env
|
||||
from fabric.api import sudo, env, hosts
|
||||
from fabric.api import task, parallel
|
||||
from fabric.contrib.files import sed
|
||||
from fabric.operations import run, put
|
||||
@ -32,24 +32,19 @@ newrelic_license_key = 'you_need_a_real_license_key'
|
||||
|
||||
# DON'T PUT @parallel
|
||||
@task
|
||||
def set_hosts(hosts):
|
||||
"""A helper function to change env.hosts from the
|
||||
command line.
|
||||
def set_host(host_index):
|
||||
"""A helper task to change env.hosts from the
|
||||
command line. It will only "stick" for the duration
|
||||
of the fab command that called it.
|
||||
|
||||
Args:
|
||||
hosts (str): 'one_node' or 'two_nodes'
|
||||
|
||||
host_index (int): 0, 1, 2, 3, etc.
|
||||
Example:
|
||||
fab set_hosts:one_node init_bigchaindb
|
||||
fab set_host:4 fab_task_A fab_task_B
|
||||
will set env.hosts = [public_dns_names[4]]
|
||||
but only for doing fab_task_A and fab_task_B
|
||||
"""
|
||||
if hosts == 'one_node':
|
||||
env.hosts = public_dns_names[:1]
|
||||
elif hosts == 'two_nodes':
|
||||
env.hosts = public_dns_names[:2]
|
||||
else:
|
||||
raise ValueError('Invalid input to set_hosts.'
|
||||
' Expected one_node or two_nodes.'
|
||||
' Got {}'.format(hosts))
|
||||
env.hosts = [public_dns_names[int(host_index)]]
|
||||
|
||||
|
||||
# Install base software
|
||||
@ -138,13 +133,26 @@ def configure_bigchaindb():
|
||||
run('bigchaindb -y configure', pty=False)
|
||||
|
||||
|
||||
# Send the specified configuration file to
|
||||
# the remote host and save it there in
|
||||
# ~/.bigchaindb
|
||||
# Use in conjunction with set_host()
|
||||
# No @parallel
|
||||
@task
|
||||
def send_confile(confile):
|
||||
put('confiles/' + confile, 'tempfile')
|
||||
run('mv tempfile ~/.bigchaindb')
|
||||
print('For this node, bigchaindb show-config says:')
|
||||
run('bigchaindb show-config')
|
||||
|
||||
|
||||
# Initialize BigchainDB
|
||||
# i.e. create the database, the tables,
|
||||
# the indexes, and the genesis block.
|
||||
# (This only needs to be run on one node.)
|
||||
# Call using:
|
||||
# fab set_hosts:one_node init_bigchaindb
|
||||
# (The @hosts decorator is used to make this
|
||||
# task run on only one node. See http://tinyurl.com/h9qqf3t )
|
||||
@task
|
||||
@hosts(public_dns_names[0])
|
||||
def init_bigchaindb():
|
||||
run('bigchaindb init', pty=False)
|
||||
|
||||
|
@ -14,6 +14,7 @@
|
||||
from __future__ import unicode_literals
|
||||
import sys
|
||||
import time
|
||||
import socket
|
||||
import argparse
|
||||
import botocore
|
||||
import boto3
|
||||
@ -192,8 +193,27 @@ with open('hostlist.py', 'w') as f:
|
||||
f.write('\n')
|
||||
f.write('public_dns_names = {}\n'.format(public_dns_names))
|
||||
|
||||
# Wait
|
||||
wait_time = 45
|
||||
print('Waiting {} seconds to make sure all instances are ready...'.
|
||||
format(wait_time))
|
||||
time.sleep(wait_time)
|
||||
|
||||
# For each node in the cluster, check port 22 (ssh) until it's reachable
|
||||
for instance in instances_with_tag:
|
||||
ip_address = instance.public_ip_address
|
||||
# Create a socket
|
||||
# Address Family: AF_INET (means IPv4)
|
||||
# Type: SOCK_STREAM (means connection-oriented TCP protocol)
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
print('Attempting to connect to {} on port 22 (ssh)...'.
|
||||
format(ip_address))
|
||||
unreachable = True
|
||||
while unreachable:
|
||||
try:
|
||||
# Open a connection to the remote node on port 22
|
||||
s.connect((ip_address, 22))
|
||||
except socket.error as e:
|
||||
print(' Socket error: {}'.format(e))
|
||||
print(' Trying again in 3 seconds')
|
||||
time.sleep(3.0)
|
||||
else:
|
||||
print(' Port 22 is reachable!')
|
||||
s.shutdown(socket.SHUT_WR)
|
||||
s.close()
|
||||
unreachable = False
|
||||
|
40
deploy-cluster-aws/make_confiles.sh
Executable file
40
deploy-cluster-aws/make_confiles.sh
Executable file
@ -0,0 +1,40 @@
|
||||
#! /bin/bash
|
||||
|
||||
# The set -e option instructs bash to immediately exit
|
||||
# if any command has a non-zero exit status
|
||||
set -e
|
||||
|
||||
function printErr()
|
||||
{
|
||||
echo "usage: ./make_confiles.sh <dir> <number_of_files>"
|
||||
echo "No argument $1 supplied"
|
||||
}
|
||||
|
||||
if [ -z "$1" ]; then
|
||||
printErr "<dir>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$2" ]; then
|
||||
printErr "<number_of_files>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
CONFDIR=$1
|
||||
NUMFILES=$2
|
||||
|
||||
# If $CONFDIR exists, remove it
|
||||
if [ -d "$CONFDIR" ]; then
|
||||
rm -rf $CONFDIR
|
||||
fi
|
||||
|
||||
# Create $CONFDIR
|
||||
mkdir $CONFDIR
|
||||
|
||||
# Use the bigchaindb configure command to create
|
||||
# $NUMFILES BigchainDB config files in $CONFDIR
|
||||
for (( i=0; i<$NUMFILES; i++ )); do
|
||||
CONPATH=$CONFDIR"/bcdb_conf"$i
|
||||
echo "Writing "$CONPATH
|
||||
bigchaindb -y -c $CONPATH configure
|
||||
done
|
@ -21,7 +21,7 @@ if [ -z "$2" ]; then
|
||||
fi
|
||||
|
||||
TAG=$1
|
||||
NODES=$2
|
||||
NUM_NODES=$2
|
||||
|
||||
# If they don't include a third argument (<pypi_or_branch>)
|
||||
# then assume BRANCH = "pypi" by default
|
||||
@ -38,6 +38,13 @@ if [ ! -f "pem/bigchaindb.pem" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check for the confiles directory
|
||||
if [ ! -d "confiles" ]; then
|
||||
echo "Directory confiles is needed but does not exist"
|
||||
echo "See make_confiles.sh to find out how to make it"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Change the file permissions on pem/bigchaindb.pem
|
||||
# so that the owner can read it, but that's all
|
||||
chmod 0400 pem/bigchaindb.pem
|
||||
@ -52,7 +59,7 @@ chmod 0400 pem/bigchaindb.pem
|
||||
# 5. writes the shellscript add2known_hosts.sh
|
||||
# 6. (over)writes a file named hostlist.py
|
||||
# containing a list of all public DNS names.
|
||||
python launch_ec2_nodes.py --tag $TAG --nodes $NODES
|
||||
python launch_ec2_nodes.py --tag $TAG --nodes $NUM_NODES
|
||||
|
||||
# Make add2known_hosts.sh executable then execute it.
|
||||
# This adds remote keys to ~/.ssh/known_hosts
|
||||
@ -86,22 +93,38 @@ else
|
||||
fi
|
||||
|
||||
# Configure BigchainDB on all nodes
|
||||
fab configure_bigchaindb
|
||||
|
||||
# TODO: Get public keys from all nodes
|
||||
# The idea is to send a bunch of locally-created configuration
|
||||
# files out to each of the instances / nodes.
|
||||
|
||||
# Assume a set of $NUM_NODES BigchaindB config files
|
||||
# already exists in the confiles directory.
|
||||
# One can create a set using a command like
|
||||
# ./make_confiles.sh confiles $NUM_NODES
|
||||
# (We can't do that here now because this virtual environment
|
||||
# is a Python 2 environment that may not even have
|
||||
# bigchaindb installed, so bigchaindb configure can't be called)
|
||||
|
||||
# TODO: Add list of public keys to keyring of all nodes
|
||||
# Transform the config files in the confiles directory
|
||||
# to have proper keyrings, api_endpoint values, etc.
|
||||
python clusterize_confiles.py confiles $NUM_NODES
|
||||
|
||||
# Send one of the config files to each instance
|
||||
for (( HOST=0 ; HOST<$NUM_NODES ; HOST++ )); do
|
||||
CONFILE="bcdb_conf"$HOST
|
||||
echo "Sending "$CONFILE
|
||||
fab set_host:$HOST send_confile:$CONFILE
|
||||
done
|
||||
|
||||
# Send a "bigchaindb init" command to one node
|
||||
# to initialize the BigchainDB database
|
||||
# i.e. create the database, the tables,
|
||||
# the indexes, and the genesis block.
|
||||
fab set_hosts:one_node init_bigchaindb
|
||||
# Initialize BigchainDB (i.e. Create the RethinkDB database,
|
||||
# the tables, the indexes, and genesis glock). Note that
|
||||
# this will only be sent to one of the nodes, see the
|
||||
# definition of init_bigchaindb() in fabfile.py to see why.
|
||||
fab init_bigchaindb
|
||||
|
||||
# Start BigchainDB on all the nodes using "screen"
|
||||
fab start_bigchaindb
|
||||
|
||||
# cleanup
|
||||
rm add2known_hosts.sh
|
||||
# rm -rf temp_confs
|
@ -1,6 +1,6 @@
|
||||
# The BigchainDB Command Line Interface (CLI)
|
||||
|
||||
There are some command-line commands for working with BigchainDB: `bigchaindb` and `bigchaindb-benchmark`. This section provides an overview of those commands.
|
||||
The command to interact with BigchainDB is `bigchaindb`. This section provides an overview of the command.
|
||||
|
||||
## bigchaindb
|
||||
|
||||
@ -37,10 +37,9 @@ This command drops (erases) the RethinkDB database. You will be prompted to make
|
||||
This command starts BigchainDB. It always begins by trying a `bigchaindb init` first. See the note in the documentation for `bigchaindb init`.
|
||||
|
||||
|
||||
## bigchaindb-benchmark
|
||||
### bigchaindb load
|
||||
|
||||
The `bigchaindb-benchmark` command is used to run benchmarking tests. You can learn more about it using:
|
||||
This command is used to run benchmarking tests. You can learn more about it using:
|
||||
```text
|
||||
$ bigchaindb-benchmark -h
|
||||
$ bigchaindb-benchmark load -h
|
||||
$ bigchaindb load -h
|
||||
```
|
||||
|
@ -83,16 +83,35 @@ Add some rules for Inbound traffic:
|
||||
**Note: These rules are extremely lax! They're meant to make testing easy.** You'll want to tighten them up if you intend to have a secure cluster. For example, Source = 0.0.0.0/0 is [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) for "allow this traffic to come from _any_ IP address."
|
||||
|
||||
|
||||
## Deployment
|
||||
## AWS Deployment
|
||||
|
||||
Here's an example of how one could launch a BigchainDB cluster of four nodes tagged `wrigley` on AWS:
|
||||
### AWS Deployment Step 1
|
||||
|
||||
Suppose _N_ is the number of nodes you want in your BigchainDB cluster. If you already have a set of _N_ BigchainDB configuration files in the `deploy-cluster-aws/confiles` directory, then you can jump to step 2. To create such a set, you can do something like:
|
||||
```text
|
||||
# in a Python 3 virtual environment where bigchaindb is installed
|
||||
cd bigchaindb
|
||||
cd deploy-cluster-aws
|
||||
./startup.sh wrigley 4 pypi
|
||||
./make_confiles.sh confiles 3
|
||||
```
|
||||
|
||||
The `pypi` on the end means that it will install the latest (stable) `bigchaindb` package from the [Python Package Index (PyPI)](https://pypi.python.org/pypi). That is, on each instance, BigchainDB is installed using `pip install bigchaindb`.
|
||||
That will create three (3) _default_ BigchainDB configuration files in the `deploy-cluster-aws/confiles` directory (which will be created if it doesn't already exist). The three files will be named `bcdb_conf0`, `bcdb_conf1`, and `bcdb_conf2`.
|
||||
|
||||
You can look inside those files if you're curious. In step 2, they'll be modified. For example, the default keyring is an empty list. In step 2, the deployment script automatically changes the keyring of each node to be a list of the public keys of all other nodes. Other changes are also made.
|
||||
|
||||
### AWS Deployment Step 2
|
||||
|
||||
Step 2 is to launch the nodes ("instances") on AWS, to install all the necessary software on them, configure the software, run the software, and more.
|
||||
|
||||
Here's an example of how one could launch a BigchainDB cluster of three (3) nodes tagged `wrigley` on AWS:
|
||||
```text
|
||||
# in a Python 2.5-2.7 virtual environment where fabric, boto3, etc. are installed
|
||||
cd bigchaindb
|
||||
cd deploy-cluster-aws
|
||||
./startup.sh wrigley 3 pypi
|
||||
```
|
||||
|
||||
The `pypi` on the end means that it will install the latest (stable) `bigchaindb` package from the [Python Package Index (PyPI)](https://pypi.python.org/pypi). That is, on each node, BigchainDB is installed using `pip install bigchaindb`.
|
||||
|
||||
`startup.sh` is a Bash script which calls some Python and Fabric scripts. The usage is:
|
||||
```text
|
||||
@ -101,20 +120,7 @@ The `pypi` on the end means that it will install the latest (stable) `bigchaindb
|
||||
|
||||
The first two arguments are self-explanatory. The third argument can be `pypi` or the name of a local Git branch (e.g. `master` or `feat/3752/quote-asimov-on-tuesdays`). If you don't include a third argument, then `pypi` will be assumed by default.
|
||||
|
||||
Here's what the `startup.sh` script does; it:
|
||||
|
||||
0. allocates more elastic IP addresses if necessary,
|
||||
1. launches the specified number of nodes (instances) on Amazon EC2,
|
||||
2. tags them with the specified tag,
|
||||
3. waits until those instances exist and are running,
|
||||
4. for each instance, it associates an elastic IP address with that instance,
|
||||
5. adds remote keys to `~/.ssh/known_hosts`,
|
||||
6. (re)creates the RethinkDB configuration file `conf/rethinkdb.conf`,
|
||||
7. installs base (prerequisite) software on all instances,
|
||||
8. installs RethinkDB on all instances,
|
||||
9. installs BigchainDB on all instances,
|
||||
10. initializes the BigchainDB database,
|
||||
11. starts BigchainDB on all instances.
|
||||
If you're curious what the `startup.sh` script does, the source code has lots of explanatory comments, so it's quite easy to read. Here's a link to the latest version on GitHub: [`startup.sh`](https://github.com/bigchaindb/bigchaindb/blob/master/deploy-cluster-aws/startup.sh)
|
||||
|
||||
It should take a few minutes for the deployment to finish. If you run into problems, see the section on Known Deployment Issues below.
|
||||
|
||||
|
@ -111,9 +111,110 @@ If it's the first time you've run `bigchaindb start`, then it creates the databa
|
||||
|
||||
**NOT for Production Use**
|
||||
|
||||
For those who like using Docker and wish to experiment with BigchainDB in non-production environments, we currently maintain a `dockerfile` that can be used to build an image for `bigchaindb`, along with a `docker-compose.yml` file to manage a "standalone node", consisting mainly of two containers: one for RethinkDB, and another for BigchainDB.
|
||||
For those who like using Docker and wish to experiment with BigchainDB in
|
||||
non-production environments, we currently maintain a Docker image and a
|
||||
`Dockerfile` that can be used to build an image for `bigchaindb`.
|
||||
|
||||
Assuming you have `docker` and `docker-compose` installed, you would proceed as follows.
|
||||
### Pull and Run the Image from Docker Hub
|
||||
|
||||
Assuming you have Docker installed, you would proceed as follows.
|
||||
|
||||
In a terminal shell, pull the latest version of the BigchainDB Docker image using:
|
||||
```text
|
||||
docker pull bigchaindb/bigchaindb:latest
|
||||
```
|
||||
|
||||
then do a one-time configuration step to create the config file; we will use
|
||||
the `-y` option to accept all the default values. The configuration file will
|
||||
be stored in a file on your host machine at `~/bigchaindb_docker/.bigchaindb`:
|
||||
|
||||
```text
|
||||
$ docker run --rm -v "$HOME/bigchaindb_docker:/data" -ti \
|
||||
bigchaindb/bigchaindb:latest -y configure
|
||||
Generating keypair
|
||||
Configuration written to /data/.bigchaindb
|
||||
Ready to go!
|
||||
```
|
||||
|
||||
Let's analyze that command:
|
||||
|
||||
* `docker run` tells Docker to run some image
|
||||
* `--rm` remove the container once we are done
|
||||
* `-v "$HOME/bigchaindb_docker:/data"` map the host directory
|
||||
`$HOME/bigchaindb_docker` to the container directory `/data`;
|
||||
this allows us to have the data persisted on the host machine,
|
||||
you can read more in the [official Docker
|
||||
documentation](https://docs.docker.com/engine/userguide/containers/dockervolumes/#mount-a-host-directory-as-a-data-volume)
|
||||
* `-t` allocate a pseudo-TTY
|
||||
* `-i` keep STDIN open even if not attached
|
||||
* `bigchaindb/bigchaindb:latest` the image to use
|
||||
* `-y configure` execute the `configure` sub-command (of the `bigchaindb` command) inside the container, with the `-y` option to automatically use all the default config values
|
||||
|
||||
|
||||
After configuring the system, you can run BigchainDB with the following command:
|
||||
|
||||
```text
|
||||
$ docker run -v "$HOME/bigchaindb_docker:/data" -d \
|
||||
--name bigchaindb \
|
||||
-p "58080:8080" -p "59984:9984" \
|
||||
bigchaindb/bigchaindb:latest start
|
||||
```
|
||||
|
||||
The command is slightly different from the previous one, the differences are:
|
||||
|
||||
* `-d` run the container in the background
|
||||
* `--name bigchaindb` give a nice name to the container so it's easier to
|
||||
refer to it later
|
||||
* `-p "58080:8080"` map the host port `58080` to the container port `8080`
|
||||
(the RethinkDB admin interface)
|
||||
* `-p "59984:9984"` map the host port `59984` to the container port `9984`
|
||||
(the BigchainDB API server)
|
||||
* `start` start the BigchainDB service
|
||||
|
||||
Another way to publish the ports exposed by the container is to use the `-P` (or
|
||||
`--publish-all`) option. This will publish all exposed ports to random ports. You can
|
||||
always run `docker ps` to check the random mapping.
|
||||
|
||||
You can also access the RethinkDB dashboard at
|
||||
[http://localhost:58080/](http://localhost:58080/)
|
||||
|
||||
If that doesn't work, then replace `localhost` with the IP or hostname of the
|
||||
machine running the Docker engine. If you are running docker-machine (e.g. on
|
||||
Mac OS X) this will be the IP of the Docker machine (`docker-machine ip
|
||||
machine_name`).
|
||||
|
||||
#### Load Testing with Docker
|
||||
|
||||
Now that we have BigchainDB running in the Docker container named `bigchaindb`, we can
|
||||
start another BigchainDB container to generate a load test for it.
|
||||
|
||||
First, make sure the container named `bigchaindb` is still running. You can check that using:
|
||||
```text
|
||||
docker ps
|
||||
```
|
||||
|
||||
You should see a container named `bigchaindb` in the list.
|
||||
|
||||
You can load test the BigchainDB running in that container by running the `bigchaindb load` command in a second container:
|
||||
|
||||
```text
|
||||
$ docker run --rm -v "$HOME/bigchaindb_docker:/data" -ti \
|
||||
--link bigchaindb \
|
||||
bigchaindb/bigchaindb:latest load
|
||||
```
|
||||
|
||||
Note the `--link` option to link to the first container (named `bigchaindb`).
|
||||
|
||||
Aside: The `bigchaindb load` command has several options (e.g. `-m`). You can read more about it in [the documentation about the BigchainDB command line interface](bigchaindb-cli.html).
|
||||
|
||||
If you look at the RethinkDB dashboard (in your web browser), you should see the effects of the load test. You can also see some effects in the Docker logs using:
|
||||
```text
|
||||
$ docker logs -f bigchaindb
|
||||
```
|
||||
|
||||
### Building Your Own Image
|
||||
|
||||
Assuming you have Docker installed, you would proceed as follows.
|
||||
|
||||
In a terminal shell:
|
||||
```text
|
||||
@ -122,41 +223,7 @@ $ git clone git@github.com:bigchaindb/bigchaindb.git
|
||||
|
||||
Build the Docker image:
|
||||
```text
|
||||
$ docker-compose build
|
||||
$ docker build --tag local-bigchaindb .
|
||||
```
|
||||
|
||||
then do a one-time configuration step to create the config file; it will be
|
||||
stored on your host machine under `~/.bigchaindb_docker/config`:
|
||||
```text
|
||||
$ docker-compose run --rm bigchaindb bigchaindb configure
|
||||
Starting bigchaindb_rethinkdb-data_1
|
||||
Generating keypair
|
||||
API Server bind? (default `localhost:9984`):
|
||||
Database host? (default `localhost`): rethinkdb
|
||||
Database port? (default `28015`):
|
||||
Database name? (default `bigchain`):
|
||||
Statsd host? (default `localhost`):
|
||||
Statsd port? (default `8125`):
|
||||
Statsd rate? (default `0.01`):
|
||||
Ready to go!
|
||||
```
|
||||
|
||||
As shown above, make sure that you set the database and statsd hosts to their
|
||||
corresponding service names (`rethinkdb`, `statsd`), defined in `docker-compose.yml`
|
||||
and `docker-compose-monitor.yml`.
|
||||
|
||||
You can then start it up (in the background, as a daemon) using:
|
||||
```text
|
||||
$ docker-compose up -d
|
||||
```
|
||||
|
||||
then you can load test transactions via:
|
||||
```text
|
||||
$ docker exec -it docker-bigchaindb bigchaindb-benchmark load -m
|
||||
```
|
||||
|
||||
If you're on Linux, you can probably view the RethinkDB dashboard at:
|
||||
|
||||
[http://localhost:58080/](http://localhost:58080/)
|
||||
|
||||
If that doesn't work, then replace `localhost` with the IP or hostname of the machine running the Docker engine. If you are running docker-machine (e.g., on Mac OS X), this will be the IP of the Docker machine (`docker-machine ip machine_name`).
|
||||
Now you can use your own image to run BigchainDB containers.
|
||||
|
@ -22,11 +22,11 @@ then point a browser tab to:
|
||||
|
||||
The login and password are `admin` by default. If BigchainDB is running and processing transactions, you should see analytics—if not, [start BigchainDB](installing-server.html#run-bigchaindb) and load some test transactions:
|
||||
```text
|
||||
$ bigchaindb-benchmark load
|
||||
$ bigchaindb load
|
||||
```
|
||||
|
||||
then refresh the page after a few seconds.
|
||||
|
||||
If you're not interested in monitoring, don't worry: BigchainDB will function just fine without any monitoring setup.
|
||||
|
||||
Feel free to modify the [custom Grafana dashboard](https://github.com/rhsimplex/grafana-bigchaindb-docker/blob/master/bigchaindb_dashboard.js) to your liking!
|
||||
Feel free to modify the [custom Grafana dashboard](https://github.com/rhsimplex/grafana-bigchaindb-docker/blob/master/bigchaindb_dashboard.js) to your liking!
|
||||
|
@ -18,16 +18,3 @@ $ python setup.py test
|
||||
|
||||
(Aside: How does the above command work? The documentation for [pytest-runner](https://pypi.python.org/pypi/pytest-runner) explains. We use [pytest](http://pytest.org/latest/) to write all unit tests.)
|
||||
|
||||
### Using docker-compose to Run the Tests
|
||||
|
||||
You can also use `docker-compose` to run the unit tests. (You don't have to start RethinkDB first: `docker-compose` does that on its own, when it reads the `docker-compose.yml` file.)
|
||||
|
||||
First, build the images (~once), using:
|
||||
```text
|
||||
$ docker-compose build
|
||||
```
|
||||
|
||||
then run the unit tests using:
|
||||
```text
|
||||
$ docker-compose run --rm bigchaindb py.test -v
|
||||
```
|
||||
|
5
setup.py
5
setup.py
@ -65,15 +65,14 @@ setup(
|
||||
|
||||
entry_points={
|
||||
'console_scripts': [
|
||||
'bigchaindb=bigchaindb.commands.bigchain:main',
|
||||
'bigchaindb-benchmark=bigchaindb.commands.bigchain_benchmark:main'
|
||||
'bigchaindb=bigchaindb.commands.bigchain:main'
|
||||
],
|
||||
'bigchaindb.consensus': [
|
||||
'default=bigchaindb.consensus:BaseConsensusRules'
|
||||
]
|
||||
},
|
||||
install_requires=[
|
||||
'rethinkdb==2.2.0.post4',
|
||||
'rethinkdb==2.3.0',
|
||||
'pysha3==0.3',
|
||||
'pytz==2015.7',
|
||||
'cryptoconditions==0.2.2',
|
||||
|
@ -1,4 +1,5 @@
|
||||
import json
|
||||
from unittest.mock import Mock, patch
|
||||
from argparse import Namespace
|
||||
from pprint import pprint
|
||||
import copy
|
||||
@ -62,10 +63,22 @@ def mock_bigchaindb_backup_config(monkeypatch):
|
||||
|
||||
def test_bigchain_run_start(mock_run_configure, mock_processes_start, mock_db_init_with_existing_db):
|
||||
from bigchaindb.commands.bigchain import run_start
|
||||
args = Namespace(config=None, yes=True)
|
||||
args = Namespace(start_rethinkdb=False, config=None, yes=True)
|
||||
run_start(args)
|
||||
|
||||
|
||||
@patch('bigchaindb.commands.utils.start_rethinkdb')
|
||||
def test_bigchain_run_start_with_rethinkdb(mock_start_rethinkdb,
|
||||
mock_run_configure,
|
||||
mock_processes_start,
|
||||
mock_db_init_with_existing_db):
|
||||
from bigchaindb.commands.bigchain import run_start
|
||||
args = Namespace(start_rethinkdb=True, config=None, yes=True)
|
||||
run_start(args)
|
||||
|
||||
mock_start_rethinkdb.assert_called_with()
|
||||
|
||||
|
||||
@pytest.mark.skipif(reason="BigchainDB doesn't support the automatic creation of a config file anymore")
|
||||
def test_bigchain_run_start_assume_yes_create_default_config(monkeypatch, mock_processes_start,
|
||||
mock_generate_key_pair, mock_db_init_with_existing_db):
|
||||
@ -173,7 +186,7 @@ def test_run_configure_when_config_does_not_exist(monkeypatch,
|
||||
mock_bigchaindb_backup_config):
|
||||
from bigchaindb.commands.bigchain import run_configure
|
||||
monkeypatch.setattr('os.path.exists', lambda path: False)
|
||||
monkeypatch.setattr('builtins.input', lambda question: '\n')
|
||||
monkeypatch.setattr('builtins.input', lambda: '\n')
|
||||
args = Namespace(config='foo', yes=True)
|
||||
return_value = run_configure(args)
|
||||
assert return_value is None
|
||||
@ -189,9 +202,26 @@ def test_run_configure_when_config_does_exist(monkeypatch,
|
||||
|
||||
from bigchaindb.commands.bigchain import run_configure
|
||||
monkeypatch.setattr('os.path.exists', lambda path: True)
|
||||
monkeypatch.setattr('builtins.input', lambda question: '\n')
|
||||
monkeypatch.setattr('builtins.input', lambda: '\n')
|
||||
monkeypatch.setattr('bigchaindb.config_utils.write_config', mock_write_config)
|
||||
|
||||
args = Namespace(config='foo', yes=None)
|
||||
run_configure(args)
|
||||
assert value == {}
|
||||
|
||||
|
||||
@patch('subprocess.Popen')
|
||||
def test_start_rethinkdb_returns_a_process_when_successful(mock_popen):
|
||||
from bigchaindb.commands import utils
|
||||
mock_popen.return_value = Mock(stdout=['Server ready'])
|
||||
assert utils.start_rethinkdb() is mock_popen.return_value
|
||||
|
||||
|
||||
@patch('subprocess.Popen')
|
||||
def test_start_rethinkdb_exits_when_cannot_start(mock_popen):
|
||||
from bigchaindb import exceptions
|
||||
from bigchaindb.commands import utils
|
||||
mock_popen.return_value = Mock(stdout=['Nopety nope'])
|
||||
with pytest.raises(exceptions.StartupError):
|
||||
utils.start_rethinkdb()
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user