resolve merge conflicts

This commit is contained in:
ryan 2016-05-12 11:36:31 +02:00
commit b772c1f2f4
41 changed files with 1172 additions and 315 deletions

2
.gitignore vendored
View File

@ -72,3 +72,5 @@ target/
deploy-cluster-aws/conf/rethinkdb.conf
deploy-cluster-aws/hostlist.py
deploy-cluster-aws/confiles/
deploy-cluster-aws/client_confile
benchmarking-tests/hostlist.py

View File

@ -12,10 +12,10 @@ A scalable blockchain database. [The whitepaper](https://www.bigchaindb.com/whit
## Quick Start
### [Install and Run BigchainDB Server](http://bigchaindb.readthedocs.org/en/master/installing-server.html)
### [Run BigchainDB with Docker](http://bigchaindb.readthedocs.org/en/master/installing-server.html#run-bigchaindb-with-docker)
### [The Python Server API by Example](http://bigchaindb.readthedocs.org/en/master/python-server-api-examples.html)
### [The Python Driver API by Example](http://bigchaindb.readthedocs.org/en/master/python-driver-api-examples.html)
### [Install and Run BigchainDB Server](http://bigchaindb.readthedocs.io/en/latest/installing-server.html)
### [Run BigchainDB with Docker](http://bigchaindb.readthedocs.io/en/latest/installing-server.html#run-bigchaindb-with-docker)
### [The Python Server API by Example](http://bigchaindb.readthedocs.io/en/latest/python-server-api-examples.html)
### [The Python Driver API by Example](http://bigchaindb.readthedocs.io/en/latest/python-driver-api-examples.html)
## Links for Everyone
* [BigchainDB.com](https://www.bigchaindb.com/) - the main BigchainDB website, including newsletter signup
@ -26,7 +26,7 @@ A scalable blockchain database. [The whitepaper](https://www.bigchaindb.com/whit
* [Google Group](https://groups.google.com/forum/#!forum/bigchaindb)
## Links for Developers
* [Documentation](http://bigchaindb.readthedocs.org/en/master/) - for developers
* [Documentation](http://bigchaindb.readthedocs.io/en/latest/) - for developers
* [CONTRIBUTING.md](CONTRIBUTING.md) - how to contribute
* [Community guidelines](CODE_OF_CONDUCT.md)
* [Open issues](https://github.com/bigchaindb/bigchaindb/issues)

View File

@ -0,0 +1,3 @@
# Benchmarking tests
This folder contains utility files and test-case folders for benchmarking the performance of a BigchainDB federation.

View File

@ -0,0 +1,134 @@
import multiprocessing as mp
import uuid
import json
import argparse
import csv
import time
import logging
import rethinkdb as r
from os.path import expanduser
from bigchaindb import Bigchain
from bigchaindb.util import ProcessGroup
from bigchaindb.commands import utils
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def create_write_transaction(tx_left):
b = Bigchain()
while tx_left > 0:
# use uuid to prevent duplicate transactions (transactions with the same hash)
tx = b.create_transaction(b.me, b.me, None, 'CREATE',
payload={'msg': str(uuid.uuid4())})
tx_signed = b.sign_transaction(tx, b.me_private)
b.write_transaction(tx_signed)
tx_left -= 1
def run_add_backlog(args):
tx_left = args.num_transactions // mp.cpu_count()
workers = ProcessGroup(target=create_write_transaction, args=(tx_left,))
workers.start()
def run_set_statsd_host(args):
with open(expanduser('~') + '/.bigchaindb', 'r') as f:
conf = json.load(f)
conf['statsd']['host'] = args.statsd_host
with open(expanduser('~') + '/.bigchaindb', 'w') as f:
json.dump(conf, f)
def run_gather_metrics(args):
# setup a rethinkdb connection
conn = r.connect(args.bigchaindb_host, 28015, 'bigchain')
# setup csv writer
csv_file = open(args.csvfile, 'w')
csv_writer = csv.writer(csv_file)
# query for the number of transactions on the backlog
num_transactions = r.table('backlog').count().run(conn)
num_transactions_received = 0
initial_time = None
logger.info('Starting to gather metrics. {} transactions in the backlog'.format(num_transactions))
logger.info('This process should exit automatically. '
'If this does not happen you can exit at any time using Ctrl-C'
' saving all the metrics gathered up to this point.')
logger.info('\t{:<20} {:<20} {:<20} {:<20}'.format('timestamp', 'tx in block',
'tx/s', '% complete'))
# listen to the changefeed
try:
for change in r.table('bigchain').changes().run(conn):
# check only for new blocks
if change['old_val'] is None:
block_num_transactions = len(change['new_val']['block']['transactions'])
time_now = time.time()
csv_writer.writerow([str(time_now), str(block_num_transactions)])
# log statistics
if initial_time is None:
initial_time = time_now
num_transactions_received += block_num_transactions
elapsed_time = time_now - initial_time
percent_complete = round((num_transactions_received / num_transactions) * 100)
if elapsed_time != 0:
transactions_per_second = round(num_transactions_received / elapsed_time)
else:
transactions_per_second = float('nan')
logger.info('\t{:<20} {:<20} {:<20} {:<20}'.format(time_now, block_num_transactions,
transactions_per_second, percent_complete))
if (num_transactions - num_transactions_received) == 0:
break
except KeyboardInterrupt:
logger.info('Interrupted. Exiting early...')
finally:
# close files
csv_file.close()
def main():
parser = argparse.ArgumentParser(description='BigchainDB benchmarking utils')
subparsers = parser.add_subparsers(title='Commands', dest='command')
# add transactions to backlog
backlog_parser = subparsers.add_parser('add-backlog',
help='Add transactions to the backlog')
backlog_parser.add_argument('num_transactions', metavar='num_transactions', type=int, default=0,
help='Number of transactions to add to the backlog')
# set statsd host
statsd_parser = subparsers.add_parser('set-statsd-host',
help='Set statsd host')
statsd_parser.add_argument('statsd_host', metavar='statsd_host', default='localhost',
help='Hostname of the statsd server')
# metrics
metrics_parser = subparsers.add_parser('gather-metrics',
help='Gather metrics to a csv file')
metrics_parser.add_argument('-b', '--bigchaindb-host',
required=True,
help='BigchainDB node hostname to connect to for gathering cluster metrics')
metrics_parser.add_argument('-c', '--csvfile',
required=True,
help='Filename to save the metrics')
utils.start(parser, globals())
if __name__ == '__main__':
main()
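Taken together, the subcommands above form a small CLI. For context, a typical sequence on a node might look like this (hostnames and counts are illustrative):
```bash
python3 benchmark_utils.py add-backlog 10000
python3 benchmark_utils.py set-statsd-host statsd.example.com
python3 benchmark_utils.py gather-metrics -b node0.example.com -c metrics.csv
```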

53
benchmarking-tests/fabfile.py vendored Normal file
View File

@ -0,0 +1,53 @@
from __future__ import with_statement, unicode_literals
from fabric.api import sudo, env, hosts
from fabric.api import task, parallel
from fabric.contrib.files import sed
from fabric.operations import run, put
from fabric.context_managers import settings
from hostlist import public_dns_names
# Ignore known_hosts
# http://docs.fabfile.org/en/1.10/usage/env.html#disable-known-hosts
env.disable_known_hosts = True
# What remote servers should Fabric connect to? With what usernames?
env.user = 'ubuntu'
env.hosts = public_dns_names
# SSH key files to try when connecting:
# http://docs.fabfile.org/en/1.10/usage/env.html#key-filename
env.key_filename = 'pem/bigchaindb.pem'
@task
@parallel
def put_benchmark_utils():
put('benchmark_utils.py')
@task
@parallel
def set_statsd_host(statsd_host='localhost'):
run('python3 benchmark_utils.py set-statsd-host {}'.format(statsd_host))
print('updated configuration')
run('bigchaindb show-config')
@task
@parallel
def prepare_backlog(num_transactions=10000):
run('python3 benchmark_utils.py add-backlog {}'.format(num_transactions))
@task
@parallel
def start_bigchaindb():
run('screen -d -m bigchaindb start &', pty=False)
@task
@parallel
def kill_bigchaindb():
run('killall bigchaindb')

View File

@ -0,0 +1,21 @@
# Transactions per second
Measure how fast transactions are committed to the _bigchain_ (via newly created blocks), starting from a pre-filled backlog.
1. Deploy an AWS cluster: http://bigchaindb.readthedocs.io/en/latest/deploy-on-aws.html
2. Make a symbolic link to hostlist.py: `ln -s ../deploy-cluster-aws/hostlist.py .`
3. Make a symbolic link to bigchaindb.pem:
```bash
mkdir pem
cd pem
ln -s ../deploy-cluster-aws/pem/bigchaindb.pem .
```
Then:
```bash
fab put_benchmark_utils
fab set_statsd_host:<hostname of the statsd server>
fab prepare_backlog:<num txs per node> # wait for process to finish
fab start_bigchaindb
```

View File

@ -7,6 +7,7 @@ import rethinkdb as r
import bigchaindb
from bigchaindb import Bigchain
from bigchaindb.monitor import Monitor
from bigchaindb.util import ProcessGroup
logger = logging.getLogger(__name__)
@ -180,6 +181,8 @@ class Block(object):
# add results to the queue
for result in initial_results:
q_initial.put(result)
for i in range(mp.cpu_count()):
q_initial.put('stop')
return q_initial
@ -203,17 +206,21 @@ class Block(object):
self._start()
logger.info('exiting block module...')
def kill(self):
for i in range(mp.cpu_count()):
self.q_new_transaction.put('stop')
def _start(self):
"""
Initialize, spawn, and start the processes
"""
# initialize the processes
p_filter = mp.Process(name='filter_transactions', target=self.filter_by_assignee)
p_validate = mp.Process(name='validate_transactions', target=self.validate_transactions)
p_blocks = mp.Process(name='create_blocks', target=self.create_blocks)
p_write = mp.Process(name='write_blocks', target=self.write_blocks)
p_delete = mp.Process(name='delete_transactions', target=self.delete_transactions)
p_filter = ProcessGroup(name='filter_transactions', target=self.filter_by_assignee)
p_validate = ProcessGroup(name='validate_transactions', target=self.validate_transactions)
p_blocks = ProcessGroup(name='create_blocks', target=self.create_blocks)
p_write = ProcessGroup(name='write_blocks', target=self.write_blocks)
p_delete = ProcessGroup(name='delete_transactions', target=self.delete_transactions)
# start the processes
p_filter.start()
@ -222,9 +229,3 @@ class Block(object):
p_write.start()
p_delete.start()
# join processes
p_filter.join()
p_validate.join()
p_blocks.join()
p_write.join()
p_delete.join()
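The switch from `mp.Process` to `ProcessGroup`, plus the `'stop'` entries that `kill()` pushes onto the queue, follows the usual multiprocessing poison-pill pattern: one sentinel per worker, so every process can drain the queue and exit cleanly. A minimal self-contained sketch of that pattern (not BigchainDB code):
```python
import multiprocessing as mp

def worker(q):
    # Consume items until the 'stop' sentinel arrives, then exit cleanly.
    while True:
        item = q.get()
        if item == 'stop':
            break
        # ... process item ...

if __name__ == '__main__':
    q = mp.Queue()
    procs = [mp.Process(target=worker, args=(q,)) for _ in range(mp.cpu_count())]
    for p in procs:
        p.start()
    for _ in range(mp.cpu_count()):
        q.put('stop')   # one sentinel per worker
    for p in procs:
        p.join()
```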

View File

@ -13,6 +13,7 @@ import builtins
import logstats
import rethinkdb as r
import bigchaindb
import bigchaindb.config_utils
@ -203,6 +204,12 @@ def run_load(args):
workers.start()
def run_set_shards(args):
b = bigchaindb.Bigchain()
r.table('bigchain').reconfigure(shards=args.num_shards, replicas=1).run(b.conn)
r.table('backlog').reconfigure(shards=args.num_shards, replicas=1).run(b.conn)
def main():
parser = argparse.ArgumentParser(
description='Control your BigchainDB node.',
@ -243,6 +250,13 @@ def main():
subparsers.add_parser('start',
help='Start BigchainDB')
# parser for configuring the number of shards
sharding_parser = subparsers.add_parser('set-shards',
help='Configure number of shards')
sharding_parser.add_argument('num_shards', metavar='num_shards', type=int, default=1,
help='Number of shards')
load_parser = subparsers.add_parser('load',
help='Write transactions to the backlog')
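`utils.start(parser, globals())`, used here and in `benchmark_utils.py`, presumably dispatches each subcommand to a `run_<name>` function with dashes mapped to underscores; that would be why `set-shards` pairs with `run_set_shards`. A rough sketch of that assumed convention (not the actual implementation):
```python
def start(parser, scope):
    # Map e.g. 'set-shards' to scope['run_set_shards'] and call it.
    args = parser.parse_args()
    return scope['run_' + args.command.replace('-', '_')](args)
```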

View File

@ -16,6 +16,7 @@ import copy
import json
import logging
import collections
from functools import lru_cache
from pkg_resources import iter_entry_points, ResolutionError
@ -218,6 +219,7 @@ def autoconfigure(filename=None, config=None, force=False):
set_config(newconfig) # sets bigchaindb.config
@lru_cache()
def load_consensus_plugin(name=None):
"""Find and load the chosen consensus plugin.

View File

@ -15,6 +15,7 @@ class AbstractConsensusRules(metaclass=ABCMeta):
All methods listed below must be implemented.
"""
@staticmethod
@abstractmethod
def validate_transaction(bigchain, transaction):
"""Validate a transaction.
@ -31,8 +32,8 @@ class AbstractConsensusRules(metaclass=ABCMeta):
Descriptive exceptions indicating the reason the transaction failed.
See the `exceptions` module for bigchain-native error classes.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def validate_block(bigchain, block):
"""Validate a block.
@ -49,8 +50,8 @@ class AbstractConsensusRules(metaclass=ABCMeta):
Descriptive exceptions indicating the reason the block failed.
See the `exceptions` module for bigchain-native error classes.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def create_transaction(*args, **kwargs):
"""Create a new transaction.
@ -61,8 +62,8 @@ class AbstractConsensusRules(metaclass=ABCMeta):
Returns:
dict: newly constructed transaction.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def sign_transaction(transaction, *args, **kwargs):
"""Sign a transaction.
@ -74,20 +75,19 @@ class AbstractConsensusRules(metaclass=ABCMeta):
Returns:
dict: transaction with any signatures applied.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def verify_signature(signed_transaction):
"""Verify the signature of a transaction.
def validate_fulfillments(signed_transaction):
"""Validate the fulfillments of a transaction.
Args:
signed_transaction (dict): signed transaction to verify
Returns:
bool: True if the transaction's required signature data is present
bool: True if the transaction's required fulfillments are present
and correct, False otherwise.
"""
raise NotImplementedError
@abstractmethod
def verify_vote_signature(block, signed_vote):
@ -170,8 +170,8 @@ class BaseConsensusRules(AbstractConsensusRules):
if calculated_hash != transaction['id']:
raise exceptions.InvalidHash()
# Check signature
if not util.verify_signature(transaction):
# Check fulfillments
if not util.validate_fulfillments(transaction):
raise exceptions.InvalidSignature()
return transaction
@ -229,13 +229,13 @@ class BaseConsensusRules(AbstractConsensusRules):
return util.sign_tx(transaction, private_key)
@staticmethod
def verify_signature(signed_transaction):
"""Verify the signature of a transaction.
def validate_fulfillments(signed_transaction):
"""Validate the fulfillments of a transaction.
Refer to the documentation of ``bigchaindb.util.verify_signature``
Refer to the documentation of ``bigchaindb.util.validate_fulfillments``
"""
return util.verify_signature(signed_transaction)
return util.validate_fulfillments(signed_transaction)
@staticmethod
def verify_vote_signature(block, signed_vote):
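With this change, a third-party consensus plugin implements `validate_fulfillments` instead of `verify_signature`. A minimal sketch of a plugin against the renamed hook (the class and its behavior are illustrative, not part of this commit; the import path is assumed):
```python
from bigchaindb import util
from bigchaindb.consensus import BaseConsensusRules

class ExampleRules(BaseConsensusRules):
    """Inherit everything; override only the renamed hook."""

    @staticmethod
    def validate_fulfillments(signed_transaction):
        # A real plugin could add or relax checks before delegating.
        return util.validate_fulfillments(signed_transaction)
```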

View File

@ -27,16 +27,18 @@ class Bigchain(object):
consensus_plugin=None):
"""Initialize the Bigchain instance
There are three ways in which the Bigchain instance can get its parameters.
The order by which the parameters are chosen are:
1. Setting them by passing them to the `__init__` method itself.
2. Setting them as environment variables
3. Reading them from the `config.json` file.
A Bigchain instance has several configuration parameters (e.g. host).
If a parameter value is passed as an argument to the Bigchain
__init__ method, then that is the value it will have.
Otherwise, the parameter value will be the value from the local
configuration file. If it's not set in that file, then the value
will come from an environment variable. If that environment variable
isn't set, then the parameter will have its default value (defined in
bigchaindb.__init__).
Args:
host (str): hostname where the rethinkdb is running.
port (int): port in which rethinkb is running (usually 28015).
host (str): hostname where RethinkDB is running.
port (int): port in which RethinkDB is running (usually 28015).
dbname (str): the name of the database to connect to (usually bigchain).
public_key (str): the base58 encoded public key for the ED25519 curve.
private_key (str): the base58 encoded private key for the ED25519 curve.
@ -88,17 +90,17 @@ class Bigchain(object):
return self.consensus.sign_transaction(transaction, *args, **kwargs)
def verify_signature(self, signed_transaction, *args, **kwargs):
"""Verify the signature(s) of a transaction.
def validate_fulfillments(self, signed_transaction, *args, **kwargs):
"""Validate the fulfillment(s) of a transaction.
Refer to the documentation of your consensus plugin.
Returns:
bool: True if the transaction's required signature data is present
bool: True if the transaction's required fulfillments are present
and correct, False otherwise.
"""
return self.consensus.verify_signature(
return self.consensus.validate_fulfillments(
signed_transaction, *args, **kwargs)
def write_transaction(self, signed_transaction, durability='soft'):
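The rewritten docstring describes a four-level precedence: explicit argument, then the local config file, then environment variables, then the package defaults. In practice that means (host value illustrative):
```python
from bigchaindb import Bigchain

b1 = Bigchain()                 # host comes from config file, env, or default
b2 = Bigchain(host='10.0.0.5')  # an explicit argument wins over all of those
```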

View File

@ -358,7 +358,7 @@ def fulfill_simple_signature_fulfillment(fulfillment, parsed_fulfillment, fulfil
Args:
fulfillment (dict): BigchainDB fulfillment to fulfill.
parsed_fulfillment (object): cryptoconditions.Ed25519Fulfillment instance.
parsed_fulfillment (cryptoconditions.Ed25519Fulfillment): cryptoconditions.Ed25519Fulfillment instance.
fulfillment_message (dict): message to sign.
key_pairs (dict): dictionary of (public_key, private_key) pairs.
@ -382,7 +382,7 @@ def fulfill_threshold_signature_fulfillment(fulfillment, parsed_fulfillment, ful
Args:
fulfillment (dict): BigchainDB fulfillment to fulfill.
parsed_fulfillment (object): cryptoconditions.ThresholdSha256Fulfillment instance.
parsed_fulfillment (cryptoconditions.ThresholdSha256Fulfillment): cryptoconditions.ThresholdSha256Fulfillment instance.
fulfillment_message (dict): message to sign.
key_pairs (dict): dictionary of (public_key, private_key) pairs.
@ -421,11 +421,11 @@ def check_hash_and_signature(transaction):
raise exceptions.InvalidHash()
# Check signature
if not verify_signature(transaction):
if not validate_fulfillments(transaction):
raise exceptions.InvalidSignature()
def verify_signature(signed_transaction):
def validate_fulfillments(signed_transaction):
"""Verify the signature of a transaction
A valid transaction should have been signed with the `current_owner`'s corresponding private key.
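For callers, the rename is mechanical: wherever `verify_signature` was used to check a signed transaction, `validate_fulfillments` is used now. A usage sketch, assuming a configured node (since `Bigchain()` reads the local config):
```python
from bigchaindb import Bigchain

b = Bigchain()
tx = b.create_transaction(b.me, b.me, None, 'CREATE',
                          payload={'msg': 'hello'})
tx_signed = b.sign_transaction(tx, b.me_private)
assert b.validate_fulfillments(tx_signed)  # fulfillments present and correct
```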

View File

@ -75,8 +75,8 @@ def create_transaction():
tx = util.transform_create(tx)
tx = bigchain.consensus.sign_transaction(tx, private_key=bigchain.me_private)
if not bigchain.consensus.verify_signature(tx):
val['error'] = 'Invalid transaction signature'
if not bigchain.consensus.validate_fulfillments(tx):
val['error'] = 'Invalid transaction fulfillments'
with monitor.timer('write_transaction', rate=bigchaindb.config['statsd']['rate']):
val = bigchain.write_transaction(tx)

146
deploy-cluster-aws/awsdeploy.sh Executable file
View File

@ -0,0 +1,146 @@
#! /bin/bash
# The set -e option instructs bash to immediately exit
# if any command has a non-zero exit status
set -e
USAGE="usage: ./awsdeploy.sh <number_of_nodes_in_cluster> <pypi_or_branch> <servers_or_clients>"
if [ -z "$1" ]; then
echo $USAGE
echo "No first argument was specified"
echo "It should be a number like 3 or 15"
exit 1
else
NUM_NODES=$1
fi
if [ -z "$2" ]; then
echo $USAGE
echo "No second argument was specified, so BigchainDB will be installed from PyPI"
BRANCH="pypi"
else
BRANCH=$2
fi
if [ -z "$3" ]; then
echo $USAGE
echo "No third argument was specified, so servers will be deployed"
WHAT_TO_DEPLOY="servers"
else
WHAT_TO_DEPLOY=$3
fi
if [[ ("$WHAT_TO_DEPLOY" != "servers") && ("$WHAT_TO_DEPLOY" != "clients") ]]; then
echo "The third argument, if included, must be servers or clients"
exit 1
fi
# Check for AWS private key file (.pem file)
if [ ! -f "pem/bigchaindb.pem" ]; then
echo "File pem/bigchaindb.pem (AWS private key) is missing"
exit 1
fi
# Check for the confiles directory
if [ ! -d "confiles" ]; then
echo "Directory confiles is needed but does not exist"
echo "See make_confiles.sh to find out how to make it"
exit 1
fi
# Auto-generate the tag to apply to all nodes in the cluster
TAG="BDB-"$WHAT_TO_DEPLOY"-"`date +%m-%d@%H:%M`
echo "TAG = "$TAG
# Change the file permissions on pem/bigchaindb.pem
# so that the owner can read it, but that's all
chmod 0400 pem/bigchaindb.pem
# The following Python script does these things:
# 0. allocates more elastic IP addresses if necessary,
# 1. launches the specified number of nodes (instances) on Amazon EC2,
# 2. tags them with the specified tag,
# 3. waits until those instances exist and are running,
# 4. for each instance, it associates an elastic IP address
# with that instance,
# 5. writes the shellscript add2known_hosts.sh
# 6. (over)writes a file named hostlist.py
# containing a list of all public DNS names.
python launch_ec2_nodes.py --tag $TAG --nodes $NUM_NODES
# Make add2known_hosts.sh executable then execute it.
# This adds remote keys to ~/.ssh/known_hosts
chmod +x add2known_hosts.sh
./add2known_hosts.sh
# Rollout base packages (dependencies) needed before
# storage backend (RethinkDB) and BigchainDB can be rolled out
fab install_base_software
if [ "$WHAT_TO_DEPLOY" == "servers" ]; then
# (Re)create the RethinkDB configuration file conf/rethinkdb.conf
python create_rethinkdb_conf.py
# Rollout storage backend (RethinkDB) and start it
fab install_rethinkdb
fi
# Rollout BigchainDB (but don't start it yet)
if [ "$BRANCH" == "pypi" ]; then
fab install_bigchaindb_from_pypi
else
cd ..
rm -f bigchaindb-archive.tar.gz
git archive $BRANCH --format=tar --output=bigchaindb-archive.tar
# TODO: the archive could exclude more files besides the .gitignore ones
# such as the docs. See http://tinyurl.com/zo6fxeg
gzip bigchaindb-archive.tar
mv bigchaindb-archive.tar.gz deploy-cluster-aws
cd deploy-cluster-aws
fab install_bigchaindb_from_git_archive
rm bigchaindb-archive.tar.gz
fi
# Configure BigchainDB on all nodes
if [ "$WHAT_TO_DEPLOY" == "servers" ]; then
# The idea is to send a bunch of locally-created configuration
# files out to each of the instances / nodes.
# Assume a set of $NUM_NODES BigchainDB config files
# already exists in the confiles directory.
# One can create a set using a command like
# ./make_confiles.sh confiles $NUM_NODES
# (We can't do that here now because this virtual environment
# is a Python 2 environment that may not even have
# bigchaindb installed, so bigchaindb configure can't be called)
# Transform the config files in the confiles directory
# to have proper keyrings, api_endpoint values, etc.
python clusterize_confiles.py confiles $NUM_NODES
# Send one of the config files to each instance
for (( HOST=0 ; HOST<$NUM_NODES ; HOST++ )); do
CONFILE="bcdb_conf"$HOST
echo "Sending "$CONFILE
fab set_host:$HOST send_confile:$CONFILE
done
# Initialize BigchainDB (i.e. create the RethinkDB database,
# the tables, the indexes, and the genesis block). Note that
# this will only be sent to one of the nodes, see the
# definition of init_bigchaindb() in fabfile.py to see why.
fab init_bigchaindb
fab set_shards:$NUM_NODES
else
# Deploying clients
# The only thing to configure on clients is the api_endpoint
# It should be the public DNS name of a BigchainDB server
fab send_client_confile:client_confile
# Start sending load from the clients to the servers
fab start_bigchaindb_load
fi
# cleanup
rm add2known_hosts.sh

View File

@ -1,45 +1,75 @@
# -*- coding: utf-8 -*-
"""Given a directory full of default BigchainDB config files,
transform them into config files for a cluster with proper
keyrings, API endpoint values, etc.
keyrings, API endpoint values, etc. This script is meant to
be interpreted as a Python 2 script.
Note: This script assumes that there is a file named hostlist.py
Note 1: This script assumes that there is a file named hostlist.py
containing public_dns_names = a list of the public DNS names of
all the hosts in the cluster.
Note 2: If the optional -k argument is included, then a keypairs.py
file must exist and must have enough keypairs in it to assign one
to each of the config files in the directory of config files.
You can create a keypairs.py file using write_keypairs_file.py
Usage:
python clusterize_confiles.py <dir> <number_of_files>
python clusterize_confiles.py [-h] [-k] dir number_of_files
"""
from __future__ import unicode_literals
import os
import json
import argparse
from hostlist import public_dns_names
if os.path.isfile('keypairs.py'):
from keypairs import keypairs_list
# Parse the command-line arguments
parser = argparse.ArgumentParser()
desc = 'Transform a directory of default BigchainDB config files '
desc += 'into config files for a cluster'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('dir',
help='Directory containing the config files')
parser.add_argument('number_of_files',
help='Number of config files expected in dir',
type=int)
parser.add_argument('-k', '--use-keypairs',
action='store_true',
default=False,
help='Use public and private keys from keypairs.py')
args = parser.parse_args()
conf_dir = args.dir
numfiles_expected = int(args.number_of_files)
num_files_expected = int(args.number_of_files)
use_keypairs = args.use_keypairs
# Check if the number of files in conf_dir is what was expected
conf_files = os.listdir(conf_dir)
numfiles = len(conf_files)
if numfiles != numfiles_expected:
conf_files = sorted(os.listdir(conf_dir))
num_files = len(conf_files)
if num_files != num_files_expected:
raise ValueError('There are {} files in {} but {} were expected'.
format(numfiles, conf_dir, numfiles_expected))
format(num_files, conf_dir, num_files_expected))
# Make a list containing all the public keys from
# all the config files
# If the -k option was included, check to make sure there are enough
# keypairs in keypairs_list. (Only touch keypairs_list if -k was given,
# since keypairs.py may not exist otherwise.)
if use_keypairs:
    num_keypairs = len(keypairs_list)
    if num_keypairs < num_files:
        raise ValueError('There are {} config files in {} but '
                         'there are only {} keypairs in keypairs.py'.
                         format(num_files, conf_dir, num_keypairs))
# Make a list containing all the public keys
if use_keypairs:
print('Using keypairs from keypairs.py')
pubkeys = [keypair[1] for keypair in keypairs_list]
else:
# read the pubkeys from the config files in conf_dir
pubkeys = []
for filename in conf_files:
file_path = os.path.join(conf_dir, filename)
@ -53,6 +83,13 @@ for i, filename in enumerate(conf_files):
file_path = os.path.join(conf_dir, filename)
with open(file_path, 'r') as f:
conf_dict = json.load(f)
# If the -k option was included
# then replace the private and public keys
# with those from keypairs_list
if use_keypairs:
keypair = keypairs_list[i]
conf_dict['keypair']['private'] = keypair[0]
conf_dict['keypair']['public'] = keypair[1]
# The keyring is the list of *all* public keys
# minus the config file's own public key
keyring = list(pubkeys)
@ -64,8 +101,10 @@ for i, filename in enumerate(conf_files):
# Set the api_endpoint
conf_dict['api_endpoint'] = 'http://' + public_dns_names[i] + \
':9984/api/v1'
# Delete the config file
os.remove(file_path)
# Write new config file with the same filename
print('Rewriting {}'.format(file_path))
with open(file_path, 'w') as f2:
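An example invocation, assuming `hostlist.py` (and, when using `-k`, `keypairs.py`) sit next to the script and the `confiles` directory holds three config files:
```text
python clusterize_confiles.py -k confiles 3
```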

View File

@ -0,0 +1,87 @@
# -*- coding: utf-8 -*-
"""A Fabric fabfile with functionality to install Docker,
install Docker Compose, and run a BigchainDB monitoring server
(using the docker-compose-monitor.yml file)
"""
from __future__ import with_statement, unicode_literals
from fabric.api import sudo, env
from fabric.api import task
from fabric.operations import put, run
# Ignore known_hosts
# http://docs.fabfile.org/en/1.10/usage/env.html#disable-known-hosts
env.disable_known_hosts = True
env.user = 'ubuntu'
env.key_filename = 'pem/bigchaindb.pem'
@task
def install_docker_engine():
"""Install Docker on an EC2 Ubuntu 14.04 instance
Example:
fab --fabfile=fabfile-monitor.py \
--hosts=ec2-52-58-106-17.eu-central-1.compute.amazonaws.com \
install_docker_engine
"""
# install prerequisites
sudo('apt-get update')
sudo('apt-get -y install apt-transport-https ca-certificates linux-image-extra-$(uname -r) apparmor')
# install docker repositories
sudo('apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 \
--recv-keys 58118E89F3A912897C070ADBF76221572C52609D')
sudo("echo 'deb https://apt.dockerproject.org/repo ubuntu-trusty main' | \
sudo tee /etc/apt/sources.list.d/docker.list")
# install docker engine
sudo('apt-get update')
sudo('apt-get -y install docker-engine')
# add ubuntu user to the docker group
sudo('usermod -aG docker ubuntu')
@task
def install_docker_compose():
"""Install Docker Compose on an EC2 Ubuntu 14.04 instance
Example:
fab --fabfile=fabfile-monitor.py \
--hosts=ec2-52-58-106-17.eu-central-1.compute.amazonaws.com \
install_docker_compose
"""
sudo('curl -L https://github.com/docker/compose/releases/download/1.7.0/docker-compose-`uname \
-s`-`uname -m` > /usr/local/bin/docker-compose')
sudo('chmod +x /usr/local/bin/docker-compose')
@task
def install_docker():
"""Install Docker and Docker Compose on an EC2 Ubuntu 14.04 instance
Example:
fab --fabfile=fabfile-monitor.py \
--hosts=ec2-52-58-106-17.eu-central-1.compute.amazonaws.com \
install_docker
"""
install_docker_engine()
install_docker_compose()
@task
def run_monitor():
"""Run BigchainDB monitor on an EC2 Ubuntu 14.04 instance
Example:
fab --fabfile=fabfile-monitor.py \
--hosts=ec2-52-58-106-17.eu-central-1.compute.amazonaws.com \
run_monitor
"""
# copy docker-compose-monitor to the ec2 instance
put('../docker-compose-monitor.yml')
run('INFLUXDB_DATA=/influxdb-data docker-compose -f docker-compose-monitor.yml up -d')

View File

@ -146,6 +146,15 @@ def send_confile(confile):
run('bigchaindb show-config')
@task
@parallel
def send_client_confile(confile):
put(confile, 'tempfile')
run('mv tempfile ~/.bigchaindb')
print('For this node, bigchaindb show-config says:')
run('bigchaindb show-config')
# Initialize BigchainDB
# i.e. create the database, the tables,
# the indexes, and the genesis block.
@ -157,6 +166,13 @@ def init_bigchaindb():
run('bigchaindb init', pty=False)
# Set the number of shards (in the backlog and bigchain tables)
@task
@hosts(public_dns_names[0])
def set_shards(num_shards):
run('bigchaindb set-shards {}'.format(num_shards))
# Start BigchainDB using screen
@task
@parallel
@ -164,6 +180,12 @@ def start_bigchaindb():
sudo('screen -d -m bigchaindb -y start &', pty=False)
@task
@parallel
def start_bigchaindb_load():
sudo('screen -d -m bigchaindb load &', pty=False)
# Install and run New Relic
@task
def install_newrelic():

View File

@ -0,0 +1,264 @@
# -*- coding: utf-8 -*-
"""A set of keypairs for use in deploying
BigchainDB servers with a predictable set of keys.
"""
from __future__ import unicode_literals
keypairs_list = [('E72MmhHGiwywGMHdensZreNPTtAKvRxYQEyQEqUpLvXL',
'Ar1Xt6bpmeyNnWoBUAAi8VqLPboK84bvB417FKmxcJzp'),
('BYupx6PLnAqcTgrqsYopKeHYjmSY5F4rpSVoFv6vK3r6',
'6UkRsEhRW7RT6WYPJkW4j4aiqLiXhpyP7H1WRj2toCv3'),
('A3FwThyWmydgjukcpF9SmTzWQ4yoRoV9jTni1t4oicz4',
'91cuZ3GvQkEkR8UVV456fVxiujBSqd9JMp7p3XaHnVUT'),
('CkA7fS6aGmo8JPw3yuchz31AnP7KcxncQiu3pQ81X2Mj',
'PDuBGWm4BnSSkTTxSWVd59PAzFqidaLfFo86aTLZoub'),
('7aoKCGN4QK82yVpErN1EJyn8ciQXBUkVBe1snx7wypEf',
'AXoar7qdJZF2kaTJb19PhqY7iSdX3AEef7GBwb9N8WT6'),
('1GGrwAx34CbTfaV55KdCKah2G5FThjrTdTQk3gTD97x',
'853HbGVt6hT7q17cN6CtDycuTyoncUDWscdo4GUMKntp'),
('7C6BZbk3Xi4nB1o4mUXMty4rs22CeF8dLQ2dUKhyi9qs',
'GVu5QTqKeMGhgz8AgzfVRYP5F3HopkqgabQhRqjujEdN'),
('2WXPBsMGmwjMv7Eg5iqgLAq2VQW1GF6AVAWuveYLXy3Z',
'AuBnVm277newtkgfyjGQrr3X8ykKwzVzrcpqF67muQ4N'),
('H67sRSm8W6gVmR1N3SqWXF3WTx9Arhc1RtwvLEjmhm9t',
'5SQamPP4dWUhu2L247TMnf8vX1C5vuB3jtfh1BpVSsPg'),
('GYztiuCLEvG4wrVszbXKs9AXbKBbDZVhw35xsq8XF63S',
'6pxa9WydnD1xRFReR1yHruXL8VtFu3c6kCNBXkwAyDXA'),
('G7x9iHnJkjKkdLEsrV2kGZ7tFBm9wj2Pive7vRZss47',
'23MvXauT6cKMLrNyxN41jnZv83aKghLP4B37bvemjENa'),
('3MhdzHYRrFrPmQZfXfpNKLw9xEdFNZNfUcehgBmboH43',
'Buqfw4nFfuLoHJcfYZvxXBJf5rTm5ypSyuJfL11jDFkn'),
('B2MbWPXDPSWQAwNirepSws7a4sgKNCVtmduGh5f54Koq',
'Cus791pRcuoVJe24WME2QYeEAX1R4uiTGNxa3HwzwtQY'),
('7yHSmHHX4WwsZ4H6oQxxytkGRqKPiMdqftSvRqXiomYj',
'2qVE6baeD57raXJnNwyUeWi1VyfpQ21QW1J374zMGD6o'),
('E2V7mzxce6J8PZw8rUEZXYYVnTFRkMSfTty7duohox6V',
'HSs1oWnvTfjrMmVouRtFJYLjfgeC1uxEiA8MX9F98A34'),
('4yP4RH18nt3DDFzhpLGborEJuS7hx4cKaz6AAe1xNChe',
'FziConq7CF4h6TYc1W4wYtmJbhNnrAGoareRkeoRLKTi'),
('HGgVjtNG2U6odxbAAR31UAcHknzenY88GxYhnURy2S5C',
'82miL67GzT9fTVt8hFiE2XJBRr7iNXAvFLnuiFj5HyjV'),
('AWY2DyCDbMQqx6v5KtcoW1f9qQd5NqiibeLFpABwibEn',
'9KgHN7xTLa34hfwGq4RpW71jsKjyPKRtaAdAvjHuATtb'),
('BYE1oV6Dyf49Qedrtf3UaVny9D7NEUhtx78pD1X38M6x',
'3ve8upjPmX9vvdEqvir7QBxnXQAyBWiZKwWyEhq47ptx'),
('BiZLPsA8Q3faqLPxrcMP1TT77XUYd2jceAkuB9bFCzUa',
'DrL1j2ZXLvBzk2TmA4DxsRmoR3oCSpW8YPvDCMCei1dU'),
('FNPkTYojwJ4F4psnzbWt8XnNRBqRhwHXog8Pb8Kuuq7V',
'FRxatYaiuuKBtvvERSADKNtSGPDY7iPzCmMaLDnPSuG8'),
('2LiAeAJHjGsrkUmd42jdgPAyKEfzWVttrmnXu6rzfyYM',
'FwQ3jTBnJpY62paSLdudyymZPUDSWy3827wY13jTJUmC'),
('Gcu8TPtFM2tv9gtUo5yQYyBg7KHQWxnW9Dk3bp4zh5VC',
'G3UrGxBB4UCUnxFNKrhmS1rpj3Z7bq484hZUUfNqprx1'),
('HQGHpzMDdB3sYqfJJC5GuyreqaSjpqfoGytZL9NVtg8T',
'GA9eu5RDuReBnjUcSSg9CK4X4A678YTrxHFxCpmWhDjM'),
('2of61RBw2ARZcPD4XJFcnpx18Aj1mU4viUMVN2AXaJsE',
'3aDSyU3E5Kmn9emoUXrktFKH4i7t4uaKBmHNFFhErYU8'),
('J8oF1sfJzXxJL1bDxPwCtDYw1FEh1prVAhWZF8dw1fRa',
'2atybus8CnehWNVj1DcCEidc3uD2Q7q4tiP5ok2CuNSD'),
('AxMvjM1w3i3XQVjH8oVVGhiic9kgtvrrDzxnWKdwhdQo',
'DXYvSgETSxy4vfU2rqPoZFumKw5aLWErkmEDR2w2sR7h'),
('GBuyEpUQTf2v21NAKozUbUQnwwiugHNY9Uh2kPqBwqXK',
'CLDPdckwDKa3qiLnoKuNFW8yHRjJdU37XE6skAmageJK'),
('Bc8ykuXeq7HutQkveQfYXQ28BbFkjRpZCAEuRsAMtxuF',
'B45qxKWDPnoE1C5KzunsMvfHmRgZHfz2LzxaM1LTqVwF'),
('9H9v7uKAWScvy34ZQfWJW2NoJ3SWf2NuaqzabcaVuh4h',
'4Kj9wUpHKfgJbjyLNmMYwEwnotUmsgTDKMCusHbM5gcz'),
('2kWx8nor8McDSZsg8vJ7hZrc3aUVtZhcVcvNeT14iSFo',
'3S9ase3dQd5oz3L7ELGivAsUyaTosK9C5X1aiZNtgcwi'),
('ENEDnokpqJhziw9CPiGDCnNRwSDgnGjAPh1L7XABWP6s',
'2sUKDdtfVaUXZCN6V6WecweBL8ZEY5mCfPBTj4xzhQtq'),
('FPUYgS4VvQ5WaZaQqnrJppBZQasoSMwZ4LyhUBKYnE6Q',
'FtP6Zak6EEWpuptqxSoPAySfm4yA6rWAQqxMCi6s6RYp'),
('FhQjcEjy36p27YGjKzWicdABNWzEYGciSU5Eht98o2eg',
'2hZ3Fby9K5jYQdtrhvehKTeJgq4NDJY46p4oBT7NUAv5'),
('5JD7STAtYDUeMqvA75FxTGUw6mSFmFvnVMJZJkTHLafH',
'HCGf4nWF7q4v4GBPXxTdWMjU7o3SifxfmKzTQ1dWmFqo'),
('3VLPrCmUog6mBVqkTuSJzXP7ZABCA6ejQKu9LpzkJs6s',
'Bap6iTjmZb781zLxSmELoqVA25mbMuL7B8WdAz5wDygG'),
('EiQ57ZLHQvtLbEbiJ41ViZmPctFfd51EFEaK6Y3HZcYb',
'5uu84u8um1CfuT1pvpdFKMy5oWuU4BfWRbpRHzG4eW4A'),
('3hM9hy2LSqe2SsrcE7XeNz1aPPPZgK5bnTeboyFgFsyj',
'3ptDB8YwcU9EiafreJnFSyfNuoKMMws7U7auMadhRxdr'),
('3LoFwupCNbPk4cMYVS58UHtkDhvKpdYNmMJPEbK5hnat',
'CQ56mX3agjJoWwt2uDSa7gtzHWyg3y4Lqp16rZL9qUdF'),
('F9X1XsTcFwJN196D1PdCc88WrVrBGhfDgQnezeXW9Vjx',
'79cg39iLMZHPFbXrWo6aJAbsXFPk7kgqgBxijDbDLKA'),
('Hf1XCRfcXc6sQZVchcvv54Sod8BjBFqsiU5Wu4eX6bTd',
'4o8pJV5jaNVqbQhw1u9EzoHT9m69bkfDSGVGugBYeiPC'),
('2hamLVNSruGH8uT7YvXa3AUcsspg2ERqFqT11gbKpbUK',
'3SziPezcFQbhPrGVJrm5D8HVAZSjduBhFanaXBZcGr3s'),
('6u92HEbihHiorTANWBs5nYsHJSJ21SfSqsD4FwZy8UZr',
'9jo5yogiEVYwxCkzYoHrn7WMnxpRqqJxbAFuMA2TuzmW'),
('4YJJNsfEz3eiBE48w8kihENuwDXGbS1vYLi27663EDvw',
'xcAieBttVYi8g9NQBBjf9jPoaMoWx3hA1h3iCcB11jC'),
('CUSUaZiUyy8f9yf59RSeorwQJGGnVgR6humfvmzpBMmS',
'EbR1dthGhu82wPJT7MmqKu84eKNKQXEuUm6Lqdf4NLXu'),
('5RBfhrADkYu5yFKtgdVZPq1k78VcQc3VZr4kjWpXmACs',
'Ev4PviNfb87KH5HSXEj7gN7uBYLbHWFSFqQPsoYcMHK7'),
('4M4UiTmPLY6H4AhSkgUkKQ6cRceixyL6oT86AUvK9tTs',
'4VuGTkZ62PbgKEotnMtyG6L2M76v3qabhPZKXeJ1npca'),
('BDAWs8i2GbRySDC5SCStzFdEvnfiqCTEbu9mpZRoKdA8',
'FoyMqh9tcY6xCyLxdByrW8xgzAqGNJKR9dPEN7CjPmQ2'),
('Dm1HwCxzLm76hBGAG2NEziNRiPBiYnQoKivPm5JC3477',
'Ap747d6xaUofhsFoyXbg7SCpH53tsD8zbLY39QS2jWfC'),
('6dRpaKGL3pzaoBX1dKsduuDkkPfUB1yBb1taCYZoNGw2',
'7PoRrQTBXmCkKuwvLxQceBbUwqo4eReNTxVaGVT6npdn'),
('Cb6sghYERbQA5VMdxKiZx1xk6j6heJNbW1TxRTMwkquu',
'Am8zvPbAgk2ERqmhGzJZL1NCNkEUjF6enXCwczp4d97B'),
('EhaLhpbbRCfCuLbq3rQS1d4PfE6rHgvYA9MpTGaxACgW',
'EfeeApbq1jBChfhe13JkEPbUfm1JYYFCdKXdtue6YrH5'),
('353aMTUrjH628XzVnKH2oyRmMaAdJ4antn5fGNAzfqMN',
'AqustPmyDtVpFDiUEqWfFjDeVBQhvKYZFU4wjfpXRXee'),
('7x8v2BEkdyDvzVzbRJR9AztZHLv8kUZfwRRmcPEpHEYj',
'88MTxTfy7Btqxwdf5Xo7TmjzACeuNop8MeE63LikQn4k'),
('2jnPZg4oeBzbqL6TdpyTdoxraqjWHqfSrzfmS5Qh8D4V',
'3GSJUg4s6ydymn9obTxdSVBkxpmWZLCGuvBK9fMECefe'),
('N8DS5DA18i2Bh7rEz7nJSSJZycz8qKaNkPP58BCh7Zr',
'AKjy7whpaoUnbDJXzNHbtBHawWnS7tLha3nfMPXh4Qam'),
('DUQ3pGX5XQtvucPDcNEQPMLrqCMxCbRBuWmHHddNg83Q',
'F3vakqePy8xmpb23psZahDVEdu4dywCPQB7fCMsP5mp3'),
('6ABw5HQZSWWJr2Ud6KmD73azu732iNTvEfWbCotCFLrn',
'GW9eq8JgkHDLjtENDscTK5Bj9AAC3css7SPxLZCPcS2V'),
('ByNJL8Eo8B6kKH5UuJxiXBRRrAKfALLvQmt2Rq5JgAA4',
'GEtT15SrZUDxVpLjS4metu4BXYw4o1NmxzH5Wr2DcqAv'),
('F9XaoqP4A4zZoPB6phfTP8i7CQsnSakh6bk8B1CTLwqy',
'9XLZaFGco78AXQS9dmHZ6zypjtg1Z33pj4KoTtDmnLa6'),
('ESamPv9kb87uEBZjfgarKTQwSTcEQMetBH44b8o3mPZC',
'Nv7eXkgL84o8fQLjagN1bVj7bt5FKF6Ah1Md6WWwyLk'),
('E43hqzYjZZ1XRSME6d37Q99UifT4d23piq1CN3fMp6cv',
'HLMB1uPdRuYcQyM9UmY9zerxQa3cYqEaRUku3h9oRBQn'),
('3qfPXUTeCsVRk9L68uyGF5u3XxFSVBtPkATtHayVgCGs',
'ZEkiCeoj3FGfudrN4xgtbz1VihkKWm4cgHN9qJ4p4GH'),
('7fxCmzKhvNGpbn9U2vih9N1aF5UXaVER6NSpwn3HPpoy',
'CmhLU67kWqbL2kyj8bA5TNcg7HiQFJeamcJhe5BB1PAx'),
('BhJsfuvhj9PqfvnvNGQX26fR5SXvcq7JdhhrWyZHoXT9',
'CgMqrhrjr4mBMvTgiHLqgvf4tRzUpZuLtQnMSG1Jjgx2'),
('GZbkL2W22Z2YwHf5SBwRfSEYQf1tquPkELDQjkwm2yU4',
'E47ijUUheN1Zz8TWKmbcDDWz5nduBvZNtcgqbGRiiGv6'),
('9Puc7H9PRHZ2oowzxiuGueZCzNY1X3aSuopy7k4w8TTo',
'FTjTVxsPjiNw6TnbwBeE7WpZbvJuVEMwbdPCt1NppHhc'),
('BczGQKaQNu8QkTc4PWmPdrbLfmXFzAqnoJ9YzHTU1vez',
'4m4xe8fjWAFHyNYLMRYDXskG2d5o9xZxgzCzca23uBBH'),
('BZwZrE1hNzKzfnbThE9MiB5Movox67c7uGJmi9Nhef1j',
'5G6reNxH3e1gyMSgBRCYQJypFtTSBQ85r5fQGw6DfnpM'),
('DFJxcvaR5Xk2bHiuxZzaqDxLDSq6fGSUdg54c5zAFLKz',
'BRL9LWweehDAcEPc8MXjd3uQtAt4ZK1LY8J5KT3GeYKm'),
('5wfyCc1mAhp2DCKMmEQG9nW4bKfaVkk8kpjuerApiFXv',
'rdqo7bdePrF6wR8v8dzJopEHgqNgt2yNmMjxz6wMguQ'),
('8S42sTQQqr5LJTa6jBjCfNg6xvjeL95btPJt2MPHBrDo',
'7VJjwATaownwJyTWXJxKEtJk46eEXTm9UaioPvVFD2zD'),
('57WwYQgHHSu7SYrXXmovjiPDmc2BB25itp6xSu5KrQQn',
'FGW86z4ymEbtqiSpp6zzpDkzdPZv9xDMCGUdGVBz8KLU'),
('CcxnCDQ4JgH2ceTEPW75GcfW8rP7aiAT8ZuEtYbqEa7w',
'7kQdXRZNJaWo7Gj4XtT1fV4LD4ZtN8VmxdZFiJE8q8xF'),
('8CYTgLp2kbVJKqnadQNGZorWcdWNpbaXrt6kvdzJnEjv',
'57Zwyf4FUEWTxEWrmbSb6vrcZBukHmCs7TKzKoygV6cf'),
('4buY9tDvVRpTjfAjM8Up4vWw5yh37xWteSMAerbxpKpv',
'5FvFDCSZgtc57hSpvBqBd8VjhyAJ2a2vxTiHzg2nPyg9'),
('5jJ8hry8Pu7rkgKkWcmZzfZ5FWk6rT3TnYGesEhfijvt',
'7hmVhrQ8vmHmNhxyvyW1cHF5N6gzRoBy7kimfj4b2uZ5'),
('6MUnCTEZFZvsKTCW4NKDiPv4a3SRWZLS7gUNP4nXsFBh',
'5m2oXtepVwbKt9t5er72bFNioiHYMRtHcUu176DVFBQu'),
('GXuU171dpf8JpBLiVgXksyXrdkqDqm6AWJ5A2JZLkkwV',
'BF6xtHg3kcBKHCJ9Y6TTPyGYn3MDKLqxVDshVUbgaCAk'),
('DoRUYrhULJbAnsGot4hYZeHUaFgXj4hwhHiGRUP3rZCj',
'8i67E6uPyrRvAN5WqSX9V7xeSGr4nPXqAgnR2pPQj3ew'),
('At4gvM1wZt6ACte2o26Yxbn5qaodMZBd7U1WsiBwB42x',
'GBPGPkSkkcM4KmJRqKjCPiEygVLW8LmRRarYvj967fbV'),
('48D3mw2apqVQa6KYCjGqDFiG5cbwqZEotL2w8aPWCxtE',
'2Byvg9DGK7Axk9Bp6bmiUoBQkkLsRNrcoq2ZPZu5ZyGg'),
('2YncoUMac2tNMcRFEGEgvuDXb58RdyqHMWmSN2QTMiCP',
'BSNXYAX8Em2TjuCDvmLV3GgnxYT6vX68YFwoGPaPpsSa'),
('7As7DVaC6FBqojvFoTo9jgZTcTGx9QYdVwUhNSNNvUsz',
'E5cMypehm8j2Zxw3dCXKc1MGTCftJJm9FzqPNwnVEgQn'),
('AAwj9V5iW88EwoZpLwuRvqwKn8c8rSgKAyZduFAfvqvV',
'CkTks2ZGnQdM19wucQMehPe1imANjnRAAfLd1uewGUx8'),
('axH9mijksto4vnoTjbbJJfY8iBFziJL2y39ago41WNM',
'GJV8hxcjpieuXmVz9j5JjB2eFLXmRoBF7EYWpbMNKq7Q'),
('6vv2FyJcTNJRnEmsRgHai5Bf7trJ8CsBMqbZckXWkbGk',
'5YXtgt3ZVKKYM3qvHXXKKSpStfH38akRYvg9shNNChWS'),
('DKK6kfAGnLV1mowm9m52yYFViVbQfVEtmRuveiXbnC93',
'YvrVGNzxXSTLQ5QQJ3GHWHDQJnd3qJ5npGQQvZtb4m1'),
('4QWSQeeu9oQA3ZQG7d6LKzZLR3YZ79q999Zzb7hb2cbh',
'42ARr6nFsZXLAgGGwZ5p55kVSW5ETjrnJBUxaV6sFmzk'),
('43oJ9CvF3Wsymj8zrkC19VfzjMiwntw3AXrTvc2UFuuf',
'A661APGeLXuLgYUwmQjKWnuz1XmjuLNW8XVGuGjmEm76'),
('3uN8UwhNcg219uX1GffC3a9tCZrVY327ZUk5rs3YfAR2',
'Ca5B2Z9PAeBkEPuYeUyvs3dHhTqpAzFuXERfHZT3zxto'),
('HuV5FPtboYQe2EEVFVhBkjRxbUBjeBCHRk2VuiNnBS7N',
'5AJCbvgfLmdGdWKjLpDBZtrrJC6NNCQJK5P9NmpvbByy'),
('2Rbr8Lasv1CDhL2Xxu5ZfLHf4fhCfxuTr25YDB2Q5VXN',
'FQTbtsHjw1oYyKF3pUamwubB27UqG1ista1ezL2kgF3N'),
('CLGF2xs7YyJrNZ8ertsPwofzqTBfQiJ5cMiRNcMjgEkh',
'4uSue7UmSr1H8QCYrerRRyUh2BTqX5t5qPWRdVrcyL43'),
('o6jUu8mqTQMaawxRBbvuWd3b7syXYEUPFWJGuNuoDs6',
'7uJuBMMZD3d6mq2ihUtJQLWsAqACAkmQSJ3gUcEgW18W'),
('2wo2o5rqEEyijwm4MuCXHNVp2oJPEYQBF2eU6CoXYuVy',
'AZY2HCpLGjsUgKo7PZ6gdx6btReR6gRCeE9gmzebgGZ2'),
('Eo1z9xyGbHZxH1ezG7iLxJFhuL8YWJ6NREu4T2VtRZky',
'GbjDtbwPBf6pcczRbANBvHeBNb3obMtEMoQTxmmafq2g'),
('8oPaUg1Wc7293c8HR7Phs4m4DvzDjYuzFUBqffJUhJKP',
'9vJKX3jgc1K4sdhnVYLhU6iv7vf8mRygRDYr784mYUpp'),
('K2BCZLghAwL4Y9eiPboQM2sz4GWYFM5WApZT6firnig',
'7j9QMXcyqgeVFejyNMhXszKAbZuNdECFYwZFDNCwHN3V'),
('Dz8Ft3YeeuMcsPKMWNqDDbdx6Qo2s2H2cZNUoX2uDwgY',
'3HqEP9EvU9852orfSh9WZd54pDMJnT5nMnGkjhZibbZg'),
('3cq9D9s9vZgyDertxiZr21etinCYKCMYcf3LXe3o8zT4',
'5174KhHkMsti7XNSYh5j1jFEv22PHQQizTXxT7gT2ZPb'),
('5uJwmzmoZDkADaeyenBvceP4mSzBgEgbqU5cc8JQpTDE',
'HEYiTYWaTwjXkzfbE4eZ1RL78ciJkWqEio8tDTvCXzk8'),
('BkHzLwC5bkLVB4b5KPAqbWc4ekhqmMtk34tfYpLQ53KR',
'537uFsVdCU81kSG7eUZBFV5q3PvadsS4KgzaLuGWGzgG'),
('3eQT6nC2BEqtXa5b5dn51cJEpj4eMHYsx7RkHXfwNEkq',
'2NV1QhXppRfj19ZemqGUgxZ9Pd5yD13aQmrcNd6g25D4'),
('GsBGHmKMiJoYDhoXJXwUnkbH4cVWWQ7emG1t7vTFDdS',
'CsLyGG9J9E4ZLwhpTHRgp21tvGWyPj79SaLGEpqVhHKj'),
('ALytZ6ygpy3hqHVXGHHdNuzuQh1hSoTVU8im5C6CgTR2',
'5646BEZkpyoDWQHMscMav8bXoiAzf7giVmu8yepWsoMN'),
('5XhJnzEfqVRM6trhL19K1AoGAQjbWC84Cv5XZ4nE9fF7',
'BJdQwVTx2fuJWkStt3yPD2WUeopjV3yPQp1646Yi2pXL'),
('7XLiDAjnggSU7PAvrTwsyPebC3bhuc5B2CMdiYAQBGWZ',
'8xnXGiNp1ADNfuG6uLQ91h2h1ekjuiEC5SRdw19rbpnq'),
('7kyFUtCcaiWKfGZmWfb9kvwcYLxxmocBC7qXYwNwotgV',
'574EqNs3exLKJxgqFxKyLE5XQMBkadQf5MKQ8qpjsVJS'),
('ESJSEPbWb13NaDkde8rEdcippc58AMCZodfmJP1SK16m',
'5iwWfDDjgyFfeLpS9EYmwszScwtxTACcgAbinCjFLZTZ'),
('AjnWLT2vZnEmLfioGeseLuxGQFFiyoqtFJj2oEUgzax5',
'9JeUGkGHPyB7s7XVVik1aFyCxarH2tWhpSJapnRXveb8'),
('32yM1jbRpZt7EjnH2UDimusAPfMQ83Wd1AULxLYMv2hq',
'73v6uEUhL12MEwdfFFDmqbWmSQXoC8Y3VPB9vKUYEW5X'),
('F5DjMdHvqqym53MtBG1v9shrza74EttHn1zPFL1ic1hT',
'FpkXbvZsW4LbU4XZYvy6euR7F9SxDMPdyVVCfJFUaT2C'),
('3EPdMUSAXFuQLaVwq1fPHNUPzvSHXqfNupgu6kGhdEVc',
'28RxZbx71Y8ZaYt9f9D2HnAhkH2CvAPT4PXpDgCnXhVY'),
('47YXW4Escn71q7xf6qip8NwdKTq2ScL1i4xmAnJ1RvDW',
'3NQxT4ukLvPPZV3J6qDmx5PFPa7GvaiMBwc1r47SXdfj'),
('AiCfcc6viFsxTxfEJxo82b3GWzim2nRXvBBfB14w4dMr',
'FBCcBLpFUss64MWjf3nuSRrLNoqnWpJGfXKJVaduPezJ'),
('CkeGi1XM3nquJcp3osb2EhTJ99gsisPfTpnsQdYViWWa',
'4L12aHJtN96XGrYbhBFhmEQuPTnsHu95NATsz3X2Uo4Z'),
('A78PS3MuQtWQ937ow5mzHhXUS1LNSzX2nMcmqLN57c3G',
'87T6viSDWX7Rrw2VWsqEXhwVmrsrmf2rjDHRkeUGU4rX'),
('2SzYHP21J4KXwVgSwtNfDQKUbyC7RE8feAwfVuW7PSmD',
'4NCA5NxnhxPAAcWqyxtg4us7MJYSbn8g3Kw6v35Vmnm5'),
('GxGuWY5A1ADiXFrdCiAcVJX4foveGxDfhcJd2Yirg3D8',
'2Jjo3w5gQ7TsQaN2N7iNejfGLjzucaNg4hYZBcwT7AzC'),
('5dYeKTvxfH6s9Esbys8TVMDTZMCzjFJAH4xe623ykmZ2',
'5q7Le5Kcm1eBY1r8XwEseDXnEUKkZE5qtNb6p5BSSKwz'),
('EkbeQ7eoiHxiTmq7ksw6FLvf59b3pGuoDR9LF29KYw4m',
'CDpJ8VmgiBvYUcZMcPYr3B5UxSVEtLxRfq5dH3AxboNT'),
('2zXT2EUMwWKPMWHK5rYvxgLNdmkoedXH754uzUBphaCE',
'5oHnEFaUaM1QRZjV48K1DrqKeEdcbmb8uG2zucTYc5qH'),
('H6c78e97srwPEg5PsW1uuKAovSxTvmNyFt9qJwoeJP4y',
'inwncuMiPRuw6PEucVG2Kempk91yq3dT5kpuf3Umf4j'),
('6yJDrenNeRBpdQxqxMY3C2V6cBrfvpzYpz6MbefxuxsZ',
'CnCjmTECDrqJP5nTPSL2NWJ9LPyyFzLmrTYiRcSjwU7e'),
('3YTX3ntzsjG9CxbkCayToGEzmn1Fgdvw1W8gefCUTa9L',
'FkCbQBoKRZbndsNP44CWheEchwPC65UNdrZ8FntRTyvu'),
('8Y7xgZ5M8qBYdX5iCHe7mPQ6ZcQNXDJd28ZVDdx7FSBa',
'AYTdxj598H36RGmBzEnR4QK8pVF6k5YTRBypxWsDkXUB'),
('AtzLLpKuPehdP4g6x4J4BH2RjNbvXewxf8ibSgKSiJtL',
'vC8C3u71YueJcUhtyfn9Xx5PjpJuizDZNGW23tFb5VY'),
]

View File

@ -18,9 +18,8 @@ import socket
import argparse
import botocore
import boto3
from awscommon import (
get_naeips,
)
from awscommon import get_naeips
# First, ensure they're using Python 2.5-2.7
pyver = sys.version_info

View File

@ -1,130 +0,0 @@
#! /bin/bash
# The set -e option instructs bash to immediately exit
# if any command has a non-zero exit status
set -e
function printErr()
{
echo "usage: ./startup.sh <tag> <number_of_nodes_in_cluster> <pypi_or_branch>"
echo "No argument $1 supplied"
}
if [ -z "$1" ]; then
printErr "<tag>"
exit 1
fi
if [ -z "$2" ]; then
printErr "<number_of_nodes_in_cluster>"
exit 1
fi
TAG=$1
NUM_NODES=$2
# If they don't include a third argument (<pypi_or_branch>)
# then assume BRANCH = "pypi" by default
if [ -z "$3" ]; then
echo "No third argument was specified, so BigchainDB will be installed from PyPI"
BRANCH="pypi"
else
BRANCH=$3
fi
# Check for AWS private key file (.pem file)
if [ ! -f "pem/bigchaindb.pem" ]; then
echo "File pem/bigchaindb.pem (AWS private key) is missing"
exit 1
fi
# Check for the confiles directory
if [ ! -d "confiles" ]; then
echo "Directory confiles is needed but does not exist"
echo "See make_confiles.sh to find out how to make it"
exit 1
fi
# Change the file permissions on pem/bigchaindb.pem
# so that the owner can read it, but that's all
chmod 0400 pem/bigchaindb.pem
# The following Python script does these things:
# 0. allocates more elastic IP addresses if necessary,
# 1. launches the specified number of nodes (instances) on Amazon EC2,
# 2. tags them with the specified tag,
# 3. waits until those instances exist and are running,
# 4. for each instance, it associates an elastic IP address
# with that instance,
# 5. writes the shellscript add2known_hosts.sh
# 6. (over)writes a file named hostlist.py
# containing a list of all public DNS names.
python launch_ec2_nodes.py --tag $TAG --nodes $NUM_NODES
# Make add2known_hosts.sh executable then execute it.
# This adds remote keys to ~/.ssh/known_hosts
chmod +x add2known_hosts.sh
./add2known_hosts.sh
# (Re)create the RethinkDB configuration file conf/rethinkdb.conf
python create_rethinkdb_conf.py
# Rollout base packages (dependencies) needed before
# storage backend (RethinkDB) and BigchainDB can be rolled out
fab install_base_software
# Rollout storage backend (RethinkDB) and start it
fab install_rethinkdb
# Rollout BigchainDB (but don't start it yet)
if [ "$BRANCH" == "pypi" ]; then
fab install_bigchaindb_from_pypi
else
cd ..
rm -f bigchaindb-archive.tar.gz
git archive $BRANCH --format=tar --output=bigchaindb-archive.tar
# TODO: the archive could exclude more files besides the .gitignore ones
# such as the docs. See http://tinyurl.com/zo6fxeg
gzip bigchaindb-archive.tar
mv bigchaindb-archive.tar.gz deploy-cluster-aws
cd deploy-cluster-aws
fab install_bigchaindb_from_git_archive
rm bigchaindb-archive.tar.gz
fi
# Configure BigchainDB on all nodes
# The idea is to send a bunch of locally-created configuration
# files out to each of the instances / nodes.
# Assume a set of $NUM_NODES BigchaindB config files
# already exists in the confiles directory.
# One can create a set using a command like
# ./make_confiles.sh confiles $NUM_NODES
# (We can't do that here now because this virtual environment
# is a Python 2 environment that may not even have
# bigchaindb installed, so bigchaindb configure can't be called)
# Transform the config files in the confiles directory
# to have proper keyrings, api_endpoint values, etc.
python clusterize_confiles.py confiles $NUM_NODES
# Send one of the config files to each instance
for (( HOST=0 ; HOST<$NUM_NODES ; HOST++ )); do
CONFILE="bcdb_conf"$HOST
echo "Sending "$CONFILE
fab set_host:$HOST send_confile:$CONFILE
done
# Initialize BigchainDB (i.e. Create the RethinkDB database,
# the tables, the indexes, and genesis glock). Note that
# this will only be sent to one of the nodes, see the
# definition of init_bigchaindb() in fabfile.py to see why.
fab init_bigchaindb
# Start BigchainDB on all the nodes using "screen"
fab start_bigchaindb
# cleanup
rm add2known_hosts.sh
# rm -rf temp_confs

View File

@ -0,0 +1,49 @@
"""A Python 3 script to write a file with a specified number
of keypairs, using bigchaindb.crypto.generate_key_pair()
The written file is always named keypairs.py and it should be
interpreted as a Python 2 script.
Usage:
$ python3 write_keypairs_file.py num_pairs
Using the list in other Python scripts:
# in a Python 2 script:
from keypairs import keypairs_list
# keypairs_list is a list of (sk, pk) tuples
# sk = signing key (private key)
# pk = verifying key (public key)
"""
import argparse
from bigchaindb import crypto
# Parse the command-line arguments
desc = 'Write a set of keypairs to keypairs.py'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('num_pairs',
help='number of keypairs to write',
type=int)
args = parser.parse_args()
num_pairs = int(args.num_pairs)
# Generate and write the keypairs to keypairs.py
print('Writing {} keypairs to keypairs.py...'.format(num_pairs))
with open('keypairs.py', 'w') as f:
f.write('# -*- coding: utf-8 -*-\n')
f.write('"""A set of keypairs for use in deploying\n')
f.write('BigchainDB servers with a predictable set of keys.\n')
f.write('"""\n')
f.write('\n')
f.write('from __future__ import unicode_literals\n')
f.write('\n')
f.write('keypairs_list = [')
for pair_num in range(num_pairs):
keypair = crypto.generate_key_pair()
spacer = '' if pair_num == 0 else ' '
f.write("{}('{}',\n '{}'),\n".format(
spacer, keypair[0], keypair[1]))
f.write(' ]\n')
print('Done.')

View File

@ -1,25 +1,28 @@
version: '2'
services:
influxdb:
image: tutum/influxdb
ports:
- "8083:8083"
- "8086:8086"
expose:
- "8090"
- "8099"
environment:
PRE_CREATE_DB: "telegraf"
volumes:
- $INFLUXDB_DATA:/data
grafana:
image: bigchaindb/grafana-bigchaindb-docker
tty: true
ports:
- "3000:3000"
links:
- influxdb:localhost
environment:
INFLUXDB_HOST: "influxdb"
statsd:
image: bigchaindb/docker-telegraf-statsd
ports:
- "8125:8125/udp"
links:
- influxdb:localhost
environment:
INFLUXDB_HOST: "influxdb"

(Two binary image files changed but are not shown: one went from 52 KiB to 27 KiB, the other from 25 KiB to 12 KiB.)

View File

@ -43,3 +43,10 @@ This command is used to run benchmarking tests. You can learn more about it usin
```text
$ bigchaindb load -h
```
### bigchaindb set-shards
This command is used to set the number of shards in the underlying datastore. For example, the following command will set the number of shards to four:
```text
$ bigchaindb set-shards 4
```

View File

@ -30,7 +30,7 @@ validate_transaction(bigchain, transaction)
validate_block(bigchain, block)
create_transaction(*args, **kwargs)
sign_transaction(transaction, *args, **kwargs)
verify_signature(transaction)
validate_fulfillments(transaction)
```
Together, these functions are sufficient for most customizations. For example:

View File

@ -83,9 +83,53 @@ Add some rules for Inbound traffic:
**Note: These rules are extremely lax! They're meant to make testing easy.** You'll want to tighten them up if you intend to have a secure cluster. For example, Source = 0.0.0.0/0 is [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) for "allow this traffic to come from _any_ IP address."
## AWS Deployment
## Deploy a BigchainDB Monitor
### AWS Deployment Step 1
This step is optional.
One way to monitor a BigchainDB cluster is to use the monitoring setup described in the [Monitoring](monitoring.html) section of this documentation. If you want to do that, then you may want to deploy the monitoring server first, so you can tell your BigchainDB nodes where to send their monitoring data.
You can deploy a monitoring server on AWS. To do that, go to the AWS EC2 Console and launch an instance:
1. Choose an AMI: select Ubuntu Server 14.04 LTS.
2. Choose an Instance Type: a t2.micro will suffice.
3. Configure Instance Details: you can accept the defaults, but feel free to change them.
4. Add Storage: A "Root" volume type should already be included. You _could_ store monitoring data there (e.g. in a folder named `/influxdb-data`) but we will attach another volume and store the monitoring data there instead. Select "Add New Volume" and an EBS volume type.
5. Tag Instance: give your instance a memorable name.
6. Configure Security Group: choose your bigchaindb security group.
7. Review and launch your instance.
When it asks, choose an existing key pair: the one you created earlier (named `bigchaindb`).
Give your instance some time to launch and become able to accept SSH connections. You can see its current status in the AWS EC2 Console (in the "Instances" section). SSH into your instance using something like:
```text
cd deploy-cluster-aws
ssh -i pem/bigchaindb.pem ubuntu@ec2-52-58-157-229.eu-central-1.compute.amazonaws.com
```
where `ec2-52-58-157-229.eu-central-1.compute.amazonaws.com` should be replaced by your new instance's EC2 hostname. (To get that, go to the AWS EC2 Console, select Instances, click on your newly-launched instance, and copy its "Public DNS" name.)
Next, create a file system on the attached volume, make a directory named `/influxdb-data`, and set the attached volume's mount point to be `/influxdb-data`. For detailed instructions on how to do that, see the AWS documentation for [Making an Amazon EBS Volume Available for Use](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html).
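For example, if the volume was attached as `/dev/xvdb` (check the actual device name with `lsblk` first; it varies by instance), the steps are roughly:
```text
sudo mkfs -t ext4 /dev/xvdb
sudo mkdir /influxdb-data
sudo mount /dev/xvdb /influxdb-data
```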
Then install Docker and Docker Compose:
```text
# in a Python 2.5-2.7 virtual environment where fabric, boto3, etc. are installed
fab --fabfile=fabfile-monitor.py --hosts=<EC2 hostname> install_docker
```
After Docker is installed, we can run the monitor with:
```text
fab --fabfile=fabfile-monitor.py --hosts=<EC2 hostname> run_monitor
```
For more information about monitoring (e.g. how to view the Grafana dashboard in your web browser), see the [Monitoring](monitoring.html) section of this documentation.
To configure a BigchainDB node to send monitoring data to the monitoring server, change the statsd host in the configuration of the BigchainDB node. The section on [Configuring a BigchainDB Node](configuration.html) explains how you can do that. (For example, you can change the statsd host in `$HOME/.bigchaindb`.)
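For example, the relevant fragment of `$HOME/.bigchaindb` looks something like the following (other keys omitted; the port and rate shown are illustrative defaults):
```text
"statsd": {"host": "<hostname of your monitoring server>", "port": 8125, "rate": 0.01}
```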
## Deploy a BigchainDB Cluster
### Step 1
Suppose _N_ is the number of nodes you want in your BigchainDB cluster. If you already have a set of _N_ BigchainDB configuration files in the `deploy-cluster-aws/confiles` directory, then you can jump to step 2. To create such a set, you can do something like:
```text
./make_confiles.sh confiles 3
```
@ -99,28 +143,53 @@ That will create three (3) _default_ BigchainDB configuration files in the `depl
You can look inside those files if you're curious. In step 2, they'll be modified. For example, the default keyring is an empty list. In step 2, the deployment script automatically changes the keyring of each node to be a list of the public keys of all other nodes. Other changes are also made.
### AWS Deployment Step 2
**An Aside on Using a Standard Set of Keypairs**
It's possible to deploy BigchainDB servers with a known set of keypairs. You can generate a set of keypairs in a file named `keypairs.py` using the `write_keypairs_file.py` script. For example:
```text
# in a Python 3 virtual environment where bigchaindb is installed
cd bigchaindb
cd deploy-cluster-aws
python3 write_keypairs_file.py 100
```
The above command generates a file with 100 keypairs. (You can generate more keypairs than you need, so you can use the same list over and over again, for different numbers of servers.) To make the `awsdeploy.sh` script read all keys from `keypairs.py`, you must _edit_ the `awsdeploy.sh` script: change the line that says `python clusterize_confiles.py confiles $NUM_NODES` to `python clusterize_confiles.py -k confiles $NUM_NODES` (i.e. add the `-k` option).
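In other words, the edit is a one-line change:
```text
# in awsdeploy.sh, before:
python clusterize_confiles.py confiles $NUM_NODES
# after (reads keys from keypairs.py):
python clusterize_confiles.py -k confiles $NUM_NODES
```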
### Step 2
Step 2 is to launch the nodes ("instances") on AWS, to install all the necessary software on them, configure the software, run the software, and more.
Here's an example of how one could launch a BigchainDB cluster of three (3) nodes tagged `wrigley` on AWS:
Here's an example of how one could launch a BigchainDB cluster of three (3) nodes on AWS:
```text
# in a Python 2.5-2.7 virtual environment where fabric, boto3, etc. are installed
cd bigchaindb
cd deploy-cluster-aws
./startup.sh wrigley 3 pypi
./awsdeploy.sh 3
fab start_bigchaindb
```
The `pypi` on the end means that it will install the latest (stable) `bigchaindb` package from the [Python Package Index (PyPI)](https://pypi.python.org/pypi). That is, on each node, BigchainDB is installed using `pip install bigchaindb`.
`startup.sh` is a Bash script which calls some Python and Fabric scripts. The usage is:
`awsdeploy.sh` is a Bash script which calls some Python and Fabric scripts. The usage is:
```text
./startup.sh <tag> <number_of_nodes_in_cluster> <pypi_or_branch>
./awsdeploy.sh <number_of_nodes_in_cluster> [pypi_or_branch] [servers_or_clients]
```
The first two arguments are self-explanatory. The third argument can be `pypi` or the name of a local Git branch (e.g. `master` or `feat/3752/quote-asimov-on-tuesdays`). If you don't include a third argument, then `pypi` will be assumed by default.
**<number_of_nodes_in_cluster>** (Required)
If you're curious what the `startup.sh` script does, the source code has lots of explanatory comments, so it's quite easy to read. Here's a link to the latest version on GitHub: [`startup.sh`](https://github.com/bigchaindb/bigchaindb/blob/master/deploy-cluster-aws/startup.sh)
The number of nodes you want to deploy. Example value: 5
**[pypi_or_branch]** (Optional)
Where the nodes should get their BigchainDB source code. If it's `pypi`, then BigchainDB will be installed from the latest `bigchaindb` package in the [Python Package Index (PyPI)](https://pypi.python.org/pypi). That is, on each node, BigchainDB will be installed using `pip install bigchaindb`. You can also put the name of a local Git branch; it will be compressed and sent out to all the nodes for installation. If you don't include the second argument, then the default is `pypi`.
**[servers_or_clients]** (Optional)
If you want to deploy BigchainDB servers, then the third argument should be `servers`.
If you want to deploy BigchainDB clients, then the third argument should be `clients`.
The third argument is optional, but if you want to include it, you must also include the second argument. If you don't include the third argument, then the default is `servers`.
---
If you're curious what the `awsdeploy.sh` script does, [the source code](https://github.com/bigchaindb/bigchaindb/blob/master/deploy-cluster-aws/awsdeploy.sh) has lots of explanatory comments, so it's quite easy to read.
It should take a few minutes for the deployment to finish. If you run into problems, see the section on Known Deployment Issues below.
@ -22,9 +22,9 @@ Table of Contents
python-driver-api-examples
local-rethinkdb-cluster
deploy-on-aws
cryptography
models
json-serialization
developer-interface
consensus
monitoring
@ -27,7 +27,7 @@ A transaction is an operation between the `current_owner` and the `new_owner` ov
- `id`: sha3 hash of the transaction. The `id` is also the DB primary key.
- `version`: Version of the transaction. For future compatibility with changes in the transaction model.
- **Transaction body**:
- `fulfillments`: List of fulfillments. Each _fulfillment_ contains a pointer to an unspent digital asset
and a _crypto fulfillment_ that satisfies a spending condition set on the unspent digital asset. A _fulfillment_
is usually a signature proving the ownership of the digital asset.
See [conditions and fulfillments](models.md#conditions-and-fulfillments)
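Putting those pieces together, the overall shape of a transaction is roughly as follows (an illustrative outline only; the values are placeholders):
```python
transaction = {
    'id': '<sha3 hash of the transaction; also the DB primary key>',
    'version': 1,
    'transaction': {
        'fulfillments': ['<pointers to unspent assets plus crypto fulfillments>'],
        'conditions': ['<spending conditions on the outputs>'],
        # ... plus the other transaction-body fields described in this section
    },
}
```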
@ -174,9 +174,9 @@ A block contains a group of transactions and includes the hash of the hash of th
- `timestamp`: timestamp when the block was created. It's provided by the node that created the block.
- `transactions`: the list of transactions included in the block
- `node_pubkey`: the public key of the node that created the block
- `voters`: list of public keys of the federation nodes. Since the size of the
federation may change over time, this will tell us how many nodes existed
in the federation when the block was created so that at a later point in
time we can check that the block received the correct number of votes.
- `signature`: Signature of the block by the node that created the block (i.e. To create it, the node serialized the block contents and signed that with its private key)
- `votes`: Initially an empty list. New votes are appended as they come in from the nodes.
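Assembled from the fields above, a block looks roughly like this (an illustrative outline; values are placeholders):
```python
block = {
    'id': '<hash of the serialized block contents>',
    'block': {
        'timestamp': '<time the block was created>',
        'transactions': ['<the transactions included in the block>'],
        'node_pubkey': '<public key of the node that created the block>',
        'voters': ['<public keys of the federation nodes>'],
    },
    'signature': '<signature of the block by the creating node>',
    'votes': [],
}
```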
@ -16,10 +16,18 @@ $ docker-compose -f docker-compose-monitor.yml build
$ docker-compose -f docker-compose-monitor.yml up
```
It is also possible to mount a host directory as a data volume for InfluxDB
by setting the `INFLUXDB_DATA` environment variable:
```text
$ INFLUXDB_DATA=/data docker-compose -f docker-compose-monitor.yml up
```
You can view the Grafana dashboard in your web browser at:
[http://localhost:3000/dashboard/script/bigchaindb_dashboard.js](http://localhost:3000/dashboard/script/bigchaindb_dashboard.js)
(You may want to replace `localhost` with another hostname in that URL, e.g. the hostname of a remote monitoring server.)
The login and password are `admin` by default. If BigchainDB is running and processing transactions, you should see analytics—if not, [start BigchainDB](installing-server.html#run-bigchaindb) and load some test transactions:
```text
$ bigchaindb load
```
@ -12,7 +12,6 @@ First, make sure you have RethinkDB and BigchainDB _installed and running_, i.e.
```text
$ rethinkdb
$ bigchaindb configure
$ bigchaindb start
```
@ -44,20 +43,20 @@ In BigchainDB, only the federation nodes are allowed to create digital assets, b
```python
from bigchaindb import crypto
# Create a test user
testuser1_priv, testuser1_pub = crypto.generate_key_pair()
# Define a digital asset data payload
digital_asset_payload = {'msg': 'Hello BigchainDB!'}
# A create transaction uses the operation `CREATE` and has no inputs
tx = b.create_transaction(b.me, testuser1_pub, None, 'CREATE', payload=digital_asset_payload)
# All transactions need to be signed by the user creating the transaction
tx_signed = b.sign_transaction(tx, b.me_private)
# Write the transaction to the bigchain.
# The transaction will be stored in a backlog where it will be validated,
# included in a block, and written to the bigchain
b.write_transaction(tx_signed)
```
@ -66,7 +65,7 @@ b.write_transaction(tx_signed)
After a couple of seconds, we can check if the transaction was included in the bigchain:
```python
# Retrieve a transaction from the bigchain
tx_retrieved = b.get_transaction(tx_signed['id'])
tx_retrieved
```
@ -119,16 +118,16 @@ tx_retrieved
The new owner of the digital asset is now `BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs`, which is the public key of `testuser1`.
Note that the current owner (with public key `3LQ5dTiddXymDhNzETB1rEkp4mA7fEV1Qeiu5ghHiJm9`) is the federation node which created the asset and assigned it to `testuser1`.
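One way to see this is to ask which unspent assets `testuser1` now owns. A quick check, reusing the `b` and `testuser1_pub` objects from above:
```python
# Each owned id is a pointer ({'txid': ..., 'cid': ...}) to an unspent output
owned = b.get_owned_ids(testuser1_pub)
print(owned)
```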
## Transfer the Digital Asset
Now that `testuser1` has a digital asset assigned to him, he can transfer it to another user. Transfer transactions require an input. The input will be the transaction id of a digital asset that was assigned to `testuser1`, which in our case is `933cd83a419d2735822a2154c84176a2f419cbd449a74b94e592ab807af23861`.
BigchainDB makes use of the crypto-conditions library to both cryptographically lock and unlock transactions.
The locking script is referred to as a `condition` and a corresponding `fulfillment` unlocks the condition of the `input_tx`.
Since a transaction can have multiple outputs, each with its own (crypto)condition, each transaction input should also refer to the condition index `cid`.
![BigchainDB transactions connecting fulfillments with conditions](./_static/tx_single_condition_single_fulfillment_v1.png)
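Here's a sketch of such a transfer, assuming `testuser2` is a freshly generated keypair and reusing the objects from the CREATE example above:
```python
testuser2_priv, testuser2_pub = crypto.generate_key_pair()

# The input is an unspent output currently owned by testuser1
tx_retrieved_id = b.get_owned_ids(testuser1_pub).pop()

tx_transfer = b.create_transaction(testuser1_pub, testuser2_pub,
                                   tx_retrieved_id, 'TRANSFER')
tx_transfer_signed = b.sign_transaction(tx_transfer, testuser1_priv)
b.write_transaction(tx_transfer_signed)
```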
@ -230,14 +229,16 @@ DoubleSpend: input `{'cid': 0, 'txid': '933cd83a419d2735822a2154c84176a2f419cbd4
## Multiple Owners
To create a new digital asset with _multiple_ owners, one can simply provide a list of `new_owners`:
```python
# Create a new asset and assign it to multiple owners
tx_multisig = b.create_transaction(b.me, [testuser1_pub, testuser2_pub], None, 'CREATE')
# Have the federation node sign the transaction
tx_multisig_signed = b.sign_transaction(tx_multisig, b.me_private)
# Write the transaction
b.write_transaction(tx_multisig_signed)
# Check if the transaction is already in the bigchain
@ -320,17 +321,16 @@ tx_multisig_transfer = b.create_transaction([testuser1_pub, testuser2_pub], test
# Sign with both private keys
tx_multisig_transfer_signed = b.sign_transaction(tx_multisig_transfer, [testuser1_priv, testuser2_priv])
# Write the transaction
b.write_transaction(tx_multisig_transfer_signed)
# Check if the transaction is already in the bigchain
tx_multisig_transfer_retrieved = b.get_transaction(tx_multisig_transfer_signed['id'])
tx_multisig_transfer_retrieved
```
```python
{
"assignee":"3LQ5dTiddXymDhNzETB1rEkp4mA7fEV1Qeiu5ghHiJm9",
"id":"e689e23f774e7c562eeb310c7c712b34fb6210bea5deb9175e48b68810029150",
"transaction":{
"conditions":[
@ -394,7 +394,7 @@ owned_mimo_inputs = b.get_owned_ids(testuser1_pub)
# Check the number of assets
print(len(owned_mimo_inputs))
# Create a signed TRANSFER transaction with all the assets
tx_mimo = b.create_transaction(testuser1_pub, testuser2_pub, owned_mimo_inputs, 'TRANSFER')
tx_mimo_signed = b.sign_transaction(tx_mimo, testuser1_priv)
@ -694,7 +694,7 @@ threshold_tx_transfer['transaction']['fulfillments'][0]['fulfillment'] = thresho
# Optional validation checks
assert threshold_fulfillment.validate(threshold_tx_fulfillment_message) == True
assert b.validate_fulfillments(threshold_tx_transfer) == True
assert b.validate_transaction(threshold_tx_transfer)
b.write_transaction(threshold_tx_transfer)
@ -703,7 +703,6 @@ threshold_tx_transfer
```python
{
"assignee":"3LQ5dTiddXymDhNzETB1rEkp4mA7fEV1Qeiu5ghHiJm9",
"id":"a45b2340c59df7422a5788b3c462dee708a18cdf09d1a10bd26be3f31af4b8d7",
"transaction":{
"conditions":[
@ -750,9 +749,13 @@ threshold_tx_transfer
### Hash-locked Conditions
A hash-lock condition on an asset is like a password condition: anyone with the secret preimage (like a password) can fulfill the hash-lock condition and transfer the asset to themselves.
Under the hood, fulfilling a hash-lock condition amounts to finding a string (a "preimage") which, when hashed, results in a given value. It's easy to verify that a given preimage hashes to the given value, but it's computationally difficult to _find_ a string which hashes to the given value. The only practical way to get a valid preimage is to get it from the original creator (possibly via intermediaries).
One possible use case is to distribute preimages as "digital vouchers." The first person to redeem a voucher will get the associated asset.
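The asymmetry is easy to see with a plain hash function. A toy illustration using `hashlib` (not the actual crypto-conditions code):
```python
import hashlib

secret = b'my voucher code'
condition_hash = hashlib.sha256(secret).hexdigest()

# Verifying a candidate preimage is cheap...
assert hashlib.sha256(secret).hexdigest() == condition_hash
# ...but recovering `secret` from condition_hash alone is computationally hard.
```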
A federation node can create an asset with a hash-lock condition and no `new_owners`. Anyone who can fulfill the hash-lock condition can transfer the asset to themselves.
```python
# Create a hash-locked asset without any new_owners
@ -771,7 +774,7 @@ hashlock_tx['transaction']['conditions'].append({
'new_owners': None
})
# Conditions have been updated, so the hash needs updating
hashlock_tx['id'] = util.get_hash_data(hashlock_tx)
# The asset needs to be signed by the current_owner
@ -787,7 +790,6 @@ hashlock_tx_signed
```python
{
"assignee":"FmLm6MxCABc8TsiZKdeYaZKo5yZWMM6Vty7Q1B6EgcP2",
"id":"604c520244b7ff63604527baf269e0cbfb887122f503703120fd347d6b99a237",
"transaction":{
"conditions":[
@ -817,22 +819,22 @@ hashlock_tx_signed
}
```
In order to redeem the asset, one needs to create a fulfillment with the correct secret:
```python
hashlockuser_priv, hashlockuser_pub = crypto.generate_key_pair()
# Create hashlock fulfillment tx
hashlock_fulfill_tx = b.create_transaction(None, hashlockuser_pub, {'txid': hashlock_tx['id'], 'cid': 0}, 'TRANSFER')
# Provide a wrong secret
hashlock_fulfill_tx_fulfillment = cc.PreimageSha256Fulfillment(preimage=b'')
hashlock_fulfill_tx['transaction']['fulfillments'][0]['fulfillment'] = \
hashlock_fulfill_tx_fulfillment.serialize_uri()
assert b.is_valid_transaction(hashlock_fulfill_tx) == False
# Provide the right secret
hashlock_fulfill_tx_fulfillment = cc.PreimageSha256Fulfillment(preimage=secret)
hashlock_fulfill_tx['transaction']['fulfillments'][0]['fulfillment'] = \
hashlock_fulfill_tx_fulfillment.serialize_uri()
@ -846,7 +848,6 @@ hashlock_fulfill_tx
```python
{
"assignee":"FmLm6MxCABc8TsiZKdeYaZKo5yZWMM6Vty7Q1B6EgcP2",
"id":"fe6871bf3ca62eb61c52c5555cec2e07af51df817723f0cb76e5cf6248f449d2",
"transaction":{
"conditions":[
@ -35,6 +35,10 @@ docs_require = [
'sphinx-rtd-theme>=0.1.9',
]
benchmarks_require = [
'line-profiler==1.0',
]
setup(
name='BigchainDB',
version=version['__version__'],
@ -88,7 +92,7 @@ setup(
tests_require=tests_require,
extras_require={
'test': tests_require,
'dev': dev_require + tests_require + docs_require + benchmarks_require,
'docs': docs_require,
},
)
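# With this change, installing the `dev` extras also pulls in the
# benchmarking dependency, e.g. from a source checkout:
#
#     pip install -e .[dev]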
speed-tests/README.md
@ -0,0 +1,3 @@
# Speed Tests
This folder contains tests related to the code performance of a single node.
@ -0,0 +1,25 @@
from line_profiler import LineProfiler

import bigchaindb


def speedtest_validate_transaction():
    # create a transaction
    b = bigchaindb.Bigchain()
    tx = b.create_transaction(b.me, b.me, None, 'CREATE')
    tx_signed = b.sign_transaction(tx, b.me_private)

    # setup the profiler
    profiler = LineProfiler()
    profiler.enable_by_count()
    profiler.add_function(bigchaindb.Bigchain.validate_transaction)

    # validate_transaction 1000 times
    for i in range(1000):
        b.validate_transaction(tx_signed)

    profiler.print_stats()


if __name__ == '__main__':
    speedtest_validate_transaction()
@ -37,7 +37,7 @@ class TestBigchainApi(object):
@pytest.mark.usefixtures('inputs')
def test_create_transaction_transfer(self, b, user_vk, user_sk):
input_tx = b.get_owned_ids(user_vk).pop()
assert b.validate_fulfillments(b.get_transaction(input_tx['txid'])) == True
tx = b.create_transaction(user_vk, b.me, input_tx, 'TRANSFER')
@ -46,8 +46,8 @@ class TestBigchainApi(object):
tx_signed = b.sign_transaction(tx, user_sk)
assert b.validate_fulfillments(tx) == False
assert b.validate_fulfillments(tx_signed) == True
def test_transaction_hash(self, b, user_vk):
payload = {'cats': 'are awesome'}
@ -73,7 +73,7 @@ class TestBigchainApi(object):
tx_signed = b.sign_transaction(tx, user_sk)
assert tx_signed['transaction']['fulfillments'][0]['fulfillment'] is not None
assert b.validate_fulfillments(tx_signed)
def test_serializer(self, b, user_vk):
tx = b.create_transaction(user_vk, user_vk, None, 'CREATE')
@ -714,8 +714,8 @@ class TestBigchainBlock(object):
# run bootstrap
initial_results = block.bootstrap()
# we should have gotten a queue with 100 results minus the poison pills
assert initial_results.qsize() - mp.cpu_count() == 100
def test_start(self, b, user_vk):
# start with 100 transactions in the backlog and 100 in the changefeed
@ -736,6 +736,8 @@ class TestBigchainBlock(object):
tx = b.sign_transaction(tx, b.me_private)
b.write_transaction(tx)
new_transactions.put(tx)
for i in range(mp.cpu_count()):
    new_transactions.put('stop')
# create a block instance
@ -744,6 +746,8 @@ class TestBigchainBlock(object):
# start the block processes
block.start()
time.sleep(6)
assert new_transactions.qsize() == 0
assert r.table('backlog').count() == 0
assert r.table('bigchain').count() == 2
@ -755,20 +759,14 @@ class TestBigchainBlock(object):
# create block instance
block = Block(new_transactions)
# create block_process
block.start()
# wait for 6 seconds to give it time for an empty queue exception to occur
time.sleep(6)
# send the poison pill
new_transactions.put('stop')
block.kill()
def test_duplicated_transactions(self):
pytest.skip('We may have duplicates in the initial_results and changefeed')
@ -1218,7 +1216,7 @@ class TestCryptoconditions(object):
assert fulfillment['current_owners'][0] == b.me
assert fulfillment_from_uri.public_key.to_ascii().decode() == b.me
assert b.validate_fulfillments(tx_signed) == True
assert b.is_valid_transaction(tx_signed) == tx_signed
@pytest.mark.usefixtures('inputs')
@ -1250,7 +1248,7 @@ class TestCryptoconditions(object):
assert fulfillment['current_owners'][0] == user_vk
assert fulfillment_from_uri.public_key.to_ascii().decode() == user_vk
assert fulfillment_from_uri.condition.serialize_uri() == prev_condition['uri']
assert b.validate_fulfillments(tx_signed) == True
assert b.is_valid_transaction(tx_signed) == tx_signed
def test_override_condition_create(self, b, user_vk):
@ -1268,7 +1266,7 @@ class TestCryptoconditions(object):
assert fulfillment['current_owners'][0] == b.me
assert fulfillment_from_uri.public_key.to_ascii().decode() == b.me
assert b.validate_fulfillments(tx_signed) == True
assert b.is_valid_transaction(tx_signed) == tx_signed
@pytest.mark.usefixtures('inputs')
@ -1290,7 +1288,7 @@ class TestCryptoconditions(object):
assert fulfillment['current_owners'][0] == user_vk
assert fulfillment_from_uri.public_key.to_ascii().decode() == user_vk
assert b.validate_fulfillments(tx_signed) == True
assert b.is_valid_transaction(tx_signed) == tx_signed
def test_override_fulfillment_create(self, b, user_vk):
@ -1302,7 +1300,7 @@ class TestCryptoconditions(object):
tx['transaction']['fulfillments'][0]['fulfillment'] = fulfillment.serialize_uri()
assert b.validate_fulfillments(tx) == True
assert b.is_valid_transaction(tx) == tx
@pytest.mark.usefixtures('inputs')
@ -1319,7 +1317,7 @@ class TestCryptoconditions(object):
tx['transaction']['fulfillments'][0]['fulfillment'] = fulfillment.serialize_uri()
assert b.validate_fulfillments(tx) == True
assert b.is_valid_transaction(tx) == tx
@pytest.mark.usefixtures('inputs')
@ -1573,7 +1571,7 @@ class TestCryptoconditions(object):
assert tx_transfer_signed['transaction']['fulfillments'][0]['fulfillment'] \
== expected_fulfillment.serialize_uri()
assert b.validate_fulfillments(tx_transfer_signed) is True
def test_create_asset_with_hashlock_condition(self, b):
hashlock_tx = b.create_transaction(b.me, None, None, 'CREATE')
@ -29,7 +29,7 @@ tx_signed = b.sign_transaction(tx, b.me_private)
# included in a block, and written to the bigchain
b.write_transaction(tx_signed)
sleep(8)
"""
Read the Creation Transaction from the DB
@ -61,10 +61,12 @@ tx_transfer = b.create_transaction(testuser1_pub, testuser2_pub, tx_retrieved_id
# sign the transaction
tx_transfer_signed = b.sign_transaction(tx_transfer, testuser1_priv)
b.validate_transaction(tx_transfer_signed)
# write the transaction
b.write_transaction(tx_transfer_signed)
sleep(8)
# check if the transaction is already in the bigchain
tx_transfer_retrieved = b.get_transaction(tx_transfer_signed['id'])
@ -95,10 +97,12 @@ tx_multisig = b.create_transaction(b.me, [testuser1_pub, testuser2_pub], None, '
# Have the federation sign the transaction
tx_multisig_signed = b.sign_transaction(tx_multisig, b.me_private)
b.validate_transaction(tx_multisig_signed)
b.write_transaction(tx_multisig_signed)
# wait a few seconds for the asset to appear on the blockchain
sleep(8)
# retrieve the transaction
tx_multisig_retrieved = b.get_transaction(tx_multisig_signed['id'])
@ -111,15 +115,16 @@ tx_multisig_retrieved_id = b.get_owned_ids(testuser2_pub).pop()
tx_multisig_transfer = b.create_transaction([testuser1_pub, testuser2_pub], testuser3_pub, tx_multisig_retrieved_id, 'TRANSFER')
tx_multisig_transfer_signed = b.sign_transaction(tx_multisig_transfer, [testuser1_priv, testuser2_priv])
b.validate_transaction(tx_multisig_transfer_signed)
b.write_transaction(tx_multisig_transfer_signed)
# wait a few seconds for the asset to appear on the blockchain
sleep(8)
# retrieve the transaction
tx_multisig_transfer_retrieved = b.get_transaction(tx_multisig_transfer_signed['id'])
assert tx_multisig_transfer_retrieved is not None
print(json.dumps(tx_multisig_transfer_retrieved, sort_keys=True, indent=4, separators=(',', ':')))
"""
Multiple Inputs and Outputs
@ -127,9 +132,10 @@ Multiple Inputs and Outputs
for i in range(3):
tx_mimo_asset = b.create_transaction(b.me, testuser1_pub, None, 'CREATE')
tx_mimo_asset_signed = b.sign_transaction(tx_mimo_asset, b.me_private)
b.validate_transaction(tx_mimo_asset_signed)
b.write_transaction(tx_mimo_asset_signed)
sleep(8)
# get inputs
owned_mimo_inputs = b.get_owned_ids(testuser1_pub)
@ -137,9 +143,10 @@ print(len(owned_mimo_inputs))
# create a transaction
tx_mimo = b.create_transaction(testuser1_pub, testuser2_pub, owned_mimo_inputs, 'TRANSFER')
tx_mimo_signed = b.sign_transaction(tx_mimo, testuser1_priv)
# write the transaction
b.validate_transaction(tx_mimo_signed)
b.write_transaction(tx_mimo_signed)
print(json.dumps(tx_mimo_signed, sort_keys=True, indent=4, separators=(',', ':')))
@ -178,10 +185,11 @@ threshold_tx['id'] = util.get_hash_data(threshold_tx)
# sign the transaction
threshold_tx_signed = b.sign_transaction(threshold_tx, testuser2_priv)
b.validate_transaction(threshold_tx_signed)
# write the transaction
b.write_transaction(threshold_tx_signed)
sleep(8)
# check if the transaction is already in the bigchain
tx_threshold_retrieved = b.get_transaction(threshold_tx_signed['id'])
@ -225,7 +233,7 @@ assert threshold_fulfillment.validate(threshold_tx_fulfillment_message) == True
threshold_tx_transfer['transaction']['fulfillments'][0]['fulfillment'] = threshold_fulfillment.serialize_uri()
assert b.validate_fulfillments(threshold_tx_transfer) == True
assert b.validate_transaction(threshold_tx_transfer) == threshold_tx_transfer
@ -266,7 +274,7 @@ assert b.is_valid_transaction(hashlock_tx_signed) == hashlock_tx_signed
b.write_transaction(hashlock_tx_signed)
print(json.dumps(hashlock_tx_signed, sort_keys=True, indent=4, separators=(',', ':')))
sleep(8)
hashlockuser_priv, hashlockuser_pub = crypto.generate_key_pair()
@ -51,7 +51,7 @@ def test_client_can_create_assets(mock_requests_post, client):
assert tx['transaction']['conditions'][0]['new_owners'][0] == client.public_key
assert tx['transaction']['fulfillments'][0]['input'] is None
assert util.validate_fulfillments(tx)
def test_client_can_transfer_assets(mock_requests_post, mock_bigchaindb_sign, client):
@ -1,11 +1,12 @@
import json
from unittest.mock import Mock, patch
from argparse import Namespace
from pprint import pprint
import copy
import pytest
from tests.db.conftest import setup_database
@pytest.fixture
def mock_run_configure(monkeypatch):
@ -225,3 +226,22 @@ def test_start_rethinkdb_exits_when_cannot_start(mock_popen):
with pytest.raises(exceptions.StartupError):
utils.start_rethinkdb()
def test_set_shards(b):
import rethinkdb as r
from bigchaindb.commands.bigchain import run_set_shards
# set the number of shards
args = Namespace(num_shards=3)
run_set_shards(args)
# retrieve table configuration
table_config = list(r.db('rethinkdb')
                    .table('table_config')
                    .filter(r.row['db'] == b.dbname)
                    .run(b.conn))
# check that the number of shards got set to the correct value
for table in table_config:
if table['name'] in ['backlog', 'bigchain']:
assert len(table['shards']) == 3
@ -34,7 +34,7 @@ def test_transform_create(b, user_sk, user_vk):
assert tx['transaction']['fulfillments'][0]['current_owners'][0] == b.me
assert tx['transaction']['conditions'][0]['new_owners'][0] == user_vk
assert util.validate_fulfillments(tx)
def test_empty_pool_is_populated_with_instances(mock_queue):
@ -58,12 +58,15 @@ def test_load_consensus_plugin_raises_with_invalid_subclass(monkeypatch):
# Monkeypatch entry_point.load to return something other than a
# ConsensusRules instance
from bigchaindb import config_utils
import time
monkeypatch.setattr(config_utils,
                    'iter_entry_points',
                    lambda *args: [type('entry_point', (object,), {'load': lambda: object})])
with pytest.raises(TypeError):
config_utils.load_consensus_plugin()
# Since the function is decorated with `lru_cache`, we need to
# "miss" the cache using a name that has not been used previously
config_utils.load_consensus_plugin(str(time.time()))
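# A toy illustration (plain functools.lru_cache, independent of BigchainDB)
# of why a never-before-seen argument bypasses the cache:
import time
from functools import lru_cache

@lru_cache(maxsize=None)
def load(name=None):
    print('running the body for', name)

load('x')               # first call runs the function body
load('x')               # repeated argument is served from the cache; no print
load(str(time.time()))  # a fresh argument guarantees a cache miss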
def test_map_leafs_iterator():