Merge remote-tracking branch 'remotes/origin/master' into feat/128/multiple-input-output

Conflicts:
	tests/test_util.py
diminator 2016-04-21 15:19:23 +02:00
commit 51addb9cc9
No known key found for this signature in database
GPG Key ID: C3D8590E6D0D439A
24 changed files with 452 additions and 109 deletions

View File

@ -9,7 +9,7 @@ config = {
'server': {
# Note: this section supports all the Gunicorn settings:
# - http://docs.gunicorn.org/en/stable/settings.html
'bind': '0.0.0.0:9984',
'bind': 'localhost:9984',
'workers': None, # if none, the value will be cpu_count * 2 + 1
'threads': None, # if none, the value will be cpu_count * 2 + 1
},

View File

@ -1,5 +1,7 @@
"""Command line interface for the `bigchain` command."""
"""Implementation of the `bigchaindb` command,
which is one of the commands in the BigchainDB
command-line interface.
"""
import os
import sys
@ -38,7 +40,7 @@ def run_configure(args, skip_if_exists=False):
"""Run a script to configure the current node.
Args:
skip_if_exists (bool): skip the function if a conf file already exists
skip_if_exists (bool): skip the function if a config file already exists
"""
config_path = args.config or bigchaindb.config_utils.CONFIG_DEFAULT_PATH
config_file_exists = os.path.exists(config_path)
@ -48,7 +50,7 @@ def run_configure(args, skip_if_exists=False):
if config_file_exists and not args.yes:
want = input('Config file `{}` exists, do you want to override it? '
'(cannot be undone) [y/n]: '.format(config_path))
'(cannot be undone) [y/N]: '.format(config_path))
if want != 'y':
return
@ -56,25 +58,49 @@ def run_configure(args, skip_if_exists=False):
conf = copy.deepcopy(bigchaindb._config)
print('Generating keypair')
conf['keypair']['private'], conf['keypair']['public'] = crypto.generate_key_pair()
conf['keypair']['private'], conf['keypair']['public'] = \
crypto.generate_key_pair()
if not args.yes:
for key in ('bind', ):
val = conf['server'][key]
conf['server'][key] = input('API Server {}? (default `{}`): '.format(key, val)) or val
conf['server'][key] = \
input('API Server {}? (default `{}`): '.format(key, val)) \
or val
for key in ('host', 'port', 'name'):
val = conf['database'][key]
conf['database'][key] = input('Database {}? (default `{}`): '.format(key, val)) or val
conf['database'][key] = \
input('Database {}? (default `{}`): '.format(key, val)) \
or val
for key in ('host', 'port', 'rate'):
val = conf['statsd'][key]
conf['statsd'][key] = input('Statsd {}? (default `{}`): '.format(key, val)) or val
conf['statsd'][key] = \
input('Statsd {}? (default `{}`): '.format(key, val)) \
or val
bigchaindb.config_utils.write_config(conf, config_path)
print('Configuration written to {}'.format(config_path))
print('Ready to go!')
def run_export_my_pubkey(args):
"""Export this node's public key to standard output
"""
logger.debug('bigchaindb args = {}'.format(args))
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
pubkey = bigchaindb.config['keypair']['public']
if pubkey is not None:
print(pubkey)
else:
sys.exit("This node's public key wasn't set anywhere "
"so it can't be exported")
# raises SystemExit exception
# message is sent to stderr
# exits with exit code 1 (signals that an error happened)
def run_init(args):
"""Initialize the database"""
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
@ -103,16 +129,18 @@ def run_start(args):
except DatabaseAlreadyExists:
pass
except KeypairNotFoundException:
sys.exit('Cannot start BigchainDB, no keypair found. Did you run `bigchaindb configure`?')
sys.exit("Can't start BigchainDB, no keypair found. "
'Did you run `bigchaindb configure`?')
processes = Processes()
logger.info('Start bigchaindb main process')
logger.info('Starting BigchainDB main process')
processes.start()
def main():
parser = argparse.ArgumentParser(description='Control your bigchain node.',
parents=[base_parser])
parser = argparse.ArgumentParser(
description='Control your BigchainDB node.',
parents=[base_parser])
# all the commands are contained in the subparsers object,
# the command selected by the user will be stored in `args.command`
@ -121,22 +149,28 @@ def main():
subparsers = parser.add_subparsers(title='Commands',
dest='command')
# parser for writing a config file
subparsers.add_parser('configure',
help='Prepare the config file and create the node keypair')
help='Prepare the config file '
'and create the node keypair')
# parser for database level commands
# parsers for showing/exporting config values
subparsers.add_parser('show-config',
help='Show the current configuration')
subparsers.add_parser('export-my-pubkey',
help="Export this node's public key")
# parser for database-level commands
subparsers.add_parser('init',
help='Init the database')
subparsers.add_parser('drop',
help='Drop the database')
# TODO how about just config, or info?
subparsers.add_parser('show-config',
help='Show the current configuration')
# parser for starting BigchainDB
subparsers.add_parser('start',
help='Start bigchain')
help='Start BigchainDB')
start(parser, globals())

View File

@ -1,4 +1,5 @@
'''Command line interface for the `bigchain-benchmark` command.'''
"""Command line interface for the `bigchaindb-benchmark` command."""
import logging
import argparse

View File

@ -1,4 +1,6 @@
"""Utility functions and basic common arguments for ``argparse.ArgumentParser``."""
"""Utility functions and basic common arguments
for ``argparse.ArgumentParser``.
"""
import argparse
import multiprocessing as mp
@ -7,7 +9,8 @@ import multiprocessing as mp
def start(parser, scope):
"""Utility function to execute a subcommand.
The function will look up in the ``scope`` if there is a function called ``run_<parser.args.command>``
The function will look up in the ``scope``
if there is a function called ``run_<parser.args.command>``
and will run it using ``parser.args`` as first positional argument.
Args:
@ -15,7 +18,8 @@ def start(parser, scope):
scope (dict): map containing (eventually) the functions to be called.
Raises:
NotImplementedError: if ``scope`` doesn't contain a function called ``run_<parser.args.command>``.
NotImplementedError: if ``scope`` doesn't contain a function called
``run_<parser.args.command>``.
"""
args = parser.parse_args()
@ -29,7 +33,8 @@ def start(parser, scope):
# if no command has been found, raise a `NotImplementedError`
if not func:
raise NotImplementedError('Command `{}` not yet implemented'.format(args.command))
raise NotImplementedError('Command `{}` not yet implemented'.
format(args.command))
args.multiprocess = getattr(args, 'multiprocess', False)
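Example (not part of this commit): a minimal sketch of the dispatch convention described in the docstring above. It assumes the module-level `base_parser` that `bigchaindb/commands/bigchain.py` passes as a parent parser elsewhere in this diff; `run_hello` is purely illustrative.
def run_hello(args):
    print('hello from the `{}` command'.format(args.command))
parser = argparse.ArgumentParser(parents=[base_parser])
parser.add_subparsers(title='Commands', dest='command').add_parser('hello')
start(parser, globals())  # a `hello` command resolves to run_hello(args)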

View File

@ -1,8 +1,8 @@
"""Utils to configure Bigchain.
"""Utils to configure BigchainDB.
By calling `file_config`, the global configuration (stored in
`bigchain.config`) will be updated with the values contained in the
configuration file.
`$HOME/.bigchaindb`) will be updated with the values contained
in the configuration file.
Note that there is a precedence in reading configuration values:
- local config file;
@ -54,7 +54,20 @@ def map_leafs(func, mapping):
# Thanks Alex <3
# http://stackoverflow.com/a/3233356/597097
def update(d, u):
"""Recursively update a mapping."""
"""Recursively update a mapping (i.e. a dict, list, set, or tuple).
Conceptually, d and u are two trees (with nodes and edges).
This function goes through all the nodes of u. For each node in u,
if d doesn't have that node yet, then this function adds the node from u,
otherwise this function overwrites the node already in d with u's node.
Args:
d (mapping): The mapping to overwrite and add to.
u (mapping): The mapping to read for changes.
Returns:
mapping: An updated version of d (updated by u).
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = update(d.get(k, {}), v)
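Example (not part of this commit): the recursive merge the docstring above describes, shown with illustrative literals.
defaults = {'database': {'host': 'localhost', 'port': 28015}}
changes = {'database': {'port': 28016}, 'api_endpoint': 'http://localhost:9984/api/v1'}
update(defaults, changes)
# defaults is now {'database': {'host': 'localhost', 'port': 28016},
#                  'api_endpoint': 'http://localhost:9984/api/v1'}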
@ -65,19 +78,21 @@ def update(d, u):
def file_config(filename=None):
"""Returns the values found in a configuration file.
"""Returns the config values found in a configuration file.
Args:
filename (str): the JSON file with the configuration. Defaults to ``None``.
If ``None``, the HOME of the current user and the string ``.bigchaindb`` will be used.
filename (str): the JSON file with the configuration values.
If ``None``, CONFIG_DEFAULT_PATH will be used.
Note:
The function merges the values in ``filename`` with the **default configuration**,
so any update made to ``bigchaindb.config`` will be lost.
Returns:
dict: The config values in the specified config file (or the
file at CONFIG_DEFAULT_PATH, if filename == None)
"""
logger.debug('On entry into file_config(), filename = {}'.format(filename))
if not filename:
filename = CONFIG_DEFAULT_PATH
logger.debug('file_config() will try to open `{}`'.format(filename))
with open(filename) as f:
config = json.load(f)
@ -145,17 +160,21 @@ def update_types(config, reference, list_sep=':'):
return map_leafs(_update_type, config)
def dict_config(config):
"""Merge the provided configuration with the default one.
def set_config(config):
"""Set bigchaindb.config equal to the default config dict,
then update that with whatever is in the provided config dict,
and then set bigchaindb.config['CONFIGURED'] = True
Args:
newconfig (dict): a dictionary with the configuration to load.
config (dict): the config dict to read for changes
to the default config
Note:
The function merges ``newconfig`` with the **default configuration**, so any
update made to ``bigchaindb.config`` will be lost.
Any previous changes made to ``bigchaindb.config`` will be lost.
"""
# Deep copy the default config into bigchaindb.config
bigchaindb.config = copy.deepcopy(bigchaindb._config)
# Update the default config with whatever is in the passed config
update(bigchaindb.config, update_types(config, bigchaindb.config))
bigchaindb.config['CONFIGURED'] = True
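Example (not part of this commit): the tests further down in this diff call the renamed function like this.
from bigchaindb import config_utils
config_utils.set_config({'keypair': {'public': 'a', 'private': 'b'}})
# bigchaindb.config is now a copy of the default config, updated with the
# keypair above, and bigchaindb.config['CONFIGURED'] is True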
@ -193,8 +212,7 @@ def autoconfigure(filename=None, config=None, force=False):
if config:
newconfig = update(newconfig, config)
dict_config(newconfig)
return newconfig
set_config(newconfig) # sets bigchaindb.config
def load_consensus_plugin(name=None):

View File

@ -1,6 +1,9 @@
import copy
import json
import time
import contextlib
import threading
import queue
import multiprocessing as mp
from datetime import datetime
@ -33,6 +36,61 @@ class ProcessGroup(object):
self.processes.append(proc)
# Inspired by:
# - http://stackoverflow.com/a/24741694/597097
def pool(builder, size, timeout=None):
"""Create a pool that imposes a limit on the number of stored
instances.
Args:
builder: a function to build an instance.
size: the size of the pool.
timeout(Optional[float]): the seconds to wait before raising
a ``queue.Empty`` exception if no instances are available
within that time.
Raises:
If ``timeout`` is defined but the request is taking longer
than the specified time, the context manager will raise
a ``queue.Empty`` exception.
Returns:
A context manager that can be used with the ``with``
statement.
"""
lock = threading.Lock()
local_pool = queue.Queue()
current_size = 0
@contextlib.contextmanager
def pooled():
nonlocal current_size
instance = None
# If we still have free slots, then we have room to create new
# instances.
if current_size < size:
with lock:
# We need to check again if we have slots available, since
# the situation might be different after acquiring the lock
if current_size < size:
current_size += 1
instance = builder()
# Watch out: current_size can be equal to size if the previous part of
# the function has been executed; that's why we need to check if the
# instance is None.
if instance is None:
instance = local_pool.get(timeout=timeout)
yield instance
local_pool.put(instance)
return pooled
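Example (not part of this commit): a minimal usage sketch of the pool above; the builder and the sizes are illustrative, and the tests added later in this diff exercise the same pattern.
conn_pool = pool(lambda: 'connection', size=2, timeout=5)
with conn_pool() as conn:
    # at most `size` instances are ever built; once all of them are checked
    # out, further callers block and raise queue.Empty after `timeout` seconds
    print(conn)  # -> 'connection'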
def serialize(data):
"""Serialize a dict into a JSON formatted string.

View File

@ -8,6 +8,7 @@ import multiprocessing
from flask import Flask
from bigchaindb import util
from bigchaindb import Bigchain
from bigchaindb.web import views
import gunicorn.app.base
@ -45,7 +46,7 @@ class StandaloneApplication(gunicorn.app.base.BaseApplication):
return self.application
def create_app(debug=False):
def create_app(settings):
"""Return an instance of the Flask application.
Args:
@ -54,8 +55,8 @@ def create_app(debug=False):
"""
app = Flask(__name__)
app.debug = debug
app.config['bigchain'] = Bigchain()
app.debug = settings.get('debug', False)
app.config['bigchain_pool'] = util.pool(Bigchain, size=settings.get('threads', 4))
app.register_blueprint(views.basic_views, url_prefix='/api/v1')
return app
@ -79,8 +80,7 @@ def create_server(settings):
if not settings.get('threads'):
settings['threads'] = (multiprocessing.cpu_count() * 2) + 1
debug = settings.pop('debug', False)
app = create_app(debug)
app = create_app(settings)
standalone = StandaloneApplication(app, settings)
return standalone
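Example (not part of this commit): a hedged sketch of using create_server(); the settings mirror the config shown elsewhere in this diff, workers and threads fall back to the defaults computed above, and run() is the standard Gunicorn BaseApplication entry point.
from bigchaindb.web import server
gunicorn_app = server.create_server({'bind': 'localhost:9984'})
gunicorn_app.run()  # serves the Flask app (and its Bigchain pool) via Gunicorn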

View File

@ -15,12 +15,11 @@ basic_views = Blueprint('basic_views', __name__)
@basic_views.record
def get_bigchain(state):
bigchain = state.app.config.get('bigchain')
bigchain_pool = state.app.config.get('bigchain_pool')
if bigchain is None:
if bigchain_pool is None:
raise Exception('This blueprint expects you to provide '
'database access through `bigchain`')
'a pool of Bigchain instances called `bigchain_pool`')
@basic_views.route('/transactions/<tx_id>')
@ -34,9 +33,11 @@ def get_transaction(tx_id):
A JSON string containing the data about the transaction.
"""
bigchain = current_app.config['bigchain']
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
tx = bigchain.get_transaction(tx_id)
tx = bigchain.get_transaction(tx_id)
return flask.jsonify(**tx)
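Example (not part of this commit): querying the endpoint above from a client, assuming a node running on the default `localhost:9984` bind, the third-party `requests` package, and a hypothetical transaction id.
import requests
tx_id = 'abc123'  # hypothetical transaction id
r = requests.get('http://localhost:9984/api/v1/transactions/{}'.format(tx_id))
print(r.json())  # the transaction document returned by flask.jsonify above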
@ -47,7 +48,7 @@ def create_transaction():
Return:
A JSON string containing the data about the transaction.
"""
bigchain = current_app.config['bigchain']
pool = current_app.config['bigchain_pool']
val = {}
@ -55,15 +56,15 @@ def create_transaction():
# set to `application/json`
tx = request.get_json(force=True)
if tx['transaction']['operation'] == 'CREATE':
tx = util.transform_create(tx)
tx = bigchain.consensus.sign_transaction(
tx, private_key=bigchain.me_private)
with pool() as bigchain:
if tx['transaction']['operation'] == 'CREATE':
tx = util.transform_create(tx)
tx = bigchain.consensus.sign_transaction(tx, private_key=bigchain.me_private)
if not bigchain.consensus.verify_signature(tx):
val['error'] = 'Invalid transaction signature'
if not bigchain.consensus.verify_signature(tx):
val['error'] = 'Invalid transaction signature'
val = bigchain.write_transaction(tx)
val = bigchain.write_transaction(tx)
return flask.jsonify(**tx)

View File

@ -28,5 +28,7 @@ coverage:
- "tests/*"
comment:
layout: "header, diff, changes, sunburst, suggestions"
# @stevepeak (from codecov.io) suggested we change 'suggestions' to 'uncovered'
# in the following line. Thanks Steve!
layout: "header, diff, changes, sunburst, uncovered"
behavior: default

View File

@ -0,0 +1,43 @@
# -*- coding: utf-8 -*-
"""Release all allocated but non-associated elastic IP addresses
(EIPs). Why? From the AWS docs:
``To ensure efficient use of Elastic IP addresses, we impose a small
hourly charge if an Elastic IP address is not associated with a
running instance, or if it is associated with a stopped instance or
an unattached network interface. While your instance is running,
you are not charged for one Elastic IP address associated with the
instance, but you are charged for any additional Elastic IP
addresses associated with the instance. For more information, see
Amazon EC2 Pricing.''
Source: http://tinyurl.com/ozhxatx
"""
from __future__ import unicode_literals
import boto3
from awscommon import get_naeips
# Get an AWS EC2 "resource"
# See http://boto3.readthedocs.org/en/latest/guide/resources.html
ec2 = boto3.resource(service_name='ec2')
# Create a client from the EC2 resource
# See http://boto3.readthedocs.org/en/latest/guide/clients.html
client = ec2.meta.client
non_associated_eips = get_naeips(client)
print('You have {} allocated elastic IPs which are '
'not associated with instances'.
format(len(non_associated_eips)))
for i, eip in enumerate(non_associated_eips):
public_ip = eip['PublicIp']
print('{}: Releasing {}'.format(i, public_ip))
domain = eip['Domain']
print('(It has Domain = {}.)'.format(domain))
if domain == 'vpc':
client.release_address(AllocationId=eip['AllocationId'])
else:
client.release_address(PublicIp=public_ip)

View File

@ -1,27 +1,46 @@
# The BigchainDB Command Line Interfaces (CLIs)
# The BigchainDB Command Line Interface (CLI)
BigchainDB has some Command Line Interfaces (CLIs). One of them is the `bigchaindb` command which we already saw when we first started BigchainDB using:
```text
$ bigchaindb configure
$ bigchaindb start
```
There are some command-line commands for working with BigchainDB: `bigchaindb` and `bigchaindb-benchmark`. This section provides an overview of those commands.
When you run `bigchaindb configure`, it creates a default configuration file in `$HOME/.bigchaindb`. You can check that configuration using:
```text
$ bigchaindb show-config
```
## bigchaindb
To find out what else you can do with the `bigchain` command, use:
```text
$ bigchaindb -h
```
### bigchaindb --help
There's another command named `bigchaindb-benchmark`. It's used to run benchmarking tests. You can learn more about it using:
One can get basic help with the `bigchaindb` command using `bigchaindb --help` or `bigchaindb -h`.
### bigchaindb configure
This command generates a public/private keypair for the node, and writes a BigchainDB configuration file to the node's file system. It's documented in the section [Configuring a BigchainDB Node](configuration.html).
If you want to force-generate a new configuration file regardless of whether one already exists (i.e. skipping the yes/no prompt), then use `bigchaindb -y configure`.
### bigchaindb show-config
This command shows the values of the configuration settings, which can come from a variety of sources. See [the section on configuring BigchainDB](configuration.html) for more details and examples.
### bigchaindb export-my-pubkey
This command writes the node's public key (i.e. one of its configuration values) to standard output (stdout).
### bigchaindb init
This command creates a RethinkDB database, two RethinkDB database tables (backlog and bigchain), various RethinkDB database indexes, and the genesis block.
Note: The `bigchaindb start` command (see below) always starts by trying a `bigchaindb init` first. If it sees that the RethinkDB database already exists, then it doesn't re-initialize the database. One doesn't have to do `bigchaindb init` before `bigchaindb start`. `bigchaindb init` is useful if you only want to initialize (but not start).
### bigchaindb drop
This command drops (erases) the RethinkDB database. You will be prompted to make sure. If you want to force-drop the database (i.e. skipping the yes/no prompt), then use `bigchaindb -y drop`.
### bigchaindb start
This command starts BigchainDB. It always begins by trying a `bigchaindb init` first. See the note in the documentation for `bigchaindb init`.
## bigchaindb-benchmark
The `bigchaindb-benchmark` command is used to run benchmarking tests. You can learn more about it using:
```text
$ bigchaindb-benchmark -h
$ bigchaindb-benchmark load -h
```
Note that you can always start `bigchaindb` using a different config file using the `-c` option.
For more information check the help with `bigchaindb -h`.

View File

@ -71,7 +71,7 @@ author = 'BigchainDB Contributors'
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.4'
release = '0.1.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

View File

@ -65,13 +65,20 @@ environment variables available are:
- `BIGCHAINDB_STATSD_RATE` is a float between `0` and `1` that defines the fraction of transaction operations sampled.
- `BIGCHAINDB_API_ENDPOINT` defines the API endpoint to use (e.g. `http://localhost:9984/api/v1`).
- `BIGCHAINDB_CONSENSUS_PLUGIN` defines the name of the [consensus plugin](consensus.html) to use.
- `BIGCHAINDB_SERVER_BIND` defines where to bind the server socket, the format is `addr:port` (e.g. `0.0.0.0:9984`).
- `BIGCHAINDB_SERVER_BIND` defines where to bind the server socket, the format is `addr:port` (e.g. `localhost:9984`).
- `BIGCHAINDB_SERVER_WORKERS` defines the [number of workers](http://docs.gunicorn.org/en/stable/settings.html#workers)
to start for the server API.
- `BIGCHAINDB_SERVER_THREADS` defines the [number of threads](http://docs.gunicorn.org/en/stable/settings.html#threads)
to start for the server API.
## Configuring the API Server
The API Server is powered by [Gunicorn](http://gunicorn.org/), a Python WSGI HTTP Server for UNIX.
If you need to tweak some settings for the API server you can manually edit your `.bigchaindb` config file:
the `server` section accepts all the options specified in the
[Gunicorn settings](http://docs.gunicorn.org/en/stable/settings.html) documentation.
## Order of Precedence in Determining Configuration Values
All configuration values start with their default values (defined in `bigchaindb.__init__`), but a default value can be overridden by an environment variable, and a value set by an environment variable can be overridden by a value in a local configuration file (`$HOME/.bigchaindb` or the location specified by the `-c` command-line option).
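For illustration (a hedged sketch, not part of the original docs), this precedence is what `bigchaindb.config_utils.autoconfigure()` applies when BigchainDB loads its settings; the environment variable name below is one of those listed earlier:
```python
import os
import bigchaindb
import bigchaindb.config_utils

os.environ['BIGCHAINDB_SERVER_BIND'] = '0.0.0.0:9984'  # overrides the default...
bigchaindb.config_utils.autoconfigure(force=True)      # ...unless a local config file sets it
print(bigchaindb.config['server']['bind'])
```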
@ -129,7 +136,7 @@ you will get the following values for all the configuration settings:
"pubkey1"
],
"server": {
"bind": "0.0.0.0:9984",
"bind": "localhost:9984",
"threads": null,
"workers": null
},
@ -164,7 +171,7 @@ WARNING:bigchaindb.config_utils:Cannot find config file `/home/vrde/.bigchaindb`
},
"keyring": [],
"server": {
"bind": "0.0.0.0:9984",
"bind": "localhost:9984",
"threads": null,
"workers": null
},
@ -213,7 +220,7 @@ WARNING:bigchaindb.config_utils:Cannot find config file `/home/vrde/.bigchaindb`
},
"keyring": [],
"server": {
"bind": "0.0.0.0:9984",
"bind": "localhost:9984",
"threads": null,
"workers": null
},

View File

@ -17,11 +17,10 @@ data = "message"
tx_hash = hashlib.sha3_256(data).hexdigest()
```
## Signature algorithm and keys
## Signature Algorithm and Keys
The signature algorithm used by BigchainDB is [ED25519](https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-04)
using the python [ed25519](https://github.com/warner/python-ed25519) module, overloaded by the [cryptoconditions library](https://github.com/bigchaindb/cryptoconditions).
BigchainDB uses the [Ed25519](https://ed25519.cr.yp.to/) public-key signature system for generating its public/private key pairs (also called verifying/signing keys). Ed25519 is an instance of the [Edwards-curve Digital Signature Algorithm (EdDSA)](https://en.wikipedia.org/wiki/EdDSA). As of April 2016, EdDSA was in ["Internet-Draft" status with the IETF](https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05) but was [already widely used](https://ianix.com/pub/ed25519-deployment.html).
The private key is the base58 encoded hexadecimal representation of private number.
The public key is the base58 encoded hexadecimal representation of the
compressed public numbers.
BigchainDB uses the [ed25519](https://github.com/warner/python-ed25519) Python package, overloaded by the [cryptoconditions library](https://github.com/bigchaindb/cryptoconditions).
All keys are represented with the base58 encoding by default.
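For illustration (a hedged sketch, not part of the original page), a keypair can be generated with the same helper that the `bigchaindb configure` command uses elsewhere in this diff:
```python
from bigchaindb import crypto

private_key, public_key = crypto.generate_key_pair()
print(private_key)  # base58-encoded signing (private) key
print(public_key)   # base58-encoded verifying (public) key
```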

View File

@ -122,9 +122,17 @@ bigchaindb --help
bigchaindb show-config
```
There are fees associated with running instances on EC2, so if you're not using them, you should terminate them. You can do that from the AWS EC2 Console.
There are fees associated with running instances on EC2, so if you're not using them, you should terminate them. You can do that using the AWS EC2 Console.
The same is true of your allocated elastic IP addresses. There's a small fee to keep them allocated if they're not associated with a running instance. You can release them from the AWS EC2 Console.
The same is true of your allocated elastic IP addresses. There's a small fee to keep them allocated if they're not associated with a running instance. You can release them using the AWS EC2 Console, or by using a handy little script named `release_eips.py`. For example:
```text
$ python release_eips.py
You have 2 allocated elastic IPs which are not associated with instances
0: Releasing 52.58.110.110
(It has Domain = vpc.)
1: Releasing 52.58.107.211
(It has Domain = vpc.)
```
## Known Deployment Issues

View File

@ -5,8 +5,10 @@ When you start Bigchaindb using `bigchaindb start`, an HTTP API is exposed at:
- [http://localhost:9984/api/v1/](http://localhost:9984/api/v1/)
Please note that by default the server binds to `0.0.0.0:9984`, hence the API
is exposed to the world.
Please note that for security reasons the server binds to `localhost:9984`.
If you want to bind the server to `0.0.0.0` we recommend you to read
[Deploying Gunicorn](http://docs.gunicorn.org/en/stable/deploy.html) and
follow the instructions to deploy it in production.
The HTTP API currently exposes two endpoints, one to get information about a
specific transaction id, and one to push a transaction to the BigchainDB

View File

@ -95,16 +95,17 @@ $ rethinkdb
Then open a different terminal and run:
```text
$ bigchaindb -y configure
$ bigchaindb init
```
That creates a configuration file in `$HOME/.bigchaindb` (documented in [the section on configuration](configuration.html)), initializes the database, creates the tables, creates the indexes, and generates the genesis block.
That creates a configuration file in `$HOME/.bigchaindb` (documented in [the section on configuration](configuration.html)). More documentation about the `bigchaindb` command is in the section on [the BigchainDB Command Line Interface (CLI)](bigchaindb-cli.html).
You can start BigchainDB Server using:
```text
$ bigchaindb start
```
If it's the first time you've run `bigchaindb start`, then it creates the database (a RethinkDB database), the tables, the indexes, and the genesis block. It then starts BigchainDB. If you've run `bigchaindb start` or `bigchaindb init` before (and you haven't dropped the database), then `bigchaindb start` just starts BigchainDB.
## Run BigchainDB with Docker
@ -130,7 +131,7 @@ stored on your host machine under ` ~/.bigchaindb_docker/config`:
$ docker-compose run --rm bigchaindb bigchaindb configure
Starting bigchaindb_rethinkdb-data_1
Generating keypair
API Server bind? (default `0.0.0.0:9984`):
API Server bind? (default `localhost:9984`):
Database host? (default `localhost`): rethinkdb
Database port? (default `28015`):
Database name? (default `bigchain`):

View File

@ -32,7 +32,7 @@ docs_require = [
setup(
name='BigchainDB',
version='0.1.4',
version='0.1.5',
description='BigchainDB: A Scalable Blockchain Database',
long_description=__doc__,
url='https://github.com/BigchainDB/bigchaindb/',

View File

@ -45,7 +45,7 @@ def ignore_local_config_file(monkeypatch):
@pytest.fixture(scope='function', autouse=True)
def restore_config(request, node_config):
from bigchaindb import config_utils
config_utils.dict_config(node_config)
config_utils.set_config(node_config)
@pytest.fixture(scope='module')

View File

@ -17,7 +17,7 @@ from bigchaindb.db import get_conn
@pytest.fixture(autouse=True)
def restore_config(request, node_config):
from bigchaindb import config_utils
config_utils.dict_config(node_config)
config_utils.set_config(node_config)
@pytest.fixture(scope='module', autouse=True)

View File

@ -82,7 +82,7 @@ def test_bigchain_run_start_assume_yes_create_default_config(monkeypatch, mock_p
value['return'] = newconfig
monkeypatch.setattr(config_utils, 'write_config', mock_write_config)
monkeypatch.setattr(config_utils, 'file_config', lambda x: config_utils.dict_config(expected_config))
monkeypatch.setattr(config_utils, 'file_config', lambda x: config_utils.set_config(expected_config))
monkeypatch.setattr('os.path.exists', lambda path: False)
args = Namespace(config=None, yes=True)
@ -108,6 +108,42 @@ def test_bigchain_show_config(capsys):
assert output_config == config
def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch):
from bigchaindb import config
from bigchaindb.commands.bigchain import run_export_my_pubkey
args = Namespace(config='dummy')
# so in run_export_my_pubkey(args) below,
# filename=args.config='dummy' is passed to autoconfigure().
# We just assume autoconfigure() works and sets
# config['keypair']['public'] correctly (tested elsewhere).
# We force-set config['keypair']['public'] using monkeypatch.
monkeypatch.setitem(config['keypair'], 'public', 'Charlie_Bucket')
_, _ = capsys.readouterr() # has the effect of clearing capsys
run_export_my_pubkey(args)
out, err = capsys.readouterr()
assert out == config['keypair']['public'] + '\n'
assert out == 'Charlie_Bucket\n'
def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch):
from bigchaindb import config
from bigchaindb.commands.bigchain import run_export_my_pubkey
args = Namespace(config='dummy')
monkeypatch.setitem(config['keypair'], 'public', None)
# assert that run_export_my_pubkey(args) raises SystemExit:
with pytest.raises(SystemExit) as exc_info:
run_export_my_pubkey(args)
# exc_info is an object of class ExceptionInfo
# https://pytest.org/latest/builtin.html#_pytest._code.ExceptionInfo
assert exc_info.type == SystemExit
# exc_info.value is an object of class SystemExit
# https://docs.python.org/3/library/exceptions.html#SystemExit
assert exc_info.value.code == \
"This node's public key wasn't set anywhere so it can't be exported"
def test_bigchain_run_init_when_db_exists(mock_db_init_with_existing_db):
from bigchaindb.commands.bigchain import run_init
args = Namespace(config=None)
@ -159,4 +195,3 @@ def test_run_configure_when_config_does_exist(monkeypatch,
args = Namespace(config='foo', yes=None)
run_configure(args)
assert value == {}

View File

@ -1,7 +1,32 @@
from bigchaindb import util
import pytest
import queue
@pytest.fixture
def mock_queue(monkeypatch):
class MockQueue:
items = []
def get(self, timeout=None):
try:
return self.items.pop()
except IndexError:
if timeout:
raise queue.Empty()
raise
def put(self, item):
self.items.append(item)
mockqueue = MockQueue()
monkeypatch.setattr('queue.Queue', lambda: mockqueue)
return mockqueue
def test_transform_create(b, user_sk, user_vk):
from bigchaindb import util
tx = util.create_tx(user_vk, user_vk, None, 'CREATE')
tx = util.transform_create(tx)
tx = util.sign_tx(tx, b.me_private)
@ -10,3 +35,88 @@ def test_transform_create(b, user_sk, user_vk):
assert tx['transaction']['conditions'][0]['new_owners'][0] == user_vk
assert util.verify_signature(tx)
def test_empty_pool_is_populated_with_instances(mock_queue):
from bigchaindb import util
pool = util.pool(lambda: 'hello', 4)
assert len(mock_queue.items) == 0
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 1
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 2
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 3
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 4
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 4
def test_pool_blocks_if_no_instances_available(mock_queue):
from bigchaindb import util
pool = util.pool(lambda: 'hello', 4)
assert len(mock_queue.items) == 0
# We need to manually trigger the `__enter__` method so the context
# manager will "hang" and not return the resource to the pool
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
# We need to keep a reference of the last context manager so we can
# manually release the resource
last = pool()
assert last.__enter__() == 'hello'
assert len(mock_queue.items) == 0
# This would block using `queue.Queue`, but since we mocked it, it will
# just raise an IndexError because it's trying to pop from an empty list.
with pytest.raises(IndexError):
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
# Release the last resource
last.__exit__(None, None, None)
assert len(mock_queue.items) == 1
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
def test_pool_raises_empty_exception_when_timeout(mock_queue):
from bigchaindb import util
pool = util.pool(lambda: 'hello', 1, timeout=1)
assert len(mock_queue.items) == 0
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 1
# take the only resource available
assert pool().__enter__() == 'hello'
with pytest.raises(queue.Empty):
with pool() as instance:
assert instance == 'hello'

View File

@ -18,7 +18,7 @@ def test_bigchain_instance_is_initialized_when_conf_provided():
from bigchaindb import config_utils
assert 'CONFIGURED' not in bigchaindb.config
config_utils.dict_config({'keypair': {'public': 'a', 'private': 'b'}})
config_utils.set_config({'keypair': {'public': 'a', 'private': 'b'}})
assert bigchaindb.config['CONFIGURED'] is True
b = bigchaindb.Bigchain()
@ -142,7 +142,7 @@ def test_autoconfigure_read_both_from_file_and_env(monkeypatch):
assert bigchaindb.config == {
'CONFIGURED': True,
'server': {
'bind': '0.0.0.0:9984',
'bind': 'localhost:9984',
'workers': None,
'threads': None,
},

View File

@ -5,7 +5,7 @@ from ..db import conftest
@pytest.fixture(autouse=True)
def restore_config(request, node_config):
from bigchaindb import config_utils
config_utils.dict_config(node_config)
config_utils.set_config(node_config)
@pytest.fixture(scope='module', autouse=True)
@ -25,7 +25,7 @@ def app(request, node_config):
restore_config(request, node_config)
from bigchaindb.web import server
app = server.create_app(debug=True)
app = server.create_app({'debug': True})
return app