Repository: https://github.com/bigchaindb/bigchaindb.git (mirror, synced 2024-10-13 13:34:05 +00:00)
Commit: 51addb9cc9

Merge remote-tracking branch 'remotes/origin/master' into feat/128/multiple-input-output

Conflicts:
	tests/test_util.py
@@ -9,7 +9,7 @@ config = {
     'server': {
         # Note: this section supports all the Gunicorn settings:
         # - http://docs.gunicorn.org/en/stable/settings.html
-        'bind': '0.0.0.0:9984',
+        'bind': 'localhost:9984',
         'workers': None,  # if none, the value will be cpu_count * 2 + 1
         'threads': None,  # if none, the value will be cpu_count * 2 + 1
     },
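For orientation, the `server` section touched by this hunk is just a nested Python dict in the package's default configuration. A minimal sketch of what the post-change defaults look like (field names and values are taken from the hunk above; the surrounding structure is illustrative, not a verbatim copy of `bigchaindb/__init__.py`):

```python
# Sketch of the default `server` section after this change.
config = {
    'server': {
        # Note: this section supports all the Gunicorn settings:
        # - http://docs.gunicorn.org/en/stable/settings.html
        'bind': 'localhost:9984',   # was '0.0.0.0:9984' before this commit
        'workers': None,            # if None, cpu_count * 2 + 1 is used
        'threads': None,            # if None, cpu_count * 2 + 1 is used
    },
}

if __name__ == '__main__':
    print(config['server']['bind'])  # -> localhost:9984
```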
@@ -1,5 +1,7 @@
-"""Command line interface for the `bigchain` command."""
+"""Implementation of the `bigchaindb` command,
+which is one of the commands in the BigchainDB
+command-line interface.
+"""

 import os
 import sys
@@ -38,7 +40,7 @@ def run_configure(args, skip_if_exists=False):
     """Run a script to configure the current node.

     Args:
-        skip_if_exists (bool): skip the function if a conf file already exists
+        skip_if_exists (bool): skip the function if a config file already exists
     """
     config_path = args.config or bigchaindb.config_utils.CONFIG_DEFAULT_PATH
     config_file_exists = os.path.exists(config_path)
@@ -48,7 +50,7 @@ def run_configure(args, skip_if_exists=False):

     if config_file_exists and not args.yes:
         want = input('Config file `{}` exists, do you want to override it? '
-                     '(cannot be undone) [y/n]: '.format(config_path))
+                     '(cannot be undone) [y/N]: '.format(config_path))
         if want != 'y':
             return

@@ -56,25 +58,49 @@ def run_configure(args, skip_if_exists=False):
     conf = copy.deepcopy(bigchaindb._config)

     print('Generating keypair')
-    conf['keypair']['private'], conf['keypair']['public'] = crypto.generate_key_pair()
+    conf['keypair']['private'], conf['keypair']['public'] = \
+        crypto.generate_key_pair()

     if not args.yes:
         for key in ('bind', ):
             val = conf['server'][key]
-            conf['server'][key] = input('API Server {}? (default `{}`): '.format(key, val)) or val
+            conf['server'][key] = \
+                input('API Server {}? (default `{}`): '.format(key, val)) \
+                or val

         for key in ('host', 'port', 'name'):
             val = conf['database'][key]
-            conf['database'][key] = input('Database {}? (default `{}`): '.format(key, val)) or val
+            conf['database'][key] = \
+                input('Database {}? (default `{}`): '.format(key, val)) \
+                or val

         for key in ('host', 'port', 'rate'):
             val = conf['statsd'][key]
-            conf['statsd'][key] = input('Statsd {}? (default `{}`): '.format(key, val)) or val
+            conf['statsd'][key] = \
+                input('Statsd {}? (default `{}`): '.format(key, val)) \
+                or val

     bigchaindb.config_utils.write_config(conf, config_path)
+    print('Configuration written to {}'.format(config_path))
     print('Ready to go!')


+def run_export_my_pubkey(args):
+    """Export this node's public key to standard output
+    """
+    logger.debug('bigchaindb args = {}'.format(args))
+    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
+    pubkey = bigchaindb.config['keypair']['public']
+    if pubkey is not None:
+        print(pubkey)
+    else:
+        sys.exit("This node's public key wasn't set anywhere "
+                 "so it can't be exported")
+        # raises SystemExit exception
+        # message is sent to stderr
+        # exits with exit code 1 (signals tha an error happened)
+
+
 def run_init(args):
     """Initialize the database"""
     bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
@@ -103,16 +129,18 @@ def run_start(args):
     except DatabaseAlreadyExists:
         pass
     except KeypairNotFoundException:
-        sys.exit('Cannot start BigchainDB, no keypair found. Did you run `bigchaindb configure`?')
+        sys.exit("Can't start BigchainDB, no keypair found. "
+                 'Did you run `bigchaindb configure`?')

     processes = Processes()
-    logger.info('Start bigchaindb main process')
+    logger.info('Starting BigchainDB main process')
     processes.start()


 def main():
-    parser = argparse.ArgumentParser(description='Control your bigchain node.',
-                                     parents=[base_parser])
+    parser = argparse.ArgumentParser(
+        description='Control your BigchainDB node.',
+        parents=[base_parser])

     # all the commands are contained in the subparsers object,
     # the command selected by the user will be stored in `args.command`
@@ -121,22 +149,28 @@ def main():
     subparsers = parser.add_subparsers(title='Commands',
                                        dest='command')

+    # parser for writing a config file
     subparsers.add_parser('configure',
-                          help='Prepare the config file and create the node keypair')
+                          help='Prepare the config file '
+                               'and create the node keypair')

-    # parser for database level commands
+    # parsers for showing/exporting config values
+    subparsers.add_parser('show-config',
+                          help='Show the current configuration')
+
+    subparsers.add_parser('export-my-pubkey',
+                          help="Export this node's public key")
+
+    # parser for database-level commands
     subparsers.add_parser('init',
                           help='Init the database')

     subparsers.add_parser('drop',
                           help='Drop the database')

-    # TODO how about just config, or info?
-    subparsers.add_parser('show-config',
-                          help='Show the current configuration')
+    # parser for starting BigchainDB

     subparsers.add_parser('start',
-                          help='Start bigchain')
+                          help='Start BigchainDB')

     start(parser, globals())

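The new `export-my-pubkey` subcommand is a thin wrapper around `run_export_my_pubkey(args)`, and the tests added later in this commit drive it directly with an `argparse.Namespace`. A minimal sketch of that calling pattern, assuming the bigchaindb package from this commit is importable and configured (`'dummy'` is just the placeholder path the tests use, not a real file requirement):

```python
# Sketch: invoking the new subcommand's implementation directly,
# mirroring the tests added later in this commit.
from argparse import Namespace

from bigchaindb.commands.bigchain import run_export_my_pubkey

args = Namespace(config='dummy')
run_export_my_pubkey(args)  # prints the node's public key, or exits via sys.exit()
```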
@@ -1,4 +1,5 @@
-'''Command line interface for the `bigchain-benchmark` command.'''
+"""Command line interface for the `bigchaindb-benchmark` command."""
+
 import logging
 import argparse

@@ -1,4 +1,6 @@
-"""Utility functions and basic common arguments for ``argparse.ArgumentParser``."""
+"""Utility functions and basic common arguments
+for ``argparse.ArgumentParser``.
+"""

 import argparse
 import multiprocessing as mp
@@ -7,7 +9,8 @@ import multiprocessing as mp
 def start(parser, scope):
     """Utility function to execute a subcommand.

-    The function will look up in the ``scope`` if there is a function called ``run_<parser.args.command>``
+    The function will look up in the ``scope``
+    if there is a function called ``run_<parser.args.command>``
     and will run it using ``parser.args`` as first positional argument.

     Args:
@@ -15,7 +18,8 @@ def start(parser, scope):
         scope (dict): map containing (eventually) the functions to be called.

     Raises:
-        NotImplementedError: if ``scope`` doesn't contain a function called ``run_<parser.args.command>``.
+        NotImplementedError: if ``scope`` doesn't contain a function called
+            ``run_<parser.args.command>``.
     """
     args = parser.parse_args()

@@ -29,7 +33,8 @@ def start(parser, scope):

     # if no command has been found, raise a `NotImplementedError`
     if not func:
-        raise NotImplementedError('Command `{}` not yet implemented'.format(args.command))
+        raise NotImplementedError('Command `{}` not yet implemented'.
+                                  format(args.command))

     args.multiprocess = getattr(args, 'multiprocess', False)

@@ -1,8 +1,8 @@
-"""Utils to configure Bigchain.
+"""Utils to configure BigchainDB.

 By calling `file_config`, the global configuration (stored in
-`bigchain.config`) will be updated with the values contained in the
-configuration file.
+`$HOME/.bigchaindb`) will be updated with the values contained
+in the configuration file.

 Note that there is a precedence in reading configuration values:
 - local config file;
@@ -54,7 +54,20 @@ def map_leafs(func, mapping):
 # Thanks Alex <3
 # http://stackoverflow.com/a/3233356/597097
 def update(d, u):
-    """Recursively update a mapping."""
+    """Recursively update a mapping (i.e. a dict, list, set, or tuple).
+
+    Conceptually, d and u are two sets trees (with nodes and edges).
+    This function goes through all the nodes of u. For each node in u,
+    if d doesn't have that node yet, then this function adds the node from u,
+    otherwise this function overwrites the node already in d with u's node.
+
+    Args:
+        d (mapping): The mapping to overwrite and add to.
+        u (mapping): The mapping to read for changes.
+
+    Returns:
+        mapping: An updated version of d (updated by u).
+    """
     for k, v in u.items():
         if isinstance(v, collections.Mapping):
             r = update(d.get(k, {}), v)
@@ -65,19 +78,21 @@ def update(d, u):


 def file_config(filename=None):
-    """Returns the values found in a configuration file.
+    """Returns the config values found in a configuration file.

     Args:
-        filename (str): the JSON file with the configuration. Defaults to ``None``.
-            If ``None``, the HOME of the current user and the string ``.bigchaindb`` will be used.
+        filename (str): the JSON file with the configuration values.
+            If ``None``, CONFIG_DEFAULT_PATH will be used.

-    Note:
-        The function merges the values in ``filename`` with the **default configuration**,
-        so any update made to ``bigchaindb.config`` will be lost.
+    Returns:
+        dict: The config values in the specified config file (or the
+        file at CONFIG_DEFAULT_PATH, if filename == None)
     """
+    logger.debug('On entry into file_config(), filename = {}'.format(filename))
     if not filename:
         filename = CONFIG_DEFAULT_PATH

+    logger.debug('file_config() will try to open `{}`'.format(filename))
     with open(filename) as f:
         config = json.load(f)

@@ -145,17 +160,21 @@ def update_types(config, reference, list_sep=':'):
     return map_leafs(_update_type, config)


-def dict_config(config):
-    """Merge the provided configuration with the default one.
+def set_config(config):
+    """Set bigchaindb.config equal to the default config dict,
+    then update that with whatever is in the provided config dict,
+    and then set bigchaindb.config['CONFIGURED'] = True

     Args:
-        newconfig (dict): a dictionary with the configuration to load.
+        config (dict): the config dict to read for changes
+            to the default config

     Note:
-        The function merges ``newconfig`` with the **default configuration**, so any
-        update made to ``bigchaindb.config`` will be lost.
+        Any previous changes made to ``bigchaindb.config`` will be lost.
     """
+    # Deep copy the default config into bigchaindb.config
    bigchaindb.config = copy.deepcopy(bigchaindb._config)
+    # Update the default config with whatever is in the passed config
     update(bigchaindb.config, update_types(config, bigchaindb.config))
     bigchaindb.config['CONFIGURED'] = True

@@ -193,8 +212,7 @@ def autoconfigure(filename=None, config=None, force=False):
     if config:
         newconfig = update(newconfig, config)

-    dict_config(newconfig)
-    return newconfig
+    set_config(newconfig)  # sets bigchaindb.config


 def load_consensus_plugin(name=None):
@@ -1,6 +1,9 @@
 import copy
 import json
 import time
+import contextlib
+import threading
+import queue
 import multiprocessing as mp
 from datetime import datetime

@@ -33,6 +36,61 @@ class ProcessGroup(object):
         self.processes.append(proc)


+# Inspired by:
+# - http://stackoverflow.com/a/24741694/597097
+def pool(builder, size, timeout=None):
+    """Create a pool that imposes a limit on the number of stored
+    instances.
+
+    Args:
+        builder: a function to build an instance.
+        size: the size of the pool.
+        timeout(Optional[float]): the seconds to wait before raising
+            a ``queue.Empty`` exception if no instances are available
+            within that time.
+    Raises:
+        If ``timeout`` is defined but the request is taking longer
+        than the specified time, the context manager will raise
+        a ``queue.Empty`` exception.
+
+    Returns:
+        A context manager that can be used with the ``with``
+        statement.
+
+    """
+
+    lock = threading.Lock()
+    local_pool = queue.Queue()
+    current_size = 0
+
+    @contextlib.contextmanager
+    def pooled():
+        nonlocal current_size
+        instance = None
+
+        # If we still have free slots, then we have room to create new
+        # instances.
+        if current_size < size:
+            with lock:
+                # We need to check again if we have slots available, since
+                # the situation might be different after acquiring the lock
+                if current_size < size:
+                    current_size += 1
+                    instance = builder()
+
+        # Watchout: current_size can be equal to size if the previous part of
+        # the function has been executed, that's why we need to check if the
+        # instance is None.
+        if instance is None:
+            instance = local_pool.get(timeout=timeout)
+
+        yield instance
+
+        local_pool.put(instance)
+
+    return pooled
+
+
 def serialize(data):
     """Serialize a dict into a JSON formatted string.

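Since `util.pool()` is the centerpiece of this commit's web-server change, here is a small usage sketch of the context-manager factory it returns. The `Thing` class and the 0.1-second timeout are illustrative; in BigchainDB the builder is `Bigchain`. It assumes the bigchaindb package from this commit is importable:

```python
# Usage sketch for the pool() factory added above.
import queue

from bigchaindb import util


class Thing:
    """Stand-in for an expensive-to-build object (Bigchain, in this commit)."""


pooled = util.pool(Thing, size=2, timeout=0.1)

with pooled() as thing_a:   # builds instance #1
    print('using', thing_a)

with pooled() as thing_b:   # a slot is still free, so this builds instance #2
    print('using', thing_b)

with pooled() as thing_c:   # pool is full now: reuses an instance from the queue
    print('using', thing_c)

try:
    with pooled() as one, pooled() as two, pooled() as three:
        pass                # both instances are checked out; the third request
                            # waits 0.1s and then raises queue.Empty
except queue.Empty:
    print('pool exhausted, no instance free within 0.1s')
```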
@@ -8,6 +8,7 @@ import multiprocessing

 from flask import Flask

+from bigchaindb import util
 from bigchaindb import Bigchain
 from bigchaindb.web import views
 import gunicorn.app.base
@@ -45,7 +46,7 @@ class StandaloneApplication(gunicorn.app.base.BaseApplication):
         return self.application


-def create_app(debug=False):
+def create_app(settings):
     """Return an instance of the Flask application.

     Args:
@@ -54,8 +55,8 @@ def create_app(settings):
     """

     app = Flask(__name__)
-    app.debug = debug
-    app.config['bigchain'] = Bigchain()
+    app.debug = settings.get('debug', False)
+    app.config['bigchain_pool'] = util.pool(Bigchain, size=settings.get('threads', 4))
     app.register_blueprint(views.basic_views, url_prefix='/api/v1')
     return app

@@ -79,8 +80,7 @@ def create_server(settings):
     if not settings.get('threads'):
         settings['threads'] = (multiprocessing.cpu_count() * 2) + 1

-    debug = settings.pop('debug', False)
-    app = create_app(debug)
+    app = create_app(settings)
     standalone = StandaloneApplication(app, settings)
     return standalone

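After this change `create_app()` takes a settings dict instead of a bare `debug` flag. A hedged sketch of constructing the app directly, mirroring the test fixture updated at the end of this commit (it assumes a running RethinkDB and a configured node, since the app's pool builds `Bigchain` instances; running it under Flask's built-in server is only for local experiments — `bigchaindb start` uses Gunicorn):

```python
# Sketch: building the Flask app with the new settings-dict interface.
from bigchaindb.web import server

app = server.create_app({'debug': True, 'threads': 2})

if __name__ == '__main__':
    app.run(port=9984)  # local experiment only; production runs under Gunicorn
```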
@@ -15,12 +15,11 @@ basic_views = Blueprint('basic_views', __name__)

 @basic_views.record
 def get_bigchain(state):
-    bigchain = state.app.config.get('bigchain')
+    bigchain_pool = state.app.config.get('bigchain_pool')

-    if bigchain is None:
+    if bigchain_pool is None:
         raise Exception('This blueprint expects you to provide '
-                        'database access through `bigchain`')
+                        'a pool of Bigchain instances called `bigchain_pool`')
-


 @basic_views.route('/transactions/<tx_id>')
@@ -34,9 +33,11 @@ def get_transaction(tx_id):
         A JSON string containing the data about the transaction.
     """

-    bigchain = current_app.config['bigchain']
+    pool = current_app.config['bigchain_pool']

+    with pool() as bigchain:
+        tx = bigchain.get_transaction(tx_id)

-    tx = bigchain.get_transaction(tx_id)
     return flask.jsonify(**tx)

+
@@ -47,7 +48,7 @@ def create_transaction():
     Return:
         A JSON string containing the data about the transaction.
     """
-    bigchain = current_app.config['bigchain']
+    pool = current_app.config['bigchain_pool']

     val = {}

@@ -55,15 +56,15 @@ def create_transaction():
     # set to `application/json`
     tx = request.get_json(force=True)

-    if tx['transaction']['operation'] == 'CREATE':
-        tx = util.transform_create(tx)
-        tx = bigchain.consensus.sign_transaction(
-            tx, private_key=bigchain.me_private)
+    with pool() as bigchain:
+        if tx['transaction']['operation'] == 'CREATE':
+            tx = util.transform_create(tx)
+            tx = bigchain.consensus.sign_transaction(tx, private_key=bigchain.me_private)

         if not bigchain.consensus.verify_signature(tx):
             val['error'] = 'Invalid transaction signature'

         val = bigchain.write_transaction(tx)

     return flask.jsonify(**tx)

@@ -28,5 +28,7 @@ coverage:
     - "tests/*"

 comment:
-  layout: "header, diff, changes, sunburst, suggestions"
+  # @stevepeak (from codecov.io) suggested we change 'suggestions' to 'uncovered'
+  # in the following line. Thanks Steve!
+  layout: "header, diff, changes, sunburst, uncovered"
   behavior: default

deploy-cluster-aws/release_eips.py (new file, 43 lines)
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+"""Release all allocated but non-associated elastic IP addresses
+(EIPs). Why? From the AWS docs:
+
+``To ensure efficient use of Elastic IP addresses, we impose a small
+hourly charge if an Elastic IP address is not associated with a
+running instance, or if it is associated with a stopped instance or
+an unattached network interface. While your instance is running,
+you are not charged for one Elastic IP address associated with the
+instance, but you are charged for any additional Elastic IP
+addresses associated with the instance. For more information, see
+Amazon EC2 Pricing.''
+
+Source: http://tinyurl.com/ozhxatx
+"""
+
+from __future__ import unicode_literals
+import boto3
+from awscommon import get_naeips
+
+# Get an AWS EC2 "resource"
+# See http://boto3.readthedocs.org/en/latest/guide/resources.html
+ec2 = boto3.resource(service_name='ec2')
+
+# Create a client from the EC2 resource
+# See http://boto3.readthedocs.org/en/latest/guide/clients.html
+client = ec2.meta.client
+
+non_associated_eips = get_naeips(client)
+
+print('You have {} allocated elactic IPs which are '
+      'not associated with instances'.
+      format(len(non_associated_eips)))
+
+for i, eip in enumerate(non_associated_eips):
+    public_ip = eip['PublicIp']
+    print('{}: Releasing {}'.format(i, public_ip))
+    domain = eip['Domain']
+    print('(It has Domain = {}.)'.format(domain))
+    if domain == 'vpc':
+        client.release_address(AllocationId=eip['AllocationId'])
+    else:
+        client.release_address(PublicIp=public_ip)
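The new script imports `get_naeips` from the repo's `awscommon` module, which is not shown in this diff. For orientation, a hypothetical sketch of what such a helper can look like on top of boto3's public `describe_addresses()` API — an assumption for illustration, not the actual `awscommon` code:

```python
# Hypothetical sketch of a get_naeips()-style helper, built only on the public
# boto3 EC2 client API. The real implementation lives in
# deploy-cluster-aws/awscommon.py and may differ.
import boto3


def get_non_associated_eips(client):
    """Return the elastic IPs that aren't associated with any instance."""
    addresses = client.describe_addresses()['Addresses']
    return [eip for eip in addresses
            if not eip.get('AssociationId') and not eip.get('InstanceId')]


if __name__ == '__main__':
    ec2 = boto3.resource(service_name='ec2')
    print(get_non_associated_eips(ec2.meta.client))
```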
@@ -1,27 +1,46 @@
-# The BigchainDB Command Line Interfaces (CLIs)
+# The BigchainDB Command Line Interface (CLI)

-BigchainDB has some Command Line Interfaces (CLIs). One of them is the `bigchaindb` command which we already saw when we first started BigchainDB using:
-```text
-$ bigchaindb configure
-$ bigchaindb start
-```
+There are some command-line commands for working with BigchainDB: `bigchaindb` and `bigchaindb-benchmark`. This section provides an overview of those commands.

-When you run `bigchaindb configure`, it creates a default configuration file in `$HOME/.bigchaindb`. You can check that configuration using:
-```text
-$ bigchaindb show-config
-```
+## bigchaindb

-To find out what else you can do with the `bigchain` command, use:
-```text
-$ bigchaindb -h
-```
+### bigchaindb --help

-There's another command named `bigchaindb-benchmark`. It's used to run benchmarking tests. You can learn more about it using:
+One can get basic help with the `bigchaindb` command using `bigchaindb --help` or `bigchaindb -h`.
+
+### bigchaindb configure
+
+This command generates a public/private keypair for the node, and writes a BigchainDB configuration file to the node's file system. It's documented in the section [Configuring a BigchainDB Node](configuration.html).
+
+If you want to force-generate a new configuration file regardless of whether one already exists (i.e. skipping the yes/no prompt), then use `bigchaindb -y configure`.
+
+### bigchaindb show-config
+
+This command shows the values of the configuration settings, which can come from a variety of sources. See [the section on configuring BigchainDB](configuration.html) for more details and examples.
+
+### bigchaindb export-my-pubkey
+
+This command writes the node's public key (i.e. one of its configuration values) to standard output (stdout).
+
+### bigchaindb init
+
+This command creates a RethinkDB database, two RethinkDB database tables (backlog and bigchain), various RethinkDB database indexes, and the genesis block.
+
+Note: The `bigchaindb start` command (see below) always starts by trying a `bigchaindb init` first. If it sees that the RethinkDB database already exists, then it doesn't re-initialize the database. One doesn't have to do `bigchaindb init` before `bigchaindb start`. `bigchaindb init` is useful if you only want to initialize (but not start).
+
+### bigchaindb drop
+
+This command drops (erases) the RethinkDB database. You will be prompted to make sure. If you want to force-drop the database (i.e. skipping the yes/no prompt), then use `bigchaindb -y drop`
+
+### bigchaindb start
+
+This command starts BigchainDB. It always begins by trying a `bigchaindb init` first. See the note in the documentation for `bigchaindb init`.
+
+
+## bigchaindb-benchmark
+
+The `bigchaindb-benchmark` command is used to run benchmarking tests. You can learn more about it using:
 ```text
 $ bigchaindb-benchmark -h
 $ bigchaindb-benchmark load -h
 ```

-Note that you can always start `bigchaindb` using a different config file using the `-c` option.
-For more information check the help with `bigchaindb -h`.
@@ -71,7 +71,7 @@ author = 'BigchainDB Contributors'
 # The short X.Y version.
 version = '0.1'
 # The full version, including alpha/beta/rc tags.
-release = '0.1.4'
+release = '0.1.5'

 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -65,13 +65,20 @@ environment variables available are:
 - `BIGCHAINDB_STATSD_RATE` is a float between `0` and `1` that defines the fraction of transaction operations sampled.
 - `BIGCHAINDB_API_ENDPOINT` defines the API endpoint to use (e.g. `http://localhost:9984/api/v1`).
 - `BIGCHAINDB_CONSENSUS_PLUGIN` defines the name of the [consensus plugin](consensus.html) to use.
-- `BIGCHAINDB_SERVER_BIND` defines where to bind the server socket, the format is `addr:port` (e.g. `0.0.0.0:9984`).
+- `BIGCHAINDB_SERVER_BIND` defines where to bind the server socket, the format is `addr:port` (e.g. `localhost:9984`).
 - `BIGCHAINDB_SERVER_WORKERS` defines the [number of workers](http://docs.gunicorn.org/en/stable/settings.html#workers)
 to start for the server API.
 - `BIGCHAINDB_SERVER_THREADS` defines the [number of threads](http://docs.gunicorn.org/en/stable/settings.html#threads)
 to start for the server API.


+## Configuring the API Server
+The API Server is powered by [Gunicorn](http://gunicorn.org/), a Python WSGI HTTP Server for UNIX.
+If you need to tweak some settings for the API server you can manually edit your `.bigchaindb` config file:
+the `server` section accepts all the options specified in the
+[Gunicorn settings](http://docs.gunicorn.org/en/stable/settings.html) documentation.
+
+
 ## Order of Precedence in Determining Configuration Values

 All configuration values start with their default values (defined in `bigchaindb.__init__`), but a default value can be overriden by an environment variable, and a value set by an environment variable can be overriden by a value in a local configuration file (`$HOME/.bigchaindb` or the location specified by the `-c` command-line option).
@@ -129,7 +136,7 @@ you will get the following values for all the configuration settings:
         "pubkey1"
     ],
     "server": {
-        "bind": "0.0.0.0:9984",
+        "bind": "localhost:9984",
         "threads": null,
         "workers": null
     },
@@ -164,7 +171,7 @@ WARNING:bigchaindb.config_utils:Cannot find config file `/home/vrde/.bigchaindb`
     },
     "keyring": [],
     "server": {
-        "bind": "0.0.0.0:9984",
+        "bind": "localhost:9984",
         "threads": null,
         "workers": null
     },
@@ -213,7 +220,7 @@ WARNING:bigchaindb.config_utils:Cannot find config file `/home/vrde/.bigchaindb`
     },
     "keyring": [],
     "server": {
-        "bind": "0.0.0.0:9984",
+        "bind": "localhost:9984",
         "threads": null,
         "workers": null
     },
@@ -17,11 +17,10 @@ data = "message"
 tx_hash = hashlib.sha3_256(data).hexdigest()
 ```

-## Signature algorithm and keys
+## Signature Algorithm and Keys

-The signature algorithm used by BigchainDB is [ED25519](https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-04)
-using the python [ed25519](https://github.com/warner/python-ed25519) module, overloaded by the [cryptoconditions library](https://github.com/bigchaindb/cryptoconditions).
+BigchainDB uses the [Ed25519](https://ed25519.cr.yp.to/) public-key signature system for generating its public/private key pairs (also called verifying/signing keys). Ed25519 is an instance of the [Edwards-curve Digital Signature Algorithm (EdDSA)](https://en.wikipedia.org/wiki/EdDSA). As of April 2016, EdDSA was in ["Internet-Draft" status with the IETF](https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05) but was [already widely used](https://ianix.com/pub/ed25519-deployment.html).

-The private key is the base58 encoded hexadecimal representation of private number.
-The public key is the base58 encoded hexadecimal representation of the
-compressed public numbers.
+BigchainDB uses the the [ed25519](https://github.com/warner/python-ed25519) Python package, overloaded by the [cryptoconditions library](https://github.com/bigchaindb/cryptoconditions).
+All keys are represented with the base58 encoding by default.
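To make the keypair story concrete, a minimal sketch of generating a base58-encoded Ed25519 signing/verifying pair the way `bigchaindb configure` does. The helper name `generate_key_pair` is taken from the `run_configure` hunk earlier in this commit; the `from bigchaindb import crypto` import path is an assumption based on how the commands module uses it:

```python
# Minimal sketch: generate an Ed25519 keypair as `bigchaindb configure` does.
from bigchaindb import crypto

private_key, public_key = crypto.generate_key_pair()
print('private (signing) key:', private_key)
print('public (verifying) key:', public_key)
```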
|
@ -122,9 +122,17 @@ bigchaindb --help
|
|||||||
bigchaindb show-config
|
bigchaindb show-config
|
||||||
```
|
```
|
||||||
|
|
||||||
There are fees associated with running instances on EC2, so if you're not using them, you should terminate them. You can do that from the AWS EC2 Console.
|
There are fees associated with running instances on EC2, so if you're not using them, you should terminate them. You can do that using the AWS EC2 Console.
|
||||||
|
|
||||||
The same is true of your allocated elastic IP addresses. There's a small fee to keep them allocated if they're not associated with a running instance. You can release them from the AWS EC2 Console.
|
The same is true of your allocated elastic IP addresses. There's a small fee to keep them allocated if they're not associated with a running instance. You can release them using the AWS EC2 Console, or by using a handy little script named `release_eips.py`. For example:
|
||||||
|
```text
|
||||||
|
$ python release_eips.py
|
||||||
|
You have 2 allocated elactic IPs which are not associated with instances
|
||||||
|
0: Releasing 52.58.110.110
|
||||||
|
(It has Domain = vpc.)
|
||||||
|
1: Releasing 52.58.107.211
|
||||||
|
(It has Domain = vpc.)
|
||||||
|
```
|
||||||
|
|
||||||
## Known Deployment Issues
|
## Known Deployment Issues
|
||||||
|
|
||||||
|
@@ -5,8 +5,10 @@ When you start Bigchaindb using `bigchaindb start`, an HTTP API is exposed at:
 - [http://localhost:9984/api/v1/](http://localhost:9984/api/v1/)


-Please note that by default the server binds to `0.0.0.0:9984`, hence the API
-is exposed to the world.
+Please note that for security reasons the server binds to `localhost:9984`.
+If you want to bind the server to `0.0.0.0` we recommend you to read
+[Deploying Gunicorn](http://docs.gunicorn.org/en/stable/deploy.html) and
+follow the instructions to deploy it in production.

 The HTTP API currently exposes two endpoints, one to get information about a
 specific transaction id, and one to push a transaction to the BigchainDB
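The two endpoints mentioned here are the ones wired up in `web/views.py` earlier in this diff. A hedged client-side sketch using `requests` (an extra dependency, not something this commit adds): the GET path comes from the route shown in the diff, while the POST path and the payload shape are assumptions for illustration, not a valid BigchainDB transaction.

```python
# Hedged sketch of talking to the HTTP endpoints exposed under /api/v1.
import requests

API = 'http://localhost:9984/api/v1'

# GET a transaction by id (route shown in web/views.py in this diff)
resp = requests.get('{}/transactions/{}'.format(API, 'some-tx-id'))
print(resp.status_code, resp.text)

# POST a transaction; the server signs CREATE transactions itself, per views.py.
# The exact POST path and payload below are assumptions, shown only as a shape.
payload = {'transaction': {'operation': 'CREATE'}}   # placeholder only
resp = requests.post('{}/transactions/'.format(API), json=payload)
print(resp.status_code, resp.text)
```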
@@ -95,16 +95,17 @@ $ rethinkdb
 Then open a different terminal and run:
 ```text
 $ bigchaindb -y configure
-$ bigchaindb init
 ```

-That creates a configuration file in `$HOME/.bigchaindb` (documented in [the section on configuration](configuration.html)), initializes the database, creates the tables, creates the indexes, and generates the genesis block.
+That creates a configuration file in `$HOME/.bigchaindb` (documented in [the section on configuration](configuration.html)). More documentation about the `bigchaindb` command is in the section on [the BigchainDB Command Line Interface (CLI)](bigchaindb-cli.html).

 You can start BigchainDB Server using:
 ```text
 $ bigchaindb start
 ```

+If it's the first time you've run `bigchaindb start`, then it creates the database (a RethinkDB database), the tables, the indexes, and the genesis block. It then starts BigchainDB. If you're run `bigchaindb start` or `bigchaindb init` before (and you haven't dropped the database), then `bigchaindb start` just starts BigchainDB.
+

 ## Run BigchainDB with Docker

@@ -130,7 +131,7 @@ stored on your host machine under ` ~/.bigchaindb_docker/config`:
 $ docker-compose run --rm bigchaindb bigchaindb configure
 Starting bigchaindb_rethinkdb-data_1
 Generating keypair
-API Server bind? (default `0.0.0.0:9984`):
+API Server bind? (default `localhost:9984`):
 Database host? (default `localhost`): rethinkdb
 Database port? (default `28015`):
 Database name? (default `bigchain`):

setup.py (2 changed lines)
@@ -32,7 +32,7 @@ docs_require = [

 setup(
     name='BigchainDB',
-    version='0.1.4',
+    version='0.1.5',
     description='BigchainDB: A Scalable Blockchain Database',
     long_description=__doc__,
     url='https://github.com/BigchainDB/bigchaindb/',
@@ -45,7 +45,7 @@ def ignore_local_config_file(monkeypatch):
 @pytest.fixture(scope='function', autouse=True)
 def restore_config(request, node_config):
     from bigchaindb import config_utils
-    config_utils.dict_config(node_config)
+    config_utils.set_config(node_config)


 @pytest.fixture(scope='module')
@@ -17,7 +17,7 @@ from bigchaindb.db import get_conn
 @pytest.fixture(autouse=True)
 def restore_config(request, node_config):
     from bigchaindb import config_utils
-    config_utils.dict_config(node_config)
+    config_utils.set_config(node_config)


 @pytest.fixture(scope='module', autouse=True)
@@ -82,7 +82,7 @@ def test_bigchain_run_start_assume_yes_create_default_config(monkeypatch, mock_p
         value['return'] = newconfig

     monkeypatch.setattr(config_utils, 'write_config', mock_write_config)
-    monkeypatch.setattr(config_utils, 'file_config', lambda x: config_utils.dict_config(expected_config))
+    monkeypatch.setattr(config_utils, 'file_config', lambda x: config_utils.set_config(expected_config))
     monkeypatch.setattr('os.path.exists', lambda path: False)

     args = Namespace(config=None, yes=True)
@@ -108,6 +108,42 @@ def test_bigchain_show_config(capsys):
     assert output_config == config


+def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch):
+    from bigchaindb import config
+    from bigchaindb.commands.bigchain import run_export_my_pubkey
+
+    args = Namespace(config='dummy')
+    # so in run_export_my_pubkey(args) below,
+    # filename=args.config='dummy' is passed to autoconfigure().
+    # We just assume autoconfigure() works and sets
+    # config['keypair']['public'] correctly (tested elsewhere).
+    # We force-set config['keypair']['public'] using monkeypatch.
+    monkeypatch.setitem(config['keypair'], 'public', 'Charlie_Bucket')
+    _, _ = capsys.readouterr()  # has the effect of clearing capsys
+    run_export_my_pubkey(args)
+    out, err = capsys.readouterr()
+    assert out == config['keypair']['public'] + '\n'
+    assert out == 'Charlie_Bucket\n'
+
+
+def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch):
+    from bigchaindb import config
+    from bigchaindb.commands.bigchain import run_export_my_pubkey
+
+    args = Namespace(config='dummy')
+    monkeypatch.setitem(config['keypair'], 'public', None)
+    # assert that run_export_my_pubkey(args) raises SystemExit:
+    with pytest.raises(SystemExit) as exc_info:
+        run_export_my_pubkey(args)
+    # exc_info is an object of class ExceptionInfo
+    # https://pytest.org/latest/builtin.html#_pytest._code.ExceptionInfo
+    assert exc_info.type == SystemExit
+    # exc_info.value is an object of class SystemExit
+    # https://docs.python.org/3/library/exceptions.html#SystemExit
+    assert exc_info.value.code == \
+        "This node's public key wasn't set anywhere so it can't be exported"
+
+
 def test_bigchain_run_init_when_db_exists(mock_db_init_with_existing_db):
     from bigchaindb.commands.bigchain import run_init
     args = Namespace(config=None)
@@ -159,4 +195,3 @@ def test_run_configure_when_config_does_exist(monkeypatch,
     args = Namespace(config='foo', yes=None)
     run_configure(args)
     assert value == {}
-
@@ -1,7 +1,32 @@
-from bigchaindb import util
+import pytest
+import queue
+
+
+@pytest.fixture
+def mock_queue(monkeypatch):
+
+    class MockQueue:
+        items = []
+
+        def get(self, timeout=None):
+            try:
+                return self.items.pop()
+            except IndexError:
+                if timeout:
+                    raise queue.Empty()
+                raise
+
+        def put(self, item):
+            self.items.append(item)
+
+    mockqueue = MockQueue()
+
+    monkeypatch.setattr('queue.Queue', lambda: mockqueue)
+    return mockqueue


 def test_transform_create(b, user_sk, user_vk):
+    from bigchaindb import util
     tx = util.create_tx(user_vk, user_vk, None, 'CREATE')
     tx = util.transform_create(tx)
     tx = util.sign_tx(tx, b.me_private)
@@ -10,3 +35,88 @@ def test_transform_create(b, user_sk, user_vk):
     assert tx['transaction']['conditions'][0]['new_owners'][0] == user_vk
     assert util.verify_signature(tx)
+
+
+def test_empty_pool_is_populated_with_instances(mock_queue):
+    from bigchaindb import util
+
+    pool = util.pool(lambda: 'hello', 4)
+
+    assert len(mock_queue.items) == 0
+
+    with pool() as instance:
+        assert instance == 'hello'
+    assert len(mock_queue.items) == 1
+
+    with pool() as instance:
+        assert instance == 'hello'
+    assert len(mock_queue.items) == 2
+
+    with pool() as instance:
+        assert instance == 'hello'
+    assert len(mock_queue.items) == 3
+
+    with pool() as instance:
+        assert instance == 'hello'
+    assert len(mock_queue.items) == 4
+
+    with pool() as instance:
+        assert instance == 'hello'
+    assert len(mock_queue.items) == 4
+
+
+def test_pool_blocks_if_no_instances_available(mock_queue):
+    from bigchaindb import util
+
+    pool = util.pool(lambda: 'hello', 4)
+
+    assert len(mock_queue.items) == 0
+
+    # We need to manually trigger the `__enter__` method so the context
+    # manager will "hang" and not return the resource to the pool
+    assert pool().__enter__() == 'hello'
+    assert len(mock_queue.items) == 0
+
+    assert pool().__enter__() == 'hello'
+    assert len(mock_queue.items) == 0
+
+    assert pool().__enter__() == 'hello'
+    assert len(mock_queue.items) == 0
+
+    # We need to keep a reference of the last context manager so we can
+    # manually release the resource
+    last = pool()
+    assert last.__enter__() == 'hello'
+    assert len(mock_queue.items) == 0
+
+    # This would block using `queue.Queue` but since we mocked it it will
+    # just raise a IndexError because it's trying to pop from an empty list.
+    with pytest.raises(IndexError):
+        assert pool().__enter__() == 'hello'
+    assert len(mock_queue.items) == 0
+
+    # Release the last resource
+    last.__exit__(None, None, None)
+    assert len(mock_queue.items) == 1
+
+    assert pool().__enter__() == 'hello'
+    assert len(mock_queue.items) == 0
+
+
+def test_pool_raises_empty_exception_when_timeout(mock_queue):
+    from bigchaindb import util
+
+    pool = util.pool(lambda: 'hello', 1, timeout=1)
+
+    assert len(mock_queue.items) == 0
+
+    with pool() as instance:
+        assert instance == 'hello'
+    assert len(mock_queue.items) == 1
+
+    # take the only resource available
+    assert pool().__enter__() == 'hello'
+
+    with pytest.raises(queue.Empty):
+        with pool() as instance:
+            assert instance == 'hello'
@@ -18,7 +18,7 @@ def test_bigchain_instance_is_initialized_when_conf_provided():
     from bigchaindb import config_utils
     assert 'CONFIGURED' not in bigchaindb.config

-    config_utils.dict_config({'keypair': {'public': 'a', 'private': 'b'}})
+    config_utils.set_config({'keypair': {'public': 'a', 'private': 'b'}})

     assert bigchaindb.config['CONFIGURED'] is True
     b = bigchaindb.Bigchain()
@@ -142,7 +142,7 @@ def test_autoconfigure_read_both_from_file_and_env(monkeypatch):
     assert bigchaindb.config == {
         'CONFIGURED': True,
         'server': {
-            'bind': '0.0.0.0:9984',
+            'bind': 'localhost:9984',
             'workers': None,
             'threads': None,
         },
@@ -5,7 +5,7 @@ from ..db import conftest
 @pytest.fixture(autouse=True)
 def restore_config(request, node_config):
     from bigchaindb import config_utils
-    config_utils.dict_config(node_config)
+    config_utils.set_config(node_config)


 @pytest.fixture(scope='module', autouse=True)
@@ -25,7 +25,7 @@ def app(request, node_config):
     restore_config(request, node_config)

     from bigchaindb.web import server
-    app = server.create_app(debug=True)
+    app = server.create_app({'debug': True})
     return app
