Merge remote-tracking branch 'origin/master' into validationerror-refactor
This commit is contained in: commit 53dff83ed4
@@ -15,6 +15,12 @@ For reference, the possible headings are:
 * **External Contributors** to list contributors outside of BigchainDB GmbH.
 * **Notes**
 
+## [0.9.3] - 2017-03-06
+Tag name: v0.9.3
+
+### Fixed
+Fixed HTTP API 500 error on `GET /outputs`: issues #1200 and #1231.
+
 ## [0.9.2] - 2017-03-02
 Tag name: v0.9.2
 
@@ -14,10 +14,8 @@ A minor release is preceded by a feature freeze and created from the 'master' b
 1. In `bigchaindb/version.py`, update `__version__` and `__short_version__`, e.g. to `0.9` and `0.9.0` (with no `.dev` on the end)
 1. Commit that change, and push the new branch to GitHub
 1. Follow steps outlined in [Common Steps](#common-steps)
-1. In 'master' branch, Edit `bigchaindb/version.py`, increment the minor version to the next planned release, e.g. `0.10.0.dev'
-   This is so people reading the latest docs will know that they're for the latest (master branch)
-   version of BigchainDB Server, not the docs at the time of the most recent release (which are also
-   available).
+1. In 'master' branch, edit `bigchaindb/version.py`, increment the minor version to the next planned release, e.g. `0.10.0.dev`. This is so people reading the latest docs will know that they're for the latest (master branch) version of BigchainDB Server, not the docs at the time of the most recent release (which are also available).
+1. Go to [Docker Hub](https://hub.docker.com/), sign in, go to Settings - Build Settings, and under the build with Docker Tag Name equal to `latest`, change the Name to the number of the new release, e.g. `0.9`
 
 Congratulations, you have released BigchainDB!
 
@@ -10,6 +10,8 @@ _database_rethinkdb = {
     'host': os.environ.get('BIGCHAINDB_DATABASE_HOST', 'localhost'),
     'port': int(os.environ.get('BIGCHAINDB_DATABASE_PORT', 28015)),
     'name': os.environ.get('BIGCHAINDB_DATABASE_NAME', 'bigchain'),
+    'connection_timeout': 5000,
+    'max_tries': 3,
 }
 
 _database_mongodb = {
@@ -18,6 +20,8 @@ _database_mongodb = {
     'port': int(os.environ.get('BIGCHAINDB_DATABASE_PORT', 27017)),
     'name': os.environ.get('BIGCHAINDB_DATABASE_NAME', 'bigchain'),
     'replicaset': os.environ.get('BIGCHAINDB_DATABASE_REPLICASET', 'bigchain-rs'),
+    'connection_timeout': 5000,
+    'max_tries': 3,
 }
 
 _database_map = {
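For a sense of how these new keys are consumed (a minimal sketch, not part of the diff; `connect()` is used elsewhere in this commit as `backend.connect()`, and connections are created lazily, so no database needs to be running):

```python
import bigchaindb
from bigchaindb import backend

# With no arguments, connect() falls back to bigchaindb.config['database'],
# so every connection inherits the 5000 ms timeout and 3 tries defined above.
conn = backend.connect()
print(conn.connection_timeout, conn.max_tries)  # 5000 3
```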
@@ -1,8 +1,10 @@
+from itertools import repeat
 from importlib import import_module
 import logging
 
 import bigchaindb
 from bigchaindb.common.exceptions import ConfigurationError
+from bigchaindb.backend.exceptions import ConnectionError
 
 
 BACKENDS = {
@@ -13,7 +15,8 @@ BACKENDS = {
 logger = logging.getLogger(__name__)
 
 
-def connect(backend=None, host=None, port=None, name=None, replicaset=None):
+def connect(backend=None, host=None, port=None, name=None, max_tries=None,
+            connection_timeout=None, replicaset=None):
     """Create a new connection to the database backend.
 
     All arguments default to the current configuration's values if not
@@ -58,7 +61,9 @@ def connect(backend=None, host=None, port=None, name=None, replicaset=None):
         raise ConfigurationError('Error loading backend `{}`'.format(backend)) from exc
 
     logger.debug('Connection: {}'.format(Class))
-    return Class(host, port, dbname, replicaset=replicaset)
+    return Class(host=host, port=port, dbname=dbname,
+                 max_tries=max_tries, connection_timeout=connection_timeout,
+                 replicaset=replicaset)
 
 
 class Connection:
@@ -68,17 +73,41 @@ class Connection:
     from and implements this class.
     """
 
-    def __init__(self, host=None, port=None, dbname=None, *args, **kwargs):
+    def __init__(self, host=None, port=None, dbname=None,
+                 connection_timeout=None, max_tries=None,
+                 **kwargs):
         """Create a new :class:`~.Connection` instance.
 
         Args:
            host (str): the host to connect to.
            port (int): the port to connect to.
            dbname (str): the name of the database to use.
+           connection_timeout (int, optional): the milliseconds to wait
+               until timing out the database connection attempt.
+               Defaults to 5000ms.
+           max_tries (int, optional): how many tries before giving up,
+               if 0 then try forever. Defaults to 3.
            **kwargs: arbitrary keyword arguments provided by the
                configuration's ``database`` settings
         """
+
+        dbconf = bigchaindb.config['database']
+
+        self.host = host or dbconf['host']
+        self.port = port or dbconf['port']
+        self.dbname = dbname or dbconf['name']
+        self.connection_timeout = connection_timeout if connection_timeout is not None\
+            else dbconf['connection_timeout']
+        self.max_tries = max_tries if max_tries is not None else dbconf['max_tries']
+        self.max_tries_counter = range(self.max_tries) if self.max_tries != 0 else repeat(0)
+        self._conn = None
+
+    @property
+    def conn(self):
+        if self._conn is None:
+            self.connect()
+        return self._conn
 
     def run(self, query):
         """Run a query.
 
@@ -94,3 +123,26 @@ class Connection:
         """
 
         raise NotImplementedError()
+
+    def connect(self):
+        """Try to connect to the database.
+
+        Raises:
+            :exc:`~ConnectionError`: If the connection to the database
+                fails.
+        """
+
+        attempt = 0
+        for i in self.max_tries_counter:
+            attempt += 1
+            try:
+                self._conn = self._connect()
+            except ConnectionError as exc:
+                logger.warning('Attempt %s/%s. Connection to %s:%s failed after %sms.',
+                               attempt, self.max_tries if self.max_tries != 0 else '∞',
+                               self.host, self.port, self.connection_timeout)
+                if attempt == self.max_tries:
+                    logger.critical('Cannot connect to the Database. Giving up.')
+                    raise ConnectionError() from exc
+            else:
+                break
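The retry loop above relies on a small idiom: `range(n)` bounds the attempts, while `itertools.repeat(0)` yields forever when `max_tries` is 0. A standalone sketch of the same behaviour (hypothetical `op` callable, not project code):

```python
from itertools import repeat

def run_with_retries(op, max_tries):
    """Call `op` until it succeeds; max_tries == 0 means retry forever."""
    counter = range(max_tries) if max_tries != 0 else repeat(0)
    attempt = 0
    for _ in counter:
        attempt += 1
        try:
            return op()
        except ConnectionError:
            if attempt == max_tries:
                raise  # out of tries, re-raise the last failure

# e.g. run_with_retries(lambda: 42, max_tries=3) -> 42
```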
@@ -1,6 +1,5 @@
 import time
 import logging
-from itertools import repeat
 
 import pymongo
 
@@ -15,46 +14,20 @@ from bigchaindb.backend.connection import Connection
 logger = logging.getLogger(__name__)
 
 
-# TODO: waiting for #1082 to be merged
-# to move this constants in the configuration.
-
-CONNECTION_TIMEOUT = 4000  # in milliseconds
-MAX_RETRIES = 3  # number of tries before giving up, if 0 then try forever
-
-
 class MongoDBConnection(Connection):
 
-    def __init__(self, host=None, port=None, dbname=None,
-                 connection_timeout=None, max_tries=None,
-                 replicaset=None):
+    def __init__(self, replicaset=None, **kwargs):
         """Create a new Connection instance.
 
         Args:
-            host (str, optional): the host to connect to.
-            port (int, optional): the port to connect to.
-            dbname (str, optional): the database to use.
-            connection_timeout (int, optional): the milliseconds to wait
-                until timing out the database connection attempt.
-            max_tries (int, optional): how many tries before giving up,
-                if 0 then try forever.
            replicaset (str, optional): the name of the replica set to
                connect to.
+           **kwargs: arbitrary keyword arguments provided by the
+               configuration's ``database`` settings
        """
 
-        self.host = host or bigchaindb.config['database']['host']
-        self.port = port or bigchaindb.config['database']['port']
+        super().__init__(**kwargs)
         self.replicaset = replicaset or bigchaindb.config['database']['replicaset']
-        self.dbname = dbname or bigchaindb.config['database']['name']
-        self.connection_timeout = connection_timeout if connection_timeout is not None else CONNECTION_TIMEOUT
-        self.max_tries = max_tries if max_tries is not None else MAX_RETRIES
-        self.max_tries_counter = range(self.max_tries) if self.max_tries != 0 else repeat(0)
-        self.connection = None
-
-    @property
-    def conn(self):
-        if self.connection is None:
-            self._connect()
-        return self.connection
 
     @property
     def db(self):
@@ -94,10 +67,6 @@ class MongoDBConnection(Connection):
                 fails.
         """
 
-        attempt = 0
-        for i in self.max_tries_counter:
-            attempt += 1
-
         try:
             # we should only return a connection if the replica set is
             # initialized. initialize_replica_set will check if the
@@ -106,7 +75,7 @@ class MongoDBConnection(Connection):
 
             # FYI: this might raise a `ServerSelectionTimeoutError`,
             # that is a subclass of `ConnectionFailure`.
-            self.connection = pymongo.MongoClient(self.host,
+            return pymongo.MongoClient(self.host,
                                        self.port,
                                        replicaset=self.replicaset,
                                        serverselectiontimeoutms=self.connection_timeout)
@@ -114,14 +83,7 @@ class MongoDBConnection(Connection):
         # `initialize_replica_set` might raise `ConnectionFailure` or `OperationFailure`.
         except (pymongo.errors.ConnectionFailure,
                 pymongo.errors.OperationFailure) as exc:
-            logger.warning('Attempt %s/%s. Connection to %s:%s failed after %sms.',
-                           attempt, self.max_tries if self.max_tries != 0 else '∞',
-                           self.host, self.port, self.connection_timeout)
-            if attempt == self.max_tries:
-                logger.critical('Cannot connect to the Database. Giving up.')
             raise ConnectionError() from exc
-            else:
-                break
 
 
 def initialize_replica_set(host, port, connection_timeout):
@@ -168,7 +130,7 @@ def _check_replica_set(conn):
     options = conn.admin.command('getCmdLineOpts')
     try:
         repl_opts = options['parsed']['replication']
-        repl_set_name = repl_opts.get('replSetName', None) or repl_opts['replSet']
+        repl_set_name = repl_opts.get('replSetName', repl_opts.get('replSet'))
     except KeyError:
         raise ConfigurationError('mongod was not started with'
                                  ' the replSet option.')
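The one-line change in `_check_replica_set` swaps an `or`-chain for a nested `dict.get`. The difference shows up when neither key is present: the old expression raised `KeyError`, while the new one quietly yields `None`. Illustrative dicts, not real mongod output:

```python
opts_new = {'replSetName': 'bigchain-rs'}   # config-file style
opts_old = {'replSet': 'bigchain-rs'}       # command-line style
opts_none = {}

for opts in (opts_new, opts_old, opts_none):
    # old: opts.get('replSetName', None) or opts['replSet']  -> KeyError on opts_none
    name = opts.get('replSetName', opts.get('replSet'))
    print(name)  # bigchain-rs, bigchain-rs, None
```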
@@ -96,7 +96,7 @@ def reconfigure(connection, *, table, shards, replicas,
     try:
         return connection.run(r.table(table).reconfigure(**params))
     except (r.ReqlOpFailedError, r.ReqlQueryLogicError) as e:
-        raise OperationError from e
+        raise OperationError('Failed to reconfigure tables.') from e
 
 
 @register_admin(RethinkDBConnection)
@@ -3,6 +3,7 @@ import logging
 import rethinkdb as r
 
 from bigchaindb import backend
+from bigchaindb.backend.exceptions import BackendError
 from bigchaindb.backend.changefeed import ChangeFeed
 from bigchaindb.backend.utils import module_dispatch_registrar
 from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
@@ -23,8 +24,8 @@ class RethinkDBChangeFeed(ChangeFeed):
             try:
                 self.run_changefeed()
                 break
-            except (r.ReqlDriverError, r.ReqlOpFailedError) as exc:
-                logger.exception(exc)
+            except (BackendError, r.ReqlDriverError) as exc:
+                logger.exception('Error connecting to the database, retrying')
                 time.sleep(1)
 
     def run_changefeed(self):
@@ -1,11 +1,7 @@
-import time
-import logging
-
 import rethinkdb as r
 
 from bigchaindb.backend.connection import Connection
+from bigchaindb.backend.exceptions import ConnectionError, OperationError
-
-logger = logging.getLogger(__name__)
 
 
 class RethinkDBConnection(Connection):
@@ -17,23 +13,6 @@ class RethinkDBConnection(Connection):
     more times to run the query or open a connection.
     """
 
-    def __init__(self, host, port, dbname, max_tries=3, **kwargs):
-        """Create a new :class:`~.RethinkDBConnection` instance.
-
-        See :meth:`.Connection.__init__` for
-        :attr:`host`, :attr:`port`, and :attr:`dbname`.
-
-        Args:
-            max_tries (int, optional): how many tries before giving up.
-                Defaults to 3.
-        """
-
-        self.host = host
-        self.port = port
-        self.dbname = dbname
-        self.max_tries = max_tries
-        self.conn = None
-
     def run(self, query):
         """Run a RethinkDB query.
 
@@ -45,16 +24,10 @@ class RethinkDBConnection(Connection):
         :attr:`~.RethinkDBConnection.max_tries`.
         """
 
-        if self.conn is None:
-            self._connect()
-
-        for i in range(self.max_tries):
         try:
             return query.run(self.conn)
-        except r.ReqlDriverError:
-            if i + 1 == self.max_tries:
-                raise
-            self._connect()
+        except r.ReqlDriverError as exc:
+            raise OperationError from exc
 
     def _connect(self):
         """Set a connection to RethinkDB.
@@ -66,16 +39,7 @@ class RethinkDBConnection(Connection):
         :attr:`~.RethinkDBConnection.max_tries`.
         """
 
-        for i in range(1, self.max_tries + 1):
-            logging.debug('Connecting to database %s:%s/%s. (Attempt %s/%s)',
-                          self.host, self.port, self.dbname, i, self.max_tries)
         try:
-            self.conn = r.connect(host=self.host, port=self.port, db=self.dbname)
-        except r.ReqlDriverError:
-            if i == self.max_tries:
-                raise
-            wait_time = 2**i
-            logging.debug('Error connecting to database, waiting %ss', wait_time)
-            time.sleep(wait_time)
-        else:
-            break
+            return r.connect(host=self.host, port=self.port, db=self.dbname)
+        except r.ReqlDriverError as exc:
+            raise ConnectionError from exc
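Note the shape of this refactor: each backend now only translates driver-specific errors into backend-agnostic exceptions, and the retry loop lives once in `Connection.connect()`. A generic sketch of the translation pattern (toy exception names, not the project's):

```python
class BackendError(Exception):
    """Base class for backend-agnostic failures."""

class BackendConnectionError(BackendError):
    """The backend is unreachable."""

def connect_or_translate(driver_connect):
    try:
        return driver_connect()
    except OSError as exc:  # stand-in for a driver-specific error
        # `from exc` chains the driver error into __cause__ for debugging
        raise BackendConnectionError('cannot reach the database') from exc
```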
@@ -3,12 +3,11 @@ the command-line interface (CLI) for BigchainDB Server.
 """
 
 import os
-import sys
 import logging
 import argparse
 import copy
 import json
-import builtins
+import sys
 
 import logstats
 
@@ -17,36 +16,37 @@ from bigchaindb.common.exceptions import (StartupError,
                                           DatabaseAlreadyExists,
                                           KeypairNotFoundException)
 import bigchaindb
-import bigchaindb.config_utils
 from bigchaindb.models import Transaction
 from bigchaindb.utils import ProcessGroup
-from bigchaindb import backend
+from bigchaindb import backend, processes
 from bigchaindb.backend import schema
 from bigchaindb.backend.admin import (set_replicas, set_shards, add_replicas,
                                       remove_replicas)
 from bigchaindb.backend.exceptions import OperationError
 from bigchaindb.commands import utils
-from bigchaindb import processes
+from bigchaindb.commands.messages import (
+    CANNOT_START_KEYPAIR_NOT_FOUND,
+    RETHINKDB_STARTUP_ERROR,
+)
+from bigchaindb.commands.utils import configure_bigchaindb, input_on_stderr
 
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
 
-# We need this because `input` always prints on stdout, while it should print
-# to stderr. It's a very old bug, check it out here:
-# - https://bugs.python.org/issue1927
-def input_on_stderr(prompt=''):
-    print(prompt, end='', file=sys.stderr)
-    return builtins.input()
+# Note about printing:
+# We try to print to stdout for results of a command that may be useful to
+# someone (or another program). Strictly informational text, or errors,
+# should be printed to stderr.
 
 
+@configure_bigchaindb
 def run_show_config(args):
     """Show the current configuration"""
     # TODO Proposal: remove the "hidden" configuration. Only show config. If
     # the system needs to be configured, then display information on how to
     # configure the system.
-    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
     config = copy.deepcopy(bigchaindb.config)
     del config['CONFIGURED']
     private_key = config['keypair']['private']
@@ -89,7 +89,7 @@ def run_configure(args, skip_if_exists=False):
 
     # select the correct config defaults based on the backend
     print('Generating default configuration for backend {}'
-          .format(args.backend))
+          .format(args.backend), file=sys.stderr)
     conf['database'] = bigchaindb._database_map[args.backend]
 
     if not args.yes:
@@ -119,11 +119,10 @@ def run_configure(args, skip_if_exists=False):
     print('Ready to go!', file=sys.stderr)
 
 
+@configure_bigchaindb
 def run_export_my_pubkey(args):
     """Export this node's public key to standard output
     """
-    logger.debug('bigchaindb args = {}'.format(args))
-    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
     pubkey = bigchaindb.config['keypair']['public']
     if pubkey is not None:
         print(pubkey)
@@ -141,14 +140,13 @@ def _run_init():
 
     schema.init_database(connection=b.connection)
 
-    logger.info('Create genesis block.')
     b.create_genesis_block()
-    logger.info('Done, have fun!')
+    logger.info('Genesis block created.')
 
 
+@configure_bigchaindb
 def run_init(args):
     """Initialize the database"""
-    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
     # TODO Provide mechanism to:
     # 1. prompt the user to inquire whether they wish to drop the db
     # 2. force the init, (e.g., via -f flag)
@@ -159,9 +157,9 @@ def run_init(args):
         print('If you wish to re-initialize it, first drop it.', file=sys.stderr)
 
 
+@configure_bigchaindb
 def run_drop(args):
     """Drop the database"""
-    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
     dbname = bigchaindb.config['database']['name']
 
     if not args.yes:
@@ -174,11 +172,10 @@ def run_drop(args):
     schema.drop_database(conn, dbname)
 
 
+@configure_bigchaindb
 def run_start(args):
     """Start the processes to run the node"""
-    logger.info('BigchainDB Version {}'.format(bigchaindb.__version__))
+    logger.info('BigchainDB Version %s', bigchaindb.__version__)
 
-    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
-
     if args.allow_temp_keypair:
         if not (bigchaindb.config['keypair']['private'] or
@@ -194,7 +191,7 @@ def run_start(args):
         try:
             proc = utils.start_rethinkdb()
         except StartupError as e:
-            sys.exit('Error starting RethinkDB, reason is: {}'.format(e))
+            sys.exit(RETHINKDB_STARTUP_ERROR.format(e))
         logger.info('RethinkDB started with PID %s' % proc.pid)
 
     try:
@@ -202,8 +199,7 @@ def run_start(args):
     except DatabaseAlreadyExists:
         pass
     except KeypairNotFoundException:
-        sys.exit("Can't start BigchainDB, no keypair found. "
-                 'Did you run `bigchaindb configure`?')
+        sys.exit(CANNOT_START_KEYPAIR_NOT_FOUND)
 
     logger.info('Starting BigchainDB main process with public key %s',
                 bigchaindb.config['keypair']['public'])
@@ -227,8 +223,8 @@ def _run_load(tx_left, stats):
             break
 
 
+@configure_bigchaindb
 def run_load(args):
-    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
     logger.info('Starting %s processes', args.multiprocess)
     stats = logstats.Logstats()
     logstats.thread.start(stats)
@@ -243,46 +239,48 @@ def run_load(args):
     workers.start()
 
 
+@configure_bigchaindb
 def run_set_shards(args):
     conn = backend.connect()
     try:
         set_shards(conn, shards=args.num_shards)
     except OperationError as e:
-        logger.warn(e)
+        sys.exit(str(e))
 
 
+@configure_bigchaindb
 def run_set_replicas(args):
     conn = backend.connect()
     try:
         set_replicas(conn, replicas=args.num_replicas)
     except OperationError as e:
-        logger.warn(e)
+        sys.exit(str(e))
 
 
+@configure_bigchaindb
 def run_add_replicas(args):
     # Note: This command is specific to MongoDB
-    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
     conn = backend.connect()
 
     try:
         add_replicas(conn, args.replicas)
     except (OperationError, NotImplementedError) as e:
-        logger.warn(e)
+        sys.exit(str(e))
     else:
-        logger.info('Added {} to the replicaset.'.format(args.replicas))
+        print('Added {} to the replicaset.'.format(args.replicas))
 
 
+@configure_bigchaindb
 def run_remove_replicas(args):
     # Note: This command is specific to MongoDB
-    bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
     conn = backend.connect()
 
     try:
         remove_replicas(conn, args.replicas)
     except (OperationError, NotImplementedError) as e:
-        logger.warn(e)
+        sys.exit(str(e))
     else:
-        logger.info('Removed {} from the replicaset.'.format(args.replicas))
+        print('Removed {} from the replicaset.'.format(args.replicas))
 
 
 def create_parser():
@@ -290,16 +288,6 @@ def create_parser():
                                      description='Control your BigchainDB node.',
                                      parents=[utils.base_parser])
 
-    parser.add_argument('--dev-start-rethinkdb',
-                        dest='start_rethinkdb',
-                        action='store_true',
-                        help='Run RethinkDB on start')
-
-    parser.add_argument('--dev-allow-temp-keypair',
-                        dest='allow_temp_keypair',
-                        action='store_true',
-                        help='Generate a random keypair on start')
-
     # all the commands are contained in the subparsers object,
     # the command selected by the user will be stored in `args.command`
     # that is used by the `main` function to select which other
@@ -331,9 +319,19 @@ def create_parser():
                                       help='Drop the database')
 
     # parser for starting BigchainDB
-    subparsers.add_parser('start',
+    start_parser = subparsers.add_parser('start',
                           help='Start BigchainDB')
 
+    start_parser.add_argument('--dev-allow-temp-keypair',
+                              dest='allow_temp_keypair',
+                              action='store_true',
+                              help='Generate a random keypair on start')
+
+    start_parser.add_argument('--dev-start-rethinkdb',
+                              dest='start_rethinkdb',
+                              action='store_true',
+                              help='Run RethinkDB on start')
+
     # parser for configuring the number of shards
     sharding_parser = subparsers.add_parser('set-shards',
                                             help='Configure number of shards')
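Moving the two `--dev-*` flags from the top-level parser onto the `start` subparser scopes them to the only command that reads them. A generic sketch of that argparse pattern (toy program, not the real CLI):

```python
import argparse

parser = argparse.ArgumentParser(prog='bigchaindb')
subparsers = parser.add_subparsers(dest='command')

start = subparsers.add_parser('start', help='Start the node')
# Only `start` accepts the flag now; other subcommands reject it.
start.add_argument('--dev-start-rethinkdb', action='store_true')

args = parser.parse_args(['start', '--dev-start-rethinkdb'])
print(args.command, args.dev_start_rethinkdb)  # start True
```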
bigchaindb/commands/messages.py (new file, 10 lines)
@@ -0,0 +1,10 @@
+"""Module to store messages used in commands, such as error messages,
+warnings, prompts, etc.
+
+"""
+CANNOT_START_KEYPAIR_NOT_FOUND = (
+    "Can't start BigchainDB, no keypair found. "
+    'Did you run `bigchaindb configure`?'
+)
+
+RETHINKDB_STARTUP_ERROR = 'Error starting RethinkDB, reason is: {}'
@@ -3,18 +3,39 @@ for ``argparse.ArgumentParser``.
 """
 
 import argparse
+import builtins
+import functools
 import multiprocessing as mp
 import subprocess
+import sys
 
 import rethinkdb as r
 from pymongo import uri_parser
 
 import bigchaindb
+import bigchaindb.config_utils
 from bigchaindb import backend
 from bigchaindb.common.exceptions import StartupError
 from bigchaindb.version import __version__
 
 
+def configure_bigchaindb(command):
+    @functools.wraps(command)
+    def configure(args):
+        bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
+        command(args)
+
+    return configure
+
+
+# We need this because `input` always prints on stdout, while it should print
+# to stderr. It's a very old bug, check it out here:
+# - https://bugs.python.org/issue1927
+def input_on_stderr(prompt=''):
+    print(prompt, end='', file=sys.stderr)
+    return builtins.input()
+
+
 def start_rethinkdb():
     """Start RethinkDB as a child process and wait for it to be
     available.
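`configure_bigchaindb` folds the `autoconfigure(...)` call that every `run_*` command used to repeat into one decorator. Applying it looks like this (a sketch with a stubbed command; the real commands receive the argparse namespace):

```python
from argparse import Namespace

@configure_bigchaindb
def run_example(args):
    print('configuration is loaded before this line runs')

run_example(Namespace(config=None))
```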
@@ -102,3 +102,7 @@ class TransactionOwnerError(ValidationError):
 
 class SybilError(ValidationError):
     """If a block or vote comes from an unidentifiable node"""
+
+
+class DuplicateTransaction(ValidationError):
+    """Raised if a duplicated transaction is found"""
@@ -404,14 +404,13 @@ class Bigchain(object):
                 # check if the owner is in the condition `owners_after`
                 if len(output['public_keys']) == 1:
                     if output['condition']['details']['public_key'] == owner:
-                        tx_link = TransactionLink(tx['id'], index)
+                        links.append(TransactionLink(tx['id'], index))
                 else:
                     # for transactions with multiple `public_keys` there will be several subfulfillments nested
                     # in the condition. We need to iterate the subfulfillments to make sure there is a
                     # subfulfillment for `owner`
                     if utils.condition_details_has_owner(output['condition']['details'], owner):
-                        tx_link = TransactionLink(tx['id'], index)
-                links.append(tx_link)
+                        links.append(TransactionLink(tx['id'], index))
         return links
 
     def get_owned_ids(self, owner):
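The change fixes a stale-variable bug: the old code ran `links.append(tx_link)` on every output, even when neither branch had reassigned `tx_link`, so a link from an earlier output could be appended again (or a `NameError` raised on the very first output). A stripped-down illustration of the old shape:

```python
links = []
owned = [True, False, True]  # per-output: does it belong to the owner?

for index, mine in enumerate(owned):
    if mine:
        link = ('tx', index)
    links.append(link)  # appends stale ('tx', 0) again for index 1

print(links)  # [('tx', 0), ('tx', 0), ('tx', 2)]
```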
@@ -3,7 +3,8 @@ from bigchaindb.common.exceptions import (InvalidHash, InvalidSignature,
                                           DoubleSpend, InputDoesNotExist,
                                           TransactionNotInValidBlock,
                                           AssetIdMismatch, AmountError,
-                                          SybilError, ValidationError)
+                                          SybilError, ValidationError,
+                                          DuplicateTransaction)
 from bigchaindb.common.transaction import Transaction
 from bigchaindb.common.utils import gen_timestamp, serialize
 from bigchaindb.common.schema import validate_transaction_schema
@@ -233,6 +234,10 @@ class Block(object):
         Raises:
             ValidationError: If an invalid transaction is found
         """
+        txids = [tx.id for tx in self.transactions]
+        if len(txids) != len(set(txids)):
+            raise DuplicateTransaction('Block has duplicate transaction')
+
         for tx in self.transactions:
             # If a transaction is not valid, `validate_transactions` will
             # throw an exception and block validation will be canceled.
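The new guard is the usual compact duplicate test: converting to a set drops repeats, so any length mismatch means some transaction id appears more than once. For example:

```python
txids = ['a1', 'b2', 'a1']
print(len(txids) != len(set(txids)))  # True -> DuplicateTransaction is raised
```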
@@ -30,7 +30,7 @@ class BlockPipeline:
     def __init__(self):
         """Initialize the BlockPipeline creator"""
         self.bigchain = Bigchain()
-        self.txs = []
+        self.txs = tx_collector()
 
     def filter_tx(self, tx):
         """Filter a transaction.
@@ -98,11 +98,10 @@ class BlockPipeline:
             :class:`~bigchaindb.models.Block`: The block,
             if a block is ready, or ``None``.
         """
-        if tx:
-            self.txs.append(tx)
-        if len(self.txs) == 1000 or (timeout and self.txs):
-            block = self.bigchain.create_block(self.txs)
-            self.txs = []
+        txs = self.txs.send(tx)
+        if len(txs) == 1000 or (timeout and txs):
+            block = self.bigchain.create_block(txs)
+            self.txs = tx_collector()
             return block
 
     def write(self, block):
@@ -134,6 +133,27 @@ class BlockPipeline:
         return block
 
 
+def tx_collector():
+    """ A helper to deduplicate transactions """
+
+    def snowflake():
+        txids = set()
+        txs = []
+        while True:
+            tx = yield txs
+            if tx:
+                if tx.id not in txids:
+                    txids.add(tx.id)
+                    txs.append(tx)
+                else:
+                    logger.info('Refusing to add tx to block twice: ' +
+                                tx.id)
+
+    s = snowflake()
+    s.send(None)
+    return s
+
+
 def create_pipeline():
     """Create and return the pipeline of operations to be distributed
     on different processes."""
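`tx_collector()` returns a primed generator: every `send(tx)` hands back the de-duplicated list collected so far, and `send(None)` just peeks without adding. A quick interaction sketch (toy objects with an `id` attribute standing in for transactions):

```python
from types import SimpleNamespace

collector = tx_collector()
tx_a = SimpleNamespace(id='a1')
tx_b = SimpleNamespace(id='b2')

collector.send(tx_a)
collector.send(tx_b)
txs = collector.send(tx_a)      # duplicate: logged and ignored
print([t.id for t in txs])      # ['a1', 'b2']
```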
bigchaindb/voting.py (new file, 158 lines)
@@ -0,0 +1,158 @@
+import collections
+
+from bigchaindb.common.schema import SchemaValidationError, validate_vote_schema
+from bigchaindb.common.utils import serialize
+from bigchaindb.common.crypto import PublicKey
+
+
+VALID = 'valid'
+INVALID = 'invalid'
+UNDECIDED = 'undecided'
+
+
+class Voting:
+    """
+    Everything to do with creating and checking votes.
+
+    All functions in this class should be referentially transparent, that is,
+    they always give the same output for a given input. This makes it easier
+    to test. This also means no logging!
+
+    Assumptions regarding data:
+        * Vote is a dictionary, but it is not assumed that any properties are.
+        * Everything else is assumed to be structurally correct, otherwise errors
+          may be thrown.
+    """
+
+    @classmethod
+    def block_election(cls, block, votes, keyring):
+        """
+        Calculate the election status of a block.
+        """
+        eligible_voters = set(block['block']['voters']) & set(keyring)
+        eligible_votes, ineligible_votes = \
+            cls.partition_eligible_votes(votes, eligible_voters)
+        n_voters = len(eligible_voters)
+        results = cls.count_votes(eligible_votes)
+        results['status'] = cls.decide_votes(n_voters, **results['counts'])
+        results['ineligible'] = ineligible_votes
+        return results
+
+    @classmethod
+    def partition_eligible_votes(cls, votes, eligible_voters):
+        """
+        Filter votes from unknown nodes or nodes that are not listed on
+        block. This is the primary Sybil protection.
+        """
+        eligible, ineligible = ([], [])
+
+        for vote in votes:
+            voter_eligible = vote.get('node_pubkey') in eligible_voters
+            if voter_eligible:
+                try:
+                    if cls.verify_vote_signature(vote):
+                        eligible.append(vote)
+                        continue
+                except ValueError:
+                    pass
+            ineligible.append(vote)
+        return eligible, ineligible
+
+    @classmethod
+    def count_votes(cls, eligible_votes):
+        """
+        Given a list of eligible votes, (votes from known nodes that are listed
+        as voters), produce the number that say valid and the number that say
+        invalid.
+
+        * Detect if there are multiple votes from a single node and return them
+          in a separate "cheat" dictionary.
+        * Votes must agree on previous block, otherwise they become invalid.
+
+        note:
+            The sum of votes returned by this function does not necessarily
+            equal the length of the list of votes fed in. It may differ for
+            example if there are found to be multiple votes submitted by a
+            single voter.
+        """
+        prev_blocks = collections.Counter()
+        cheat = []
+        malformed = []
+
+        # Group by pubkey to detect duplicate voting
+        by_voter = collections.defaultdict(list)
+        for vote in eligible_votes:
+            by_voter[vote['node_pubkey']].append(vote)
+
+        for pubkey, votes in by_voter.items():
+            if len(votes) > 1:
+                cheat.append(votes)
+                continue
+
+            vote = votes[0]
+
+            if not cls.verify_vote_schema(vote):
+                malformed.append(vote)
+                continue
+
+            if vote['vote']['is_block_valid'] is True:
+                prev_blocks[vote['vote']['previous_block']] += 1
+
+        n_valid = 0
+        prev_block = None
+        # Valid votes must agree on previous block
+        if prev_blocks:
+            prev_block, n_valid = prev_blocks.most_common()[0]
+            del prev_blocks[prev_block]
+
+        return {
+            'counts': {
+                'n_valid': n_valid,
+                'n_invalid': len(by_voter) - n_valid,
+            },
+            'cheat': cheat,
+            'malformed': malformed,
+            'previous_block': prev_block,
+            'other_previous_block': dict(prev_blocks),
+        }
+
+    @classmethod
+    def decide_votes(cls, n_voters, n_valid, n_invalid):
+        """
+        Decide on votes.
+
+        To return VALID there must be a clear majority that say VALID
+        and also agree on the previous block.
+
+        A tie on an even number of votes counts as INVALID.
+        """
+        if n_invalid * 2 >= n_voters:
+            return INVALID
+        if n_valid * 2 > n_voters:
+            return VALID
+        return UNDECIDED
+
+    @classmethod
+    def verify_vote_signature(cls, vote):
+        """
+        Verify the signature of a vote
+        """
+        signature = vote.get('signature')
+        pk_base58 = vote.get('node_pubkey')
+
+        if not (type(signature) == str and type(pk_base58) == str):
+            raise ValueError('Malformed vote: %s' % vote)
+
+        public_key = PublicKey(pk_base58)
+        body = serialize(vote['vote']).encode()
+        return public_key.verify(body, signature)
+
+    @classmethod
+    def verify_vote_schema(cls, vote):
+        # I'm not sure this is the correct approach. Maybe we should allow
+        # duck typing w/r/t votes.
+        try:
+            validate_vote_schema(vote)
+            return True
+        except SchemaValidationError as e:
+            return False
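The thresholds in `decide_votes` are deliberately asymmetric: a block is VALID only on a strict majority of valid votes, but INVALID as soon as invalid votes reach half, so a tie is INVALID and anything short of both bounds stays UNDECIDED. Worked outcomes for four eligible voters:

```python
for n_valid, n_invalid in [(3, 1), (2, 2), (2, 1), (1, 3)]:
    print(n_valid, n_invalid, Voting.decide_votes(4, n_valid, n_invalid))
# 3 1 valid      (3*2 > 4)
# 2 2 invalid    (2*2 >= 4, a tie)
# 2 1 undecided  (one vote still outstanding)
# 1 3 invalid
```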
@@ -75,6 +75,8 @@ docker run \
   --name=rethinkdb \
   --publish=172.17.0.1:28015:28015 \
   --publish=172.17.0.1:58080:8080 \
+  --restart=always \
+  --volume "$HOME/bigchaindb_docker:/data" \
   rethinkdb:2.3
 ```
 
@@ -85,11 +87,25 @@ You can also access the RethinkDB dashboard at
 
 #### For MongoDB
 
+Note: MongoDB runs as user `mongodb` which has the UID `999` and GID `999`
+inside the container. For the volume to be mounted properly, as user `mongodb`
+in your host, you should have a `mongodb` user with UID and GID `999`.
+If you have another user on the host with UID `999`, the mapped files will
+be owned by this user in the host.
+If there is no owner with UID 999, you can create the corresponding user and
+group.
+
+`groupadd -r --gid 999 mongodb && useradd -r --uid 999 -g mongodb mongodb`
+
+
 ```text
 docker run \
   --detach \
   --name=mongodb \
   --publish=172.17.0.1:27017:27017 \
+  --restart=always \
+  --volume=/tmp/mongodb_docker/db:/data/db \
+  --volume=/tmp/mongodb_docker/configdb:/data/configdb \
   mongo:3.4.1 --replSet=bigchain-rs
 ```
 
@@ -100,6 +116,7 @@ docker run \
   --detach \
   --name=bigchaindb \
   --publish=59984:9984 \
+  --restart=always \
   --volume=$HOME/bigchaindb_docker:/data \
   bigchaindb/bigchaindb \
   start
@@ -21,7 +21,7 @@ Step 2: Configure kubectl
 The default location of the kubectl configuration file is ``~/.kube/config``.
 If you don't have that file, then you need to get it.
 
-If you deployed your Kubernetes cluster on Azure
+**Azure.** If you deployed your Kubernetes cluster on Azure
 using the Azure CLI 2.0 (as per :doc:`our template <template-kubernetes-azure>`),
 then you can get the ``~/.kube/config`` file using:
 
@@ -32,15 +32,163 @@ then you can get the ``~/.kube/config`` file using:
        --name <ACS cluster name>
 
 
-Step 3: Run a MongoDB Container
--------------------------------
+Step 3: Create a StorageClass
+-----------------------------
 
-To start a MongoDB Docker container in a pod on one of the cluster nodes:
+MongoDB needs somewhere to store its data persistently,
+outside the container where MongoDB is running.
+Explaining how Kubernetes handles persistent volumes,
+and the associated terminology,
+is beyond the scope of this documentation;
+see `the Kubernetes docs about persistent volumes
+<https://kubernetes.io/docs/user-guide/persistent-volumes>`_.
+
+The first thing to do is create a Kubernetes StorageClass.
+
+**Azure.** First, you need an Azure storage account.
+If you deployed your Kubernetes cluster on Azure
+using the Azure CLI 2.0
+(as per :doc:`our template <template-kubernetes-azure>`),
+then the `az acs create` command already created two
+storage accounts in the same location and resource group
+as your Kubernetes cluster.
+Both should have the same "storage account SKU": ``Standard_LRS``.
+Standard storage is lower-cost and lower-performance.
+It uses hard disk drives (HDD).
+LRS means locally-redundant storage: three replicas
+in the same data center.
+
+Premium storage is higher-cost and higher-performance.
+It uses solid state drives (SSD).
+At the time of writing,
+when we created a storage account with SKU ``Premium_LRS``
+and tried to use that,
+the PersistentVolumeClaim would get stuck in a "Pending" state.
+For future reference, the command to create a storage account is
+`az storage account create <https://docs.microsoft.com/en-us/cli/azure/storage/account#create>`_.
+
+Create a Kubernetes Storage Class named ``slow``
+by writing a file named ``azureStorageClass.yml`` containing:
+
+.. code:: yaml
+
+   kind: StorageClass
+   apiVersion: storage.k8s.io/v1beta1
+   metadata:
+     name: slow
+   provisioner: kubernetes.io/azure-disk
+   parameters:
+     skuName: Standard_LRS
+     location: <region where your cluster is located>
+
+and then:
 
 .. code:: bash
 
-    $ kubectl ?????
+   $ kubectl apply -f azureStorageClass.yml
+
+You can check if it worked using ``kubectl get storageclasses``.
+
+Note that there is no line of the form
+``storageAccount: <azure storage account name>``
+under ``parameters:``. When we included one
+and then created a PersistentVolumeClaim based on it,
+the PersistentVolumeClaim would get stuck
+in a "Pending" state.
+Kubernetes just looks for a storageAccount
+with the specified skuName and location.
 
 
-Note: The BigchainDB Dashboard can be deployed
-as a Docker container, like everything else.
+Step 4: Create a PersistentVolumeClaim
+--------------------------------------
+
+Next, you'll create a PersistentVolumeClaim named ``mongoclaim``.
+Create a file named ``mongoclaim.yml``
+with the following contents:
+
+.. code:: yaml
+
+   kind: PersistentVolumeClaim
+   apiVersion: v1
+   metadata:
+     name: mongoclaim
+     annotations:
+       volume.beta.kubernetes.io/storage-class: slow
+   spec:
+     accessModes:
+       - ReadWriteOnce
+     resources:
+       requests:
+         storage: 20Gi
+
+Note how there's no explicit mention of Azure, AWS or whatever.
+``ReadWriteOnce`` (RWO) means the volume can be mounted as
+read-write by a single Kubernetes node.
+(``ReadWriteOnce`` is the *only* access mode supported
+by AzureDisk.)
+``storage: 20Gi`` means the volume has a size of 20
+`gibibytes <https://en.wikipedia.org/wiki/Gibibyte>`_.
+(You can change that if you like.)
+
+Create ``mongoclaim`` in your Kubernetes cluster:
+
+.. code:: bash
+
+   $ kubectl apply -f mongoclaim.yml
+
+You can check its status using:
+
+.. code:: bash
+
+   $ kubectl get pvc
+
+Initially, the status of ``mongoclaim`` might be "Pending"
+but it should become "Bound" fairly quickly.
+
+.. code:: bash
+
+   $ kubectl describe pvc
+   Name:          mongoclaim
+   Namespace:     default
+   StorageClass:  slow
+   Status:        Bound
+   Volume:        pvc-ebed81f1-fdca-11e6-abf0-000d3a27ab21
+   Labels:        <none>
+   Capacity:      20Gi
+   Access Modes:  RWO
+   No events.
+
+
+Step 5: Deploy MongoDB & BigchainDB
+-----------------------------------
+
+Now you can deploy MongoDB and BigchainDB to your Kubernetes cluster.
+Currently, the way we do that is we create a StatefulSet with two
+containers: BigchainDB and MongoDB. (In the future, we'll put them
+in separate pods, and we'll ensure those pods are in different nodes.)
+We expose BigchainDB's port 9984 (the HTTP API port)
+and MongoDB's port 27017 using a Kubernetes Service.
+
+Get the file ``node-mdb-ss.yaml`` from GitHub using:
+
+.. code:: bash
+
+   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/node-mdb-ss.yaml
+
+Take a look inside that file to see how it defines the Service
+and the StatefulSet.
+Note how the MongoDB container uses the ``mongoclaim`` PersistentVolumeClaim
+for its ``/data`` directory (mount path).
+
+Create the StatefulSet and Service in your cluster using:
+
+.. code:: bash
+
+   $ kubectl apply -f node-mdb-ss.yaml
+
+You can check that they're working using:
+
+.. code:: bash
+
+   $ kubectl get services
+   $ kubectl get statefulsets
|
@ -45,6 +45,12 @@ on most common operating systems
|
|||||||
<https://docs.microsoft.com/en-us/cli/azure/install-az-cli2>`_.
|
<https://docs.microsoft.com/en-us/cli/azure/install-az-cli2>`_.
|
||||||
Do that.
|
Do that.
|
||||||
|
|
||||||
|
First, update the Azure CLI to the latest version:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
$ az component update
|
||||||
|
|
||||||
Next, login to your account using:
|
Next, login to your account using:
|
||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
k8s/node-mdb-ss.yaml (new file, 114 lines)
@@ -0,0 +1,114 @@
#################################################################
# This YAML file describes a StatefulSet with two containers:  #
# bigchaindb/bigchaindb:latest and mongo:3.4.1                 #
# It also describes a Service to expose BigchainDB and MongoDB.#
#################################################################

apiVersion: v1
kind: Service
metadata:
  name: bdb-service
  namespace: default
  labels:
    name: bdb-service
spec:
  selector:
    app: bdb
  ports:
  - port: 9984
    targetPort: 9984
    name: bdb-http-api
  - port: 27017
    targetPort: 27017
    name: mongodb-port
  type: LoadBalancer
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: bdb
  namespace: default
spec:
  serviceName: bdb
  replicas: 1
  template:
    metadata:
      name: bdb
      labels:
        app: bdb
      #annotations:
      #pod.beta.kubernetes.io/init-containers: '[
      # TODO mongodb user and group; id = 999
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: bdb-server
        image: bigchaindb/bigchaindb:latest
        args:
        - start
        env:
        - name: BIGCHAINDB_KEYPAIR_PRIVATE
          value: 3CjmRhu718gT1Wkba3LfdqX5pfYuBdaMPLd7ENUga5dm
        - name: BIGCHAINDB_KEYPAIR_PUBLIC
          value: EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ
        - name: BIGCHAINDB_KEYRING
          value: ""
        - name: BIGCHAINDB_DATABASE_BACKEND
          value: mongodb
        - name: BIGCHAINDB_DATABASE_HOST
          value: localhost
        - name: BIGCHAINDB_DATABASE_PORT
          value: "27017"
        - name: BIGCHAINDB_SERVER_BIND
          value: "0.0.0.0:9984"
        - name: BIGCHAINDB_DATABASE_REPLICASET
          value: bigchain-rs
        - name: BIGCHAINDB_DATABASE_NAME
          value: bigchain
        - name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
          value: "120"
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9984
          hostPort: 9984
          name: bdb-port
          protocol: TCP
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          httpGet:
            path: /
            port: bdb-port
          initialDelaySeconds: 15
          timeoutSeconds: 10
      - name: mongodb
        image: mongo:3.4.1
        args:
        - --replSet=bigchain-rs
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 27017
          hostPort: 27017
          name: mdb-port
          protocol: TCP
        volumeMounts:
        - name: mdb-data
          mountPath: /data
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          tcpSocket:
            port: mdb-port
          successThreshold: 1
          failureThreshold: 3
          periodSeconds: 15
          timeoutSeconds: 1
      restartPolicy: Always
      volumes:
      - name: mdb-data
        persistentVolumeClaim:
          claimName: mongoclaim
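
As an optional sanity check (not part of the repo), you can confirm that
MongoDB actually formed the ``bigchain-rs`` replica set named in
``BIGCHAINDB_DATABASE_REPLICASET``. A sketch, assuming ``pymongo`` is
installed and port 27017 has been forwarded to your machine:

.. code:: python

   # Sketch: verify the replica set is up. Assumes a prior
   #   kubectl port-forward bdb-0 27017:27017
   # (bdb-0 being the first pod of the `bdb` StatefulSet).
   from pymongo import MongoClient

   client = MongoClient('localhost', 27017, replicaSet='bigchain-rs')
   status = client.admin.command('replSetGetStatus')
   print(status['set'], status['myState'])  # expect: bigchain-rs 1 (PRIMARY)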
k8s/node-rdb-ss.yaml (new file, 131 lines)
@@ -0,0 +1,131 @@
##############################################################
# This YAML file describes a StatefulSet with two containers:#
# bigchaindb/bigchaindb:latest and rethinkdb:2.3             #
# It also describes a Service to expose BigchainDB,          #
# the RethinkDB intracluster communications port, and        #
# the RethinkDB web interface port.                          #
##############################################################

apiVersion: v1
kind: Service
metadata:
  name: bdb-service
  namespace: default
  labels:
    name: bdb-service
spec:
  selector:
    app: bdb
  ports:
  - port: 9984
    targetPort: 9984
    name: bdb-http-api
  - port: 29015
    targetPort: 29015
    name: rdb-intracluster-comm-port
  - port: 8080
    targetPort: 8080
    name: rdb-web-interface-port
  type: LoadBalancer
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: bdb
  namespace: default
spec:
  serviceName: bdb
  replicas: 1
  template:
    metadata:
      name: bdb
      labels:
        app: bdb
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: bdb-server
        image: bigchaindb/bigchaindb:latest
        args:
        - start
        env:
        - name: BIGCHAINDB_KEYPAIR_PRIVATE
          value: 56mEvwwVxcYsFQ3Y8UTFB8DVBv38yoUhxzDW3DAdLVd2
        - name: BIGCHAINDB_KEYPAIR_PUBLIC
          value: 9DsHwiEtvk51UHmNM2eV66czFha69j3CdtNrCj1RcZWR
        - name: BIGCHAINDB_KEYRING
          value: ""
        - name: BIGCHAINDB_DATABASE_BACKEND
          value: rethinkdb
        - name: BIGCHAINDB_DATABASE_HOST
          value: localhost
        - name: BIGCHAINDB_DATABASE_PORT
          value: "28015"
        - name: BIGCHAINDB_SERVER_BIND
          value: "0.0.0.0:9984"
        - name: BIGCHAINDB_DATABASE_NAME
          value: bigchain
        - name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
          value: "120"
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9984
          hostPort: 9984
          name: bdb-port
          protocol: TCP
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          httpGet:
            path: /
            port: 9984
          initialDelaySeconds: 15
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /
            port: 9984
          initialDelaySeconds: 15
          timeoutSeconds: 10
      - name: rethinkdb
        image: rethinkdb:2.3
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
          hostPort: 8080
          name: rdb-web-interface-port
          protocol: TCP
        - containerPort: 29015
          hostPort: 29015
          name: rdb-intra-port
          protocol: TCP
        - containerPort: 28015
          hostPort: 28015
          name: rdb-client-port
          protocol: TCP
        volumeMounts:
        - name: rdb-data
          mountPath: /data
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 15
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 15
          timeoutSeconds: 10
      restartPolicy: Always
      volumes:
      - name: rdb-data
        persistentVolumeClaim:
          claimName: mongoclaim
setup.py
@@ -50,6 +50,7 @@ tests_require = [
     'pytest>=3.0.0',
     'pytest-catchlog>=1.2.2',
     'pytest-cov>=2.2.1',
+    'pytest-mock',
     'pytest-xdist',
     'pytest-flask',
     'tox',
@@ -40,7 +40,7 @@ def connection():
     # executed to make sure that the replica set is correctly initialized.
     # Here we force the connection setup so that all required
     # `Database.command` calls are executed before we mock them in the tests.
-    connection._connect()
+    connection.connect()
     return connection
@@ -34,6 +34,7 @@ def test_run_a_simple_query():
 
 def test_raise_exception_when_max_tries():
     from bigchaindb.backend import connect
+    from bigchaindb.backend.exceptions import OperationError
 
     class MockQuery:
         def run(self, conn):
@@ -41,27 +42,40 @@ def test_raise_exception_when_max_tries():
 
     conn = connect()
 
-    with pytest.raises(r.ReqlDriverError):
+    with pytest.raises(OperationError):
         conn.run(MockQuery())
 
 
-def test_reconnect_when_connection_lost():
+def test_reconnect_when_connection_lost(db_host, db_port):
     from bigchaindb.backend import connect
 
-    def raise_exception(*args, **kwargs):
-        raise r.ReqlDriverError('mock')
-
-    conn = connect()
-    original_connect = r.connect
-    r.connect = raise_exception
-
-    def delayed_start():
-        time.sleep(1)
-        r.connect = original_connect
-
-    thread = Thread(target=delayed_start)
-    query = r.expr('1')
-    thread.start()
-    assert conn.run(query) == '1'
+    original_connect = r.connect
+
+    with patch('rethinkdb.connect') as mock_connect:
+        mock_connect.side_effect = [
+            r.ReqlDriverError('mock'),
+            original_connect(host=db_host, port=db_port)
+        ]
+
+        conn = connect()
+        query = r.expr('1')
+        assert conn.run(query) == '1'
+
+
+def test_reconnect_when_connection_lost_tries_n_times():
+    from bigchaindb.backend import connect
+    from bigchaindb.backend.exceptions import ConnectionError
+
+    with patch('rethinkdb.connect') as mock_connect:
+        mock_connect.side_effect = [
+            r.ReqlDriverError('mock'),
+            r.ReqlDriverError('mock'),
+            r.ReqlDriverError('mock')
+        ]
+
+        conn = connect(max_tries=3)
+        query = r.expr('1')
+        with pytest.raises(ConnectionError):
+            assert conn.run(query) == '1'
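
The two tests above pin down the reconnection contract: each failed
``rethinkdb.connect`` is retried, and after ``max_tries`` failures the raw
driver error is surfaced as a backend-level exception instead. A minimal
sketch of that behaviour (the real logic lives in the backend connection
classes and is more involved):

.. code:: python

   # Sketch only, not the repo's code: retry the driver connection up to
   # `max_tries` times, then raise a backend-level ConnectionError.
   import rethinkdb as r

   class ConnectionError(Exception):
       """Stand-in for bigchaindb.backend.exceptions.ConnectionError."""

   def connect_with_retries(host='localhost', port=28015, max_tries=3):
       last_exc = None
       for _ in range(max_tries):
           try:
               return r.connect(host=host, port=port)
           except r.ReqlDriverError as exc:
               last_exc = exc
       raise ConnectionError('cannot connect after %s tries' % max_tries) from last_exc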
@@ -1,3 +1,5 @@
+from argparse import Namespace
+
 import pytest
 
 
@@ -38,3 +40,13 @@ def mock_bigchaindb_backup_config(monkeypatch):
         'backlog_reassign_delay': 5
     }
     monkeypatch.setattr('bigchaindb._config', config)
+
+
+@pytest.fixture
+def run_start_args(request):
+    param = getattr(request, 'param', {})
+    return Namespace(
+        config=param.get('config'),
+        start_rethinkdb=param.get('start_rethinkdb', False),
+        allow_temp_keypair=param.get('allow_temp_keypair', False),
+    )
@@ -45,7 +45,7 @@ def test_set_shards(mock_reconfigure, monkeypatch, b):
         return {'shards': [{'replicas': [1]}]}
 
     monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_one_replica)
-    args = Namespace(num_shards=3)
+    args = Namespace(num_shards=3, config=None)
     run_set_shards(args)
     mock_reconfigure.assert_called_with(replicas=1, shards=3, dry_run=False)
 
@@ -59,8 +59,7 @@ def test_set_shards(mock_reconfigure, monkeypatch, b):
     mock_reconfigure.assert_called_with(replicas=3, shards=3, dry_run=False)
 
 
-@patch('logging.Logger.warn')
-def test_set_shards_raises_exception(mock_log, monkeypatch, b):
+def test_set_shards_raises_exception(monkeypatch, b):
     from bigchaindb.commands.bigchain import run_set_shards
 
     # test that we are correctly catching the exception
@@ -73,10 +72,10 @@ def test_set_shards_raises_exception(mock_log, monkeypatch, b):
     monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_one_replica)
     monkeypatch.setattr(rethinkdb.ast.Table, 'reconfigure', mock_raise)
 
-    args = Namespace(num_shards=3)
-    run_set_shards(args)
-    assert mock_log.called
+    args = Namespace(num_shards=3, config=None)
+    with pytest.raises(SystemExit) as exc:
+        run_set_shards(args)
+    assert exc.value.args == ('Failed to reconfigure tables.',)
 
 
 @patch('rethinkdb.ast.Table.reconfigure')
@@ -89,7 +88,7 @@ def test_set_replicas(mock_reconfigure, monkeypatch, b):
         return {'shards': [1, 2]}
 
     monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_two_shards)
-    args = Namespace(num_replicas=2)
+    args = Namespace(num_replicas=2, config=None)
     run_set_replicas(args)
     mock_reconfigure.assert_called_with(replicas=2, shards=2, dry_run=False)
 
@@ -103,8 +102,7 @@ def test_set_replicas(mock_reconfigure, monkeypatch, b):
     mock_reconfigure.assert_called_with(replicas=2, shards=3, dry_run=False)
 
 
-@patch('logging.Logger.warn')
-def test_set_replicas_raises_exception(mock_log, monkeypatch, b):
+def test_set_replicas_raises_exception(monkeypatch, b):
     from bigchaindb.commands.bigchain import run_set_replicas
 
     # test that we are correctly catching the exception
@@ -117,7 +115,7 @@ def test_set_replicas_raises_exception(mock_log, monkeypatch, b):
     monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_two_shards)
     monkeypatch.setattr(rethinkdb.ast.Table, 'reconfigure', mock_raise)
 
-    args = Namespace(num_replicas=2)
-    run_set_replicas(args)
-    assert mock_log.called
+    args = Namespace(num_replicas=2, config=None)
+    with pytest.raises(SystemExit) as exc:
+        run_set_replicas(args)
+    assert exc.value.args == ('Failed to reconfigure tables.',)
@@ -1,6 +1,6 @@
 import json
 from unittest.mock import Mock, patch
-from argparse import Namespace, ArgumentTypeError
+from argparse import Namespace
 import copy
 
 import pytest
@@ -26,42 +26,6 @@ def test_make_sure_we_dont_remove_any_command():
     assert parser.parse_args(['remove-replicas', 'localhost:27017']).command
 
 
-def test_start_raises_if_command_not_implemented():
-    from bigchaindb.commands.bigchain import utils
-    from bigchaindb.commands.bigchain import create_parser
-
-    parser = create_parser()
-
-    with pytest.raises(NotImplementedError):
-        # Will raise because `scope`, the third parameter,
-        # doesn't contain the function `run_start`
-        utils.start(parser, ['start'], {})
-
-
-def test_start_raises_if_no_arguments_given():
-    from bigchaindb.commands.bigchain import utils
-    from bigchaindb.commands.bigchain import create_parser
-
-    parser = create_parser()
-
-    with pytest.raises(SystemExit):
-        utils.start(parser, [], {})
-
-
-@patch('multiprocessing.cpu_count', return_value=42)
-def test_start_sets_multiprocess_var_based_on_cli_args(mock_cpu_count):
-    from bigchaindb.commands.bigchain import utils
-    from bigchaindb.commands.bigchain import create_parser
-
-    def run_load(args):
-        return args
-
-    parser = create_parser()
-
-    assert utils.start(parser, ['load'], {'run_load': run_load}).multiprocess == 1
-    assert utils.start(parser, ['load', '--multiprocess'], {'run_load': run_load}).multiprocess == 42
-
-
 @patch('bigchaindb.commands.utils.start')
 def test_main_entrypoint(mock_start):
     from bigchaindb.commands.bigchain import main
@@ -131,9 +95,10 @@ def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch):
     monkeypatch.setitem(config['keypair'], 'public', 'Charlie_Bucket')
     _, _ = capsys.readouterr()  # has the effect of clearing capsys
     run_export_my_pubkey(args)
-    out, err = capsys.readouterr()
-    assert out == config['keypair']['public'] + '\n'
-    assert out == 'Charlie_Bucket\n'
+    out, _ = capsys.readouterr()
+    lines = out.splitlines()
+    assert config['keypair']['public'] in lines
+    assert 'Charlie_Bucket' in lines
 
 
 def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch):
@@ -302,6 +267,64 @@ def test_allow_temp_keypair_doesnt_override_if_keypair_found(mock_gen_keypair,
     assert bigchaindb.config['keypair']['public'] == original_public_key
 
 
+def test_run_start_when_db_already_exists(mocker, monkeypatch, run_start_args):
+    from bigchaindb.commands.bigchain import run_start
+    from bigchaindb.common.exceptions import DatabaseAlreadyExists
+    mocked_start = mocker.patch('bigchaindb.processes.start')
+
+    def mock_run_init():
+        raise DatabaseAlreadyExists()
+
+    monkeypatch.setattr(
+        'bigchaindb.commands.bigchain._run_init', mock_run_init)
+    run_start(run_start_args)
+    assert mocked_start.called
+
+
+def test_run_start_when_keypair_not_found(mocker, monkeypatch, run_start_args):
+    from bigchaindb.commands.bigchain import run_start
+    from bigchaindb.commands.messages import CANNOT_START_KEYPAIR_NOT_FOUND
+    from bigchaindb.common.exceptions import KeypairNotFoundException
+    mocked_start = mocker.patch('bigchaindb.processes.start')
+
+    def mock_run_init():
+        raise KeypairNotFoundException()
+
+    monkeypatch.setattr(
+        'bigchaindb.commands.bigchain._run_init', mock_run_init)
+
+    with pytest.raises(SystemExit) as exc:
+        run_start(run_start_args)
+
+    assert len(exc.value.args) == 1
+    assert exc.value.args[0] == CANNOT_START_KEYPAIR_NOT_FOUND
+    assert not mocked_start.called
+
+
+def test_run_start_when_start_rethinkdb_fails(mocker,
+                                              monkeypatch,
+                                              run_start_args):
+    from bigchaindb.commands.bigchain import run_start
+    from bigchaindb.commands.messages import RETHINKDB_STARTUP_ERROR
+    from bigchaindb.common.exceptions import StartupError
+    run_start_args.start_rethinkdb = True
+    mocked_start = mocker.patch('bigchaindb.processes.start')
+    err_msg = 'Error starting rethinkdb.'
+
+    def mock_start_rethinkdb():
+        raise StartupError(err_msg)
+
+    monkeypatch.setattr(
+        'bigchaindb.commands.utils.start_rethinkdb', mock_start_rethinkdb)
+
+    with pytest.raises(SystemExit) as exc:
+        run_start(run_start_args)
+
+    assert len(exc.value.args) == 1
+    assert exc.value.args[0] == RETHINKDB_STARTUP_ERROR.format(err_msg)
+    assert not mocked_start.called
+
+
 @patch('argparse.ArgumentParser.parse_args')
 @patch('bigchaindb.commands.utils.base_parser')
 @patch('bigchaindb.commands.utils.start')
@@ -320,11 +343,6 @@ def test_calling_main(start_mock, base_parser_mock, parse_args_mock,
     main()
 
     assert argparser_mock.called is True
-    assert parser.add_argument.called is True
-    parser.add_argument.assert_any_call('--dev-start-rethinkdb',
-                                        dest='start_rethinkdb',
-                                        action='store_true',
-                                        help='Run RethinkDB on start')
     parser.add_subparsers.assert_called_with(title='Commands',
                                              dest='command')
     subparsers.add_parser.assert_any_call('configure',
@@ -338,11 +356,19 @@ def test_calling_main(start_mock, base_parser_mock, parse_args_mock,
                                           'key')
     subparsers.add_parser.assert_any_call('init', help='Init the database')
     subparsers.add_parser.assert_any_call('drop', help='Drop the database')
+
     subparsers.add_parser.assert_any_call('start', help='Start BigchainDB')
+    subsubparsers.add_argument.assert_any_call('--dev-start-rethinkdb',
+                                               dest='start_rethinkdb',
+                                               action='store_true',
+                                               help='Run RethinkDB on start')
+    subsubparsers.add_argument.assert_any_call('--dev-allow-temp-keypair',
+                                               dest='allow_temp_keypair',
+                                               action='store_true',
+                                               help='Generate a random keypair on start')
+
     subparsers.add_parser.assert_any_call('set-shards',
                                           help='Configure number of shards')
 
     subsubparsers.add_argument.assert_any_call('num_shards',
                                                metavar='num_shards',
                                                type=int, default=1,
|
|||||||
mock_add_replicas.reset_mock()
|
mock_add_replicas.reset_mock()
|
||||||
|
|
||||||
# test add_replicas with `OperationError`
|
# test add_replicas with `OperationError`
|
||||||
mock_add_replicas.side_effect = OperationError()
|
mock_add_replicas.side_effect = OperationError('err')
|
||||||
assert run_add_replicas(args) is None
|
with pytest.raises(SystemExit) as exc:
|
||||||
|
run_add_replicas(args)
|
||||||
|
assert exc.value.args == ('err',)
|
||||||
assert mock_add_replicas.call_count == 1
|
assert mock_add_replicas.call_count == 1
|
||||||
mock_add_replicas.reset_mock()
|
mock_add_replicas.reset_mock()
|
||||||
|
|
||||||
# test add_replicas with `NotImplementedError`
|
# test add_replicas with `NotImplementedError`
|
||||||
mock_add_replicas.side_effect = NotImplementedError()
|
mock_add_replicas.side_effect = NotImplementedError('err')
|
||||||
assert run_add_replicas(args) is None
|
with pytest.raises(SystemExit) as exc:
|
||||||
|
run_add_replicas(args)
|
||||||
|
assert exc.value.args == ('err',)
|
||||||
assert mock_add_replicas.call_count == 1
|
assert mock_add_replicas.call_count == 1
|
||||||
mock_add_replicas.reset_mock()
|
mock_add_replicas.reset_mock()
|
||||||
|
|
||||||
@ -422,29 +452,17 @@ def test_run_remove_replicas(mock_remove_replicas):
|
|||||||
mock_remove_replicas.reset_mock()
|
mock_remove_replicas.reset_mock()
|
||||||
|
|
||||||
# test add_replicas with `OperationError`
|
# test add_replicas with `OperationError`
|
||||||
mock_remove_replicas.side_effect = OperationError()
|
mock_remove_replicas.side_effect = OperationError('err')
|
||||||
assert run_remove_replicas(args) is None
|
with pytest.raises(SystemExit) as exc:
|
||||||
|
run_remove_replicas(args)
|
||||||
|
assert exc.value.args == ('err',)
|
||||||
assert mock_remove_replicas.call_count == 1
|
assert mock_remove_replicas.call_count == 1
|
||||||
mock_remove_replicas.reset_mock()
|
mock_remove_replicas.reset_mock()
|
||||||
|
|
||||||
# test add_replicas with `NotImplementedError`
|
# test add_replicas with `NotImplementedError`
|
||||||
mock_remove_replicas.side_effect = NotImplementedError()
|
mock_remove_replicas.side_effect = NotImplementedError('err')
|
||||||
assert run_remove_replicas(args) is None
|
with pytest.raises(SystemExit) as exc:
|
||||||
|
run_remove_replicas(args)
|
||||||
|
assert exc.value.args == ('err',)
|
||||||
assert mock_remove_replicas.call_count == 1
|
assert mock_remove_replicas.call_count == 1
|
||||||
mock_remove_replicas.reset_mock()
|
mock_remove_replicas.reset_mock()
|
||||||
|
|
||||||
|
|
||||||
def test_mongodb_host_type():
|
|
||||||
from bigchaindb.commands.utils import mongodb_host
|
|
||||||
|
|
||||||
# bad port provided
|
|
||||||
with pytest.raises(ArgumentTypeError):
|
|
||||||
mongodb_host('localhost:11111111111')
|
|
||||||
|
|
||||||
# no port information provided
|
|
||||||
with pytest.raises(ArgumentTypeError):
|
|
||||||
mongodb_host('localhost')
|
|
||||||
|
|
||||||
# bad host provided
|
|
||||||
with pytest.raises(ArgumentTypeError):
|
|
||||||
mongodb_host(':27017')
|
|
||||||
|
tests/commands/test_utils.py (new file, 63 lines)
@@ -0,0 +1,63 @@
import argparse
import pytest

from unittest.mock import patch


def test_start_raises_if_command_not_implemented():
    from bigchaindb.commands import utils
    from bigchaindb.commands.bigchain import create_parser

    parser = create_parser()

    with pytest.raises(NotImplementedError):
        # Will raise because `scope`, the third parameter,
        # doesn't contain the function `run_start`
        utils.start(parser, ['start'], {})


def test_start_raises_if_no_arguments_given():
    from bigchaindb.commands import utils
    from bigchaindb.commands.bigchain import create_parser

    parser = create_parser()

    with pytest.raises(SystemExit):
        utils.start(parser, [], {})


@patch('multiprocessing.cpu_count', return_value=42)
def test_start_sets_multiprocess_var_based_on_cli_args(mock_cpu_count):
    from bigchaindb.commands import utils

    def run_mp_arg_test(args):
        return args

    parser = argparse.ArgumentParser()
    subparser = parser.add_subparsers(title='Commands',
                                      dest='command')
    mp_arg_test_parser = subparser.add_parser('mp_arg_test')
    mp_arg_test_parser.add_argument('-m', '--multiprocess',
                                    nargs='?',
                                    type=int,
                                    default=False)

    scope = {'run_mp_arg_test': run_mp_arg_test}
    assert utils.start(parser, ['mp_arg_test'], scope).multiprocess == 1
    assert utils.start(parser, ['mp_arg_test', '--multiprocess'], scope).multiprocess == 42


def test_mongodb_host_type():
    from bigchaindb.commands.utils import mongodb_host

    # bad port provided
    with pytest.raises(argparse.ArgumentTypeError):
        mongodb_host('localhost:11111111111')

    # no port information provided
    with pytest.raises(argparse.ArgumentTypeError):
        mongodb_host('localhost')

    # bad host provided
    with pytest.raises(argparse.ArgumentTypeError):
        mongodb_host(':27017')
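
For reference, here is a sketch of a ``host:port`` validator that would
satisfy ``test_mongodb_host_type`` above (illustrative only; the real
``mongodb_host`` lives in ``bigchaindb/commands/utils.py`` and may differ):

.. code:: python

   # Illustrative validator passing the three checks above; not the repo's code.
   from argparse import ArgumentTypeError

   def mongodb_host(hostport):
       host, _, port = hostport.rpartition(':')
       if not host or not port:
           raise ArgumentTypeError('expected a host:port pair, got %r' % hostport)
       try:
           port = int(port)
       except ValueError:
           raise ArgumentTypeError('port must be an integer')
       if not 1 <= port <= 65535:
           raise ArgumentTypeError('port must be between 1 and 65535')
       return '%s:%s' % (host, port)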
@@ -5,27 +5,6 @@ import pytest
 pytestmark = [pytest.mark.bdb, pytest.mark.usefixtures('processes')]
 
 
-def test_fast_double_create(b, user_pk):
-    from bigchaindb.models import Transaction
-    from bigchaindb.backend.query import count_blocks
-    tx = Transaction.create([b.me], [([user_pk], 1)],
-                            metadata={'test': 'test'}).sign([b.me_private])
-
-    # write everything fast
-    b.write_transaction(tx)
-    b.write_transaction(tx)
-
-    time.sleep(2)
-    tx_returned = b.get_transaction(tx.id)
-
-    # test that the tx can be queried
-    assert tx_returned == tx
-    # test the transaction appears only once
-    last_voted_block = b.get_last_voted_block()
-    assert len(last_voted_block.transactions) == 1
-    assert count_blocks(b.connection) == 2
-
-
 def test_double_create(b, user_pk):
     from bigchaindb.models import Transaction
     from bigchaindb.backend.query import count_blocks
@@ -72,6 +72,7 @@ class MultipipesStepper:
             r = f(**kwargs)
             if r is not None:
                 self._enqueue(next_name, r)
+            return r
 
         self.tasks[name] = functools.wraps(f)(inner)
         self.input_tasks.add(name)
@@ -90,6 +91,7 @@ class MultipipesStepper:
             out = f(*args, **kwargs)
             if out is not None and next:
                 self._enqueue(next_name, out)
+            return out
 
         task = functools.wraps(f)(inner)
         self.tasks[name] = task
@@ -111,12 +113,12 @@ class MultipipesStepper:
         logging.debug('Stepping %s', name)
         task = self.tasks[name]
         if name in self.input_tasks:
-            task(**kwargs)
+            return task(**kwargs)
         else:
             queue = self.queues.get(name, [])
             if not queue:
                 raise Empty(name)
-            task(*queue.pop(0), **kwargs)
+            return task(*queue.pop(0), **kwargs)
         logging.debug('Stepped %s', name)
 
     @property
@@ -217,3 +217,12 @@ def test_full_pipeline(b, user_pk):
     block_len = len(block_doc.transactions)
     assert chained_block == block_doc
     assert number_assigned_to_others == 100 - block_len
+
+
+def test_block_snowflake(create_tx, signed_transfer_tx):
+    from bigchaindb.pipelines.block import tx_collector
+    snowflake = tx_collector()
+    assert snowflake.send(create_tx) == [create_tx]
+    snowflake.send(signed_transfer_tx)
+    snowflake.send(create_tx)
+    assert snowflake.send(None) == [create_tx, signed_transfer_tx]
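
``test_block_snowflake`` documents the contract of ``tx_collector``: buffer
incoming transactions, silently drop duplicates, and hand back the
deduplicated batch. A hedged sketch of an object with that ``send``
interface (the actual ``tx_collector`` in ``bigchaindb/pipelines/block.py``
may be implemented quite differently):

.. code:: python

   # Sketch of the send() contract exercised by test_block_snowflake;
   # not the actual implementation.
   class TxCollector:
       def __init__(self):
           self.txs = []

       def send(self, tx):
           # None acts as a flush signal; duplicates are dropped silently.
           if tx is not None and tx not in self.txs:
               self.txs.append(tx)
           return self.txs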
@@ -20,9 +20,26 @@ def test_stepping_changefeed_produces_update(b, steps):
                      [tx.id, tx.id])
 
 
+@pytest.mark.bdb
+@pytest.mark.genesis
+def test_dupe_tx_in_block(b, steps):
+    tx = input_single_create(b)
+    for i in range(2):
+        steps.stale_check_transactions()
+        steps.stale_reassign_transactions()
+        steps.block_changefeed()
+        steps.block_filter_tx()
+        steps.block_validate_tx()
+        steps.block_validate_tx()
+    assert steps.counts == {'block_create': 2}
+    steps.block_create(timeout=False)
+    block = steps.block_create(timeout=True)
+    assert block.transactions == [tx]
+
+
 def input_single_create(b):
     from bigchaindb.common.transaction import Transaction
     metadata = {'r': random.random()}
-    tx = Transaction.create([b.me], [([b.me], 1)], metadata)
+    tx = Transaction.create([b.me], [([b.me], 1)], metadata).sign([b.me_private])
     b.write_transaction(tx)
     return tx
@@ -169,12 +169,17 @@ def test_autoconfigure_read_both_from_file_and_env(monkeypatch, request):
         'host': DATABASE_HOST,
         'port': DATABASE_PORT,
         'name': DATABASE_NAME,
+        'connection_timeout': 5000,
+        'max_tries': 3
     }
 
     database_mongodb = {
         'backend': 'mongodb',
         'host': DATABASE_HOST,
         'port': DATABASE_PORT,
         'name': DATABASE_NAME,
+        'connection_timeout': 5000,
+        'max_tries': 3,
         'replicaset': 'bigchain-rs',
     }
 
@@ -10,6 +10,8 @@ def config(request, monkeypatch):
         'port': 28015,
         'name': 'bigchain',
         'replicaset': 'bigchain-rs',
+        'connection_timeout': 5000,
+        'max_tries': 3
     },
     'keypair': {
         'public': 'pubkey',
@@ -163,3 +163,11 @@ class TestBlockModel(object):
 
         public_key = PublicKey(b.me)
         assert public_key.verify(expected_block_serialized, block.signature)
+
+    def test_block_dupe_tx(self, b):
+        from bigchaindb.models import Transaction
+        from bigchaindb.common.exceptions import DuplicateTransaction
+        tx = Transaction.create([b.me], [([b.me], 1)])
+        block = b.create_block([tx, tx])
+        with raises(DuplicateTransaction):
+            block._validate_block_transactions(b)
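
``test_block_dupe_tx`` pins down that a block containing the same
transaction twice fails validation with ``DuplicateTransaction``. The core
of such a check can be as simple as comparing transaction ids against a
set; a sketch (the real ``_validate_block_transactions`` does more than
this):

.. code:: python

   # Sketch of the duplicate-transaction guard; illustrative only.
   def _validate_block_transactions(self, bigchain):
       txids = [tx.id for tx in self.transactions]
       if len(txids) != len(set(txids)):
           raise DuplicateTransaction('block %s has duplicate transaction' % self.id)
       # ... per-transaction validation would continue here ...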
tests/test_voting.py (new file, 203 lines)
@@ -0,0 +1,203 @@
import pytest
from collections import Counter

from bigchaindb.core import Bigchain
from bigchaindb.voting import Voting, INVALID, VALID, UNDECIDED


################################################################################
# Tests for checking vote eligibility


def test_partition_eligible_votes():
    class TestVoting(Voting):
        @classmethod
        def verify_vote_signature(cls, vote):
            if vote['node_pubkey'] == 'invalid sig':
                return False
            if vote['node_pubkey'] == 'value error':
                raise ValueError()
            return True

    voters = ['valid', 'invalid sig', 'value error', 'not in set']
    votes = [{'node_pubkey': k} for k in voters]

    el, inel = TestVoting.partition_eligible_votes(votes, voters[:-1])
    assert el == [votes[0]]
    assert inel == votes[1:]


################################################################################
# Test vote counting


def test_count_votes():
    class TestVoting(Voting):
        @classmethod
        def verify_vote_schema(cls, vote):
            return vote['node_pubkey'] != 'malformed'

    voters = (['cheat', 'cheat', 'says invalid', 'malformed'] +
              ['kosher' + str(i) for i in range(10)])

    votes = [Bigchain(v).vote('block', 'a', True) for v in voters]
    votes[2]['vote']['is_block_valid'] = False
    # Incorrect previous block subtracts from n_valid and adds to n_invalid
    votes[-1]['vote']['previous_block'] = 'z'

    assert TestVoting.count_votes(votes) == {
        'counts': {
            'n_valid': 9,    # 9 kosher votes
            'n_invalid': 4,  # 1 cheat, 1 invalid, 1 malformed, 1 rogue prev block
            # One of the cheat votes counts towards n_invalid, the other is
            # not counted here.
            # len(cheat) + n_valid + n_invalid == len(votes)
        },
        'cheat': [votes[:2]],
        'malformed': [votes[3]],
        'previous_block': 'a',
        'other_previous_block': {'z': 1},
    }


def test_must_agree_prev_block():
    class TestVoting(Voting):
        @classmethod
        def verify_vote_schema(cls, vote):
            return True

    voters = 'abcd'
    votes = [Bigchain(v).vote('block', 'a', True) for v in voters]
    votes[0]['vote']['previous_block'] = 'b'
    votes[1]['vote']['previous_block'] = 'c'
    assert TestVoting.count_votes(votes) == {
        'counts': {
            'n_valid': 2,
            'n_invalid': 2,
        },
        'previous_block': 'a',
        'other_previous_block': {'b': 1, 'c': 1},
        'malformed': [],
        'cheat': [],
    }


################################################################################
# Tests for vote decision making


DECISION_TESTS = [
    {'n_voters': 1, 'n_valid': 1, 'n_invalid': 1},
    {'n_voters': 2, 'n_valid': 2, 'n_invalid': 1},
    {'n_voters': 3, 'n_valid': 2, 'n_invalid': 2},
    {'n_voters': 4, 'n_valid': 3, 'n_invalid': 2},
    {'n_voters': 5, 'n_valid': 3, 'n_invalid': 3},
    {'n_voters': 6, 'n_valid': 4, 'n_invalid': 3},
    {'n_voters': 7, 'n_valid': 4, 'n_invalid': 4},
    {'n_voters': 8, 'n_valid': 5, 'n_invalid': 4}
]


@pytest.mark.parametrize('kwargs', DECISION_TESTS)
def test_decide_votes_valid(kwargs):
    kwargs = kwargs.copy()
    kwargs['n_invalid'] = 0
    assert Voting.decide_votes(**kwargs) == VALID
    kwargs['n_valid'] -= 1
    assert Voting.decide_votes(**kwargs) == UNDECIDED


@pytest.mark.parametrize('kwargs', DECISION_TESTS)
def test_decide_votes_invalid(kwargs):
    kwargs = kwargs.copy()
    kwargs['n_valid'] = 0
    assert Voting.decide_votes(**kwargs) == INVALID
    kwargs['n_invalid'] -= 1
    assert Voting.decide_votes(**kwargs) == UNDECIDED


################################################################################
# Actions - test state transitions


@pytest.mark.parametrize('n_voters', range(8))
def test_vote_actions(n_voters):
    """
    * Legal transitions are UNDECIDED -> [VALID|INVALID] only
    * Block is never left UNDECIDED after voting
    * Accommodates rogues on previous block / invalid schema
    """
    class TestVoting(Voting):
        @classmethod
        def verify_vote_schema(cls, vote):
            return type(vote['vote']['is_block_valid']) == bool

        @classmethod
        def verify_vote_signature(cls, vote):
            return True

    keyring = 'abcdefghijklmnopqrstuvwxyz'[:n_voters]
    block = {'id': 'block', 'block': {'voters': keyring}}
    state = UNDECIDED
    todo = [(state, [], [])]

    def branch(p, r):
        todo.append((state, votes, votes + [{
            'node_pubkey': keyring[len(votes)],
            'vote': {'previous_block': p, 'is_block_valid': r}
        }]))

    while todo:
        prev_state, prev_votes, votes = todo.pop(0)
        results = Counter(v['vote']['is_block_valid'] for v in votes)
        prev_blocks = Counter(v['vote']['previous_block'] for v in votes)
        majority = n_voters // 2 + 1
        honest = (len(votes) == majority and len(prev_blocks) == 1 and
                  not results['lol'] and len(results) == 1)
        closed = len(votes) == n_voters

        # Test legal transition
        if votes:
            state = TestVoting.block_election(block, votes, keyring)['status']
            assert prev_state in [state, UNDECIDED]

        # Test that decision has been reached
        if honest or closed:
            assert state != UNDECIDED or n_voters == 0

        if closed:
            continue

        # Can accommodate more votes, add them to the todo list.
        # This vote is the good case
        branch('A', True)
        # This vote disagrees on previous block
        branch('B', True)
        # This vote says the block is invalid
        branch('A', False)
        # This vote is invalid
        branch('A', 'lol')


################################################################################
# Tests for vote signature


def test_verify_vote_signature_passes(b):
    vote = b.vote('block', 'a', True)
    assert Voting.verify_vote_signature(vote)
    vote['signature'] = ''
    assert not Voting.verify_vote_signature(vote)


################################################################################
# Tests for vote schema


def test_verify_vote_schema(b):
    vote = b.vote('b' * 64, 'a' * 64, True)
    assert Voting.verify_vote_schema(vote)
    vote = b.vote('b' * 64, 'a', True)
    assert not Voting.verify_vote_schema(vote)
    vote = b.vote('b', 'a' * 64, True)
    assert not Voting.verify_vote_schema(vote)
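
The ``DECISION_TESTS`` table above fixes the majority arithmetic: a block
needs ``n_voters // 2 + 1`` valid votes to become VALID, and flips to
INVALID as soon as enough invalid votes accumulate that a valid majority is
unreachable. A sketch of a ``decide_votes`` consistent with all eight test
rows (illustrative; the real method is ``Voting.decide_votes`` in
``bigchaindb/voting.py``):

.. code:: python

   # Sketch consistent with DECISION_TESTS; not the repo's code.
   def decide_votes(n_voters, n_valid, n_invalid):
       majority = n_voters // 2 + 1
       if n_invalid > n_voters - majority:
           return INVALID    # a valid majority can no longer be reached
       if n_valid >= majority:
           return VALID
       return UNDECIDED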
@@ -47,3 +47,68 @@ def test_get_outputs_endpoint_with_invalid_unspent(client, user_pk):
     res = client.get(OUTPUTS_ENDPOINT + params)
     assert expected == res.json
     assert res.status_code == 400
+
+
+@pytest.mark.bdb
+@pytest.mark.usefixtures('inputs')
+def test_get_divisble_transactions_returns_500(b, client):
+    from bigchaindb.models import Transaction
+    from bigchaindb.common import crypto
+    import json
+
+    TX_ENDPOINT = '/api/v1/transactions'
+
+    def mine(tx_list):
+        block = b.create_block(tx_list)
+        b.write_block(block)
+
+        # vote the block valid
+        vote = b.vote(block.id, b.get_last_voted_block().id, True)
+        b.write_vote(vote)
+
+    alice_priv, alice_pub = crypto.generate_key_pair()
+    bob_priv, bob_pub = crypto.generate_key_pair()
+    carly_priv, carly_pub = crypto.generate_key_pair()
+
+    create_tx = Transaction.create([alice_pub], [([alice_pub], 4)])
+    create_tx.sign([alice_priv])
+
+    res = client.post(TX_ENDPOINT, data=json.dumps(create_tx.to_dict()))
+    assert res.status_code == 202
+
+    mine([create_tx])
+
+    transfer_tx = Transaction.transfer(create_tx.to_inputs(),
+                                       [([alice_pub], 3), ([bob_pub], 1)],
+                                       asset_id=create_tx.id)
+    transfer_tx.sign([alice_priv])
+
+    res = client.post(TX_ENDPOINT, data=json.dumps(transfer_tx.to_dict()))
+    assert res.status_code == 202
+
+    mine([transfer_tx])
+
+    transfer_tx_carly = Transaction.transfer([transfer_tx.to_inputs()[1]],
+                                             [([carly_pub], 1)],
+                                             asset_id=create_tx.id)
+    transfer_tx_carly.sign([bob_priv])
+
+    res = client.post(TX_ENDPOINT, data=json.dumps(transfer_tx_carly.to_dict()))
+    assert res.status_code == 202
+
+    mine([transfer_tx_carly])
+
+    asset_id = create_tx.id
+
+    url = TX_ENDPOINT + "?asset_id=" + asset_id
+    assert client.get(url).status_code == 200
+    assert len(client.get(url).json) == 3
+
+    url = OUTPUTS_ENDPOINT + '?public_key=' + alice_pub
+    assert client.get(url).status_code == 200
+
+    url = OUTPUTS_ENDPOINT + '?public_key=' + bob_pub
+    assert client.get(url).status_code == 200
+
+    url = OUTPUTS_ENDPOINT + '?public_key=' + carly_pub
+    assert client.get(url).status_code == 200
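
Outside the test suite, the same ``/outputs`` endpoint can be exercised
against a running node. A hypothetical manual check with ``requests`` (the
public-key value is a placeholder; substitute a real base58 key):

.. code:: python

   # Hypothetical check against a node on localhost:9984.
   import requests

   PUBLIC_KEY = 'EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ'  # placeholder

   res = requests.get('http://localhost:9984/api/v1/outputs',
                      params={'public_key': PUBLIC_KEY})
   print(res.status_code)  # expect 200, not the 500 these tests guard against
   print(res.json())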