Merge remote-tracking branch 'remotes/origin/master' into core/557/improve-get-transaction

This commit is contained in:
diminator 2016-08-22 15:52:01 +02:00
commit 1fbb0ed4ba
51 changed files with 1036 additions and 264 deletions

7
.gitignore vendored
View File

@ -65,12 +65,11 @@ target/
# pyenv
.python-version
# Private key files from AWS
*.pem
# Some files created when deploying a cluster on AWS
deploy-cluster-aws/conf/rethinkdb.conf
deploy-cluster-aws/hostlist.py
deploy-cluster-aws/confiles/
deploy-cluster-aws/client_confile
deploy-cluster-aws/hostlist.py
deploy-cluster-aws/ssh_key.py
benchmarking-tests/hostlist.py
benchmarking-tests/ssh_key.py

View File

@ -1,10 +1,12 @@
# Code Licenses
All officially-supported BigchainDB _driver code_ is licensed under the Apache License, Version 2.0, the full text of which can be found at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0).
All code in _this_ repository is licensed under the GNU Affero General Public License version 3 (AGPLv3), the full text of which can be found at [http://www.gnu.org/licenses/agpl.html](http://www.gnu.org/licenses/agpl.html).
If you want to make modifications to the code in _this_ repository and you want to keep those modifications proprietary, then you must get a commercial license from BigchainDB GmbH.
All short code snippets embedded in the official BigchainDB _documentation_ are licensed under the Apache License, Version 2.0, the full text of which can be found at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0).
All _other_ officially-supported BigchainDB code is licensed under the GNU Affero General Public License version 3 (AGPLv3), the full text of which can be found at [http://www.gnu.org/licenses/agpl.html](http://www.gnu.org/licenses/agpl.html).
For the licenses on all other BigchainDB-related code, see the LICENSE file in the associated repository.
# Documentation Licenses

View File

@ -7,6 +7,7 @@ from fabric.operations import run, put
from fabric.context_managers import settings
from hostlist import public_dns_names
from ssh_key import ssh_key_path
# Ignore known_hosts
# http://docs.fabfile.org/en/1.10/usage/env.html#disable-known-hosts
@ -18,7 +19,7 @@ env.hosts = public_dns_names
# SSH key files to try when connecting:
# http://docs.fabfile.org/en/1.10/usage/env.html#key-filename
env.key_filename = 'pem/bigchaindb.pem'
env.key_filename = ssh_key_path
@task

View File

@ -56,8 +56,8 @@ class Client:
"""
tx = self.consensus.create_transaction(
current_owner=self.public_key,
new_owner=self.public_key,
owner_before=self.public_key,
owner_after=self.public_key,
tx_input=None,
operation='CREATE',
payload=payload)
@ -66,11 +66,11 @@ class Client:
tx, private_key=self.private_key)
return self._push(signed_tx)
def transfer(self, new_owner, tx_input, payload=None):
def transfer(self, owner_after, tx_input, payload=None):
"""Issue a transaction to transfer an asset.
Args:
new_owner (str): the public key of the new owner
owner_after (str): the public key of the new owner
tx_input (str): the id of the transaction to use as input
payload (dict, optional): the payload for the transaction.
@ -79,8 +79,8 @@ class Client:
"""
tx = self.consensus.create_transaction(
current_owner=self.public_key,
new_owner=new_owner,
owner_before=self.public_key,
owner_after=owner_after,
tx_input=tx_input,
operation='TRANSFER',
payload=payload)

View File

@ -22,6 +22,7 @@ from pkg_resources import iter_entry_points, ResolutionError
import bigchaindb
from bigchaindb.consensus import AbstractConsensusRules
from bigchaindb import exceptions
# TODO: move this to a proper configuration file for logging
logging.getLogger('requests').setLevel(logging.WARNING)
@ -98,7 +99,12 @@ def file_config(filename=None):
logger.debug('file_config() will try to open `{}`'.format(filename))
with open(filename) as f:
config = json.load(f)
try:
config = json.load(f)
except ValueError as err:
raise exceptions.ConfigurationError(
'Failed to parse the JSON configuration from `{}`, {}'.format(filename, err)
)
logger.info('Configuration loaded from `{}`'.format(filename))

View File

@ -133,14 +133,14 @@ class BaseConsensusRules(AbstractConsensusRules):
# TODO: for now lets assume a CREATE transaction only has one fulfillment
if transaction['transaction']['fulfillments'][0]['input']:
raise ValueError('A CREATE operation has no inputs')
# TODO: for now lets assume a CREATE transaction only has one current_owner
if transaction['transaction']['fulfillments'][0]['current_owners'][0] not in (
# TODO: for now lets assume a CREATE transaction only has one owner_before
if transaction['transaction']['fulfillments'][0]['owners_before'][0] not in (
bigchain.nodes_except_me + [bigchain.me]):
raise exceptions.OperationError(
'Only federation nodes can use the operation `CREATE`')
else:
# check if the input exists, is owned by the current_owner
# check if the input exists, is owned by the owner_before
if not transaction['transaction']['fulfillments']:
raise ValueError('Transaction contains no fulfillments')
@ -206,14 +206,14 @@ class BaseConsensusRules(AbstractConsensusRules):
return block
@staticmethod
def create_transaction(current_owner, new_owner, tx_input, operation,
def create_transaction(owner_before, owner_after, tx_input, operation,
payload=None):
"""Create a new transaction
Refer to the documentation of ``bigchaindb.util.create_tx``
"""
return util.create_tx(current_owner, new_owner, tx_input, operation,
return util.create_tx(owner_before, owner_after, tx_input, operation,
payload)
@staticmethod

View File

@ -317,11 +317,11 @@ class Bigchain(object):
list: list of `txids` currently owned by `owner`
"""
# get all transactions in which owner is in the `new_owners` list
# get all transactions in which owner is in the `owners_after` list
response = r.table('bigchain', read_mode=self.read_mode) \
.concat_map(lambda doc: doc['block']['transactions']) \
.filter(lambda tx: tx['transaction']['conditions']
.contains(lambda c: c['new_owners']
.contains(lambda c: c['owners_after']
.contains(owner))) \
.run(self.conn)
owned = []
@ -337,12 +337,12 @@ class Bigchain(object):
# to get a list of outputs available to spend
for condition in tx['transaction']['conditions']:
# for simple signature conditions there are no subfulfillments
# check if the owner is in the condition `new_owners`
if len(condition['new_owners']) == 1:
# check if the owner is in the condition `owners_after`
if len(condition['owners_after']) == 1:
if condition['condition']['details']['public_key'] == owner:
tx_input = {'txid': tx['id'], 'cid': condition['cid']}
else:
# for transactions with multiple `new_owners` there will be several subfulfillments nested
# for transactions with multiple `owners_after` there will be several subfulfillments nested
# in the condition. We need to iterate the subfulfillments to make sure there is a
# subfulfillment for `owner`
if util.condition_details_has_owner(condition['condition']['details'], owner):
@ -369,7 +369,8 @@ class Bigchain(object):
def is_valid_transaction(self, transaction):
"""Check whether a transaction is valid or invalid.
Similar to `validate_transaction` but does not raise an exception if the transaction is valid.
Similar to `validate_transaction` but never raises an exception.
It returns `False` if the transaction is invalid.
Args:
transaction (dict): transaction to check.

View File

@ -1,6 +1,9 @@
"""Custom exceptions used in the `bigchaindb` package.
"""
class ConfigurationError(Exception):
"""Raised when there is a problem with server configuration"""
class OperationError(Exception):
"""Raised when an operation cannot go through"""

View File

@ -44,7 +44,7 @@ class Election:
def get_changefeed():
return ChangeFeed(table='votes', operation='insert')
return ChangeFeed(table='votes', operation=ChangeFeed.INSERT)
def create_pipeline():

View File

@ -20,17 +20,19 @@ class ChangeFeed(Node):
to output before the actual changefeed.
"""
INSERT = 'insert'
DELETE = 'delete'
UPDATE = 'update'
INSERT = 1
DELETE = 2
UPDATE = 4
def __init__(self, table, operation, prefeed=None):
"""Create a new RethinkDB ChangeFeed.
Args:
table (str): name of the table to listen to for changes.
operation (str): can be ChangeFeed.INSERT, ChangeFeed.DELETE, or
ChangeFeed.UPDATE.
operation (int): can be ChangeFeed.INSERT, ChangeFeed.DELETE, or
ChangeFeed.UPDATE. Combining multiple operations is possible using
the bitwise ``|`` operator
(e.g. ``ChangeFeed.INSERT | ChangeFeed.UPDATE``)
prefeed (iterable): whatever set of data you want to be published
first.
"""
@ -51,10 +53,10 @@ class ChangeFeed(Node):
is_delete = change['new_val'] is None
is_update = not is_insert and not is_delete
if is_insert and self.operation == ChangeFeed.INSERT:
if is_insert and (self.operation & ChangeFeed.INSERT):
self.outqueue.put(change['new_val'])
elif is_delete and self.operation == ChangeFeed.DELETE:
elif is_delete and (self.operation & ChangeFeed.DELETE):
self.outqueue.put(change['old_val'])
elif is_update and self.operation == ChangeFeed.UPDATE:
elif is_update and (self.operation & ChangeFeed.UPDATE):
self.outqueue.put(change)

View File

@ -144,7 +144,7 @@ def initial():
def get_changefeed():
"""Create and return the changefeed for the bigchain table."""
return ChangeFeed('bigchain', 'insert', prefeed=initial())
return ChangeFeed('bigchain', operation=ChangeFeed.INSERT, prefeed=initial())
def create_pipeline():

View File

@ -137,7 +137,7 @@ def timestamp():
# TODO: Consider remove the operation (if there are no inputs CREATE else TRANSFER)
def create_tx(current_owners, new_owners, inputs, operation, payload=None):
def create_tx(owners_before, owners_after, inputs, operation, payload=None):
"""Create a new transaction
A transaction in the bigchain is a transfer of a digital asset between two entities represented
@ -153,8 +153,8 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
`TRANSFER` - A transfer operation allows for a transfer of the digital assets between entities.
Args:
current_owners (list): base58 encoded public key of the current owners of the asset.
new_owners (list): base58 encoded public key of the new owners of the digital asset.
owners_before (list): base58 encoded public key of the current owners of the asset.
owners_after (list): base58 encoded public key of the new owners of the digital asset.
inputs (list): id of the transaction to use as input.
operation (str): Either `CREATE` or `TRANSFER` operation.
payload (Optional[dict]): dictionary with information about asset.
@ -173,7 +173,7 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
"version": "transaction version number",
"fulfillments": [
{
"current_owners": ["list of <pub-keys>"],
"owners_before": ["list of <pub-keys>"],
"input": {
"txid": "<sha3 hash>",
"cid": "condition index"
@ -184,7 +184,7 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
],
"conditions": [
{
"new_owners": ["list of <pub-keys>"],
"owners_after": ["list of <pub-keys>"],
"condition": "condition to be met",
"cid": "condition index (1-to-1 mapping with fid)"
}
@ -205,16 +205,16 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
# validate arguments (owners and inputs should be lists or None)
# The None case appears on fulfilling a hashlock
if current_owners is None:
current_owners = []
if not isinstance(current_owners, list):
current_owners = [current_owners]
if owners_before is None:
owners_before = []
if not isinstance(owners_before, list):
owners_before = [owners_before]
# The None case appears on assigning a hashlock
if new_owners is None:
new_owners = []
if not isinstance(new_owners, list):
new_owners = [new_owners]
if owners_after is None:
owners_after = []
if not isinstance(owners_after, list):
owners_after = [owners_after]
if not isinstance(inputs, list):
inputs = [inputs]
@ -235,7 +235,7 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
if inputs:
for fid, tx_input in enumerate(inputs):
fulfillments.append({
'current_owners': current_owners,
'owners_before': owners_before,
'input': tx_input,
'fulfillment': None,
'fid': fid
@ -243,7 +243,7 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
# create
else:
fulfillments.append({
'current_owners': current_owners,
'owners_before': owners_before,
'input': None,
'fulfillment': None,
'fid': 0
@ -254,14 +254,14 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
for fulfillment in fulfillments:
# threshold condition
if len(new_owners) > 1:
condition = cc.ThresholdSha256Fulfillment(threshold=len(new_owners))
for new_owner in new_owners:
condition.add_subfulfillment(cc.Ed25519Fulfillment(public_key=new_owner))
if len(owners_after) > 1:
condition = cc.ThresholdSha256Fulfillment(threshold=len(owners_after))
for owner_after in owners_after:
condition.add_subfulfillment(cc.Ed25519Fulfillment(public_key=owner_after))
# simple signature condition
elif len(new_owners) == 1:
condition = cc.Ed25519Fulfillment(public_key=new_owners[0])
elif len(owners_after) == 1:
condition = cc.Ed25519Fulfillment(public_key=owners_after[0])
# to be added later (hashlock conditions)
else:
@ -269,7 +269,7 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
if condition:
conditions.append({
'new_owners': new_owners,
'owners_after': owners_after,
'condition': {
'details': condition.to_dict(),
'uri': condition.condition_uri
@ -301,7 +301,7 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
def sign_tx(transaction, signing_keys, bigchain=None):
"""Sign a transaction
A transaction signed with the `current_owner` corresponding private key.
A transaction signed with the `owner_before` corresponding private key.
Args:
transaction (dict): transaction to sign.
@ -317,7 +317,7 @@ def sign_tx(transaction, signing_keys, bigchain=None):
if not isinstance(signing_keys, list):
signing_keys = [signing_keys]
# create a mapping between sk and vk so that we can match the private key to the current_owners
# create a mapping between sk and vk so that we can match the private key to the owners_before
key_pairs = {}
for sk in signing_keys:
signing_key = crypto.SigningKey(sk)
@ -368,13 +368,13 @@ def fulfill_simple_signature_fulfillment(fulfillment, parsed_fulfillment, fulfil
object: fulfilled cryptoconditions.Ed25519Fulfillment
"""
current_owner = fulfillment['current_owners'][0]
owner_before = fulfillment['owners_before'][0]
try:
parsed_fulfillment.sign(serialize(fulfillment_message), key_pairs[current_owner])
parsed_fulfillment.sign(serialize(fulfillment_message), key_pairs[owner_before])
except KeyError:
raise exceptions.KeypairMismatchException('Public key {} is not a pair to any of the private keys'
.format(current_owner))
.format(owner_before))
return parsed_fulfillment
@ -395,17 +395,17 @@ def fulfill_threshold_signature_fulfillment(fulfillment, parsed_fulfillment, ful
parsed_fulfillment_copy = copy.deepcopy(parsed_fulfillment)
parsed_fulfillment.subconditions = []
for current_owner in fulfillment['current_owners']:
for owner_before in fulfillment['owners_before']:
try:
subfulfillment = parsed_fulfillment_copy.get_subcondition_from_vk(current_owner)[0]
subfulfillment = parsed_fulfillment_copy.get_subcondition_from_vk(owner_before)[0]
except IndexError:
raise exceptions.KeypairMismatchException(
'Public key {} cannot be found in the fulfillment'.format(current_owner))
'Public key {} cannot be found in the fulfillment'.format(owner_before))
try:
private_key = key_pairs[current_owner]
private_key = key_pairs[owner_before]
except KeyError:
raise exceptions.KeypairMismatchException(
'Public key {} is not a pair to any of the private keys'.format(current_owner))
'Public key {} is not a pair to any of the private keys'.format(owner_before))
subfulfillment.sign(serialize(fulfillment_message), private_key)
parsed_fulfillment.add_subfulfillment(subfulfillment)
@ -413,8 +413,8 @@ def fulfill_threshold_signature_fulfillment(fulfillment, parsed_fulfillment, ful
return parsed_fulfillment
def create_and_sign_tx(private_key, current_owner, new_owner, tx_input, operation='TRANSFER', payload=None):
tx = create_tx(current_owner, new_owner, tx_input, operation, payload)
def create_and_sign_tx(private_key, owner_before, owner_after, tx_input, operation='TRANSFER', payload=None):
tx = create_tx(owner_before, owner_after, tx_input, operation, payload)
return sign_tx(tx, private_key)
@ -432,7 +432,7 @@ def check_hash_and_signature(transaction):
def validate_fulfillments(signed_transaction):
"""Verify the signature of a transaction
A valid transaction should have been signed `current_owner` corresponding private key.
A valid transaction should have been signed `owner_before` corresponding private key.
Args:
signed_transaction (dict): a transaction with the `signature` included.
@ -516,8 +516,8 @@ def get_input_condition(bigchain, fulfillment):
# if `CREATE` transaction
# there is no previous transaction so we need to create one on the fly
else:
current_owner = fulfillment['current_owners'][0]
condition = cc.Ed25519Fulfillment(public_key=current_owner)
owner_before = fulfillment['owners_before'][0]
condition = cc.Ed25519Fulfillment(public_key=owner_before)
return {
'condition': {
@ -581,7 +581,7 @@ def get_hash_data(transaction):
def verify_vote_signature(block, signed_vote):
"""Verify the signature of a vote
A valid vote should have been signed `current_owner` corresponding private key.
A valid vote should have been signed `owner_before` corresponding private key.
Args:
block (dict): block under election
@ -612,7 +612,7 @@ def transform_create(tx):
payload = None
if transaction['data'] and 'payload' in transaction['data']:
payload = transaction['data']['payload']
new_tx = create_tx(b.me, transaction['fulfillments'][0]['current_owners'], None, 'CREATE', payload=payload)
new_tx = create_tx(b.me, transaction['fulfillments'][0]['owners_before'], None, 'CREATE', payload=payload)
return new_tx

View File

@ -11,7 +11,9 @@ import gunicorn.app.base
from bigchaindb import util
from bigchaindb import Bigchain
from bigchaindb.web import views
from bigchaindb.web.views.info import info_views
from bigchaindb.web.views.transactions import transaction_views
from bigchaindb.monitor import Monitor
@ -62,8 +64,8 @@ def create_app(settings):
app.config['bigchain_pool'] = util.pool(Bigchain, size=settings.get('threads', 4))
app.config['monitor'] = Monitor()
app.register_blueprint(views.info_views, url_prefix='/')
app.register_blueprint(views.basic_views, url_prefix='/api/v1')
app.register_blueprint(info_views, url_prefix='/')
app.register_blueprint(transaction_views, url_prefix='/api/v1')
return app

View File

View File

@ -0,0 +1,15 @@
from flask import jsonify
def make_error(status_code, message=None):
    """Build a JSON error response carrying the given HTTP status code.

    Args:
        status_code (int): HTTP status code to set on the response.
        message (str, optional): human-readable error message. Defaults
            to ``'Not found'`` when ``status_code`` is 404 and no
            message was supplied.

    Returns:
        flask.Response: a JSON response of the form
        ``{'status': <status_code>, 'message': <message>}`` with
        ``status_code`` applied to the response object.
    """
    # Supply a sensible default message for the common 404 case only.
    if status_code == 404 and message is None:
        message = 'Not found'
    response = jsonify({
        'status': status_code,
        'message': message
    })
    # jsonify defaults to 200; override with the requested status.
    response.status_code = status_code
    return response

View File

@ -0,0 +1,26 @@
"""This module provides the blueprint for some basic API endpoints.
For more information please refer to the documentation on ReadTheDocs:
- https://bigchaindb.readthedocs.io/en/latest/drivers-clients/http-client-server-api.html
"""
import flask
from flask import Blueprint
import bigchaindb
from bigchaindb import version
info_views = Blueprint('info_views', __name__)
@info_views.route('/')
def home():
    """Root endpoint: return basic node information as JSON.

    The payload contains the software name, the running version, this
    node's public key, its keyring (other federation nodes' public
    keys), and the configured API endpoint — all read from the
    module-level ``bigchaindb.config``.
    """
    return flask.jsonify({
        'software': 'BigchainDB',
        'version': version.__version__,
        'public_key': bigchaindb.config['keypair']['public'],
        'keyring': bigchaindb.config['keyring'],
        'api_endpoint': bigchaindb.config['api_endpoint']
    })

View File

@ -1,24 +1,23 @@
"""This module provides the blueprint for some basic API endpoints.
For more information please refer to the documentation in Apiary:
- http://docs.bigchaindb.apiary.io/
For more information please refer to the documentation on ReadTheDocs:
- https://bigchaindb.readthedocs.io/en/latest/drivers-clients/http-client-server-api.html
"""
import flask
from flask import abort, current_app, request, Blueprint
from flask import current_app, request, Blueprint
import bigchaindb
from bigchaindb import util, version
from bigchaindb import util
from bigchaindb.web.views.base import make_error
info_views = Blueprint('info_views', __name__)
basic_views = Blueprint('basic_views', __name__)
transaction_views = Blueprint('transaction_views', __name__)
# Unfortunately I cannot find a reference to this decorator.
# This answer on SO is quite useful tho:
# - http://stackoverflow.com/a/13432373/597097
@basic_views.record
@transaction_views.record
def record(state):
"""This function checks if the blueprint can be initialized
with the provided state."""
@ -35,18 +34,8 @@ def record(state):
'a monitor instance to record system '
'performance.')
@info_views.route('/')
def home():
return flask.jsonify({
'software': 'BigchainDB',
'version': version.__version__,
'public_key': bigchaindb.config['keypair']['public'],
'keyring': bigchaindb.config['keyring'],
'api_endpoint': bigchaindb.config['api_endpoint']
})
@basic_views.route('/transactions/<tx_id>')
@transaction_views.route('/transactions/<tx_id>')
def get_transaction(tx_id):
"""API endpoint to get details about a transaction.
@ -63,12 +52,12 @@ def get_transaction(tx_id):
tx = bigchain.get_transaction(tx_id)
if not tx:
abort(404)
return make_error(404)
return flask.jsonify(**tx)
@basic_views.route('/transactions/', methods=['POST'])
@transaction_views.route('/transactions/', methods=['POST'])
def create_transaction():
"""API endpoint to push transactions to the Federation.
@ -78,8 +67,6 @@ def create_transaction():
pool = current_app.config['bigchain_pool']
monitor = current_app.config['monitor']
val = {}
# `force` will try to format the body of the POST request even if the `content-type` header is not
# set to `application/json`
tx = request.get_json(force=True)
@ -89,11 +76,11 @@ def create_transaction():
tx = util.transform_create(tx)
tx = bigchain.consensus.sign_transaction(tx, private_key=bigchain.me_private)
if not bigchain.consensus.validate_fulfillments(tx):
val['error'] = 'Invalid transaction fulfillments'
if not bigchain.is_valid_transaction(tx):
return make_error(400, 'Invalid transaction')
with monitor.timer('write_transaction', rate=bigchaindb.config['statsd']['rate']):
val = bigchain.write_transaction(tx)
bigchain.write_transaction(tx)
return flask.jsonify(**tx)

View File

@ -31,6 +31,7 @@ coverage:
- "bigchaindb/version.py"
- "benchmarking-tests/*"
- "speed-tests/*"
- "ntools/*"
comment:
# @stevepeak (from codecov.io) suggested we change 'suggestions' to 'uncovered'

View File

@ -26,9 +26,19 @@ fi
# to set environment variables related to AWS deployment
echo "Reading "$DEPLOY_CONF_FILE
source $DEPLOY_CONF_FILE
# Check if SSH_KEY_NAME got set
if [ "$SSH_KEY_NAME" == "not-set-yet" ] || \
[ "$SSH_KEY_NAME" == "" ] || \
[ -z ${SSH_KEY_NAME+x} ]; then
echo "SSH_KEY_NAME was not set in that file"
exit 1
fi
echo "NUM_NODES = "$NUM_NODES
echo "BRANCH = "$BRANCH
echo "WHAT_TO_DEPLOY = "$WHAT_TO_DEPLOY
echo "SSH_KEY_NAME" = $SSH_KEY_NAME
echo "USE_KEYPAIRS_FILE = "$USE_KEYPAIRS_FILE
echo "IMAGE_ID = "$IMAGE_ID
echo "INSTANCE_TYPE = "$INSTANCE_TYPE
@ -38,9 +48,9 @@ if [ "$USING_EBS" = True ]; then
echo "EBS_OPTIMIZED = "$EBS_OPTIMIZED
fi
# Check for AWS private key file (.pem file)
if [ ! -f "pem/bigchaindb.pem" ]; then
echo "File pem/bigchaindb.pem (AWS private key) is missing"
# Check for the SSH private key file
if [ ! -f "$HOME/.ssh/$SSH_KEY_NAME" ]; then
echo "The SSH private key file "$HOME"/.ssh/"$SSH_KEY_NAME" is missing"
exit 1
fi
@ -70,9 +80,9 @@ fi
TAG="BDB-"$WHAT_TO_DEPLOY"-"`date +%m-%d@%H:%M`
echo "TAG = "$TAG
# Change the file permissions on pem/bigchaindb.pem
# Change the file permissions on the SSH private key file
# so that the owner can read it, but that's all
chmod 0400 pem/bigchaindb.pem
chmod 0400 $HOME/.ssh/$SSH_KEY_NAME
# The following Python script does these things:
# 0. allocates more elastic IP addresses if necessary,
@ -84,6 +94,8 @@ chmod 0400 pem/bigchaindb.pem
# 5. writes the shellscript add2known_hosts.sh
# 6. (over)writes a file named hostlist.py
# containing a list of all public DNS names.
# 7. (over)writes a file named ssh_key.py
# containing the location of the private SSH key file.
python launch_ec2_nodes.py --deploy-conf-file $DEPLOY_CONF_FILE --tag $TAG
# Make add2known_hosts.sh executable then execute it.
@ -91,6 +103,10 @@ python launch_ec2_nodes.py --deploy-conf-file $DEPLOY_CONF_FILE --tag $TAG
chmod +x add2known_hosts.sh
./add2known_hosts.sh
# Test an SSH connection to one of the hosts
# and prompt the user for their SSH password if necessary
fab set_host:0 test_ssh
# Rollout base packages (dependencies) needed before
# storage backend (RethinkDB) and BigchainDB can be rolled out
fab install_base_software

View File

@ -27,6 +27,11 @@ BRANCH="master"
# What do you want to deploy?
WHAT_TO_DEPLOY="servers"
# SSH_KEY_NAME is the name of the SSH private key file
# in $HOME/.ssh/
# It is used for SSH communications with AWS instances.
SSH_KEY_NAME="not-set-yet"
# USE_KEYPAIRS_FILE is either True or False
# Should node keypairs be read from keypairs.py?
# (If False, then the keypairs will be whatever is in the

View File

@ -15,6 +15,7 @@ from fabric.operations import run, put
from fabric.context_managers import settings
from hostlist import public_dns_names
from ssh_key import ssh_key_path
# Ignore known_hosts
# http://docs.fabfile.org/en/1.10/usage/env.html#disable-known-hosts
@ -26,7 +27,7 @@ env.hosts = public_dns_names
# SSH key files to try when connecting:
# http://docs.fabfile.org/en/1.10/usage/env.html#key-filename
env.key_filename = 'pem/bigchaindb.pem'
env.key_filename = ssh_key_path
######################################################################
@ -48,6 +49,11 @@ def set_host(host_index):
env.hosts = [public_dns_names[int(host_index)]]
@task
def test_ssh():
run('echo "If you see this, then SSH to a remote host worked."')
# Install base software
@task
@parallel

View File

@ -9,9 +9,12 @@
5. writes the shellscript add2known_hosts.sh
6. (over)writes a file named hostlist.py
containing a list of all public DNS names.
7. (over)writes a file named ssh_key.py
containing the location of the private SSH key file.
"""
from __future__ import unicode_literals
from os.path import expanduser
import sys
import time
import socket
@ -23,9 +26,9 @@ import boto3
from awscommon import get_naeips
SETTINGS = ['NUM_NODES', 'BRANCH', 'WHAT_TO_DEPLOY', 'USE_KEYPAIRS_FILE',
'IMAGE_ID', 'INSTANCE_TYPE', 'USING_EBS', 'EBS_VOLUME_SIZE',
'EBS_OPTIMIZED']
SETTINGS = ['NUM_NODES', 'BRANCH', 'WHAT_TO_DEPLOY', 'SSH_KEY_NAME',
'USE_KEYPAIRS_FILE', 'IMAGE_ID', 'INSTANCE_TYPE', 'USING_EBS',
'EBS_VOLUME_SIZE', 'EBS_OPTIMIZED']
class SettingsTypeError(TypeError):
@ -76,6 +79,9 @@ if not isinstance(BRANCH, str):
if not isinstance(WHAT_TO_DEPLOY, str):
raise SettingsTypeError('WHAT_TO_DEPLOY should be a string')
if not isinstance(SSH_KEY_NAME, str):
raise SettingsTypeError('SSH_KEY_NAME should be a string')
if not isinstance(USE_KEYPAIRS_FILE, bool):
msg = 'USE_KEYPAIRS_FILE should be a boolean (True or False)'
raise SettingsTypeError(msg)
@ -105,6 +111,11 @@ if WHAT_TO_DEPLOY not in ['servers', 'clients']:
'The AWS deployment configuration file sets it to {}'.
format(WHAT_TO_DEPLOY))
if SSH_KEY_NAME in ['not-set-yet', '', None]:
raise ValueError('SSH_KEY_NAME should be set. '
'The AWS deployment configuration file sets it to {}'.
format(SSH_KEY_NAME))
# Since we assume 'gp2' volumes (for now), the possible range is 1 to 16384
if EBS_VOLUME_SIZE > 16384:
raise ValueError('EBS_VOLUME_SIZE should be <= 16384. '
@ -193,7 +204,7 @@ for _ in range(NUM_NODES):
ImageId=IMAGE_ID,
MinCount=1,
MaxCount=1,
KeyName='bigchaindb',
KeyName=SSH_KEY_NAME,
InstanceType=INSTANCE_TYPE,
SecurityGroupIds=['bigchaindb'],
BlockDeviceMappings=[dm],
@ -204,7 +215,7 @@ for _ in range(NUM_NODES):
ImageId=IMAGE_ID,
MinCount=1,
MaxCount=1,
KeyName='bigchaindb',
KeyName=SSH_KEY_NAME,
InstanceType=INSTANCE_TYPE,
SecurityGroupIds=['bigchaindb']
)
@ -281,6 +292,20 @@ with open('hostlist.py', 'w') as f:
f.write('\n')
f.write('public_dns_names = {}\n'.format(public_dns_names))
# Create a file named ssh_key.py
# containing the location of the private SSH key file.
# If a ssh_key.py already exists, it will be overwritten.
print('Writing ssh_key.py')
with open('ssh_key.py', 'w') as f:
f.write('# -*- coding: utf-8 -*-\n')
f.write('"""This file exists as a convenient way for Fabric to get\n')
f.write('the location of the private SSH key file.')
f.write('"""\n')
f.write('\n')
f.write('from __future__ import unicode_literals\n')
f.write('\n')
home = expanduser('~')
f.write('ssh_key_path = "{}/.ssh/{}"\n'.format(home, SSH_KEY_NAME))
# For each node in the cluster, check port 22 (ssh) until it's reachable
for instance in instances_with_tag:

View File

@ -6,7 +6,7 @@
* For quick overview and help documents, feel free to create `README.md` or other `X.md` files, written using [GitHub-flavored Markdown](https://help.github.com/categories/writing-on-github/). Markdown files render nicely on GitHub. We might auto-convert some .md files into a format that can be included in the long-form documentation.
* We use [Sphinx](http://www.sphinx-doc.org/en/stable/) to generate the long-form documentation in various formats (e.g. HTML, PDF).
* We also use [Sphinx](http://www.sphinx-doc.org/en/stable/) to generate Python code documentation (from docstrings and possibly other sources).
* We will use [Apiary](https://apiary.io/) to document all REST APIs.
* We also use Sphinx to document all REST APIs, with the help of [the `httpdomain` extension](https://pythonhosted.org/sphinxcontrib-httpdomain/).
# How to Generate the HTML Version of the Long-Form Documentation

View File

@ -36,3 +36,37 @@ Default output format [None]: [Press Enter]
```
This writes two files: `~/.aws/credentials` and `~/.aws/config`. AWS tools and packages look for those files.
## Generate an RSA Key Pair for SSH
Eventually, you'll have one or more instances (virtual machines) running on AWS and you'll want to SSH to them. To do that, you need a public/private key pair. The public key will be sent to AWS, and you can tell AWS to put it in any instances you provision there. You'll keep the private key on your local workstation.
First you need to make up a key name. Some ideas:
* `bcdb-troy-1`
* `bigchaindb-7`
* `bcdb-jupiter`
If you already have key pairs on AWS (Amazon EC2), you have to pick a name that's not already being used.
Below, replace every instance of `<key-name>` with your actual key name.
To generate a public/private RSA key pair with that name:
```text
ssh-keygen -t rsa -C "<key-name>" -f ~/.ssh/<key-name>
```
It will ask you for a passphrase. You can use whatever passphrase you like, but don't lose it. Two keys (files) will be created in `~/.ssh/`:
1. `~/.ssh/<key-name>.pub` is the public key
2. `~/.ssh/<key-name>` is the private key
To send the public key to AWS, use the AWS Command-Line Interface:
```text
aws ec2 import-key-pair \
--key-name "<key-name>" \
--public-key-material file://~/.ssh/<key-name>.pub
```
If you're curious why there's a `file://` in front of the path to the public key, see issue [aws/aws-cli#41 on GitHub](https://github.com/aws/aws-cli/issues/41).
If you want to verify that your key pair was imported by AWS, go to the Amazon EC2 console at [https://console.aws.amazon.com/ec2/](https://console.aws.amazon.com/ec2/), select the region you gave above when you did `aws configure` (e.g. eu-central-1), click on **Key Pairs** in the left sidebar, and check that `<key-name>` is listed.

View File

@ -41,16 +41,6 @@ See the page about [basic AWS Setup](../appendices/aws-setup.html) in the Append
The AWS cluster deployment scripts use elastic IP addresses (although that may change in the future). By default, AWS accounts get five elastic IP addresses. If you want to deploy a cluster with more than five nodes, then you will need more than five elastic IP addresses; you may have to apply for those; see [the AWS documentation on elastic IP addresses](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html).
## Create an Amazon EC2 Key Pair
Go to the AWS EC2 Console and select "Key Pairs" in the left sidebar. Click the "Create Key Pair" button. Give it the name `bigchaindb`. You should be prompted to save a file named `bigchaindb.pem`. That file contains the RSA private key. (You can get the public key from the private key, so there's no need to send it separately.)
Save the file in `bigchaindb/deploy-cluster-aws/pem/bigchaindb.pem`.
**You should not share your private key.**
## Create an Amazon EC2 Security Group
Go to the AWS EC2 Console and select "Security Groups" in the left sidebar. Click the "Create Security Group" button. Name it `bigchaindb`. The description probably doesn't matter; you can also put `bigchaindb` for that.
@ -132,6 +122,7 @@ Step 2 is to make an AWS deployment configuration file, if necessary. There's an
NUM_NODES=3
BRANCH="master"
WHAT_TO_DEPLOY="servers"
SSH_KEY_NAME="not-set-yet"
USE_KEYPAIRS_FILE=False
IMAGE_ID="ami-accff2b1"
INSTANCE_TYPE="m3.2xlarge"
@ -140,7 +131,7 @@ EBS_VOLUME_SIZE=30
EBS_OPTIMIZED=False
```
If you're happy with those settings, then you can skip to the next step. Otherwise, you could make a copy of `example_deploy_conf.py` (e.g. `cp example_deploy_conf.py my_deploy_conf.py`) and then edit the copy using a text editor.
Make a copy of that file and call it whatever you like (e.g. `cp example_deploy_conf.py my_deploy_conf.py`). You can leave most of the settings at their default values, but you must change the value of `SSH_KEY_NAME` to the name of your private SSH key. You can do that with a text editor. Set `SSH_KEY_NAME` to the name you used for `<key-name>` when you generated an RSA key pair for SSH (in basic AWS setup).
If you want your nodes to have a predictable set of pre-generated keypairs, then you should 1) set `USE_KEYPAIRS_FILE=True` in the AWS deployment configuration file, and 2) provide a `keypairs.py` file containing enough keypairs for all of your nodes. You can generate a `keypairs.py` file using the `write_keypairs_file.py` script. For example:
```text

View File

@ -13,17 +13,21 @@ There are other configuration settings related to the web server (serving the HT
The HTTP API currently exposes two endpoints, one to get information about a specific transaction, and one to push a new transaction to the BigchainDB cluster.
.. http:get:: /transactions/(tx_id)
.. http:get:: /transactions/{tx_id}
The transaction with the transaction ID `tx_id`.
Get the transaction with the ID ``tx_id``.
This endpoint returns only a transaction from a ``VALID`` or ``UNDECIDED`` block on ``bigchain``, if it exists.
:param tx_id: transaction ID
:type tx_id: hex string
**Example request**:
.. sourcecode:: http
GET /transactions/96480ce68912aa39a54766ac16334a835fbf777039670352ff967bf6d65bf4f7 HTTP/1.1
GET /transactions/7ad5a4b83bc8c70c4fd7420ff3c60693ab8e6d0e3124378ca69ed5acd2578792 HTTP/1.1
Host: example.com
TODO: Other headers?
**Example response**:
@ -31,30 +35,56 @@ The HTTP API currently exposes two endpoints, one to get information about a spe
HTTP/1.1 200 OK
Content-Type: application/json
TODO: Other headers?
{'id': '96480ce68912aa39a54766ac16334a835fbf777039670352ff967bf6d65bf4f7',
'transaction': {'conditions': [{'cid': 0,
'condition': {'details': {'bitmask': 32,
'public_key': 'FoWUUY6kK7QhgCsgVrV2vpDWfW43mq5ewb16Uh7FBbSF',
'signature': None,
'type': 'fulfillment',
'type_id': 4},
'uri': 'cc:4:20:2-2pA2qKr2i-GM6REdqJCLEL_CEWpy-5iQky7YgRZTA:96'},
'new_owners': ['FoWUUY6kK7QhgCsgVrV2vpDWfW43mq5ewb16Uh7FBbSF']}],
'data': {'payload': None, 'uuid': 'f14dc5a6-510e-4307-89c6-aec42af8a1ae'},
'fulfillments': [{'current_owners': ['Ftat68WVLsPxVFLz2Rh2Sbwrrt51uFE3UpjkxY73vGKZ'],
'fid': 0,
'fulfillment': 'cf:4:3TqMI1ZFolraqHWADT6nIvUUt4HOwqdr0_-yj5Cglbg1V5qQV2CF2Yup1l6fQH2uhLGGFo9uHhZ6HNv9lssiD0ZaG88Bg_MTkz6xg2SW2Cw_YgpM-CyESVT404g54ZsK',
'input': None}],
'operation': 'CREATE',
'timestamp': '1468494923'},
'version': 1}
{
"id":"7ad5a4b83bc8c70c4fd7420ff3c60693ab8e6d0e3124378ca69ed5acd2578792",
"transaction":{
"conditions":[
{
"cid":0,
"condition":{
"details":{
"bitmask":32,
"public_key":"CwA8s2QYQBfNz4WvjEwmJi83zYr7JhxRhidx6uZ5KBVd",
"signature":null,
"type":"fulfillment",
"type_id":4
},
"uri":"cc:4:20:sVA_3p8gvl8yRFNTomqm6MaavKewka6dGYcFAuPrRXQ:96"
},
"owners_after":[
"CwA8s2QYQBfNz4WvjEwmJi83zYr7JhxRhidx6uZ5KBVd"
]
}
],
"data":{
"payload":null,
"uuid":"a9999d69-6cde-4b80-819d-ed57f6abe257"
},
"fulfillments":[
{
"owners_before":[
"JEAkEJqLbbgDRAtMm8YAjGp759Aq2qTn9eaEHUj2XePE"
],
"fid":0,
"fulfillment":"cf:4:__Y_Um6H73iwPe6ejWXEw930SQhqVGjtAHTXilPp0P01vE_Cx6zs3GJVoO1jhPL18C94PIVkLTGMUB2aKC9qsbIb3w8ejpOf0_I3OCuTbPdkd6r2lKMeVftMyMxkeWoM",
"input":{
"cid":0,
"txid":"598ce4e9a29837a1c6fc337ee4a41b61c20ad25d01646754c825b1116abd8761"
}
}
],
"operation":"TRANSFER",
"timestamp":"1471423869",
"version":1
}
}
:statuscode 200: A transaction with that ID was found.
:statuscode 404: A transaction with that ID was not found.
.. http:post:: /transactions/
Push a new transaction.
@ -66,9 +96,50 @@ The HTTP API currently exposes two endpoints, one to get information about a spe
POST /transactions/ HTTP/1.1
Host: example.com
Content-Type: application/json
TODO: Other headers?
(TODO) Insert example request body here
{
"id":"7ad5a4b83bc8c70c4fd7420ff3c60693ab8e6d0e3124378ca69ed5acd2578792",
"transaction":{
"conditions":[
{
"cid":0,
"condition":{
"details":{
"bitmask":32,
"public_key":"CwA8s2QYQBfNz4WvjEwmJi83zYr7JhxRhidx6uZ5KBVd",
"signature":null,
"type":"fulfillment",
"type_id":4
},
"uri":"cc:4:20:sVA_3p8gvl8yRFNTomqm6MaavKewka6dGYcFAuPrRXQ:96"
},
"owners_after":[
"CwA8s2QYQBfNz4WvjEwmJi83zYr7JhxRhidx6uZ5KBVd"
]
}
],
"data":{
"payload":null,
"uuid":"a9999d69-6cde-4b80-819d-ed57f6abe257"
},
"fulfillments":[
{
"owners_before":[
"JEAkEJqLbbgDRAtMm8YAjGp759Aq2qTn9eaEHUj2XePE"
],
"fid":0,
"fulfillment":"cf:4:__Y_Um6H73iwPe6ejWXEw930SQhqVGjtAHTXilPp0P01vE_Cx6zs3GJVoO1jhPL18C94PIVkLTGMUB2aKC9qsbIb3w8ejpOf0_I3OCuTbPdkd6r2lKMeVftMyMxkeWoM",
"input":{
"cid":0,
"txid":"598ce4e9a29837a1c6fc337ee4a41b61c20ad25d01646754c825b1116abd8761"
}
}
],
"operation":"TRANSFER",
"timestamp":"1471423869",
"version":1
}
}
**Example response**:
@ -76,10 +147,78 @@ The HTTP API currently exposes two endpoints, one to get information about a spe
HTTP/1.1 201 Created
Content-Type: application/json
TODO: Other headers?
(TODO) Insert example response body here
{
"assignee":"4XYfCbabAWVUCbjTmRTFEu2sc3dFEdkse4r6X498B1s8",
"id":"7ad5a4b83bc8c70c4fd7420ff3c60693ab8e6d0e3124378ca69ed5acd2578792",
"transaction":{
"conditions":[
{
"cid":0,
"condition":{
"details":{
"bitmask":32,
"public_key":"CwA8s2QYQBfNz4WvjEwmJi83zYr7JhxRhidx6uZ5KBVd",
"signature":null,
"type":"fulfillment",
"type_id":4
},
"uri":"cc:4:20:sVA_3p8gvl8yRFNTomqm6MaavKewka6dGYcFAuPrRXQ:96"
},
"owners_after":[
"CwA8s2QYQBfNz4WvjEwmJi83zYr7JhxRhidx6uZ5KBVd"
]
}
],
"data":{
"payload":null,
"uuid":"a9999d69-6cde-4b80-819d-ed57f6abe257"
},
"fulfillments":[
{
"owners_before":[
"JEAkEJqLbbgDRAtMm8YAjGp759Aq2qTn9eaEHUj2XePE"
],
"fid":0,
"fulfillment":"cf:4:__Y_Um6H73iwPe6ejWXEw930SQhqVGjtAHTXilPp0P01vE_Cx6zs3GJVoO1jhPL18C94PIVkLTGMUB2aKC9qsbIb3w8ejpOf0_I3OCuTbPdkd6r2lKMeVftMyMxkeWoM",
"input":{
"cid":0,
"txid":"598ce4e9a29837a1c6fc337ee4a41b61c20ad25d01646754c825b1116abd8761"
}
}
],
"operation":"TRANSFER",
"timestamp":"1471423869",
"version":1
}
}
:statuscode 201: A new transaction was created.
:statuscode 400: The transaction was invalid and not created.
(TODO) What's the response status code if the POST fails?
**Disclaimer**
``CREATE`` transactions are treated differently from ``TRANSFER`` transactions.
The reason is that a ``CREATE`` transaction needs to be signed by a federation node and not by the client.
The following python snippet in a client can be used to generate ``CREATE`` transactions before they can be pushed to the API server:
.. code-block:: python
from bigchaindb import util
tx = util.create_and_sign_tx(my_privkey, my_pubkey, my_pubkey, None, 'CREATE')
When POSTing ``tx`` to the API, the ``CREATE`` transaction will be signed by a federation node.
A ``TRANSFER`` transaction, which takes an existing input transaction to change ownership, can be generated in multiple ways:
.. code-block:: python
from bigchaindb import util, Bigchain
tx = util.create_and_sign_tx(my_privkey, my_pubkey, other_pubkey, input_tx, 'TRANSFER')
# or
b = Bigchain()
tx_unsigned = b.create_transaction(my_pubkey, other_pubkey, input_tx, 'TRANSFER')
tx = b.sign_transaction(tx_unsigned, my_privkey)
More information on generating transactions can be found in the `Python server API examples <python-server-api-examples.html>`_

View File

@ -19,9 +19,9 @@ Out[5]:
'type': 'fulfillment',
'type_id': 4},
'uri': 'cc:4:20:eoUROTxUArrpXGVBrvrYqkcEGG8lB_leliNvSvSddDg:96'},
'new_owners': ['9FGRd2jLxmwtRkwsWTpEoqy1rZpg6ycuT7NwmCR4QVk3']}],
'owners_after': ['9FGRd2jLxmwtRkwsWTpEoqy1rZpg6ycuT7NwmCR4QVk3']}],
'data': {'payload': None, 'uuid': 'b4884e37-3c8e-4cc2-bfc8-68a05ed090ad'},
'fulfillments': [{'current_owners': ['3NsvDXiiuf2BRPnqfRuBM9yHNjsH4L33gcZ4rh4GMY2J'],
'fulfillments': [{'owners_before': ['3NsvDXiiuf2BRPnqfRuBM9yHNjsH4L33gcZ4rh4GMY2J'],
'fid': 0,
'fulfillment': 'cf:4:I1IkuhCSf_hGqJ-JKHTQIO1g4apbQuaZXNMEX4isyxd7azkJreyGKyaMLs6Xk9kxQClwz1nQiKM6OMRk7fdusN0373szGbq-PppnsjY6ilbx1JmP-IH7hdjjwjjx9coM',
'input': None}],
@ -40,9 +40,9 @@ Out[6]:
'type': 'fulfillment',
'type_id': 4},
'uri': 'cc:4:20:akjKWxLO2hbe6RVva_FsWNDJmnUKYjQ57HIhUQbwb2Q:96'},
'new_owners': ['89tbMBospYsTNDgpqFS4RLszNsxuE4JEumNuY3WTAnT5']}],
'owners_after': ['89tbMBospYsTNDgpqFS4RLszNsxuE4JEumNuY3WTAnT5']}],
'data': {'payload': None, 'uuid': 'a640a9d6-9384-4e9c-a130-e899ea6416aa'},
'fulfillments': [{'current_owners': ['9FGRd2jLxmwtRkwsWTpEoqy1rZpg6ycuT7NwmCR4QVk3'],
'fulfillments': [{'owners_before': ['9FGRd2jLxmwtRkwsWTpEoqy1rZpg6ycuT7NwmCR4QVk3'],
'fid': 0,
'fulfillment': 'cf:4:eoUROTxUArrpXGVBrvrYqkcEGG8lB_leliNvSvSddDgVmY6O7YTER04mWjAVd6m0qOv5R44Cxpv_65OtLnNUD-HEgD-9z3ys4GvPf7BZF5dKSbAs_3a8yCQM0bkCcqkB',
'input': {'cid': 0,

View File

@ -87,7 +87,7 @@ tx_retrieved
},
"uri":"cc:4:20:oqXTWvR3afHHX8OaOO84kZxS6nH4GEBXD4Vw8Mc5iBo:96"
},
"new_owners":[
"owners_after":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs"
]
}
@ -100,7 +100,7 @@ tx_retrieved
},
"fulfillments":[
{
"current_owners":[
"owners_before":[
"3LQ5dTiddXymDhNzETB1rEkp4mA7fEV1Qeiu5ghHiJm9"
],
"fid":0,
@ -182,7 +182,7 @@ tx_transfer_retrieved
},
"uri":"cc:4:20:DIfyalZvV_9ukoO01mxmK3nxsfAWSKYYF33XDYkbY4E:96"
},
"new_owners":[
"owners_after":[
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
]
}
@ -190,7 +190,7 @@ tx_transfer_retrieved
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs"
],
"fid":0,
@ -231,7 +231,7 @@ DoubleSpend: input `{'cid': 0, 'txid': '933cd83a419d2735822a2154c84176a2f419cbd4
## Multiple Owners
To create a new digital asset with _multiple_ owners, one can simply provide a list of `new_owners`:
To create a new digital asset with _multiple_ owners, one can simply provide a list of `owners_after`:
```python
# Create a new asset and assign it to multiple owners
@ -282,7 +282,7 @@ tx_multisig_retrieved
},
"uri":"cc:2:29:DpflJzUSlnTUBx8lD8QUolOA-M9nQnrGwvWSk7f3REc:206"
},
"new_owners":[
"owners_after":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs",
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
]
@ -291,7 +291,7 @@ tx_multisig_retrieved
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"3LQ5dTiddXymDhNzETB1rEkp4mA7fEV1Qeiu5ghHiJm9"
],
"fid":0,
@ -306,7 +306,7 @@ tx_multisig_retrieved
}
```
The asset can be transfered as soon as each of the `new_owners` signs the transaction.
The asset can be transferred as soon as each of the `owners_after` signs the transaction.
To do so, simply provide a list of all private keys to the signing routine:
@ -348,7 +348,7 @@ tx_multisig_transfer_retrieved
},
"uri":"cc:4:20:cAq6JQJXtwlxURqrksiyqLThB9zh08ZxSPLTDSaReYE:96"
},
"new_owners":[
"owners_after":[
"8YN9fALMj9CkeCcmTiM2kxwurpkMzHg9RkwSLJKMasvG"
]
}
@ -356,7 +356,7 @@ tx_multisig_transfer_retrieved
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs",
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
],
@ -427,7 +427,7 @@ tx_mimo_retrieved
},
"uri":"cc:4:20:2AXg2JJ7mQ8o2Q9-hafP-XmFh3YR7I2_Sz55AubfxIc:96"
},
"new_owners":[
"owners_after":[
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
]
},
@ -443,7 +443,7 @@ tx_mimo_retrieved
},
"uri":"cc:4:20:2AXg2JJ7mQ8o2Q9-hafP-XmFh3YR7I2_Sz55AubfxIc:96"
},
"new_owners":[
"owners_after":[
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
]
},
@ -459,7 +459,7 @@ tx_mimo_retrieved
},
"uri":"cc:4:20:2AXg2JJ7mQ8o2Q9-hafP-XmFh3YR7I2_Sz55AubfxIc:96"
},
"new_owners":[
"owners_after":[
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
]
}
@ -467,7 +467,7 @@ tx_mimo_retrieved
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs"
],
"fid":0,
@ -478,7 +478,7 @@ tx_mimo_retrieved
}
},
{
"current_owners":[
"owners_before":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs"
],
"fid":1,
@ -489,7 +489,7 @@ tx_mimo_retrieved
}
},
{
"current_owners":[
"owners_before":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs"
],
"fid":2,
@ -529,11 +529,11 @@ Setting up a generic threshold condition is a bit more elaborate than regular tr
The basic workflow for creating a more complex cryptocondition is the following:
1. Create a transaction template that include the public key of all (nested) parties as `new_owners`
1. Create a transaction template that includes the public key of all (nested) parties as `owners_after`
2. Set up the threshold condition using the [cryptocondition library](https://github.com/bigchaindb/cryptoconditions)
3. Update the condition and hash in the transaction template
We'll illustrate this by a threshold condition where 2 out of 3 `new_owners` need to sign the transaction:
We'll illustrate this by a threshold condition where 2 out of 3 `owners_after` need to sign the transaction:
```python
import copy
@ -620,7 +620,7 @@ tx_threshold_retrieved
},
"uri":"cc:2:29:FoElId4TE5TU2loonT7sayXhxwcmaJVoCeIduh56Dxw:246"
},
"new_owners":[
"owners_after":[
"8NaGq26YMcEvj8Sc5MnqspKzFTQd1eZBAuuPDw4ERHpz",
"ALE9Agojob28D1fHWCxFXJwpqrYPkcsUs26YksBVj27z",
"Cx4jWSGci7fw6z5QyeApCijbwnMpyuhp4C1kzuFc3XrM"
@ -630,7 +630,7 @@ tx_threshold_retrieved
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
],
"fid":0,
@ -652,7 +652,7 @@ The transaction can now be transfered by fulfilling the threshold condition.
The fulfillment involves:
1. Create a transaction template that include the public key of all (nested) parties as `current_owners`
1. Create a transaction template that includes the public key of all (nested) parties as `owners_before`
2. Parsing the threshold condition into a fulfillment using the [cryptocondition library](https://github.com/bigchaindb/cryptoconditions)
3. Signing all necessary subfulfillments and updating the fulfillment field in the transaction
@ -721,7 +721,7 @@ threshold_tx_transfer
},
"uri":"cc:4:20:xDz3NhRG-3eVzIB9sgnd99LKjOyDF-KlxWuf1TgNT0s:96"
},
"new_owners":[
"owners_after":[
"ED2pyPfsbNRTHkdMnaFkAwCSpZWRmbaM1h8fYzgRRMmc"
]
}
@ -729,7 +729,7 @@ threshold_tx_transfer
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"8NaGq26YMcEvj8Sc5MnqspKzFTQd1eZBAuuPDw4ERHpz",
"ALE9Agojob28D1fHWCxFXJwpqrYPkcsUs26YksBVj27z",
"Cx4jWSGci7fw6z5QyeApCijbwnMpyuhp4C1kzuFc3XrM"
@ -758,10 +758,10 @@ Under the hood, fulfilling a hash-lock condition amounts to finding a string (a
One possible use case is to distribute preimages as "digital vouchers." The first person to redeem a voucher will get the associated asset.
A federation node can create an asset with a hash-lock condition and no `new_owners`. Anyone who can fullfill the hash-lock condition can transfer the asset to themselves.
A federation node can create an asset with a hash-lock condition and no `owners_after`. Anyone who can fulfill the hash-lock condition can transfer the asset to themselves.
```python
# Create a hash-locked asset without any new_owners
# Create a hash-locked asset without any owners_after
hashlock_tx = b.create_transaction(b.me, None, None, 'CREATE')
# Define a secret that will be hashed - fulfillments need to guess the secret
@ -774,13 +774,13 @@ hashlock_tx['transaction']['conditions'].append({
'uri': first_tx_condition.condition.serialize_uri()
},
'cid': 0,
'new_owners': None
'owners_after': None
})
# Conditions have been updated, so the hash needs updating
hashlock_tx['id'] = util.get_hash_data(hashlock_tx)
# The asset needs to be signed by the current_owner
# The asset needs to be signed by the owner_before
hashlock_tx_signed = b.sign_transaction(hashlock_tx, b.me_private)
# Some validations
@ -800,13 +800,13 @@ hashlock_tx_signed
"condition":{
"uri":"cc:0:3:nsW2IiYgk9EUtsg4uBe3pBnOgRoAEX2IIsPgjqZz47U:17"
},
"new_owners":None
"owners_after":None
}
],
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"FmLm6MxCABc8TsiZKdeYaZKo5yZWMM6Vty7Q1B6EgcP2"
],
"fid":0,
@ -864,7 +864,7 @@ hashlock_fulfill_tx
},
"uri":"cc:4:20:y9884Md2YI_wdnGSTJGhwvFaNsKLe8sqwimqk-2JLSI:96"
},
"new_owners":[
"owners_after":[
"EiqCKxnBCmmNb83qyGch48tULK9RLaEt4xFA43UVCVDb"
]
}
@ -872,7 +872,7 @@ hashlock_fulfill_tx
"data":None,
"fulfillments":[
{
"current_owners":[],
"owners_before":[],
"fid":0,
"fulfillment":"cf:0:bXVjaCBzZWNyZXQhIHdvdyE",
"input":{
@ -901,7 +901,7 @@ __Note__: The timeout conditions are BigchainDB-specific and not (yet) supported
__Caveat__: The times between nodes in a BigchainDB federation may (and will) differ slightly. In this case, the majority of the nodes will decide.
```python
# Create a timeout asset without any new_owners
# Create a timeout asset without any owners_after
tx_timeout = b.create_transaction(b.me, None, None, 'CREATE')
# Set expiry time - the asset needs to be transferred before expiration
@ -916,13 +916,13 @@ tx_timeout['transaction']['conditions'].append({
'uri': condition_timeout.condition.serialize_uri()
},
'cid': 0,
'new_owners': None
'owners_after': None
})
# Conditions have been updated, so the hash needs updating
tx_timeout['id'] = util.get_hash_data(tx_timeout)
# The asset needs to be signed by the current_owner
# The asset needs to be signed by the owner_before
tx_timeout_signed = b.sign_transaction(tx_timeout, b.me_private)
# Some validations
@ -948,13 +948,13 @@ tx_timeout_signed
},
"uri":"cc:63:9:sceU_NZc3cAjAvaR1TVmgj7am5y8hJEBoqLm-tbqGbQ:17"
},
"new_owners":null
"owners_after":null
}
],
"data":null,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"FmLm6MxCABc8TsiZKdeYaZKo5yZWMM6Vty7Q1B6EgcP2"
],
"fid":0,
@ -1086,7 +1086,7 @@ tx_escrow['transaction']['conditions'][0]['condition'] = {
# Conditions have been updated, so the hash needs updating
tx_escrow['id'] = util.get_hash_data(tx_escrow)
# The asset needs to be signed by the current_owner
# The asset needs to be signed by the owner_before
tx_escrow_signed = b.sign_transaction(tx_escrow, testuser2_priv)
# Some validations
@ -1171,7 +1171,7 @@ tx_escrow_signed
},
"uri":"cc:2:29:sg08ERtppQrGxot7mu7XMdNkZTc29xCbWE1r8DgxuL8:181"
},
"new_owners":[
"owners_after":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs",
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
]
@ -1180,7 +1180,7 @@ tx_escrow_signed
"data":null,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
],
"fid":0,

View File

@ -9,3 +9,4 @@ BigchainDB Nodes
node-components
node-requirements
setup-run-node

View File

@ -14,4 +14,3 @@ In a production environment, a BigchainDB node can have several other components
* A RethinkDB proxy server
* Scalable storage for RethinkDB (e.g. using RAID)
* Monitoring software, to monitor all the machines in the node
* Maybe a configuration management (CM) server and CM agents on all machines

View File

@ -0,0 +1,210 @@
# Set Up and Run a Cluster Node
If you want to set up a BigchainDB node that's intended to be one of the nodes in a BigchainDB cluster (i.e. where each node is operated by a different member of a federation), then this page is for you, otherwise see [elsewhere](../introduction.html).
This is a page of general guidelines for setting up a node. It says nothing about how to upgrade software, storage, processing, etc. or other details of node management. That will be added in the future, in [the section on production node setup & management](../prod-node-setup-mgmt/index.html). Once that section is more complete, this page will probably be deleted.
## Get a Server
The first step is to get a server (or equivalent) which meets [the requirements for a BigchainDB node](node-requirements.html).
## Secure Your Server
The steps that you must take to secure your server depend on your server OS and where your server is physically located. There are many articles and books about how to secure a server. Here we just cover special considerations when securing a BigchainDB node.
There are some [notes on BigchainDB-specific firewall setup](../appendices/firewall-notes.html) in the Appendices.
## Sync Your System Clock
A BigchainDB node uses its system clock to generate timestamps for blocks and votes, so that clock should be kept in sync with some standard clock(s). The standard way to do that is to run an NTP daemon (Network Time Protocol daemon) on the node. (You could also use tlsdate, which uses TLS timestamps rather than NTP, but don't: it's not very accurate and it will break with TLS 1.3, which removes the timestamp.)
NTP is a standard protocol. There are many NTP daemons implementing it. We don't recommend a particular one. On the contrary, we recommend that different nodes in a federation run different NTP daemons, so that a problem with one daemon won't affect all nodes.
Please see the [notes on NTP daemon setup](../appendices/ntp-notes.html) in the Appendices.
## Set Up Storage for RethinkDB Data
Below are some things to consider when setting up storage for the RethinkDB data. The Appendices have a [section with concrete examples](../appendices/example-rethinkdb-storage-setups.html).
We suggest you set up a separate storage "device" (partition, RAID array, or logical volume) to store the RethinkDB data. Here are some questions to ask:
* How easy will it be to add storage in the future? Will I have to shut down my server?
* How big can the storage get? (Remember that [RAID](https://en.wikipedia.org/wiki/RAID) can be used to make several physical drives look like one.)
* How fast can it read & write data? How many input/output operations per second (IOPS)?
* How does IOPS scale as more physical hard drives are added?
* What's the latency?
* What's the reliability? Is there replication?
* What's in the Service Level Agreement (SLA), if applicable?
* What's the cost?
There are many options and tradeoffs. Don't forget to look into Amazon Elastic Block Store (EBS) and Amazon Elastic File System (EFS), or their equivalents from other providers.
**Storage Notes Specific to RethinkDB**
* The RethinkDB storage engine has a number of SSD optimizations, so you _can_ benefit from using SSDs. ([source](https://www.rethinkdb.com/docs/architecture/))
* If you want a RethinkDB cluster to store an amount of data D, with a replication factor of R (on every table), and the cluster has N nodes, then each node will need to be able to store R×D/N data.
* RethinkDB tables can have [at most 64 shards](https://rethinkdb.com/limitations/). For example, if you have only one table and more than 64 nodes, some nodes won't have the primary of any shard, i.e. they will have replicas only. In other words, once you pass 64 nodes, adding more nodes won't provide more storage space for new data. If the biggest single-node storage available is d, then the most you can store in a RethinkDB cluster is < 64×d: accomplished by putting one primary shard in each of 64 nodes, with all replica shards on other nodes. (This is assuming one table. If there are T tables, then the most you can store is < 64×d×T.)
* When you set up storage for your RethinkDB data, you may have to select a filesystem. (Sometimes, the filesystem is already decided by the choice of storage.) We recommend using a filesystem that supports direct I/O (Input/Output). Many compressed or encrypted file systems don't support direct I/O. The ext4 filesystem supports direct I/O (but be careful: if you enable the data=journal mode, then direct I/O support will be disabled; the default is data=ordered). If your chosen filesystem supports direct I/O and you're using Linux, then you don't need to do anything to request or enable direct I/O. RethinkDB does that.
<p style="background-color: lightgrey;">What is direct I/O? It allows RethinkDB to write directly to the storage device (or use its own in-memory caching mechanisms), rather than relying on the operating system's file read and write caching mechanisms. (If you're using Linux, a write-to-file normally writes to the in-memory Page Cache first; only later does that Page Cache get flushed to disk. The Page Cache is also used when reading files.)</p>
* RethinkDB stores its data in a specific directory. You can tell RethinkDB _which_ directory using the RethinkDB config file, as explained below. In this documentation, we assume the directory is `/data`. If you set up a separate device (partition, RAID array, or logical volume) to store the RethinkDB data, then mount that device on `/data`.
## Install RethinkDB Server
If you don't already have RethinkDB Server installed, you must install it. The RethinkDB documentation has instructions for [how to install RethinkDB Server on a variety of operating systems](http://rethinkdb.com/docs/install/).
## Configure RethinkDB Server
Create a RethinkDB configuration file (text file) named `instance1.conf` with the following contents (explained below):
```text
directory=/data
bind=all
direct-io
# Replace node?_hostname with actual node hostnames below, e.g. rdb.examples.com
join=node0_hostname:29015
join=node1_hostname:29015
join=node2_hostname:29015
# continue until there's a join= line for each node in the federation
```
* `directory=/data` tells the RethinkDB node to store its share of the database data in `/data`.
* `bind=all` binds RethinkDB to all local network interfaces (e.g. loopback, Ethernet, wireless, whatever is available), so it can communicate with the outside world. (The default is to bind only to local interfaces.)
* `direct-io` tells RethinkDB to use direct I/O (explained earlier). Only include this line if your file system supports direct I/O.
* `join=hostname:29015` lines: A cluster node needs to find out the hostnames of all the other nodes somehow. You _could_ designate one node to be the one that every other node asks, and put that node's hostname in the config file, but that wouldn't be very decentralized. Instead, we include _every_ node in the list of nodes-to-ask.
If you're curious about the RethinkDB config file, there's [a RethinkDB documentation page about it](https://www.rethinkdb.com/docs/config-file/). The [explanations of the RethinkDB command-line options](https://rethinkdb.com/docs/cli-options/) are another useful reference.
See the [RethinkDB documentation on securing your cluster](https://rethinkdb.com/docs/security/).
## Install Python 3.4+
If you don't already have it, then you should [install Python 3.4+](https://www.python.org/downloads/).
If you're testing or developing BigchainDB on a stand-alone node, then you should probably create a Python 3.4+ virtual environment and activate it (e.g. using virtualenv or conda). Later we will install several Python packages and you probably only want those installed in the virtual environment.
## Install BigchainDB Server
BigchainDB Server has some OS-level dependencies that must be installed.
On Ubuntu 14.04, we found that the following was enough:
```text
sudo apt-get update
sudo apt-get install g++ python3-dev
```
On Fedora 23, we found that the following was enough (tested in February 2016):
```text
sudo dnf update
sudo dnf install gcc-c++ redhat-rpm-config python3-devel
```
(If you're using a version of Fedora before version 22, you may have to use `yum` instead of `dnf`.)
With OS-level dependencies installed, you can install BigchainDB Server with `pip` or from source.
### How to Install BigchainDB with pip
BigchainDB (i.e. both the Server and the officially-supported drivers) is distributed as a Python package on PyPI so you can install it using `pip`. First, make sure you have an up-to-date Python 3.4+ version of `pip` installed:
```text
pip -V
```
If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.4, then you must install a `pip` version associated with Python 3.4+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).
On Ubuntu 14.04, we found that this works:
```text
sudo apt-get install python3-pip
```
That should install a Python 3 version of `pip` named `pip3`. If that didn't work, then another way to get `pip3` is to do `sudo apt-get install python3-setuptools` followed by `sudo easy_install3 pip`.
You can upgrade `pip` (`pip3`) and `setuptools` to the latest versions using:
```text
pip3 install --upgrade pip setuptools
pip3 -V
```
Now you can install BigchainDB Server (and officially-supported BigchainDB drivers) using:
```text
pip3 install bigchaindb
```
(If you're not in a virtualenv and you want to install bigchaindb system-wide, then put `sudo` in front.)
Note: You can use `pip3` to upgrade the `bigchaindb` package to the latest version using `pip3 install --upgrade bigchaindb`.
### How to Install BigchainDB from Source
If you want to install BigchainDB from source because you want to use the very latest bleeding-edge code, clone the public repository:
```text
git clone git@github.com:bigchaindb/bigchaindb.git
python setup.py install
```
## Configure BigchainDB Server
Start by creating a default BigchainDB config file:
```text
bigchaindb -y configure
```
(There's documentation for the `bigchaindb` command in the section on [the BigchainDB Command Line Interface (CLI)](bigchaindb-cli.html).)
Edit the created config file:
* Open `$HOME/.bigchaindb` (the created config file) in your text editor.
* Change `"server": {"bind": "localhost:9984", ... }` to `"server": {"bind": "0.0.0.0:9984", ... }`. This makes it so traffic can come from any IP address to port 9984 (the HTTP Client-Server API port).
* Change `"api_endpoint": "http://localhost:9984/api/v1"` to `"api_endpoint": "http://your_api_hostname:9984/api/v1"`
* Change `"keyring": []` to `"keyring": ["public_key_of_other_node_A", "public_key_of_other_node_B", "..."]` i.e. a list of the public keys of all the other nodes in the federation. The keyring should _not_ include your node's public key.
For more information about the BigchainDB config file, see [Configuring a BigchainDB Node](configuration.html).
## Run RethinkDB Server
Start RethinkDB using:
```text
rethinkdb --config-file path/to/instance1.conf
```
except replace the path with the actual path to `instance1.conf`.
Note: It's possible to [make RethinkDB start at system startup](https://www.rethinkdb.com/docs/start-on-startup/).
You can verify that RethinkDB is running by opening the RethinkDB web interface in your web browser. It should be at `http://rethinkdb-hostname:8080/`. If you're running RethinkDB on localhost, that would be [http://localhost:8080/](http://localhost:8080/).
## Run BigchainDB Server
After all node operators have started RethinkDB, but before they start BigchainDB, one designated node operator must configure the RethinkDB database by running the following commands:
```text
bigchaindb init
bigchaindb set-shards numshards
bigchaindb set-replicas numreplicas
```
where:
* `bigchaindb init` creates the database within RethinkDB, the tables, the indexes, and the genesis block.
* `numshards` should be set to the number of nodes in the initial cluster.
* `numreplicas` should be set to the database replication factor decided by the federation. It must be 3 or more for [RethinkDB failover](https://rethinkdb.com/docs/failover/) to work.
Once the RethinkDB database is configured, every node operator can start BigchainDB using:
```text
bigchaindb start
```

View File

@ -5,4 +5,6 @@ Production Node Setup & Management
:maxdepth: 1
overview
install-terraform
prov-one-m-aws

View File

@ -0,0 +1,27 @@
# Install Terraform
The [Terraform documentation has installation instructions](https://www.terraform.io/intro/getting-started/install.html) for all common operating systems.
Note: Hashicorp (the company behind Terraform) will try to convince you that running Terraform on their servers (inside Atlas) would be great. **While that might be true for many, it is not true for BigchainDB.** BigchainDB federations are supposed to be decentralized, and if everyone used Atlas, that would be a point of centralization. If you don't want to run Terraform on your local machine, you could install it on a cloud machine under your control (e.g. on AWS).
## Ubuntu Installation Tips
If you want to install Terraform on Ubuntu, first [download the .zip file](https://www.terraform.io/downloads.html). Then install it in `/opt`:
```text
sudo mkdir -p /opt/terraform
sudo unzip path/to/zip-file.zip -d /opt/terraform
```
Why install it in `/opt`? See [the answers at Ask Ubuntu](https://askubuntu.com/questions/1148/what-is-the-best-place-to-install-user-apps).
Next, add `/opt/terraform` to your path. If you use bash for your shell, then you could add this line to `~/.bashrc`:
```text
export PATH="/opt/terraform:$PATH"
```
After doing that, relaunch your shell or force it to read `~/.bashrc` again, e.g. by doing `source ~/.bashrc`. You can verify that terraform is installed and in your path by doing:
```text
terraform --version
```
It should say the current version of Terraform.

View File

@ -7,6 +7,7 @@ Deploying and managing a production BigchainDB node is much more involved than w
* Production nodes need monitoring
* Production nodes need maintenance, e.g. software upgrades, scaling
Thankfully, there are tools to help!
Thankfully, there are tools to help! We use:
This section explains how to use various tools to deploy and manage a production node.
* [Terraform](https://www.terraform.io/) to provision infrastructure such as AWS instances, storage and security groups
* [Ansible](https://www.ansible.com/) to manage the software installed on that infrastructure (configuration management)

View File

@ -0,0 +1,50 @@
# Provision a One-Machine Node on AWS
This page describes how to provision the resources needed for a one-machine BigchainDB node on AWS using Terraform.
## Get Set
First, do the [basic AWS setup steps outlined in the Appendices](../appendices/aws-setup.html).
Then go to the `.../bigchaindb/ntools/one-m/aws/` directory and open the file `variables.tf`. Most of the variables have sensible default values, but you can change them if you like. In particular, you may want to change `aws_region`. (Terraform looks in `~/.aws/credentials` to get your AWS credentials, so you don't have to enter those anywhere.)
The `ssh_key_name` has no default value, so Terraform will prompt you every time it needs it.
To see what Terraform will do, run:
```text
terraform plan
```
It should ask you the value of `ssh_key_name`.
It figured out the plan by reading all the `.tf` Terraform files in the directory.
If you don't want to be asked for the `ssh_key_name`, you can change the default value of `ssh_key_name` or [you can set an environment variable](https://www.terraform.io/docs/configuration/variables.html) named `TF_VAR_ssh_key_name`.
## Provision
To provision all the resources specified in the plan, do the following. **Note: This will provision actual resources on AWS, and those cost money. Be sure to shut down the resources you don't want to keep running later, otherwise the cost will keep growing.**
```text
terraform apply
```
Terraform will report its progress as it provisions all the resources. Once it's done, you can go to the Amazon EC2 web console and see the instance, its security group, its elastic IP, and its attached storage volumes (one for the root directory and one for RethinkDB storage).
At this point, there is no software installed on the instance except for Ubuntu 14.04 and whatever else came with the Amazon Machine Image (AMI) specified in the configuration. The next step is to use Ansible to install and configure all the necessary software.
## (Optional) "Destroy"
If you want to shut down all the resources just provisioned, you must first disable termination protection on the instance:
1. Go to the EC2 console and select the instance you just launched. It should be named `BigchainDB_node`.
2. Click **Actions** > **Instance Settings** > **Change Termination Protection** > **Yes, Disable**
3. Back in your terminal, do `terraform destroy`
Terraform should "destroy" (i.e. terminate or delete) all the AWS resources you provisioned above.
## See Also
* The [Terraform Documentation](https://www.terraform.io/docs/)
* The [Terraform Documentation for the AWS "Provider"](https://www.terraform.io/docs/providers/aws/index.html)

View File

@ -120,7 +120,7 @@ When one creates a condition, one can calculate its fulfillment length (e.g. 96)
If someone tries to make a condition where the output of a threshold condition feeds into the input of another “earlier” threshold condition (i.e. in a closed logical circuit), then their computer will take forever to calculate the (infinite) “condition URI”, at least in theory. In practice, their computer will run out of memory or their client software will timeout after a while.
Aside: In what follows, the list of `new_owners` (in a condition) is always who owned the asset at the time the transaction completed, but before the next transaction started. The list of `current_owners` (in a fulfillment) is always equal to the list of `new_owners` in that asset's previous transaction.
Aside: In what follows, the list of `owners_after` (in a condition) is always who owned the asset at the time the transaction completed, but before the next transaction started. The list of `owners_before` (in a fulfillment) is always equal to the list of `owners_after` in that asset's previous transaction.
### Conditions
@ -141,17 +141,17 @@ If there is only one _new owner_, the condition will be a simple signature condi
},
"uri": "<string>"
},
"new_owners": ["<new owner public key>"]
"owners_after": ["<new owner public key>"]
}
```
- **Condition header**:
- `cid`: Condition index so that we can reference this output as an input to another transaction. It also matches
the input `fid`, making this the condition to fulfill in order to spend the asset used as input with `fid`.
- `new_owners`: A list containing one item: the public key of the new owner.
- `owners_after`: A list containing one item: the public key of the new owner.
- **Condition body**:
- `bitmask`: A set of bits representing the features required by the condition type.
- `public_key`: The _new_owner's_ public key.
- `public_key`: The new owner's public key.
- `type_id`: The fulfillment type ID; see the [ILP spec](https://interledger.org/five-bells-condition/spec.html).
- `uri`: Binary representation of the condition using only URL-safe characters.
@ -189,9 +189,9 @@ to spend the asset. For example:
"type_id": 2
},
"uri": "cc:2:29:ytNK3X6-bZsbF-nCGDTuopUIMi1HCyCkyPewm6oLI3o:206"},
"new_owners": [
"<new owner 1 public key>",
"<new owner 2 public key>"
"owners_after": [
"<owner 1 public key>",
"<owner 2 public key>"
]
}
```
@ -210,7 +210,7 @@ If there is only one _current owner_, the fulfillment will be a simple signature
```json
{
"current_owners": ["<public key of current owner>"],
"owners_before": ["<public key of the owner before the transaction happened>"],
"fid": 0,
"fulfillment": "cf:4:RxFzIE679tFBk8zwEgizhmTuciAylvTUwy6EL6ehddHFJOhK5F4IjwQ1xLu2oQK9iyRCZJdfWAefZVjTt3DeG5j2exqxpGliOPYseNkRAWEakqJ_UrCwgnj92dnFRAEE",
"input": {
@ -222,7 +222,7 @@ If there is only one _current owner_, the fulfillment will be a simple signature
- `fid`: Fulfillment index. It matches a `cid` in the conditions with a new _crypto-condition_ that the new owner
needs to fulfill to spend this asset.
- `current_owners`: A list of public keys of the current owners; in this case it has just one public key.
- `owners_before`: A list of public keys of the owners before the transaction; in this case it has just one public key.
- `fulfillment`: A crypto-conditions URI that encodes the cryptographic fulfillments like signatures and others, see [crypto-conditions](https://interledger.org/five-bells-condition/spec.html).
- `input`: Pointer to the asset and condition of a previous transaction
- `cid`: Condition index

1
ntools/README.md Normal file
View File

@ -0,0 +1 @@
This directory contains tools for provisioning, deploying and managing a BigchainDB node (on AWS, Azure or wherever).

20
ntools/one-m/aws/amis.tf Normal file
View File

@ -0,0 +1,20 @@
# Each AWS region has a different AMI name
# even though the contents are the same.
# This file has the mapping from region --> AMI name.
#
# These are all Ubuntu 14.04 LTS AMIs
# with Arch = amd64, Instance Type = hvm:ebs-ssd
# from https://cloud-images.ubuntu.com/locator/ec2/
# Region -> AMI ID lookup table, consumed via lookup(var.amis, var.aws_region).
variable "amis" {
type = "map"
# All entries are the same Ubuntu 14.04 LTS build; only the per-region ID differs.
default = {
eu-west-1 = "ami-55452e26"
eu-central-1 = "ami-b1cf39de"
us-east-1 = "ami-8e0b9499"
us-west-2 = "ami-547b3834"
ap-northeast-1 = "ami-49d31328"
ap-southeast-1 = "ami-5e429c3d"
ap-southeast-2 = "ami-25f3c746"
sa-east-1 = "ami-97980efb"
}
}

View File

@ -0,0 +1,6 @@
# You can get the value of "ip_address" after running terraform apply using:
# $ terraform output ip_address
# You could use that in a script, for example
# Exposes the elastic IP allocated by aws_eip.ip (the instance's public address).
output "ip_address" {
value = "${aws_eip.ip.public_ip}"
}

View File

@ -0,0 +1,6 @@
# AWS provider configuration; only the region is set here.
provider "aws" {
# An AWS access_key and secret_key are needed; Terraform looks
# for an AWS credentials file in the default location.
# See https://tinyurl.com/pu8gd9h
# Region comes from variables.tf (default eu-central-1) or a TF_VAR override.
region = "${var.aws_region}"
}

View File

@ -0,0 +1,47 @@
# One instance (virtual machine) on AWS:
# https://www.terraform.io/docs/providers/aws/r/instance.html
# The single EC2 machine hosting the BigchainDB node.
resource "aws_instance" "instance" {
# AMI is chosen per-region from the amis map (amis.tf).
ami = "${lookup(var.amis, var.aws_region)}"
instance_type = "${var.aws_instance_type}"
tags {
Name = "BigchainDB_node"
}
# EBS optimization gives dedicated throughput to the attached volumes.
ebs_optimized = true
# Name of an existing EC2 key pair; prompted for if not set (variables.tf).
key_name = "${var.ssh_key_name}"
vpc_security_group_ids = ["${aws_security_group.node_sg1.id}"]
# Root volume (OS only); RethinkDB data lives on the separate EBS volume below.
root_block_device = {
volume_type = "gp2"
volume_size = "${var.root_storage_in_GiB}"
delete_on_termination = true
}
# Enable EC2 Instance Termination Protection
# (must be disabled manually in the console before `terraform destroy` works).
disable_api_termination = true
}
# This EBS volume will be used for database storage (not for root).
# https://www.terraform.io/docs/providers/aws/r/ebs_volume.html
resource "aws_ebs_volume" "db_storage" {
type = "gp2"
# Must be in the same AZ as the instance to be attachable.
availability_zone = "${aws_instance.instance.availability_zone}"
# Size in GiB (not GB!)
size = "${var.DB_storage_in_GiB}"
tags {
Name = "BigchainDB_db_storage"
}
}
# This allocates a new elastic IP address, if necessary
# and then associates it with the above aws_instance
resource "aws_eip" "ip" {
instance = "${aws_instance.instance.id}"
# vpc = true because the instance lives in a VPC (not EC2-Classic).
vpc = true
}
# This attaches the instance to the EBS volume for RethinkDB storage
# https://www.terraform.io/docs/providers/aws/r/volume_attachment.html
resource "aws_volume_attachment" "ebs_att" {
# Why /dev/sdp? See https://tinyurl.com/z2zqm6n
device_name = "/dev/sdp"
volume_id = "${aws_ebs_volume.db_storage.id}"
instance_id = "${aws_instance.instance.id}"
}

View File

@ -0,0 +1,89 @@
# Security group for the one-machine BigchainDB node.
# Opens only the ports the node needs; note that SSH (22) and the
# BigchainDB API (9984) are open to the whole internet (0.0.0.0/0).
resource "aws_security_group" "node_sg1" {
name_prefix = "BigchainDB_"
description = "Single-machine BigchainDB node security group"
tags = {
Name = "BigchainDB_one-m"
}
# Allow *all* outbound traffic
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
# SSH
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# DNS
ingress {
from_port = 53
to_port = 53
protocol = "udp"
cidr_blocks = ["0.0.0.0/0"]
}
# HTTP used by some package managers
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# NTP daemons use port 123 but the request will
# come from inside the firewall so a response is expected
# (hence no ingress rule for 123 here).
# SNMP (e.g. for server monitoring)
ingress {
from_port = 161
to_port = 161
protocol = "udp"
cidr_blocks = ["0.0.0.0/0"]
}
# HTTPS used when installing RethinkDB
# and by some package managers
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# StatsD
ingress {
from_port = 8125
to_port = 8125
protocol = "udp"
cidr_blocks = ["0.0.0.0/0"]
}
# Don't allow port 8080 for the RethinkDB web interface.
# Use a SOCKS proxy or reverse proxy instead.
# BigchainDB Client-Server REST API
ingress {
from_port = 9984
to_port = 9984
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# Port 28015 doesn't have to be open to the outside
# since the RethinkDB client and server are on localhost
# RethinkDB intracluster communications use port 29015
ingress {
from_port = 29015
to_port = 29015
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
}

View File

@ -0,0 +1,19 @@
# AWS region to provision in; also selects the AMI via the amis map.
variable "aws_region" {
default = "eu-central-1"
}
# EC2 instance type for the single-machine node.
variable "aws_instance_type" {
default = "m4.xlarge"
}
# Root (OS) volume size, in GiB.
variable "root_storage_in_GiB" {
default = 10
}
# Dedicated RethinkDB data volume size, in GiB (not GB).
variable "DB_storage_in_GiB" {
default = 30
}
# Name of an existing EC2 SSH key pair.
variable "ssh_key_name" {
# No default. Ask as needed.
}

View File

@ -102,7 +102,7 @@ setup(
'logstats==0.2.1',
'base58==0.2.2',
'flask==0.10.1',
'requests==2.9',
'requests~=2.9',
'gunicorn~=19.0',
'multipipes~=0.1.0',
],

View File

@ -456,7 +456,7 @@ class TestTransactionValidation(object):
with pytest.raises(exceptions.InvalidSignature) as excinfo:
b.validate_transaction(tx)
# assert excinfo.value.args[0] == 'current_owner `a` does not own the input `{}`'.format(valid_input)
# assert excinfo.value.args[0] == 'owner_before `a` does not own the input `{}`'.format(valid_input)
assert b.is_valid_transaction(tx) is False
@pytest.mark.usefixtures('inputs')
@ -584,7 +584,7 @@ class TestBlockValidation(object):
with pytest.raises(exceptions.TransactionOwnerError) as excinfo:
b.validate_block(block)
assert excinfo.value.args[0] == 'current_owner `a` does not own the input `{}`'.format(valid_input)
assert excinfo.value.args[0] == 'owner_before `a` does not own the input `{}`'.format(valid_input)
def test_invalid_block_id(self, b):
block = dummy_block()
@ -723,7 +723,7 @@ class TestMultipleInputs(object):
assert len(tx_signed['transaction']['fulfillments']) == 1
assert len(tx_signed['transaction']['conditions']) == 1
def test_single_current_owner_multiple_new_owners_single_input(self, b, user_sk, user_vk, inputs):
def test_single_owner_before_multiple_owners_after_single_input(self, b, user_sk, user_vk, inputs):
# create a new users
user2_sk, user2_vk = crypto.generate_key_pair()
user3_sk, user3_vk = crypto.generate_key_pair()
@ -741,7 +741,7 @@ class TestMultipleInputs(object):
assert len(tx_signed['transaction']['fulfillments']) == 1
assert len(tx_signed['transaction']['conditions']) == 1
def test_single_current_owner_multiple_new_owners_multiple_inputs(self, b, user_sk, user_vk):
def test_single_owner_before_multiple_owners_after_multiple_inputs(self, b, user_sk, user_vk):
# create a new users
user2_sk, user2_vk = crypto.generate_key_pair()
user3_sk, user3_vk = crypto.generate_key_pair()
@ -769,7 +769,7 @@ class TestMultipleInputs(object):
assert len(tx_signed['transaction']['fulfillments']) == 3
assert len(tx_signed['transaction']['conditions']) == 3
def test_multiple_current_owners_single_new_owner_single_input(self, b, user_sk, user_vk):
def test_multiple_owners_before_single_owner_after_single_input(self, b, user_sk, user_vk):
# create a new users
user2_sk, user2_vk = crypto.generate_key_pair()
user3_sk, user3_vk = crypto.generate_key_pair()
@ -793,7 +793,7 @@ class TestMultipleInputs(object):
assert len(tx_signed['transaction']['fulfillments']) == 1
assert len(tx_signed['transaction']['conditions']) == 1
def test_multiple_current_owners_single_new_owner_multiple_inputs(self, b, user_sk, user_vk):
def test_multiple_owners_before_single_owner_after_multiple_inputs(self, b, user_sk, user_vk):
# create a new users
user2_sk, user2_vk = crypto.generate_key_pair()
user3_sk, user3_vk = crypto.generate_key_pair()
@ -820,7 +820,7 @@ class TestMultipleInputs(object):
assert len(tx_signed['transaction']['fulfillments']) == 3
assert len(tx_signed['transaction']['conditions']) == 3
def test_multiple_current_owners_multiple_new_owners_single_input(self, b, user_sk, user_vk):
def test_multiple_owners_before_multiple_owners_after_single_input(self, b, user_sk, user_vk):
# create a new users
user2_sk, user2_vk = crypto.generate_key_pair()
user3_sk, user3_vk = crypto.generate_key_pair()
@ -845,7 +845,7 @@ class TestMultipleInputs(object):
assert len(tx_signed['transaction']['fulfillments']) == 1
assert len(tx_signed['transaction']['conditions']) == 1
def test_multiple_current_owners_multiple_new_owners_multiple_inputs(self, b, user_sk, user_vk):
def test_multiple_owners_before_multiple_owners_after_multiple_inputs(self, b, user_sk, user_vk):
# create a new users
user2_sk, user2_vk = crypto.generate_key_pair()
user3_sk, user3_vk = crypto.generate_key_pair()
@ -1155,7 +1155,7 @@ class TestFulfillmentMessage(object):
assert fulfillment_message['data']['payload'] == tx['transaction']['data']['payload']
assert fulfillment_message['id'] == tx['id']
assert fulfillment_message['condition'] == tx['transaction']['conditions'][0]
assert fulfillment_message['fulfillment']['current_owners'] == original_fulfillment['current_owners']
assert fulfillment_message['fulfillment']['owners_before'] == original_fulfillment['owners_before']
assert fulfillment_message['fulfillment']['fid'] == original_fulfillment['fid']
assert fulfillment_message['fulfillment']['input'] == original_fulfillment['input']
assert fulfillment_message['operation'] == tx['transaction']['operation']
@ -1178,14 +1178,14 @@ class TestFulfillmentMessage(object):
assert fulfillment_message['data']['payload'] == tx['transaction']['data']['payload']
assert fulfillment_message['id'] == tx['id']
assert fulfillment_message['condition'] == tx['transaction']['conditions'][0]
assert fulfillment_message['fulfillment']['current_owners'] == original_fulfillment['current_owners']
assert fulfillment_message['fulfillment']['owners_before'] == original_fulfillment['owners_before']
assert fulfillment_message['fulfillment']['fid'] == original_fulfillment['fid']
assert fulfillment_message['fulfillment']['input'] == original_fulfillment['input']
assert fulfillment_message['operation'] == tx['transaction']['operation']
assert fulfillment_message['timestamp'] == tx['transaction']['timestamp']
assert fulfillment_message['version'] == tx['transaction']['version']
def test_fulfillment_message_multiple_current_owners_multiple_new_owners_multiple_inputs(self, b, user_vk):
def test_fulfillment_message_multiple_owners_before_multiple_owners_after_multiple_inputs(self, b, user_vk):
# create a new users
user2_sk, user2_vk = crypto.generate_key_pair()
user3_sk, user3_vk = crypto.generate_key_pair()
@ -1216,7 +1216,7 @@ class TestFulfillmentMessage(object):
assert fulfillment_message['data']['payload'] == tx['transaction']['data']['payload']
assert fulfillment_message['id'] == tx['id']
assert fulfillment_message['condition'] == tx['transaction']['conditions'][original_fulfillment['fid']]
assert fulfillment_message['fulfillment']['current_owners'] == original_fulfillment['current_owners']
assert fulfillment_message['fulfillment']['owners_before'] == original_fulfillment['owners_before']
assert fulfillment_message['fulfillment']['fid'] == original_fulfillment['fid']
assert fulfillment_message['fulfillment']['input'] == original_fulfillment['input']
assert fulfillment_message['operation'] == tx['transaction']['operation']
@ -1269,7 +1269,7 @@ class TestTransactionMalleability(object):
tx_changed = copy.deepcopy(tx_signed)
tx_changed['transaction']['fulfillments'] = [
{
"current_owners": [
"owners_before": [
"AFbofwJYEB7Cx2fgrPrCJzbdDVRzRKysoGXt4DsvuTGN"
],
"fid": 0,
@ -1287,7 +1287,7 @@ class TestTransactionMalleability(object):
assert b.is_valid_transaction(tx_changed) is False
tx_changed = copy.deepcopy(tx_signed)
tx_changed['transaction']['fulfillments'][0]['current_owners'] = [
tx_changed['transaction']['fulfillments'][0]['owners_before'] = [
"AFbofwJYEB7Cx2fgrPrCJzbdDVRzRKysoGXt4DsvuTGN"]
assert b.validate_fulfillments(tx_changed) is False
assert b.is_valid_transaction(tx_changed) is False
@ -1316,7 +1316,7 @@ class TestCryptoconditions(object):
fulfillment = tx_signed['transaction']['fulfillments'][0]
fulfillment_from_uri = cc.Fulfillment.from_uri(fulfillment['fulfillment'])
assert fulfillment['current_owners'][0] == b.me
assert fulfillment['owners_before'][0] == b.me
assert fulfillment_from_uri.public_key.to_ascii().decode() == b.me
assert b.validate_fulfillments(tx_signed) == True
assert b.is_valid_transaction(tx_signed) == tx_signed
@ -1347,7 +1347,7 @@ class TestCryptoconditions(object):
fulfillment = tx_signed['transaction']['fulfillments'][0]
fulfillment_from_uri = cc.Fulfillment.from_uri(fulfillment['fulfillment'])
assert fulfillment['current_owners'][0] == user_vk
assert fulfillment['owners_before'][0] == user_vk
assert fulfillment_from_uri.public_key.to_ascii().decode() == user_vk
assert fulfillment_from_uri.condition.serialize_uri() == prev_condition['uri']
assert b.validate_fulfillments(tx_signed) == True
@ -1366,7 +1366,7 @@ class TestCryptoconditions(object):
fulfillment = tx_signed['transaction']['fulfillments'][0]
fulfillment_from_uri = cc.Fulfillment.from_uri(fulfillment['fulfillment'])
assert fulfillment['current_owners'][0] == b.me
assert fulfillment['owners_before'][0] == b.me
assert fulfillment_from_uri.public_key.to_ascii().decode() == b.me
assert b.validate_fulfillments(tx_signed) == True
assert b.is_valid_transaction(tx_signed) == tx_signed
@ -1388,7 +1388,7 @@ class TestCryptoconditions(object):
fulfillment = tx_signed['transaction']['fulfillments'][0]
fulfillment_from_uri = cc.Fulfillment.from_uri(fulfillment['fulfillment'])
assert fulfillment['current_owners'][0] == user_vk
assert fulfillment['owners_before'][0] == user_vk
assert fulfillment_from_uri.public_key.to_ascii().decode() == user_vk
assert b.validate_fulfillments(tx_signed) == True
assert b.is_valid_transaction(tx_signed) == tx_signed
@ -1627,7 +1627,7 @@ class TestCryptoconditions(object):
def test_default_threshold_conditions_for_multiple_owners(self, b, user_sk, user_vk):
user2_sk, user2_vk = crypto.generate_key_pair()
# create transaction with multiple new_owners
# create transaction with multiple owners_after
tx = b.create_transaction(b.me, [user_vk, user2_vk], None, 'CREATE')
assert len(tx['transaction']['conditions']) == 1
@ -1647,7 +1647,7 @@ class TestCryptoconditions(object):
def test_default_threshold_fulfillments_for_multiple_owners(self, b, user_sk, user_vk):
user2_sk, user2_vk = crypto.generate_key_pair()
# create transaction with multiple new_owners
# create transaction with multiple owners_after
tx_create = b.create_transaction(b.me, [user_vk, user2_vk], None, 'CREATE')
tx_create_signed = b.sign_transaction(tx_create, b.me_private)
block = b.create_block([tx_create_signed])
@ -1688,7 +1688,7 @@ class TestCryptoconditions(object):
'uri': first_tx_condition.condition.serialize_uri()
},
'cid': 0,
'new_owners': None
'owners_after': None
})
# conditions have been updated, so hash needs updating
hashlock_tx['id'] = util.get_hash_data(hashlock_tx)
@ -1720,7 +1720,7 @@ class TestCryptoconditions(object):
'uri': first_tx_condition.condition.serialize_uri()
},
'cid': 0,
'new_owners': None
'owners_after': None
})
# conditions have been updated, so hash needs updating
hashlock_tx['id'] = util.get_hash_data(hashlock_tx)
@ -1751,7 +1751,7 @@ class TestCryptoconditions(object):
'uri': first_tx_condition.condition.serialize_uri()
},
'cid': 0,
'new_owners': None
'owners_after': None
})
# conditions have been updated, so hash needs updating
hashlock_tx['id'] = util.get_hash_data(hashlock_tx)
@ -1813,15 +1813,15 @@ class TestCryptoconditions(object):
user3_sk, user3_vk = crypto.generate_key_pair()
user4_sk, user4_vk = crypto.generate_key_pair()
user5_sk, user5_vk = crypto.generate_key_pair()
new_owners = [user_vk, user2_vk, user3_vk, user4_vk, user5_vk]
owners_after = [user_vk, user2_vk, user3_vk, user4_vk, user5_vk]
# create a transaction with multiple new_owners
tx = b.create_transaction(b.me, new_owners, None, 'CREATE')
# create a transaction with multiple owners_after
tx = b.create_transaction(b.me, owners_after, None, 'CREATE')
condition = cc.Fulfillment.from_dict(tx['transaction']['conditions'][0]['condition']['details'])
for new_owner in new_owners:
subcondition = condition.get_subcondition_from_vk(new_owner)[0]
assert subcondition.public_key.to_ascii().decode() == new_owner
for owner_after in owners_after:
subcondition = condition.get_subcondition_from_vk(owner_after)[0]
assert subcondition.public_key.to_ascii().decode() == owner_after
@pytest.mark.usefixtures('inputs')
def test_transfer_asset_with_escrow_condition(self, b, user_vk, user_sk):

View File

@ -252,7 +252,7 @@ print(json.dumps(threshold_tx_transfer, sort_keys=True, indent=4, separators=(',
Hashlocked Conditions
"""
# Create a hash-locked asset without any new_owners
# Create a hash-locked asset without any owners_after
hashlock_tx = b.create_transaction(b.me, None, None, 'CREATE')
# Define a secret that will be hashed - fulfillments need to guess the secret
@ -265,13 +265,13 @@ hashlock_tx['transaction']['conditions'].append({
'uri': first_tx_condition.condition.serialize_uri()
},
'cid': 0,
'new_owners': None
'owners_after': None
})
# Conditions have been updated, so hash needs updating
hashlock_tx['id'] = util.get_hash_data(hashlock_tx)
# The asset needs to be signed by the current_owner
# The asset needs to be signed by the owner_before
hashlock_tx_signed = b.sign_transaction(hashlock_tx, b.me_private)
# Some validations
@ -327,7 +327,7 @@ tx_timeout['transaction']['conditions'].append({
'uri': condition_timeout.condition.serialize_uri()
},
'cid': 0,
'new_owners': None
'owners_after': None
})
# conditions have been updated, so hash needs updating

View File

@ -3,7 +3,7 @@ from unittest.mock import patch
import rethinkdb
from multipipes import Pipe
from bigchaindb.pipelines import utils
from bigchaindb.pipelines.utils import ChangeFeed
MOCK_CHANGEFEED_DATA = [{
@ -21,36 +21,50 @@ MOCK_CHANGEFEED_DATA = [{
@patch.object(rethinkdb.ast.RqlQuery, 'run', return_value=MOCK_CHANGEFEED_DATA)
def test_changefeed_insert(mock_run):
outpipe = Pipe()
changefeed = utils.ChangeFeed('backlog', 'insert')
changefeed = ChangeFeed('backlog', ChangeFeed.INSERT)
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.get() == 'seems like we have an insert here'
assert outpipe.qsize() == 0
@patch.object(rethinkdb.ast.RqlQuery, 'run', return_value=MOCK_CHANGEFEED_DATA)
def test_changefeed_delete(mock_run):
outpipe = Pipe()
changefeed = utils.ChangeFeed('backlog', 'delete')
changefeed = ChangeFeed('backlog', ChangeFeed.DELETE)
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.get() == 'seems like we have a delete here'
assert outpipe.qsize() == 0
@patch.object(rethinkdb.ast.RqlQuery, 'run', return_value=MOCK_CHANGEFEED_DATA)
def test_changefeed_update(mock_run):
outpipe = Pipe()
changefeed = utils.ChangeFeed('backlog', 'update')
changefeed = ChangeFeed('backlog', ChangeFeed.UPDATE)
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.get() == {'new_val': 'seems like we have an update here',
'old_val': 'seems like we have an update here'}
assert outpipe.qsize() == 0
@patch.object(rethinkdb.ast.RqlQuery, 'run', return_value=MOCK_CHANGEFEED_DATA)
def test_changefeed_multiple_operations(mock_run):
outpipe = Pipe()
changefeed = ChangeFeed('backlog', ChangeFeed.INSERT | ChangeFeed.UPDATE)
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.get() == 'seems like we have an insert here'
assert outpipe.get() == {'new_val': 'seems like we have an update here',
'old_val': 'seems like we have an update here'}
assert outpipe.qsize() == 0
@patch.object(rethinkdb.ast.RqlQuery, 'run', return_value=MOCK_CHANGEFEED_DATA)
def test_changefeed_prefeed(mock_run):
outpipe = Pipe()
changefeed = utils.ChangeFeed('backlog', 'insert', prefeed=[1, 2, 3])
changefeed = ChangeFeed('backlog', ChangeFeed.INSERT, prefeed=[1, 2, 3])
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.qsize() == 4

View File

@ -44,11 +44,11 @@ def test_client_can_create_assets(mock_requests_post, client):
# XXX: `CREATE` operations require the node that receives the transaction to modify the data in
# the transaction itself.
# `current_owner` will be overwritten with the public key of the node in the federation
# `owner_before` will be overwritten with the public key of the node in the federation
# that will create the real transaction. `signature` will be overwritten with the new signature.
# Note that this scenario is ignored by this test.
assert tx['transaction']['fulfillments'][0]['current_owners'][0] == client.public_key
assert tx['transaction']['conditions'][0]['new_owners'][0] == client.public_key
assert tx['transaction']['fulfillments'][0]['owners_before'][0] == client.public_key
assert tx['transaction']['conditions'][0]['owners_after'][0] == client.public_key
assert tx['transaction']['fulfillments'][0]['input'] is None
assert util.validate_fulfillments(tx)
@ -56,8 +56,8 @@ def test_client_can_create_assets(mock_requests_post, client):
def test_client_can_transfer_assets(mock_requests_post, mock_bigchaindb_sign, client):
tx = client.transfer(client.public_key, 123)
assert tx['transaction']['fulfillments'][0]['current_owners'][0] == client.public_key
assert tx['transaction']['conditions'][0]['new_owners'][0] == client.public_key
assert tx['transaction']['fulfillments'][0]['owners_before'][0] == client.public_key
assert tx['transaction']['conditions'][0]['owners_after'][0] == client.public_key
assert tx['transaction']['fulfillments'][0]['input'] == 123

View File

@ -229,6 +229,13 @@ def test_file_config():
assert config == {}
def test_invalid_file_config():
from bigchaindb.config_utils import file_config, CONFIG_DEFAULT_PATH
with patch('builtins.open', mock_open(read_data='{_INVALID_JSON_}')) as m:
with pytest.raises(exceptions.ConfigurationError):
file_config()
def test_write_config():
from bigchaindb.config_utils import write_config, CONFIG_DEFAULT_PATH
m = mock_open()

View File

@ -35,8 +35,8 @@ def test_transform_create(b, user_sk, user_vk):
tx = util.transform_create(tx)
tx = util.sign_tx(tx, b.me_private)
assert tx['transaction']['fulfillments'][0]['current_owners'][0] == b.me
assert tx['transaction']['conditions'][0]['new_owners'][0] == user_vk
assert tx['transaction']['fulfillments'][0]['owners_before'][0] == b.me
assert tx['transaction']['conditions'][0]['owners_after'][0] == user_vk
assert util.validate_fulfillments(tx)
@ -159,7 +159,7 @@ def test_create_tx_with_empty_inputs():
assert 'data' in tx['transaction']
assert len(tx['transaction']['fulfillments']) == 1
assert tx['transaction']['fulfillments'][0] == {
'current_owners': [], 'input': None, 'fulfillment': None, 'fid': 0}
'owners_before': [], 'input': None, 'fulfillment': None, 'fid': 0}
def test_fulfill_threshold_signature_fulfillment_pubkey_notfound(monkeypatch):
@ -170,7 +170,7 @@ def test_fulfill_threshold_signature_fulfillment_pubkey_notfound(monkeypatch):
'get_subcondition_from_vk',
lambda x, y: []
)
fulfillment = {'current_owners': (None,)}
fulfillment = {'owners_before': (None,)}
parsed_fulfillment = ThresholdSha256Fulfillment()
with pytest.raises(KeypairMismatchException):
fulfill_threshold_signature_fulfillment(
@ -185,7 +185,7 @@ def test_fulfill_threshold_signature_fulfillment_wrong_privkeys(monkeypatch):
'get_subcondition_from_vk',
lambda x, y: (None,)
)
fulfillment = {'current_owners': ('alice-pub-key',)}
fulfillment = {'owners_before': ('alice-pub-key',)}
parsed_fulfillment = ThresholdSha256Fulfillment()
with pytest.raises(KeypairMismatchException):
fulfill_threshold_signature_fulfillment(

View File

@ -14,6 +14,7 @@ def test_get_transaction_endpoint(b, client, user_vk):
tx = b.get_transaction(input_tx['txid'])
res = client.get(TX_ENDPOINT + input_tx['txid'])
assert tx == res.json
assert res.status_code == 200
@pytest.mark.usefixtures('inputs')
@ -35,8 +36,8 @@ def test_post_create_transaction_endpoint(b, client):
tx = util.create_and_sign_tx(keypair[0], keypair[1], keypair[1], None, 'CREATE')
res = client.post(TX_ENDPOINT, data=json.dumps(tx))
assert res.json['transaction']['fulfillments'][0]['current_owners'][0] == b.me
assert res.json['transaction']['conditions'][0]['new_owners'][0] == keypair[1]
assert res.json['transaction']['fulfillments'][0]['owners_before'][0] == b.me
assert res.json['transaction']['conditions'][0]['owners_after'][0] == keypair[1]
@pytest.mark.usefixtures('inputs')
@ -47,6 +48,17 @@ def test_post_transfer_transaction_endpoint(b, client, user_vk, user_sk):
transfer = util.create_and_sign_tx(user_sk, user_vk, to_keypair[1], input_valid)
res = client.post(TX_ENDPOINT, data=json.dumps(transfer))
assert res.json['transaction']['fulfillments'][0]['current_owners'][0] == user_vk
assert res.json['transaction']['conditions'][0]['new_owners'][0] == to_keypair[1]
assert res.json['transaction']['fulfillments'][0]['owners_before'][0] == user_vk
assert res.json['transaction']['conditions'][0]['owners_after'][0] == to_keypair[1]
@pytest.mark.usefixtures('inputs')
def test_post_invalid_transfer_transaction_returns_400(b, client, user_vk, user_sk):
to_keypair = crypto.generate_key_pair()
input_valid = b.get_owned_ids(user_vk).pop()
transfer = b.create_transaction(user_vk, to_keypair[0], input_valid, 'TRANSFER')
# transfer is not signed
res = client.post(TX_ENDPOINT, data=json.dumps(transfer))
assert res.status_code == 400