Mirror of https://github.com/bigchaindb/bigchaindb.git (synced 2024-10-13 13:34:05 +00:00)

Commit bc373880d9: Merge remote-tracking branch 'origin/master' into feat/550/ansible-security-setup-on-ubuntu
@@ -2,6 +2,8 @@

All code in _this_ repository is licensed under the GNU Affero General Public License version 3 (AGPLv3), the full text of which can be found at [http://www.gnu.org/licenses/agpl.html](http://www.gnu.org/licenses/agpl.html).

If you want to make modifications to the code in _this_ repository and you want to keep those modifications proprietary, then you must get a commercial license from BigchainDB GmbH.

All short code snippets embedded in the official BigchainDB _documentation_ are licensed under the Apache License, Version 2.0, the full text of which can be found at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0).

For the licenses on all other BigchainDB-related code, see the LICENSE file in the associated repository.
@@ -56,8 +56,8 @@ class Client:
"""
tx = self.consensus.create_transaction(
current_owner=self.public_key,
new_owner=self.public_key,
owner_before=self.public_key,
owner_after=self.public_key,
tx_input=None,
operation='CREATE',
payload=payload)

@@ -66,11 +66,11 @@ class Client:
tx, private_key=self.private_key)
return self._push(signed_tx)
def transfer(self, new_owner, tx_input, payload=None):
def transfer(self, owner_after, tx_input, payload=None):
"""Issue a transaction to transfer an asset.
Args:
new_owner (str): the public key of the new owner
owner_after (str): the public key of the new owner
tx_input (str): the id of the transaction to use as input
payload (dict, optional): the payload for the transaction.

@@ -79,8 +79,8 @@ class Client:
"""
tx = self.consensus.create_transaction(
current_owner=self.public_key,
new_owner=new_owner,
owner_before=self.public_key,
owner_after=owner_after,
tx_input=tx_input,
operation='TRANSFER',
payload=payload)
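
For orientation, here is a rough sketch of how the renamed parameters surface to a caller of this client class. The `create()` signature, the import path, and the configured keypair are assumptions for illustration, not something this diff shows.

```python
# Hypothetical usage after the rename (sketch only).
from bigchaindb.client import Client   # assumed import path

client = Client()  # assumes a configured keypair and a reachable BigchainDB server

# CREATE: the client assigns the asset to its own public key; internally the
# keys are now passed as owner_before/owner_after.
create_tx = client.create(payload={'msg': 'hello'})

# TRANSFER: the first argument is now named owner_after instead of new_owner.
transfer_tx = client.transfer(owner_after='<recipient public key>',
                              tx_input='<id of a transaction to use as input>')
```
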
@@ -22,6 +22,7 @@ from pkg_resources import iter_entry_points, ResolutionError
import bigchaindb
from bigchaindb.consensus import AbstractConsensusRules
from bigchaindb import exceptions
# TODO: move this to a proper configuration file for logging
logging.getLogger('requests').setLevel(logging.WARNING)

@@ -98,7 +99,12 @@ def file_config(filename=None):
logger.debug('file_config() will try to open `{}`'.format(filename))
with open(filename) as f:
try:
config = json.load(f)
except ValueError as err:
raise exceptions.ConfigurationError(
'Failed to parse the JSON configuration from `{}`, {}'.format(filename, err)
)
logger.info('Configuration loaded from `{}`'.format(filename))
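
The effect of this hunk is that a malformed JSON config file now raises a configuration-specific error instead of a bare `ValueError`. A minimal stand-alone sketch of the same pattern, using only the standard library (the exception class here is a stand-in, not the real `bigchaindb.exceptions` one):

```python
import json


class ConfigurationError(Exception):
    """Stand-in for bigchaindb.exceptions.ConfigurationError."""


def load_json_config(filename):
    # Mirror the pattern in file_config(): wrap JSON parse errors in a
    # configuration-specific exception that names the offending file.
    with open(filename) as f:
        try:
            return json.load(f)
        except ValueError as err:
            raise ConfigurationError(
                'Failed to parse the JSON configuration from `{}`, {}'.format(filename, err))
```
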
@@ -133,14 +133,14 @@ class BaseConsensusRules(AbstractConsensusRules):
# TODO: for now lets assume a CREATE transaction only has one fulfillment
if transaction['transaction']['fulfillments'][0]['input']:
raise ValueError('A CREATE operation has no inputs')
# TODO: for now lets assume a CREATE transaction only has one current_owner
if transaction['transaction']['fulfillments'][0]['current_owners'][0] not in (
# TODO: for now lets assume a CREATE transaction only has one owner_before
if transaction['transaction']['fulfillments'][0]['owners_before'][0] not in (
bigchain.nodes_except_me + [bigchain.me]):
raise exceptions.OperationError(
'Only federation nodes can use the operation `CREATE`')
else:
# check if the input exists, is owned by the current_owner
# check if the input exists, is owned by the owner_before
if not transaction['transaction']['fulfillments']:
raise ValueError('Transaction contains no fulfillments')

@@ -206,14 +206,14 @@ class BaseConsensusRules(AbstractConsensusRules):
return block
@staticmethod
def create_transaction(current_owner, new_owner, tx_input, operation,
def create_transaction(owner_before, owner_after, tx_input, operation,
payload=None):
"""Create a new transaction
Refer to the documentation of ``bigchaindb.util.create_tx``
"""
return util.create_tx(current_owner, new_owner, tx_input, operation,
return util.create_tx(owner_before, owner_after, tx_input, operation,
payload)
@staticmethod
@@ -16,9 +16,14 @@ class Bigchain(object):
Create, read, sign, write transactions to the database
"""
# return if a block has been voted invalid
BLOCK_INVALID = 'invalid'
BLOCK_VALID = 'valid'
BLOCK_UNDECIDED = 'undecided'
# return if a block is valid, or tx is in valid block
BLOCK_VALID = TX_VALID = 'valid'
# return if block is undecided, or tx is in undecided block
BLOCK_UNDECIDED = TX_UNDECIDED = 'undecided'
# return if transaction is in backlog
TX_IN_BACKLOG = 'backlog'
def __init__(self, host=None, port=None, dbname=None,
public_key=None, private_key=None, keyring=[],
@@ -132,46 +137,72 @@ class Bigchain(object):
response = r.table('backlog').insert(signed_transaction, durability=durability).run(self.conn)
return response
def get_transaction(self, txid):
def get_transaction(self, txid, include_status=False):
"""Retrieve a transaction with `txid` from bigchain.
Queries the bigchain for a transaction that was already included in a block.
Queries the bigchain for a transaction, if it's in a valid or invalid
block.
Args:
txid (str): transaction id of the transaction to query
include_status (bool): also return the status of the transaction
the return value is then a tuple: (tx, status)
Returns:
A dict with the transaction details if the transaction was found.
If no transaction with that `txid` was found it returns `None`
Will add the transaction status to payload ('valid', 'undecided',
or 'backlog'). If no transaction with that `txid` was found it
returns `None`
"""
response, tx_status = None, None
validity = self.get_blocks_status_containing_tx(txid)
if validity:
# Disregard invalid blocks, and return if there are no valid or undecided blocks
validity = {_id: status for _id, status in validity.items()
if status != Bigchain.BLOCK_INVALID}
if not validity:
return None
if validity:
tx_status = self.TX_UNDECIDED
# If the transaction is in a valid or any undecided block, return it. Does not check
# if transactions in undecided blocks are consistent, but selects the valid block before
# undecided ones
for _id in validity:
target_block_id = _id
if validity[_id] == Bigchain.BLOCK_VALID:
for target_block_id in validity:
if validity[target_block_id] == Bigchain.BLOCK_VALID:
tx_status = self.TX_VALID
break
# Query the transaction in the target block and return
response = r.table('bigchain', read_mode=self.read_mode).get(target_block_id)\
.get_field('block').get_field('transactions')\
.filter(lambda tx: tx['id'] == txid).run(self.conn)
return response[0]
.filter(lambda tx: tx['id'] == txid).run(self.conn)[0]
else:
return None
# Otherwise, check the backlog
response = r.table('backlog').get(txid).run(self.conn)
if response:
tx_status = self.TX_IN_BACKLOG
if include_status:
return response, tx_status
else:
return response
def get_status(self, txid):
"""Retrieve the status of a transaction with `txid` from bigchain.
Args:
txid (str): transaction id of the transaction to query
Returns:
(string): transaction status ('valid', 'undecided',
or 'backlog'). If no transaction with that `txid` was found it
returns `None`
"""
_, status = self.get_transaction(txid, include_status=True)
return status
def search_block_election_on_index(self, value, index):
"""Retrieve block election information given a secondary index and value
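
A short usage sketch of the API added here; the transaction id is a placeholder and a reachable RethinkDB backend is assumed:

```python
from bigchaindb import Bigchain

b = Bigchain()
txid = '<some transaction id>'  # placeholder

# Default behaviour is unchanged: only the transaction (or None) comes back.
tx = b.get_transaction(txid)

# Opt in to the status and unpack the (tx, status) tuple.
tx, status = b.get_transaction(txid, include_status=True)

# Or ask for the status alone: 'valid', 'undecided', 'backlog', or None.
status = b.get_status(txid)
```
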
@@ -299,11 +330,11 @@ class Bigchain(object):
list: list of `txids` currently owned by `owner`
"""
# get all transactions in which owner is in the `new_owners` list
# get all transactions in which owner is in the `owners_after` list
response = r.table('bigchain', read_mode=self.read_mode) \
.concat_map(lambda doc: doc['block']['transactions']) \
.filter(lambda tx: tx['transaction']['conditions']
.contains(lambda c: c['new_owners']
.contains(lambda c: c['owners_after']
.contains(owner))) \
.run(self.conn)
owned = []

@@ -319,12 +350,12 @@ class Bigchain(object):
# to get a list of outputs available to spend
for condition in tx['transaction']['conditions']:
# for simple signature conditions there are no subfulfillments
# check if the owner is in the condition `new_owners`
if len(condition['new_owners']) == 1:
# check if the owner is in the condition `owners_after`
if len(condition['owners_after']) == 1:
if condition['condition']['details']['public_key'] == owner:
tx_input = {'txid': tx['id'], 'cid': condition['cid']}
else:
# for transactions with multiple `new_owners` there will be several subfulfillments nested
# for transactions with multiple `owners_after` there will be several subfulfillments nested
# in the condition. We need to iterate the subfulfillments to make sure there is a
# subfulfillment for `owner`
if util.condition_details_has_owner(condition['condition']['details'], owner):
@@ -1,6 +1,9 @@
"""Custom exceptions used in the `bigchaindb` package.
"""

class ConfigurationError(Exception):
"""Raised when there is a problem with server configuration"""

class OperationError(Exception):
"""Raised when an operation cannot go through"""
@@ -44,7 +44,7 @@ class Election:
def get_changefeed():
return ChangeFeed(table='votes', operation='insert')
return ChangeFeed(table='votes', operation=ChangeFeed.INSERT)

def create_pipeline():
@@ -20,17 +20,19 @@ class ChangeFeed(Node):
to output before the actual changefeed.
"""
INSERT = 'insert'
DELETE = 'delete'
UPDATE = 'update'
INSERT = 1
DELETE = 2
UPDATE = 4
def __init__(self, table, operation, prefeed=None):
"""Create a new RethinkDB ChangeFeed.
Args:
table (str): name of the table to listen to for changes.
operation (str): can be ChangeFeed.INSERT, ChangeFeed.DELETE, or
ChangeFeed.UPDATE.
operation (int): can be ChangeFeed.INSERT, ChangeFeed.DELETE, or
ChangeFeed.UPDATE. Combining multiple operation is possible using
the bitwise ``|`` operator
(e.g. ``ChangeFeed.INSERT | ChangeFeed.UPDATE``)
prefeed (iterable): whatever set of data you want to be published
first.
"""

@@ -51,10 +53,10 @@ class ChangeFeed(Node):
is_delete = change['new_val'] is None
is_update = not is_insert and not is_delete
if is_insert and self.operation == ChangeFeed.INSERT:
if is_insert and (self.operation & ChangeFeed.INSERT):
self.outqueue.put(change['new_val'])
elif is_delete and self.operation == ChangeFeed.DELETE:
elif is_delete and (self.operation & ChangeFeed.DELETE):
self.outqueue.put(change['old_val'])
elif is_update and self.operation == ChangeFeed.UPDATE:
elif is_update and (self.operation & ChangeFeed.UPDATE):
self.outqueue.put(change)
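
Switching the constants from strings to distinct powers of two is what makes the documented `|` combination work: each operation owns one bit, so membership can be tested with `&`. A stand-alone illustration of the idea:

```python
# Flag values mirror the new ChangeFeed constants.
INSERT, DELETE, UPDATE = 1, 2, 4

operation = INSERT | UPDATE          # listen for inserts and updates only

for name, flag in [('insert', INSERT), ('delete', DELETE), ('update', UPDATE)]:
    if operation & flag:             # non-zero exactly when that bit is set
        print('forwarding', name, 'changes')
```
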
@@ -144,7 +144,7 @@ def initial():
def get_changefeed():
"""Create and return the changefeed for the bigchain table."""
return ChangeFeed('bigchain', 'insert', prefeed=initial())
return ChangeFeed('bigchain', operation=ChangeFeed.INSERT, prefeed=initial())

def create_pipeline():
@@ -137,7 +137,7 @@ def timestamp():
# TODO: Consider remove the operation (if there are no inputs CREATE else TRANSFER)
def create_tx(current_owners, new_owners, inputs, operation, payload=None):
def create_tx(owners_before, owners_after, inputs, operation, payload=None):
"""Create a new transaction
A transaction in the bigchain is a transfer of a digital asset between two entities represented

@@ -153,8 +153,8 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
`TRANSFER` - A transfer operation allows for a transfer of the digital assets between entities.
Args:
current_owners (list): base58 encoded public key of the current owners of the asset.
new_owners (list): base58 encoded public key of the new owners of the digital asset.
owners_before (list): base58 encoded public key of the current owners of the asset.
owners_after (list): base58 encoded public key of the new owners of the digital asset.
inputs (list): id of the transaction to use as input.
operation (str): Either `CREATE` or `TRANSFER` operation.
payload (Optional[dict]): dictionary with information about asset.

@@ -173,7 +173,7 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
"version": "transaction version number",
"fulfillments": [
{
"current_owners": ["list of <pub-keys>"],
"owners_before": ["list of <pub-keys>"],
"input": {
"txid": "<sha3 hash>",
"cid": "condition index"

@@ -184,7 +184,7 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
],
"conditions": [
{
"new_owners": ["list of <pub-keys>"],
"owners_after": ["list of <pub-keys>"],
"condition": "condition to be met",
"cid": "condition index (1-to-1 mapping with fid)"
}

@@ -205,16 +205,16 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
# validate arguments (owners and inputs should be lists or None)
# The None case appears on fulfilling a hashlock
if current_owners is None:
current_owners = []
if not isinstance(current_owners, list):
current_owners = [current_owners]
if owners_before is None:
owners_before = []
if not isinstance(owners_before, list):
owners_before = [owners_before]
# The None case appears on assigning a hashlock
if new_owners is None:
new_owners = []
if not isinstance(new_owners, list):
new_owners = [new_owners]
if owners_after is None:
owners_after = []
if not isinstance(owners_after, list):
owners_after = [owners_after]
if not isinstance(inputs, list):
inputs = [inputs]
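
The owners arguments accept a single key, a list of keys, or `None` (the hashlock case), and the repeated `is None` / `isinstance` blocks above normalise all three shapes to a list. The same rule can be expressed once; this helper is only an illustration, not part of the change:

```python
def as_list(value):
    """Normalise None -> [], scalar -> [scalar], list -> unchanged (sketch)."""
    if value is None:
        return []
    return value if isinstance(value, list) else [value]


assert as_list(None) == []
assert as_list('one-public-key') == ['one-public-key']
assert as_list(['key-a', 'key-b']) == ['key-a', 'key-b']
```
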
@@ -235,7 +235,7 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
if inputs:
for fid, tx_input in enumerate(inputs):
fulfillments.append({
'current_owners': current_owners,
'owners_before': owners_before,
'input': tx_input,
'fulfillment': None,
'fid': fid

@@ -243,7 +243,7 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
# create
else:
fulfillments.append({
'current_owners': current_owners,
'owners_before': owners_before,
'input': None,
'fulfillment': None,
'fid': 0

@@ -254,14 +254,14 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
for fulfillment in fulfillments:
# threshold condition
if len(new_owners) > 1:
condition = cc.ThresholdSha256Fulfillment(threshold=len(new_owners))
for new_owner in new_owners:
condition.add_subfulfillment(cc.Ed25519Fulfillment(public_key=new_owner))
if len(owners_after) > 1:
condition = cc.ThresholdSha256Fulfillment(threshold=len(owners_after))
for owner_after in owners_after:
condition.add_subfulfillment(cc.Ed25519Fulfillment(public_key=owner_after))
# simple signature condition
elif len(new_owners) == 1:
condition = cc.Ed25519Fulfillment(public_key=new_owners[0])
elif len(owners_after) == 1:
condition = cc.Ed25519Fulfillment(public_key=owners_after[0])
# to be added later (hashlock conditions)
else:
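
For reference, the branch above maps onto the cryptoconditions calls already used in this file. A hedged, stand-alone sketch of the same decision (the keys are placeholders, so this would only run with real base58 public keys):

```python
import cryptoconditions as cc

owners_after = ['<pubkey 1>', '<pubkey 2>', '<pubkey 3>']  # placeholders

if len(owners_after) > 1:
    # Threshold condition: here every owner must sign (threshold == number of owners).
    condition = cc.ThresholdSha256Fulfillment(threshold=len(owners_after))
    for owner_after in owners_after:
        condition.add_subfulfillment(cc.Ed25519Fulfillment(public_key=owner_after))
elif len(owners_after) == 1:
    # Simple single-signature condition.
    condition = cc.Ed25519Fulfillment(public_key=owners_after[0])

details = condition.to_dict()       # stored under condition['details']
uri = condition.condition_uri       # stored under condition['uri']
```
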
@@ -269,7 +269,7 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
if condition:
conditions.append({
'new_owners': new_owners,
'owners_after': owners_after,
'condition': {
'details': condition.to_dict(),
'uri': condition.condition_uri

@@ -301,7 +301,7 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
def sign_tx(transaction, signing_keys, bigchain=None):
"""Sign a transaction
A transaction signed with the `current_owner` corresponding private key.
A transaction signed with the `owner_before` corresponding private key.
Args:
transaction (dict): transaction to sign.

@@ -317,7 +317,7 @@ def sign_tx(transaction, signing_keys, bigchain=None):
if not isinstance(signing_keys, list):
signing_keys = [signing_keys]
# create a mapping between sk and vk so that we can match the private key to the current_owners
# create a mapping between sk and vk so that we can match the private key to the owners_before
key_pairs = {}
for sk in signing_keys:
signing_key = crypto.SigningKey(sk)

@@ -368,13 +368,13 @@ def fulfill_simple_signature_fulfillment(fulfillment, parsed_fulfillment, fulfil
object: fulfilled cryptoconditions.Ed25519Fulfillment
"""
current_owner = fulfillment['current_owners'][0]
owner_before = fulfillment['owners_before'][0]
try:
parsed_fulfillment.sign(serialize(fulfillment_message), key_pairs[current_owner])
parsed_fulfillment.sign(serialize(fulfillment_message), key_pairs[owner_before])
except KeyError:
raise exceptions.KeypairMismatchException('Public key {} is not a pair to any of the private keys'
.format(current_owner))
.format(owner_before))
return parsed_fulfillment
@@ -395,17 +395,17 @@ def fulfill_threshold_signature_fulfillment(fulfillment, parsed_fulfillment, ful
parsed_fulfillment_copy = copy.deepcopy(parsed_fulfillment)
parsed_fulfillment.subconditions = []
for current_owner in fulfillment['current_owners']:
for owner_before in fulfillment['owners_before']:
try:
subfulfillment = parsed_fulfillment_copy.get_subcondition_from_vk(current_owner)[0]
subfulfillment = parsed_fulfillment_copy.get_subcondition_from_vk(owner_before)[0]
except IndexError:
raise exceptions.KeypairMismatchException(
'Public key {} cannot be found in the fulfillment'.format(current_owner))
'Public key {} cannot be found in the fulfillment'.format(owner_before))
try:
private_key = key_pairs[current_owner]
private_key = key_pairs[owner_before]
except KeyError:
raise exceptions.KeypairMismatchException(
'Public key {} is not a pair to any of the private keys'.format(current_owner))
'Public key {} is not a pair to any of the private keys'.format(owner_before))
subfulfillment.sign(serialize(fulfillment_message), private_key)
parsed_fulfillment.add_subfulfillment(subfulfillment)

@@ -413,8 +413,8 @@ def fulfill_threshold_signature_fulfillment(fulfillment, parsed_fulfillment, ful
return parsed_fulfillment
def create_and_sign_tx(private_key, current_owner, new_owner, tx_input, operation='TRANSFER', payload=None):
tx = create_tx(current_owner, new_owner, tx_input, operation, payload)
def create_and_sign_tx(private_key, owner_before, owner_after, tx_input, operation='TRANSFER', payload=None):
tx = create_tx(owner_before, owner_after, tx_input, operation, payload)
return sign_tx(tx, private_key)
@@ -432,7 +432,7 @@ def check_hash_and_signature(transaction):
def validate_fulfillments(signed_transaction):
"""Verify the signature of a transaction
A valid transaction should have been signed `current_owner` corresponding private key.
A valid transaction should have been signed `owner_before` corresponding private key.
Args:
signed_transaction (dict): a transaction with the `signature` included.

@@ -516,8 +516,8 @@ def get_input_condition(bigchain, fulfillment):
# if `CREATE` transaction
# there is no previous transaction so we need to create one on the fly
else:
current_owner = fulfillment['current_owners'][0]
condition = cc.Ed25519Fulfillment(public_key=current_owner)
owner_before = fulfillment['owners_before'][0]
condition = cc.Ed25519Fulfillment(public_key=owner_before)
return {
'condition': {

@@ -581,7 +581,7 @@ def get_hash_data(transaction):
def verify_vote_signature(block, signed_vote):
"""Verify the signature of a vote
A valid vote should have been signed `current_owner` corresponding private key.
A valid vote should have been signed `owner_before` corresponding private key.
Args:
block (dict): block under election

@@ -612,7 +612,7 @@ def transform_create(tx):
payload = None
if transaction['data'] and 'payload' in transaction['data']:
payload = transaction['data']['payload']
new_tx = create_tx(b.me, transaction['fulfillments'][0]['current_owners'], None, 'CREATE', payload=payload)
new_tx = create_tx(b.me, transaction['fulfillments'][0]['owners_before'], None, 'CREATE', payload=payload)
return new_tx
@@ -11,7 +11,9 @@ import gunicorn.app.base
from bigchaindb import util
from bigchaindb import Bigchain
from bigchaindb.web import views
from bigchaindb.web.views.info import info_views
from bigchaindb.web.views.transactions import transaction_views
from bigchaindb.monitor import Monitor

@@ -62,8 +64,8 @@ def create_app(settings):
app.config['bigchain_pool'] = util.pool(Bigchain, size=settings.get('threads', 4))
app.config['monitor'] = Monitor()
app.register_blueprint(views.info_views, url_prefix='/')
app.register_blueprint(views.basic_views, url_prefix='/api/v1')
app.register_blueprint(info_views, url_prefix='/')
app.register_blueprint(transaction_views, url_prefix='/api/v1')
return app
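
The server change is structural: one catch-all `views` module becomes two blueprints mounted at different prefixes. A minimal, self-contained Flask sketch of that pattern (the names here are illustrative, not BigchainDB's):

```python
from flask import Blueprint, Flask, jsonify

info = Blueprint('info', __name__)
transactions = Blueprint('transactions', __name__)


@info.route('/')
def home():
    return jsonify({'software': 'ExampleApp'})


@transactions.route('/transactions/<tx_id>')
def get_transaction(tx_id):
    return jsonify({'id': tx_id})


app = Flask(__name__)
app.register_blueprint(info, url_prefix='/')                 # served at /
app.register_blueprint(transactions, url_prefix='/api/v1')   # served at /api/v1/transactions/<tx_id>
```
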
bigchaindb/web/views/__init__.py (new file, 0 lines)
bigchaindb/web/views/base.py (new file, 15 lines)
@@ -0,0 +1,15 @@
from flask import jsonify

def make_error(status_code, message=None):
if status_code == 404 and message is None:
message = 'Not found'
response = jsonify({
'status': status_code,
'message': message
})
response.status_code = status_code
return response
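
A sketch of how a view can use the new helper instead of flask's `abort()`, so that 404s carry a JSON body; the route and the lookup function are hypothetical:

```python
from flask import Blueprint, jsonify

from bigchaindb.web.views.base import make_error

example_views = Blueprint('example_views', __name__)


@example_views.route('/things/<thing_id>')
def get_thing(thing_id):
    thing = lookup_thing(thing_id)   # placeholder for a real lookup
    if not thing:
        # Returns {"status": 404, "message": "Not found"} with status code 404.
        return make_error(404)
    return jsonify(**thing)
```
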
bigchaindb/web/views/info.py (new file, 26 lines)
@@ -0,0 +1,26 @@
"""This module provides the blueprint for some basic API endpoints.

For more information please refer to the documentation on ReadTheDocs:
- https://bigchaindb.readthedocs.io/en/latest/drivers-clients/http-client-server-api.html
"""
import flask
from flask import Blueprint
import bigchaindb
from bigchaindb import version

info_views = Blueprint('info_views', __name__)

@info_views.route('/')
def home():
return flask.jsonify({
'software': 'BigchainDB',
'version': version.__version__,
'public_key': bigchaindb.config['keypair']['public'],
'keyring': bigchaindb.config['keyring'],
'api_endpoint': bigchaindb.config['api_endpoint']
})
@@ -1,24 +1,23 @@
"""This module provides the blueprint for some basic API endpoints.
For more information please refer to the documentation in Apiary:
- http://docs.bigchaindb.apiary.io/
For more information please refer to the documentation on ReadTheDocs:
- https://bigchaindb.readthedocs.io/en/latest/drivers-clients/http-client-server-api.html
"""
import flask
from flask import abort, current_app, request, Blueprint
from flask import current_app, request, Blueprint
import bigchaindb
from bigchaindb import util, version
from bigchaindb import util
from bigchaindb.web.views.base import make_error

info_views = Blueprint('info_views', __name__)
basic_views = Blueprint('basic_views', __name__)
transaction_views = Blueprint('transaction_views', __name__)

# Unfortunately I cannot find a reference to this decorator.
# This answer on SO is quite useful tho:
# - http://stackoverflow.com/a/13432373/597097
@basic_views.record
@transaction_views.record
def record(state):
"""This function checks if the blueprint can be initialized
with the provided state."""

@@ -35,18 +34,8 @@ def record(state):
'a monitor instance to record system '
'performance.')
@info_views.route('/')
def home():
return flask.jsonify({
'software': 'BigchainDB',
'version': version.__version__,
'public_key': bigchaindb.config['keypair']['public'],
'keyring': bigchaindb.config['keyring'],
'api_endpoint': bigchaindb.config['api_endpoint']
})
@basic_views.route('/transactions/<tx_id>')
@transaction_views.route('/transactions/<tx_id>')
def get_transaction(tx_id):
"""API endpoint to get details about a transaction.
@@ -63,12 +52,12 @@ def get_transaction(tx_id):
tx = bigchain.get_transaction(tx_id)
if not tx:
abort(404)
return make_error(404)
return flask.jsonify(**tx)
@basic_views.route('/transactions/', methods=['POST'])
@transaction_views.route('/transactions/', methods=['POST'])
def create_transaction():
"""API endpoint to push transactions to the Federation.

@@ -78,8 +67,6 @@ def create_transaction():
pool = current_app.config['bigchain_pool']
monitor = current_app.config['monitor']
val = {}
# `force` will try to format the body of the POST request even if the `content-type` header is not
# set to `application/json`
tx = request.get_json(force=True)

@@ -89,11 +76,33 @@ def create_transaction():
tx = util.transform_create(tx)
tx = bigchain.consensus.sign_transaction(tx, private_key=bigchain.me_private)
if not bigchain.consensus.validate_fulfillments(tx):
val['error'] = 'Invalid transaction fulfillments'
if not bigchain.is_valid_transaction(tx):
return make_error(400, 'Invalid transaction')
with monitor.timer('write_transaction', rate=bigchaindb.config['statsd']['rate']):
val = bigchain.write_transaction(tx)
bigchain.write_transaction(tx)
return flask.jsonify(**tx)

@transaction_views.route('/transactions/<tx_id>/status')
def get_transaction_status(tx_id):
"""API endpoint to get details about the status of a transaction.
Args:
tx_id (str): the id of the transaction.
Return:
A JSON string containing the status of the transaction.
Possible values: "valid", "invalid", "undecided", "backlog", None
"""
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
status = bigchain.get_status(tx_id)
if not status:
return make_error(404)
return flask.jsonify({'status': status})
@@ -13,17 +13,21 @@ There are other configuration settings related to the web server (serving the HT

The HTTP API currently exposes two endpoints, one to get information about a specific transaction, and one to push a new transaction to the BigchainDB cluster.

.. http:get:: /transactions/(tx_id)
.. http:get:: /transactions/{tx_id}

The transaction with the transaction ID `tx_id`.
Get the transaction with the ID ``tx_id``.

This endpoint returns only a transaction from a ``VALID`` or ``UNDECIDED`` block on ``bigchain``, if exists.

:param tx_id: transaction ID
:type tx_id: hex string

**Example request**:

.. sourcecode:: http

GET /transactions/96480ce68912aa39a54766ac16334a835fbf777039670352ff967bf6d65bf4f7 HTTP/1.1
GET /transactions/7ad5a4b83bc8c70c4fd7420ff3c60693ab8e6d0e3124378ca69ed5acd2578792 HTTP/1.1
Host: example.com
TODO: Other headers?

**Example response**:
@@ -31,30 +35,87 @@ The HTTP API currently exposes two endpoints, one to get information about a spe

HTTP/1.1 200 OK
Content-Type: application/json
TODO: Other headers?

{'id': '96480ce68912aa39a54766ac16334a835fbf777039670352ff967bf6d65bf4f7',
'transaction': {'conditions': [{'cid': 0,
'condition': {'details': {'bitmask': 32,
'public_key': 'FoWUUY6kK7QhgCsgVrV2vpDWfW43mq5ewb16Uh7FBbSF',
'signature': None,
'type': 'fulfillment',
'type_id': 4},
'uri': 'cc:4:20:2-2pA2qKr2i-GM6REdqJCLEL_CEWpy-5iQky7YgRZTA:96'},
'new_owners': ['FoWUUY6kK7QhgCsgVrV2vpDWfW43mq5ewb16Uh7FBbSF']}],
'data': {'payload': None, 'uuid': 'f14dc5a6-510e-4307-89c6-aec42af8a1ae'},
'fulfillments': [{'current_owners': ['Ftat68WVLsPxVFLz2Rh2Sbwrrt51uFE3UpjkxY73vGKZ'],
'fid': 0,
'fulfillment': 'cf:4:3TqMI1ZFolraqHWADT6nIvUUt4HOwqdr0_-yj5Cglbg1V5qQV2CF2Yup1l6fQH2uhLGGFo9uHhZ6HNv9lssiD0ZaG88Bg_MTkz6xg2SW2Cw_YgpM-CyESVT404g54ZsK',
'input': None}],
'operation': 'CREATE',
'timestamp': '1468494923'},
'version': 1}
{
"id":"7ad5a4b83bc8c70c4fd7420ff3c60693ab8e6d0e3124378ca69ed5acd2578792",
"transaction":{
"conditions":[
{
"cid":0,
"condition":{
"details":{
"bitmask":32,
"public_key":"CwA8s2QYQBfNz4WvjEwmJi83zYr7JhxRhidx6uZ5KBVd",
"signature":null,
"type":"fulfillment",
"type_id":4
},
"uri":"cc:4:20:sVA_3p8gvl8yRFNTomqm6MaavKewka6dGYcFAuPrRXQ:96"
},
"owners_after":[
"CwA8s2QYQBfNz4WvjEwmJi83zYr7JhxRhidx6uZ5KBVd"
]
}
],
"data":{
"payload":null,
"uuid":"a9999d69-6cde-4b80-819d-ed57f6abe257"
},
"fulfillments":[
{
"owners_before":[
"JEAkEJqLbbgDRAtMm8YAjGp759Aq2qTn9eaEHUj2XePE"
],
"fid":0,
"fulfillment":"cf:4:__Y_Um6H73iwPe6ejWXEw930SQhqVGjtAHTXilPp0P01vE_Cx6zs3GJVoO1jhPL18C94PIVkLTGMUB2aKC9qsbIb3w8ejpOf0_I3OCuTbPdkd6r2lKMeVftMyMxkeWoM",
"input":{
"cid":0,
"txid":"598ce4e9a29837a1c6fc337ee4a41b61c20ad25d01646754c825b1116abd8761"
}
}
],
"operation":"TRANSFER",
"timestamp":"1471423869",
"version":1
}
}

:statuscode 200: A transaction with that ID was found.
:statuscode 404: A transaction with that ID was not found.

.. http:get:: /transactions/{tx_id}/status

Get the status of a transaction with the ID ``tx_id``.

This endpoint returns the status of a transaction if exists.

Possible values are ``valid``, ``invalid``, ``undecided`` or ``backlog``.

:param tx_id: transaction ID
:type tx_id: hex string

**Example request**:

.. sourcecode:: http

GET /transactions/7ad5a4b83bc8c70c4fd7420ff3c60693ab8e6d0e3124378ca69ed5acd2578792/status HTTP/1.1
Host: example.com

**Example response**:

.. sourcecode:: http

HTTP/1.1 200 OK
Content-Type: application/json

{
"status": "valid"
}

:statuscode 200: A transaction with that ID was found and the status is returned.
:statuscode 404: A transaction with that ID was not found.
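
For illustration, querying the new status endpoint from Python could look like the following; the host, prefix, and transaction id are placeholders taken from the example above:

```python
import requests

BASE_URL = 'http://example.com/api/v1'   # placeholder host and prefix
tx_id = '7ad5a4b83bc8c70c4fd7420ff3c60693ab8e6d0e3124378ca69ed5acd2578792'

resp = requests.get('{}/transactions/{}/status'.format(BASE_URL, tx_id))
if resp.status_code == 200:
    print(resp.json()['status'])    # 'valid', 'invalid', 'undecided' or 'backlog'
else:
    print('transaction not found')  # 404
```
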
.. http:post:: /transactions/

Push a new transaction.
@@ -66,9 +127,50 @@ The HTTP API currently exposes two endpoints, one to get information about a spe

POST /transactions/ HTTP/1.1
Host: example.com
Content-Type: application/json
TODO: Other headers?

(TODO) Insert example request body here
{
"id":"7ad5a4b83bc8c70c4fd7420ff3c60693ab8e6d0e3124378ca69ed5acd2578792",
"transaction":{
"conditions":[
{
"cid":0,
"condition":{
"details":{
"bitmask":32,
"public_key":"CwA8s2QYQBfNz4WvjEwmJi83zYr7JhxRhidx6uZ5KBVd",
"signature":null,
"type":"fulfillment",
"type_id":4
},
"uri":"cc:4:20:sVA_3p8gvl8yRFNTomqm6MaavKewka6dGYcFAuPrRXQ:96"
},
"owners_after":[
"CwA8s2QYQBfNz4WvjEwmJi83zYr7JhxRhidx6uZ5KBVd"
]
}
],
"data":{
"payload":null,
"uuid":"a9999d69-6cde-4b80-819d-ed57f6abe257"
},
"fulfillments":[
{
"owners_before":[
"JEAkEJqLbbgDRAtMm8YAjGp759Aq2qTn9eaEHUj2XePE"
],
"fid":0,
"fulfillment":"cf:4:__Y_Um6H73iwPe6ejWXEw930SQhqVGjtAHTXilPp0P01vE_Cx6zs3GJVoO1jhPL18C94PIVkLTGMUB2aKC9qsbIb3w8ejpOf0_I3OCuTbPdkd6r2lKMeVftMyMxkeWoM",
"input":{
"cid":0,
"txid":"598ce4e9a29837a1c6fc337ee4a41b61c20ad25d01646754c825b1116abd8761"
}
}
],
"operation":"TRANSFER",
"timestamp":"1471423869",
"version":1
}
}

**Example response**:
@@ -76,10 +178,78 @@ The HTTP API currently exposes two endpoints, one to get information about a spe

HTTP/1.1 201 Created
Content-Type: application/json
TODO: Other headers?

(TODO) Insert example response body here
{
"assignee":"4XYfCbabAWVUCbjTmRTFEu2sc3dFEdkse4r6X498B1s8",
"id":"7ad5a4b83bc8c70c4fd7420ff3c60693ab8e6d0e3124378ca69ed5acd2578792",
"transaction":{
"conditions":[
{
"cid":0,
"condition":{
"details":{
"bitmask":32,
"public_key":"CwA8s2QYQBfNz4WvjEwmJi83zYr7JhxRhidx6uZ5KBVd",
"signature":null,
"type":"fulfillment",
"type_id":4
},
"uri":"cc:4:20:sVA_3p8gvl8yRFNTomqm6MaavKewka6dGYcFAuPrRXQ:96"
},
"owners_after":[
"CwA8s2QYQBfNz4WvjEwmJi83zYr7JhxRhidx6uZ5KBVd"
]
}
],
"data":{
"payload":null,
"uuid":"a9999d69-6cde-4b80-819d-ed57f6abe257"
},
"fulfillments":[
{
"owners_before":[
"JEAkEJqLbbgDRAtMm8YAjGp759Aq2qTn9eaEHUj2XePE"
],
"fid":0,
"fulfillment":"cf:4:__Y_Um6H73iwPe6ejWXEw930SQhqVGjtAHTXilPp0P01vE_Cx6zs3GJVoO1jhPL18C94PIVkLTGMUB2aKC9qsbIb3w8ejpOf0_I3OCuTbPdkd6r2lKMeVftMyMxkeWoM",
"input":{
"cid":0,
"txid":"598ce4e9a29837a1c6fc337ee4a41b61c20ad25d01646754c825b1116abd8761"
}
}
],
"operation":"TRANSFER",
"timestamp":"1471423869",
"version":1
}
}

:statuscode 201: A new transaction was created.
:statuscode 400: The transaction was invalid and not created.

(TODO) What's the response status code if the POST fails?
**Disclaimer**

``CREATE`` transactions are treated differently from ``TRANSFER`` assets.
The reason is that a ``CREATE`` transaction needs to be signed by a federation node and not by the client.

The following python snippet in a client can be used to generate ``CREATE`` transactions before they can be pushed to the API server:

.. code-block:: python

from bigchaindb import util
tx = util.create_and_sign_tx(my_privkey, my_pubkey, my_pubkey, None, 'CREATE')

When POSTing ``tx`` to the API, the ``CREATE`` transaction will be signed by a federation node.

A ``TRANSFER`` transaction, that takes an existing input transaction to change ownership can be generated in multiple ways:

.. code-block:: python

from bigchaindb import util, Bigchain
tx = util.create_and_sign_tx(my_privkey, my_pubkey, other_pubkey, input_tx, 'TRANSFER')
# or
b = Bigchain()
tx_unsigned = b.create_transaction(my_pubkey, other_pubkey, input_tx, 'TRANSFER')
tx = b.sign_transaction(tx_unsigned, my_privkey)

More information on generating transactions can be found in the `Python server API examples <python-server-api-examples.html>`_
@@ -19,9 +19,9 @@ Out[5]:
'type': 'fulfillment',
'type_id': 4},
'uri': 'cc:4:20:eoUROTxUArrpXGVBrvrYqkcEGG8lB_leliNvSvSddDg:96'},
'new_owners': ['9FGRd2jLxmwtRkwsWTpEoqy1rZpg6ycuT7NwmCR4QVk3']}],
'owners_after': ['9FGRd2jLxmwtRkwsWTpEoqy1rZpg6ycuT7NwmCR4QVk3']}],
'data': {'payload': None, 'uuid': 'b4884e37-3c8e-4cc2-bfc8-68a05ed090ad'},
'fulfillments': [{'current_owners': ['3NsvDXiiuf2BRPnqfRuBM9yHNjsH4L33gcZ4rh4GMY2J'],
'fulfillments': [{'owners_before': ['3NsvDXiiuf2BRPnqfRuBM9yHNjsH4L33gcZ4rh4GMY2J'],
'fid': 0,
'fulfillment': 'cf:4:I1IkuhCSf_hGqJ-JKHTQIO1g4apbQuaZXNMEX4isyxd7azkJreyGKyaMLs6Xk9kxQClwz1nQiKM6OMRk7fdusN0373szGbq-PppnsjY6ilbx1JmP-IH7hdjjwjjx9coM',
'input': None}],

@@ -40,9 +40,9 @@ Out[6]:
'type': 'fulfillment',
'type_id': 4},
'uri': 'cc:4:20:akjKWxLO2hbe6RVva_FsWNDJmnUKYjQ57HIhUQbwb2Q:96'},
'new_owners': ['89tbMBospYsTNDgpqFS4RLszNsxuE4JEumNuY3WTAnT5']}],
'owners_after': ['89tbMBospYsTNDgpqFS4RLszNsxuE4JEumNuY3WTAnT5']}],
'data': {'payload': None, 'uuid': 'a640a9d6-9384-4e9c-a130-e899ea6416aa'},
'fulfillments': [{'current_owners': ['9FGRd2jLxmwtRkwsWTpEoqy1rZpg6ycuT7NwmCR4QVk3'],
'fulfillments': [{'owners_before': ['9FGRd2jLxmwtRkwsWTpEoqy1rZpg6ycuT7NwmCR4QVk3'],
'fid': 0,
'fulfillment': 'cf:4:eoUROTxUArrpXGVBrvrYqkcEGG8lB_leliNvSvSddDgVmY6O7YTER04mWjAVd6m0qOv5R44Cxpv_65OtLnNUD-HEgD-9z3ys4GvPf7BZF5dKSbAs_3a8yCQM0bkCcqkB',
'input': {'cid': 0,
@@ -87,7 +87,7 @@ tx_retrieved
},
"uri":"cc:4:20:oqXTWvR3afHHX8OaOO84kZxS6nH4GEBXD4Vw8Mc5iBo:96"
},
"new_owners":[
"owners_after":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs"
]
}

@@ -100,7 +100,7 @@ tx_retrieved
},
"fulfillments":[
{
"current_owners":[
"owners_before":[
"3LQ5dTiddXymDhNzETB1rEkp4mA7fEV1Qeiu5ghHiJm9"
],
"fid":0,

@@ -182,7 +182,7 @@ tx_transfer_retrieved
},
"uri":"cc:4:20:DIfyalZvV_9ukoO01mxmK3nxsfAWSKYYF33XDYkbY4E:96"
},
"new_owners":[
"owners_after":[
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
]
}

@@ -190,7 +190,7 @@ tx_transfer_retrieved
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs"
],
"fid":0,

@@ -231,7 +231,7 @@ DoubleSpend: input `{'cid': 0, 'txid': '933cd83a419d2735822a2154c84176a2f419cbd4

## Multiple Owners

To create a new digital asset with _multiple_ owners, one can simply provide a list of `new_owners`:
To create a new digital asset with _multiple_ owners, one can simply provide a list of `owners_after`:

```python
# Create a new asset and assign it to multiple owners
@@ -282,7 +282,7 @@ tx_multisig_retrieved
},
"uri":"cc:2:29:DpflJzUSlnTUBx8lD8QUolOA-M9nQnrGwvWSk7f3REc:206"
},
"new_owners":[
"owners_after":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs",
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
]

@@ -291,7 +291,7 @@ tx_multisig_retrieved
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"3LQ5dTiddXymDhNzETB1rEkp4mA7fEV1Qeiu5ghHiJm9"
],
"fid":0,

@@ -306,7 +306,7 @@ tx_multisig_retrieved
}
```

The asset can be transfered as soon as each of the `new_owners` signs the transaction.
The asset can be transfered as soon as each of the `owners_after` signs the transaction.

To do so, simply provide a list of all private keys to the signing routine:

@@ -348,7 +348,7 @@ tx_multisig_transfer_retrieved
},
"uri":"cc:4:20:cAq6JQJXtwlxURqrksiyqLThB9zh08ZxSPLTDSaReYE:96"
},
"new_owners":[
"owners_after":[
"8YN9fALMj9CkeCcmTiM2kxwurpkMzHg9RkwSLJKMasvG"
]
}

@@ -356,7 +356,7 @@ tx_multisig_transfer_retrieved
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs",
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
],
@@ -427,7 +427,7 @@ tx_mimo_retrieved
},
"uri":"cc:4:20:2AXg2JJ7mQ8o2Q9-hafP-XmFh3YR7I2_Sz55AubfxIc:96"
},
"new_owners":[
"owners_after":[
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
]
},

@@ -443,7 +443,7 @@ tx_mimo_retrieved
},
"uri":"cc:4:20:2AXg2JJ7mQ8o2Q9-hafP-XmFh3YR7I2_Sz55AubfxIc:96"
},
"new_owners":[
"owners_after":[
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
]
},

@@ -459,7 +459,7 @@ tx_mimo_retrieved
},
"uri":"cc:4:20:2AXg2JJ7mQ8o2Q9-hafP-XmFh3YR7I2_Sz55AubfxIc:96"
},
"new_owners":[
"owners_after":[
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
]
}

@@ -467,7 +467,7 @@ tx_mimo_retrieved
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs"
],
"fid":0,

@@ -478,7 +478,7 @@ tx_mimo_retrieved
}
},
{
"current_owners":[
"owners_before":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs"
],
"fid":1,

@@ -489,7 +489,7 @@ tx_mimo_retrieved
}
},
{
"current_owners":[
"owners_before":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs"
],
"fid":2,
@@ -529,11 +529,11 @@ Setting up a generic threshold condition is a bit more elaborate than regular tr

The basic workflow for creating a more complex cryptocondition is the following:

1. Create a transaction template that include the public key of all (nested) parties as `new_owners`
1. Create a transaction template that include the public key of all (nested) parties as `owners_after`
2. Set up the threshold condition using the [cryptocondition library](https://github.com/bigchaindb/cryptoconditions)
3. Update the condition and hash in the transaction template

We'll illustrate this by a threshold condition where 2 out of 3 `new_owners` need to sign the transaction:
We'll illustrate this by a threshold condition where 2 out of 3 `owners_after` need to sign the transaction:

```python
import copy

@@ -620,7 +620,7 @@ tx_threshold_retrieved
},
"uri":"cc:2:29:FoElId4TE5TU2loonT7sayXhxwcmaJVoCeIduh56Dxw:246"
},
"new_owners":[
"owners_after":[
"8NaGq26YMcEvj8Sc5MnqspKzFTQd1eZBAuuPDw4ERHpz",
"ALE9Agojob28D1fHWCxFXJwpqrYPkcsUs26YksBVj27z",
"Cx4jWSGci7fw6z5QyeApCijbwnMpyuhp4C1kzuFc3XrM"

@@ -630,7 +630,7 @@ tx_threshold_retrieved
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
],
"fid":0,

@@ -652,7 +652,7 @@ The transaction can now be transfered by fulfilling the threshold condition.

The fulfillment involves:

1. Create a transaction template that include the public key of all (nested) parties as `current_owners`
1. Create a transaction template that include the public key of all (nested) parties as `owners_before`
2. Parsing the threshold condition into a fulfillment using the [cryptocondition library](https://github.com/bigchaindb/cryptoconditions)
3. Signing all necessary subfulfillments and updating the fulfillment field in the transaction
@@ -721,7 +721,7 @@ threshold_tx_transfer
},
"uri":"cc:4:20:xDz3NhRG-3eVzIB9sgnd99LKjOyDF-KlxWuf1TgNT0s:96"
},
"new_owners":[
"owners_after":[
"ED2pyPfsbNRTHkdMnaFkAwCSpZWRmbaM1h8fYzgRRMmc"
]
}

@@ -729,7 +729,7 @@ threshold_tx_transfer
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"8NaGq26YMcEvj8Sc5MnqspKzFTQd1eZBAuuPDw4ERHpz",
"ALE9Agojob28D1fHWCxFXJwpqrYPkcsUs26YksBVj27z",
"Cx4jWSGci7fw6z5QyeApCijbwnMpyuhp4C1kzuFc3XrM"
@@ -758,10 +758,10 @@ Under the hood, fulfilling a hash-lock condition amounts to finding a string (a

One possible use case is to distribute preimages as "digital vouchers." The first person to redeem a voucher will get the associated asset.

A federation node can create an asset with a hash-lock condition and no `new_owners`. Anyone who can fullfill the hash-lock condition can transfer the asset to themselves.
A federation node can create an asset with a hash-lock condition and no `owners_after`. Anyone who can fullfill the hash-lock condition can transfer the asset to themselves.

```python
# Create a hash-locked asset without any new_owners
# Create a hash-locked asset without any owners_after
hashlock_tx = b.create_transaction(b.me, None, None, 'CREATE')

# Define a secret that will be hashed - fulfillments need to guess the secret

@@ -774,13 +774,13 @@ hashlock_tx['transaction']['conditions'].append({
'uri': first_tx_condition.condition.serialize_uri()
},
'cid': 0,
'new_owners': None
'owners_after': None
})

# Conditions have been updated, so the hash needs updating
hashlock_tx['id'] = util.get_hash_data(hashlock_tx)

# The asset needs to be signed by the current_owner
# The asset needs to be signed by the owner_before
hashlock_tx_signed = b.sign_transaction(hashlock_tx, b.me_private)

# Some validations

@@ -800,13 +800,13 @@ hashlock_tx_signed
"condition":{
"uri":"cc:0:3:nsW2IiYgk9EUtsg4uBe3pBnOgRoAEX2IIsPgjqZz47U:17"
},
"new_owners":None
"owners_after":None
}
],
"data":None,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"FmLm6MxCABc8TsiZKdeYaZKo5yZWMM6Vty7Q1B6EgcP2"
],
"fid":0,

@@ -864,7 +864,7 @@ hashlock_fulfill_tx
},
"uri":"cc:4:20:y9884Md2YI_wdnGSTJGhwvFaNsKLe8sqwimqk-2JLSI:96"
},
"new_owners":[
"owners_after":[
"EiqCKxnBCmmNb83qyGch48tULK9RLaEt4xFA43UVCVDb"
]
}

@@ -872,7 +872,7 @@ hashlock_fulfill_tx
"data":None,
"fulfillments":[
{
"current_owners":[],
"owners_before":[],
"fid":0,
"fulfillment":"cf:0:bXVjaCBzZWNyZXQhIHdvdyE",
"input":{
@@ -901,7 +901,7 @@ __Note__: The timeout conditions are BigchainDB-specific and not (yet) supported

__Caveat__: The times between nodes in a BigchainDB federation may (and will) differ slightly. In this case, the majority of the nodes will decide.

```python
# Create a timeout asset without any new_owners
# Create a timeout asset without any owners_after
tx_timeout = b.create_transaction(b.me, None, None, 'CREATE')

# Set expiry time - the asset needs to be transfered before expiration

@@ -916,13 +916,13 @@ tx_timeout['transaction']['conditions'].append({
'uri': condition_timeout.condition.serialize_uri()
},
'cid': 0,
'new_owners': None
'owners_after': None
})

# Conditions have been updated, so the hash needs updating
tx_timeout['id'] = util.get_hash_data(tx_timeout)

# The asset needs to be signed by the current_owner
# The asset needs to be signed by the owner_before
tx_timeout_signed = b.sign_transaction(tx_timeout, b.me_private)

# Some validations

@@ -948,13 +948,13 @@ tx_timeout_signed
},
"uri":"cc:63:9:sceU_NZc3cAjAvaR1TVmgj7am5y8hJEBoqLm-tbqGbQ:17"
},
"new_owners":null
"owners_after":null
}
],
"data":null,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"FmLm6MxCABc8TsiZKdeYaZKo5yZWMM6Vty7Q1B6EgcP2"
],
"fid":0,
@@ -1086,7 +1086,7 @@ tx_escrow['transaction']['conditions'][0]['condition'] = {

# Conditions have been updated, so the hash needs updating
tx_escrow['id'] = util.get_hash_data(tx_escrow)

# The asset needs to be signed by the current_owner
# The asset needs to be signed by the owner_before
tx_escrow_signed = b.sign_transaction(tx_escrow, testuser2_priv)

# Some validations

@@ -1171,7 +1171,7 @@ tx_escrow_signed
},
"uri":"cc:2:29:sg08ERtppQrGxot7mu7XMdNkZTc29xCbWE1r8DgxuL8:181"
},
"new_owners":[
"owners_after":[
"BwuhqQX8FPsmqYiRV2CSZYWWsSWgSSQQFHjqxKEuqkPs",
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
]

@@ -1180,7 +1180,7 @@ tx_escrow_signed
"data":null,
"fulfillments":[
{
"current_owners":[
"owners_before":[
"qv8DvdNG5nZHWCP5aPSqgqxAvaPJpQj19abRvFCntor"
],
"fid":0,
@@ -9,3 +9,4 @@ BigchainDB Nodes

node-components
node-requirements
setup-run-node
docs/source/nodes/setup-run-node.md (new file, 210 lines)
@ -0,0 +1,210 @@
|
||||
# Set Up and Run a Cluster Node
|
||||
|
||||
If you want to set up a BigchainDB node that's intended to be one of the nodes in a BigchainDB cluster (i.e. where each node is operated by a different member of a federation), then this page is for you, otherwise see [elsewhere](../introduction.html).
|
||||
|
||||
This is a page of general guidelines for setting up a node. It says nothing about how to upgrade software, storage, processing, etc. or other details of node management. That will be added in the future, in [the section on production node setup & management](../prod-node-setup-mgmt/index.html). Once that section is more complete, this page will probably be deleted.
|
||||
|
||||
|
||||
## Get a Server
|
||||
|
||||
The first step is to get a server (or equivalent) which meets [the requirements for a BigchainDB node](node-requirements.html).
|
||||
|
||||
|
||||
## Secure Your Server
|
||||
|
||||
The steps that you must take to secure your server depend on your server OS and where your server is physically located. There are many articles and books about how to secure a server. Here we just cover special considerations when securing a BigchainDB node.
|
||||
|
||||
There are some [notes on BigchainDB-specific firewall setup](../appendices/firewall-notes.html) in the Appendices.
|
||||
|
||||
|
||||
## Sync Your System Clock
|
||||
|
||||
A BigchainDB node uses its system clock to generate timestamps for blocks and votes, so that clock should be kept in sync with some standard clock(s). The standard way to do that is to run an NTP daemon (Network Time Protocol daemon) on the node. (You could also use tlsdate, which uses TLS timestamps rather than NTP, but don't: it's not very accurate and it will break with TLS 1.3, which removes the timestamp.)
|
||||
|
||||
NTP is a standard protocol. There are many NTP daemons implementing it. We don't recommend a particular one. On the contrary, we recommend that different nodes in a federation run different NTP daemons, so that a problem with one daemon won't affect all nodes.
|
||||
|
||||
Please see the [notes on NTP daemon setup](../appendices/ntp-notes.html) in the Appendices.

## Set Up Storage for RethinkDB Data

Below are some things to consider when setting up storage for the RethinkDB data. The Appendices have a [section with concrete examples](../appendices/example-rethinkdb-storage-setups.html).

We suggest you set up a separate storage "device" (partition, RAID array, or logical volume) to store the RethinkDB data. Here are some questions to ask:

* How easy will it be to add storage in the future? Will I have to shut down my server?
* How big can the storage get? (Remember that [RAID](https://en.wikipedia.org/wiki/RAID) can be used to make several physical drives look like one.)
* How fast can it read & write data? How many input/output operations per second (IOPS)?
* How does IOPS scale as more physical hard drives are added?
* What's the latency?
* What's the reliability? Is there replication?
* What's in the Service Level Agreement (SLA), if applicable?
* What's the cost?

There are many options and tradeoffs. Don't forget to look into Amazon Elastic Block Store (EBS) and Amazon Elastic File System (EFS), or their equivalents from other providers.

**Storage Notes Specific to RethinkDB**

* The RethinkDB storage engine has a number of SSD optimizations, so you _can_ benefit from using SSDs. ([source](https://www.rethinkdb.com/docs/architecture/))

* If you want a RethinkDB cluster to store an amount of data D, with a replication factor of R (on every table), and the cluster has N nodes, then each node will need to be able to store R×D/N data. (For example, with D = 10 TB, R = 3 and N = 5, each node needs to be able to store 3×10/5 = 6 TB.)

* RethinkDB tables can have [at most 64 shards](https://rethinkdb.com/limitations/). For example, if you have only one table and more than 64 nodes, some nodes won't have the primary of any shard, i.e. they will have replicas only. In other words, once you pass 64 nodes, adding more nodes won't provide more storage space for new data. If the biggest single-node storage available is d, then the most you can store in a RethinkDB cluster is < 64×d: accomplished by putting one primary shard in each of 64 nodes, with all replica shards on other nodes. (This is assuming one table. If there are T tables, then the most you can store is < 64×d×T.)

* When you set up storage for your RethinkDB data, you may have to select a filesystem. (Sometimes, the filesystem is already decided by the choice of storage.) We recommend using a filesystem that supports direct I/O (Input/Output). Many compressed or encrypted file systems don't support direct I/O. The ext4 filesystem supports direct I/O (but be careful: if you enable the data=journal mode, then direct I/O support will be disabled; the default is data=ordered). If your chosen filesystem supports direct I/O and you're using Linux, then you don't need to do anything to request or enable direct I/O. RethinkDB does that.

<p style="background-color: lightgrey;">What is direct I/O? It allows RethinkDB to write directly to the storage device (or use its own in-memory caching mechanisms), rather than relying on the operating system's file read and write caching mechanisms. (If you're using Linux, a write-to-file normally writes to the in-memory Page Cache first; only later does that Page Cache get flushed to disk. The Page Cache is also used when reading files.)</p>

* RethinkDB stores its data in a specific directory. You can tell RethinkDB _which_ directory using the RethinkDB config file, as explained below. In this documentation, we assume the directory is `/data`. If you set up a separate device (partition, RAID array, or logical volume) to store the RethinkDB data, then mount that device on `/data`; a sketch of one way to do that follows these notes.
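
As a rough sketch, formatting a fresh device with ext4 and mounting it on `/data` could look like the following. The device name `/dev/xvdp` is only an assumption for illustration; substitute your actual device, and note that `mkfs` erases whatever is on it:
```text
sudo mkfs -t ext4 /dev/xvdp   # format the device with ext4 (destroys existing data)
sudo mkdir -p /data           # create the mount point
sudo mount /dev/xvdp /data    # mount it; add an /etc/fstab entry to make the mount persistent
```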

## Install RethinkDB Server

If you don't already have RethinkDB Server installed, you must install it. The RethinkDB documentation has instructions for [how to install RethinkDB Server on a variety of operating systems](http://rethinkdb.com/docs/install/).


## Configure RethinkDB Server

Create a RethinkDB configuration file (text file) named `instance1.conf` with the following contents (explained below):
```text
directory=/data
bind=all
direct-io
# Replace node?_hostname with actual node hostnames below, e.g. rdb.examples.com
join=node0_hostname:29015
join=node1_hostname:29015
join=node2_hostname:29015
# continue until there's a join= line for each node in the federation
```

* `directory=/data` tells the RethinkDB node to store its share of the database data in `/data`.
* `bind=all` binds RethinkDB to all local network interfaces (e.g. loopback, Ethernet, wireless, whatever is available), so it can communicate with the outside world. (The default is to bind only to local interfaces.)
* `direct-io` tells RethinkDB to use direct I/O (explained earlier). Only include this line if your file system supports direct I/O.
* `join=hostname:29015` lines: A cluster node needs to find out the hostnames of all the other nodes somehow. You _could_ designate one node to be the one that every other node asks, and put that node's hostname in the config file, but that wouldn't be very decentralized. Instead, we include _every_ node in the list of nodes-to-ask.

If you're curious about the RethinkDB config file, there's [a RethinkDB documentation page about it](https://www.rethinkdb.com/docs/config-file/). The [explanations of the RethinkDB command-line options](https://rethinkdb.com/docs/cli-options/) are another useful reference.

See the [RethinkDB documentation on securing your cluster](https://rethinkdb.com/docs/security/).


## Install Python 3.4+

If you don't already have it, then you should [install Python 3.4+](https://www.python.org/downloads/).

If you're testing or developing BigchainDB on a stand-alone node, then you should probably create a Python 3.4+ virtual environment and activate it (e.g. using virtualenv or conda). Later we will install several Python packages and you probably only want those installed in the virtual environment.
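
For example, a minimal sketch using Python's built-in `venv` module (the environment name `bdb-env` is just an illustration; `virtualenv` or `conda` work similarly):
```text
python3 -m venv bdb-env       # create a virtual environment in ./bdb-env
source bdb-env/bin/activate   # activate it; run `deactivate` to leave it again
```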

## Install BigchainDB Server

BigchainDB Server has some OS-level dependencies that must be installed.

On Ubuntu 14.04, we found that the following was enough:
```text
sudo apt-get update
sudo apt-get install g++ python3-dev
```

On Fedora 23, we found that the following was enough (tested in February 2016):
```text
sudo dnf update
sudo dnf install gcc-c++ redhat-rpm-config python3-devel
```

(If you're using a version of Fedora before version 22, you may have to use `yum` instead of `dnf`.)

With OS-level dependencies installed, you can install BigchainDB Server with `pip` or from source.


### How to Install BigchainDB with pip

BigchainDB (i.e. both the Server and the officially-supported drivers) is distributed as a Python package on PyPI, so you can install it using `pip`. First, make sure you have an up-to-date Python 3.4+ version of `pip` installed:
```text
pip -V
```

If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.4, then you must install a `pip` version associated with Python 3.4+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).

On Ubuntu 14.04, we found that this works:
```text
sudo apt-get install python3-pip
```

That should install a Python 3 version of `pip` named `pip3`. If that didn't work, then another way to get `pip3` is to do `sudo apt-get install python3-setuptools` followed by `sudo easy_install3 pip`.

You can upgrade `pip` (`pip3`) and `setuptools` to the latest versions using:
```text
pip3 install --upgrade pip setuptools
pip3 -V
```

Now you can install BigchainDB Server (and officially-supported BigchainDB drivers) using:
```text
pip3 install bigchaindb
```

(If you're not in a virtualenv and you want to install bigchaindb system-wide, then put `sudo` in front.)

Note: You can use `pip3` to upgrade the `bigchaindb` package to the latest version using `pip3 install --upgrade bigchaindb`.


### How to Install BigchainDB from Source

If you want to install BigchainDB from source because you want to use the very latest bleeding-edge code, clone the public repository and install from the clone:
```text
git clone git@github.com:bigchaindb/bigchaindb.git
cd bigchaindb
python setup.py install
```


## Configure BigchainDB Server

Start by creating a default BigchainDB config file:
```text
bigchaindb -y configure
```

(Documentation for the `bigchaindb` command is in the section on [the BigchainDB Command Line Interface (CLI)](bigchaindb-cli.html).)

Edit the created config file (a sketch of the relevant fields follows this list):

* Open `$HOME/.bigchaindb` (the created config file) in your text editor.
* Change `"server": {"bind": "localhost:9984", ... }` to `"server": {"bind": "0.0.0.0:9984", ... }`. This makes it so traffic can come from any IP address to port 9984 (the HTTP Client-Server API port).
* Change `"api_endpoint": "http://localhost:9984/api/v1"` to `"api_endpoint": "http://your_api_hostname:9984/api/v1"`
* Change `"keyring": []` to `"keyring": ["public_key_of_other_node_A", "public_key_of_other_node_B", "..."]` i.e. a list of the public keys of all the other nodes in the federation. The keyring should _not_ include your node's public key.

For more information about the BigchainDB config file, see [Configuring a BigchainDB Node](configuration.html).


## Run RethinkDB Server

Start RethinkDB using:
```text
rethinkdb --config-file path/to/instance1.conf
```

except replace the path with the actual path to `instance1.conf`.

Note: It's possible to [make RethinkDB start at system startup](https://www.rethinkdb.com/docs/start-on-startup/).
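
On a systemd-based distribution, one way to do that is a small unit file; this is only a sketch (the linked RethinkDB page describes the supported approaches, and the paths below are assumptions):
```text
# /etc/systemd/system/rethinkdb.service  (illustrative only)
[Unit]
Description=RethinkDB
After=network.target

[Service]
ExecStart=/usr/bin/rethinkdb --config-file /path/to/instance1.conf
Restart=on-failure

[Install]
WantedBy=multi-user.target
```

You would then enable it with `sudo systemctl enable rethinkdb` and start it with `sudo systemctl start rethinkdb`.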

You can verify that RethinkDB is running by opening the RethinkDB web interface in your web browser. It should be at `http://rethinkdb-hostname:8080/`. If you're running RethinkDB on localhost, that would be [http://localhost:8080/](http://localhost:8080/).
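
On a headless server (no browser), a quick sanity check from the command line is to fetch that page with `curl`, assuming `curl` is installed:
```text
curl -s http://localhost:8080/ | head   # any HTML output means the RethinkDB web UI is up
```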

## Run BigchainDB Server

After all node operators have started RethinkDB, but before they start BigchainDB, one designated node operator must configure the RethinkDB database by running the following commands:
```text
bigchaindb init
bigchaindb set-shards numshards
bigchaindb set-replicas numreplicas
```

where:

* `bigchaindb init` creates the database within RethinkDB, the tables, the indexes, and the genesis block.
* `numshards` should be set to the number of nodes in the initial cluster.
* `numreplicas` should be set to the database replication factor decided by the federation. It must be 3 or more for [RethinkDB failover](https://rethinkdb.com/docs/failover/) to work. (A concrete example follows this list.)
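
For example, in a federation of 5 nodes that agreed on a replication factor of 3 (these numbers are just an illustration), the designated operator would run:
```text
bigchaindb init
bigchaindb set-shards 5
bigchaindb set-replicas 3
```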

Once the RethinkDB database is configured, every node operator can start BigchainDB using:
```text
bigchaindb start
```

@ -120,7 +120,7 @@ When one creates a condition, one can calculate its fulfillment length (e.g. 96)
|
||||
|
||||
If someone tries to make a condition where the output of a threshold condition feeds into the input of another “earlier” threshold condition (i.e. in a closed logical circuit), then their computer will take forever to calculate the (infinite) “condition URI”, at least in theory. In practice, their computer will run out of memory or their client software will timeout after a while.
|
||||
|
||||
Aside: In what follows, the list of `new_owners` (in a condition) is always who owned the asset at the time the transaction completed, but before the next transaction started. The list of `current_owners` (in a fulfillment) is always equal to the list of `new_owners` in that asset's previous transaction.
|
||||
Aside: In what follows, the list of `owners_after` (in a condition) is always who owned the asset at the time the transaction completed, but before the next transaction started. The list of `owners_before` (in a fulfillment) is always equal to the list of `owners_after` in that asset's previous transaction.
|
||||
|
||||
### Conditions
|
||||
|
||||
@ -141,17 +141,17 @@ If there is only one _new owner_, the condition will be a simple signature condi
|
||||
},
|
||||
"uri": "<string>"
|
||||
},
|
||||
"new_owners": ["<new owner public key>"]
|
||||
"owners_after": ["<new owner public key>"]
|
||||
}
|
||||
```
|
||||
|
||||
- **Condition header**:
|
||||
- `cid`: Condition index so that we can reference this output as an input to another transaction. It also matches
|
||||
the input `fid`, making this the condition to fulfill in order to spend the asset used as input with `fid`.
|
||||
- `new_owners`: A list containing one item: the public key of the new owner.
|
||||
- `owners_after`: A list containing one item: the public key of the new owner.
|
||||
- **Condition body**:
|
||||
- `bitmask`: A set of bits representing the features required by the condition type.
|
||||
- `public_key`: The _new_owner's_ public key.
|
||||
- `public_key`: The new owner's public key.
|
||||
- `type_id`: The fulfillment type ID; see the [ILP spec](https://interledger.org/five-bells-condition/spec.html).
|
||||
- `uri`: Binary representation of the condition using only URL-safe characters.
|
||||
|
||||
@ -189,9 +189,9 @@ to spend the asset. For example:
|
||||
"type_id": 2
|
||||
},
|
||||
"uri": "cc:2:29:ytNK3X6-bZsbF-nCGDTuopUIMi1HCyCkyPewm6oLI3o:206"},
|
||||
"new_owners": [
|
||||
"<new owner 1 public key>",
|
||||
"<new owner 2 public key>"
|
||||
"owners_after": [
|
||||
"owner 1 public key>",
|
||||
"owner 2 public key>"
|
||||
]
|
||||
}
|
||||
```
|
||||
@ -210,7 +210,7 @@ If there is only one _current owner_, the fulfillment will be a simple signature
|
||||
|
||||
```json
|
||||
{
|
||||
"current_owners": ["<public key of current owner>"],
|
||||
"owners_before": ["<public key of the owner before the transaction happened>"],
|
||||
"fid": 0,
|
||||
"fulfillment": "cf:4:RxFzIE679tFBk8zwEgizhmTuciAylvTUwy6EL6ehddHFJOhK5F4IjwQ1xLu2oQK9iyRCZJdfWAefZVjTt3DeG5j2exqxpGliOPYseNkRAWEakqJ_UrCwgnj92dnFRAEE",
|
||||
"input": {
|
||||
@ -222,7 +222,7 @@ If there is only one _current owner_, the fulfillment will be a simple signature
|
||||
|
||||
- `fid`: Fulfillment index. It matches a `cid` in the conditions with a new _crypto-condition_ that the new owner
|
||||
needs to fulfill to spend this asset.
|
||||
- `current_owners`: A list of public keys of the current owners; in this case it has just one public key.
|
||||
- `owners_before`: A list of public keys of the owners before the transaction; in this case it has just one public key.
|
||||
- `fulfillment`: A crypto-conditions URI that encodes the cryptographic fulfillments like signatures and others, see [crypto-conditions](https://interledger.org/five-bells-condition/spec.html).
|
||||
- `input`: Pointer to the asset and condition of a previous transaction
|
||||
- `cid`: Condition index
|
||||
|
2
setup.py
2
setup.py
@ -102,7 +102,7 @@ setup(
|
||||
'logstats==0.2.1',
|
||||
'base58==0.2.2',
|
||||
'flask==0.10.1',
|
||||
'requests==2.9',
|
||||
'requests~=2.9',
|
||||
'gunicorn~=19.0',
|
||||
'multipipes~=0.1.0',
|
||||
],
|
||||
|
@ -110,25 +110,37 @@ class TestBigchainApi(object):
|
||||
assert response['inserted'] == 1
|
||||
|
||||
@pytest.mark.usefixtures('inputs')
|
||||
def test_read_transaction(self, b, user_vk, user_sk):
|
||||
def test_read_transaction_undecided_block(self, b, user_vk, user_sk):
|
||||
input_tx = b.get_owned_ids(user_vk).pop()
|
||||
tx = b.create_transaction(user_vk, user_vk, input_tx, 'TRANSFER')
|
||||
tx_signed = b.sign_transaction(tx, user_sk)
|
||||
b.write_transaction(tx_signed)
|
||||
|
||||
# create block and write it to the bigchain before retrieving the transaction
|
||||
block = b.create_block([tx_signed])
|
||||
b.write_block(block, durability='hard')
|
||||
|
||||
response = b.get_transaction(tx_signed["id"])
|
||||
response, status = b.get_transaction(tx_signed["id"], include_status=True)
|
||||
# add validity information, which will be returned
|
||||
assert util.serialize(tx_signed) == util.serialize(response)
|
||||
assert status == b.TX_UNDECIDED
|
||||
|
||||
@pytest.mark.usefixtures('inputs')
|
||||
def test_read_transaction_backlog(self, b, user_vk, user_sk):
|
||||
input_tx = b.get_owned_ids(user_vk).pop()
|
||||
tx = b.create_transaction(user_vk, user_vk, input_tx, 'TRANSFER')
|
||||
tx_signed = b.sign_transaction(tx, user_sk)
|
||||
b.write_transaction(tx_signed)
|
||||
|
||||
response, status = b.get_transaction(tx_signed["id"], include_status=True)
|
||||
# add validity information, which will be returned
|
||||
assert util.serialize(tx_signed) == util.serialize(response)
|
||||
assert status == b.TX_IN_BACKLOG
|
||||
|
||||
@pytest.mark.usefixtures('inputs')
|
||||
def test_read_transaction_invalid_block(self, b, user_vk, user_sk):
|
||||
input_tx = b.get_owned_ids(user_vk).pop()
|
||||
tx = b.create_transaction(user_vk, user_vk, input_tx, 'TRANSFER')
|
||||
tx_signed = b.sign_transaction(tx, user_sk)
|
||||
b.write_transaction(tx_signed)
|
||||
|
||||
# create block
|
||||
block = b.create_block([tx_signed])
|
||||
@ -142,6 +154,26 @@ class TestBigchainApi(object):
|
||||
# should be None, because invalid blocks are ignored
|
||||
assert response is None
|
||||
|
||||
@pytest.mark.usefixtures('inputs')
|
||||
def test_read_transaction_valid_block(self, b, user_vk, user_sk):
|
||||
input_tx = b.get_owned_ids(user_vk).pop()
|
||||
tx = b.create_transaction(user_vk, user_vk, input_tx, 'TRANSFER')
|
||||
tx_signed = b.sign_transaction(tx, user_sk)
|
||||
b.write_transaction(tx_signed)
|
||||
|
||||
# create block
|
||||
block = b.create_block([tx_signed])
|
||||
b.write_block(block, durability='hard')
|
||||
|
||||
# vote the block invalid
|
||||
vote = b.vote(block['id'], b.get_last_voted_block()['id'], True)
|
||||
b.write_vote(vote)
|
||||
|
||||
response, status = b.get_transaction(tx_signed["id"], include_status=True)
|
||||
# add validity information, which will be returned
|
||||
assert util.serialize(tx_signed) == util.serialize(response)
|
||||
assert status == b.TX_VALID
|
||||
|
||||
@pytest.mark.usefixtures('inputs')
|
||||
def test_assign_transaction_one_node(self, b, user_vk, user_sk):
|
||||
input_tx = b.get_owned_ids(user_vk).pop()
|
||||
@ -422,7 +454,7 @@ class TestTransactionValidation(object):
|
||||
with pytest.raises(exceptions.InvalidSignature) as excinfo:
|
||||
b.validate_transaction(tx)
|
||||
|
||||
# assert excinfo.value.args[0] == 'current_owner `a` does not own the input `{}`'.format(valid_input)
|
||||
# assert excinfo.value.args[0] == 'owner_before `a` does not own the input `{}`'.format(valid_input)
|
||||
assert b.is_valid_transaction(tx) is False
|
||||
|
||||
@pytest.mark.usefixtures('inputs')
|
||||
@ -550,7 +582,7 @@ class TestBlockValidation(object):
|
||||
with pytest.raises(exceptions.TransactionOwnerError) as excinfo:
|
||||
b.validate_block(block)
|
||||
|
||||
assert excinfo.value.args[0] == 'current_owner `a` does not own the input `{}`'.format(valid_input)
|
||||
assert excinfo.value.args[0] == 'owner_before `a` does not own the input `{}`'.format(valid_input)
|
||||
|
||||
def test_invalid_block_id(self, b):
|
||||
block = dummy_block()
|
||||
@ -689,7 +721,7 @@ class TestMultipleInputs(object):
|
||||
assert len(tx_signed['transaction']['fulfillments']) == 1
|
||||
assert len(tx_signed['transaction']['conditions']) == 1
|
||||
|
||||
def test_single_current_owner_multiple_new_owners_single_input(self, b, user_sk, user_vk, inputs):
|
||||
def test_single_owner_before_multiple_owners_after_single_input(self, b, user_sk, user_vk, inputs):
|
||||
# create a new users
|
||||
user2_sk, user2_vk = crypto.generate_key_pair()
|
||||
user3_sk, user3_vk = crypto.generate_key_pair()
|
||||
@ -707,7 +739,7 @@ class TestMultipleInputs(object):
|
||||
assert len(tx_signed['transaction']['fulfillments']) == 1
|
||||
assert len(tx_signed['transaction']['conditions']) == 1
|
||||
|
||||
def test_single_current_owner_multiple_new_owners_multiple_inputs(self, b, user_sk, user_vk):
|
||||
def test_single_owner_before_multiple_owners_after_multiple_inputs(self, b, user_sk, user_vk):
|
||||
# create a new users
|
||||
user2_sk, user2_vk = crypto.generate_key_pair()
|
||||
user3_sk, user3_vk = crypto.generate_key_pair()
|
||||
@ -735,7 +767,7 @@ class TestMultipleInputs(object):
|
||||
assert len(tx_signed['transaction']['fulfillments']) == 3
|
||||
assert len(tx_signed['transaction']['conditions']) == 3
|
||||
|
||||
def test_multiple_current_owners_single_new_owner_single_input(self, b, user_sk, user_vk):
|
||||
def test_multiple_owners_before_single_owner_after_single_input(self, b, user_sk, user_vk):
|
||||
# create a new users
|
||||
user2_sk, user2_vk = crypto.generate_key_pair()
|
||||
user3_sk, user3_vk = crypto.generate_key_pair()
|
||||
@ -759,7 +791,7 @@ class TestMultipleInputs(object):
|
||||
assert len(tx_signed['transaction']['fulfillments']) == 1
|
||||
assert len(tx_signed['transaction']['conditions']) == 1
|
||||
|
||||
def test_multiple_current_owners_single_new_owner_multiple_inputs(self, b, user_sk, user_vk):
|
||||
def test_multiple_owners_before_single_owner_after_multiple_inputs(self, b, user_sk, user_vk):
|
||||
# create a new users
|
||||
user2_sk, user2_vk = crypto.generate_key_pair()
|
||||
user3_sk, user3_vk = crypto.generate_key_pair()
|
||||
@ -786,7 +818,7 @@ class TestMultipleInputs(object):
|
||||
assert len(tx_signed['transaction']['fulfillments']) == 3
|
||||
assert len(tx_signed['transaction']['conditions']) == 3
|
||||
|
||||
def test_multiple_current_owners_multiple_new_owners_single_input(self, b, user_sk, user_vk):
|
||||
def test_multiple_owners_before_multiple_owners_after_single_input(self, b, user_sk, user_vk):
|
||||
# create a new users
|
||||
user2_sk, user2_vk = crypto.generate_key_pair()
|
||||
user3_sk, user3_vk = crypto.generate_key_pair()
|
||||
@ -811,7 +843,7 @@ class TestMultipleInputs(object):
|
||||
assert len(tx_signed['transaction']['fulfillments']) == 1
|
||||
assert len(tx_signed['transaction']['conditions']) == 1
|
||||
|
||||
def test_multiple_current_owners_multiple_new_owners_multiple_inputs(self, b, user_sk, user_vk):
|
||||
def test_multiple_owners_before_multiple_owners_after_multiple_inputs(self, b, user_sk, user_vk):
|
||||
# create a new users
|
||||
user2_sk, user2_vk = crypto.generate_key_pair()
|
||||
user3_sk, user3_vk = crypto.generate_key_pair()
|
||||
@ -1121,7 +1153,7 @@ class TestFulfillmentMessage(object):
|
||||
assert fulfillment_message['data']['payload'] == tx['transaction']['data']['payload']
|
||||
assert fulfillment_message['id'] == tx['id']
|
||||
assert fulfillment_message['condition'] == tx['transaction']['conditions'][0]
|
||||
assert fulfillment_message['fulfillment']['current_owners'] == original_fulfillment['current_owners']
|
||||
assert fulfillment_message['fulfillment']['owners_before'] == original_fulfillment['owners_before']
|
||||
assert fulfillment_message['fulfillment']['fid'] == original_fulfillment['fid']
|
||||
assert fulfillment_message['fulfillment']['input'] == original_fulfillment['input']
|
||||
assert fulfillment_message['operation'] == tx['transaction']['operation']
|
||||
@ -1144,14 +1176,14 @@ class TestFulfillmentMessage(object):
|
||||
assert fulfillment_message['data']['payload'] == tx['transaction']['data']['payload']
|
||||
assert fulfillment_message['id'] == tx['id']
|
||||
assert fulfillment_message['condition'] == tx['transaction']['conditions'][0]
|
||||
assert fulfillment_message['fulfillment']['current_owners'] == original_fulfillment['current_owners']
|
||||
assert fulfillment_message['fulfillment']['owners_before'] == original_fulfillment['owners_before']
|
||||
assert fulfillment_message['fulfillment']['fid'] == original_fulfillment['fid']
|
||||
assert fulfillment_message['fulfillment']['input'] == original_fulfillment['input']
|
||||
assert fulfillment_message['operation'] == tx['transaction']['operation']
|
||||
assert fulfillment_message['timestamp'] == tx['transaction']['timestamp']
|
||||
assert fulfillment_message['version'] == tx['transaction']['version']
|
||||
|
||||
def test_fulfillment_message_multiple_current_owners_multiple_new_owners_multiple_inputs(self, b, user_vk):
|
||||
def test_fulfillment_message_multiple_owners_before_multiple_owners_after_multiple_inputs(self, b, user_vk):
|
||||
# create a new users
|
||||
user2_sk, user2_vk = crypto.generate_key_pair()
|
||||
user3_sk, user3_vk = crypto.generate_key_pair()
|
||||
@ -1182,7 +1214,7 @@ class TestFulfillmentMessage(object):
|
||||
assert fulfillment_message['data']['payload'] == tx['transaction']['data']['payload']
|
||||
assert fulfillment_message['id'] == tx['id']
|
||||
assert fulfillment_message['condition'] == tx['transaction']['conditions'][original_fulfillment['fid']]
|
||||
assert fulfillment_message['fulfillment']['current_owners'] == original_fulfillment['current_owners']
|
||||
assert fulfillment_message['fulfillment']['owners_before'] == original_fulfillment['owners_before']
|
||||
assert fulfillment_message['fulfillment']['fid'] == original_fulfillment['fid']
|
||||
assert fulfillment_message['fulfillment']['input'] == original_fulfillment['input']
|
||||
assert fulfillment_message['operation'] == tx['transaction']['operation']
|
||||
@ -1235,7 +1267,7 @@ class TestTransactionMalleability(object):
|
||||
tx_changed = copy.deepcopy(tx_signed)
|
||||
tx_changed['transaction']['fulfillments'] = [
|
||||
{
|
||||
"current_owners": [
|
||||
"owners_before": [
|
||||
"AFbofwJYEB7Cx2fgrPrCJzbdDVRzRKysoGXt4DsvuTGN"
|
||||
],
|
||||
"fid": 0,
|
||||
@ -1253,7 +1285,7 @@ class TestTransactionMalleability(object):
|
||||
assert b.is_valid_transaction(tx_changed) is False
|
||||
|
||||
tx_changed = copy.deepcopy(tx_signed)
|
||||
tx_changed['transaction']['fulfillments'][0]['current_owners'] = [
|
||||
tx_changed['transaction']['fulfillments'][0]['owners_before'] = [
|
||||
"AFbofwJYEB7Cx2fgrPrCJzbdDVRzRKysoGXt4DsvuTGN"]
|
||||
assert b.validate_fulfillments(tx_changed) is False
|
||||
assert b.is_valid_transaction(tx_changed) is False
|
||||
@ -1282,7 +1314,7 @@ class TestCryptoconditions(object):
|
||||
fulfillment = tx_signed['transaction']['fulfillments'][0]
|
||||
fulfillment_from_uri = cc.Fulfillment.from_uri(fulfillment['fulfillment'])
|
||||
|
||||
assert fulfillment['current_owners'][0] == b.me
|
||||
assert fulfillment['owners_before'][0] == b.me
|
||||
assert fulfillment_from_uri.public_key.to_ascii().decode() == b.me
|
||||
assert b.validate_fulfillments(tx_signed) == True
|
||||
assert b.is_valid_transaction(tx_signed) == tx_signed
|
||||
@ -1313,7 +1345,7 @@ class TestCryptoconditions(object):
|
||||
fulfillment = tx_signed['transaction']['fulfillments'][0]
|
||||
fulfillment_from_uri = cc.Fulfillment.from_uri(fulfillment['fulfillment'])
|
||||
|
||||
assert fulfillment['current_owners'][0] == user_vk
|
||||
assert fulfillment['owners_before'][0] == user_vk
|
||||
assert fulfillment_from_uri.public_key.to_ascii().decode() == user_vk
|
||||
assert fulfillment_from_uri.condition.serialize_uri() == prev_condition['uri']
|
||||
assert b.validate_fulfillments(tx_signed) == True
|
||||
@ -1332,7 +1364,7 @@ class TestCryptoconditions(object):
|
||||
fulfillment = tx_signed['transaction']['fulfillments'][0]
|
||||
fulfillment_from_uri = cc.Fulfillment.from_uri(fulfillment['fulfillment'])
|
||||
|
||||
assert fulfillment['current_owners'][0] == b.me
|
||||
assert fulfillment['owners_before'][0] == b.me
|
||||
assert fulfillment_from_uri.public_key.to_ascii().decode() == b.me
|
||||
assert b.validate_fulfillments(tx_signed) == True
|
||||
assert b.is_valid_transaction(tx_signed) == tx_signed
|
||||
@ -1354,7 +1386,7 @@ class TestCryptoconditions(object):
|
||||
fulfillment = tx_signed['transaction']['fulfillments'][0]
|
||||
fulfillment_from_uri = cc.Fulfillment.from_uri(fulfillment['fulfillment'])
|
||||
|
||||
assert fulfillment['current_owners'][0] == user_vk
|
||||
assert fulfillment['owners_before'][0] == user_vk
|
||||
assert fulfillment_from_uri.public_key.to_ascii().decode() == user_vk
|
||||
assert b.validate_fulfillments(tx_signed) == True
|
||||
assert b.is_valid_transaction(tx_signed) == tx_signed
|
||||
@ -1593,7 +1625,7 @@ class TestCryptoconditions(object):
|
||||
def test_default_threshold_conditions_for_multiple_owners(self, b, user_sk, user_vk):
|
||||
user2_sk, user2_vk = crypto.generate_key_pair()
|
||||
|
||||
# create transaction with multiple new_owners
|
||||
# create transaction with multiple owners_after
|
||||
tx = b.create_transaction(b.me, [user_vk, user2_vk], None, 'CREATE')
|
||||
|
||||
assert len(tx['transaction']['conditions']) == 1
|
||||
@ -1613,7 +1645,7 @@ class TestCryptoconditions(object):
|
||||
def test_default_threshold_fulfillments_for_multiple_owners(self, b, user_sk, user_vk):
|
||||
user2_sk, user2_vk = crypto.generate_key_pair()
|
||||
|
||||
# create transaction with multiple new_owners
|
||||
# create transaction with multiple owners_after
|
||||
tx_create = b.create_transaction(b.me, [user_vk, user2_vk], None, 'CREATE')
|
||||
tx_create_signed = b.sign_transaction(tx_create, b.me_private)
|
||||
block = b.create_block([tx_create_signed])
|
||||
@ -1654,7 +1686,7 @@ class TestCryptoconditions(object):
|
||||
'uri': first_tx_condition.condition.serialize_uri()
|
||||
},
|
||||
'cid': 0,
|
||||
'new_owners': None
|
||||
'owners_after': None
|
||||
})
|
||||
# conditions have been updated, so hash needs updating
|
||||
hashlock_tx['id'] = util.get_hash_data(hashlock_tx)
|
||||
@ -1686,7 +1718,7 @@ class TestCryptoconditions(object):
|
||||
'uri': first_tx_condition.condition.serialize_uri()
|
||||
},
|
||||
'cid': 0,
|
||||
'new_owners': None
|
||||
'owners_after': None
|
||||
})
|
||||
# conditions have been updated, so hash needs updating
|
||||
hashlock_tx['id'] = util.get_hash_data(hashlock_tx)
|
||||
@ -1717,7 +1749,7 @@ class TestCryptoconditions(object):
|
||||
'uri': first_tx_condition.condition.serialize_uri()
|
||||
},
|
||||
'cid': 0,
|
||||
'new_owners': None
|
||||
'owners_after': None
|
||||
})
|
||||
# conditions have been updated, so hash needs updating
|
||||
hashlock_tx['id'] = util.get_hash_data(hashlock_tx)
|
||||
@ -1779,15 +1811,15 @@ class TestCryptoconditions(object):
|
||||
user3_sk, user3_vk = crypto.generate_key_pair()
|
||||
user4_sk, user4_vk = crypto.generate_key_pair()
|
||||
user5_sk, user5_vk = crypto.generate_key_pair()
|
||||
new_owners = [user_vk, user2_vk, user3_vk, user4_vk, user5_vk]
|
||||
owners_after = [user_vk, user2_vk, user3_vk, user4_vk, user5_vk]
|
||||
|
||||
# create a transaction with multiple new_owners
|
||||
tx = b.create_transaction(b.me, new_owners, None, 'CREATE')
|
||||
# create a transaction with multiple owners_after
|
||||
tx = b.create_transaction(b.me, owners_after, None, 'CREATE')
|
||||
condition = cc.Fulfillment.from_dict(tx['transaction']['conditions'][0]['condition']['details'])
|
||||
|
||||
for new_owner in new_owners:
|
||||
subcondition = condition.get_subcondition_from_vk(new_owner)[0]
|
||||
assert subcondition.public_key.to_ascii().decode() == new_owner
|
||||
for owner_after in owners_after:
|
||||
subcondition = condition.get_subcondition_from_vk(owner_after)[0]
|
||||
assert subcondition.public_key.to_ascii().decode() == owner_after
|
||||
|
||||
@pytest.mark.usefixtures('inputs')
|
||||
def test_transfer_asset_with_escrow_condition(self, b, user_vk, user_sk):
|
||||
|
@ -252,7 +252,7 @@ print(json.dumps(threshold_tx_transfer, sort_keys=True, indent=4, separators=(',
|
||||
Hashlocked Conditions
|
||||
"""
|
||||
|
||||
# Create a hash-locked asset without any new_owners
|
||||
# Create a hash-locked asset without any owners_after
|
||||
hashlock_tx = b.create_transaction(b.me, None, None, 'CREATE')
|
||||
|
||||
# Define a secret that will be hashed - fulfillments need to guess the secret
|
||||
@ -265,13 +265,13 @@ hashlock_tx['transaction']['conditions'].append({
|
||||
'uri': first_tx_condition.condition.serialize_uri()
|
||||
},
|
||||
'cid': 0,
|
||||
'new_owners': None
|
||||
'owners_after': None
|
||||
})
|
||||
|
||||
# Conditions have been updated, so hash needs updating
|
||||
hashlock_tx['id'] = util.get_hash_data(hashlock_tx)
|
||||
|
||||
# The asset needs to be signed by the current_owner
|
||||
# The asset needs to be signed by the owner_before
|
||||
hashlock_tx_signed = b.sign_transaction(hashlock_tx, b.me_private)
|
||||
|
||||
# Some validations
|
||||
@ -327,7 +327,7 @@ tx_timeout['transaction']['conditions'].append({
|
||||
'uri': condition_timeout.condition.serialize_uri()
|
||||
},
|
||||
'cid': 0,
|
||||
'new_owners': None
|
||||
'owners_after': None
|
||||
})
|
||||
|
||||
# conditions have been updated, so hash needs updating
|
||||
|
@ -3,7 +3,7 @@ from unittest.mock import patch
|
||||
import rethinkdb
|
||||
|
||||
from multipipes import Pipe
|
||||
from bigchaindb.pipelines import utils
|
||||
from bigchaindb.pipelines.utils import ChangeFeed
|
||||
|
||||
|
||||
MOCK_CHANGEFEED_DATA = [{
|
||||
@ -21,36 +21,50 @@ MOCK_CHANGEFEED_DATA = [{
|
||||
@patch.object(rethinkdb.ast.RqlQuery, 'run', return_value=MOCK_CHANGEFEED_DATA)
|
||||
def test_changefeed_insert(mock_run):
|
||||
outpipe = Pipe()
|
||||
changefeed = utils.ChangeFeed('backlog', 'insert')
|
||||
changefeed = ChangeFeed('backlog', ChangeFeed.INSERT)
|
||||
changefeed.outqueue = outpipe
|
||||
changefeed.run_forever()
|
||||
assert outpipe.get() == 'seems like we have an insert here'
|
||||
assert outpipe.qsize() == 0
|
||||
|
||||
|
||||
@patch.object(rethinkdb.ast.RqlQuery, 'run', return_value=MOCK_CHANGEFEED_DATA)
|
||||
def test_changefeed_delete(mock_run):
|
||||
outpipe = Pipe()
|
||||
changefeed = utils.ChangeFeed('backlog', 'delete')
|
||||
changefeed = ChangeFeed('backlog', ChangeFeed.DELETE)
|
||||
changefeed.outqueue = outpipe
|
||||
changefeed.run_forever()
|
||||
assert outpipe.get() == 'seems like we have a delete here'
|
||||
assert outpipe.qsize() == 0
|
||||
|
||||
|
||||
@patch.object(rethinkdb.ast.RqlQuery, 'run', return_value=MOCK_CHANGEFEED_DATA)
|
||||
def test_changefeed_update(mock_run):
|
||||
outpipe = Pipe()
|
||||
changefeed = utils.ChangeFeed('backlog', 'update')
|
||||
changefeed = ChangeFeed('backlog', ChangeFeed.UPDATE)
|
||||
changefeed.outqueue = outpipe
|
||||
changefeed.run_forever()
|
||||
assert outpipe.get() == {'new_val': 'seems like we have an update here',
|
||||
'old_val': 'seems like we have an update here'}
|
||||
assert outpipe.qsize() == 0
|
||||
|
||||
|
||||
@patch.object(rethinkdb.ast.RqlQuery, 'run', return_value=MOCK_CHANGEFEED_DATA)
|
||||
def test_changefeed_multiple_operations(mock_run):
|
||||
outpipe = Pipe()
|
||||
changefeed = ChangeFeed('backlog', ChangeFeed.INSERT | ChangeFeed.UPDATE)
|
||||
changefeed.outqueue = outpipe
|
||||
changefeed.run_forever()
|
||||
assert outpipe.get() == 'seems like we have an insert here'
|
||||
assert outpipe.get() == {'new_val': 'seems like we have an update here',
|
||||
'old_val': 'seems like we have an update here'}
|
||||
assert outpipe.qsize() == 0
|
||||
|
||||
|
||||
@patch.object(rethinkdb.ast.RqlQuery, 'run', return_value=MOCK_CHANGEFEED_DATA)
|
||||
def test_changefeed_prefeed(mock_run):
|
||||
outpipe = Pipe()
|
||||
changefeed = utils.ChangeFeed('backlog', 'insert', prefeed=[1, 2, 3])
|
||||
changefeed = ChangeFeed('backlog', ChangeFeed.INSERT, prefeed=[1, 2, 3])
|
||||
changefeed.outqueue = outpipe
|
||||
changefeed.run_forever()
|
||||
assert outpipe.qsize() == 4
|
||||
|
||||
|
@ -44,11 +44,11 @@ def test_client_can_create_assets(mock_requests_post, client):
|
||||
|
||||
# XXX: `CREATE` operations require the node that receives the transaction to modify the data in
|
||||
# the transaction itself.
|
||||
# `current_owner` will be overwritten with the public key of the node in the federation
|
||||
# `owner_before` will be overwritten with the public key of the node in the federation
|
||||
# that will create the real transaction. `signature` will be overwritten with the new signature.
|
||||
# Note that this scenario is ignored by this test.
|
||||
assert tx['transaction']['fulfillments'][0]['current_owners'][0] == client.public_key
|
||||
assert tx['transaction']['conditions'][0]['new_owners'][0] == client.public_key
|
||||
assert tx['transaction']['fulfillments'][0]['owners_before'][0] == client.public_key
|
||||
assert tx['transaction']['conditions'][0]['owners_after'][0] == client.public_key
|
||||
assert tx['transaction']['fulfillments'][0]['input'] is None
|
||||
|
||||
assert util.validate_fulfillments(tx)
|
||||
@ -56,8 +56,8 @@ def test_client_can_create_assets(mock_requests_post, client):
|
||||
|
||||
def test_client_can_transfer_assets(mock_requests_post, mock_bigchaindb_sign, client):
|
||||
tx = client.transfer(client.public_key, 123)
|
||||
assert tx['transaction']['fulfillments'][0]['current_owners'][0] == client.public_key
|
||||
assert tx['transaction']['conditions'][0]['new_owners'][0] == client.public_key
|
||||
assert tx['transaction']['fulfillments'][0]['owners_before'][0] == client.public_key
|
||||
assert tx['transaction']['conditions'][0]['owners_after'][0] == client.public_key
|
||||
assert tx['transaction']['fulfillments'][0]['input'] == 123
|
||||
|
||||
|
||||
|
@ -229,6 +229,13 @@ def test_file_config():
|
||||
assert config == {}
|
||||
|
||||
|
||||
def test_invalid_file_config():
|
||||
from bigchaindb.config_utils import file_config, CONFIG_DEFAULT_PATH
|
||||
with patch('builtins.open', mock_open(read_data='{_INVALID_JSON_}')) as m:
|
||||
with pytest.raises(exceptions.ConfigurationError):
|
||||
file_config()
|
||||
|
||||
|
||||
def test_write_config():
|
||||
from bigchaindb.config_utils import write_config, CONFIG_DEFAULT_PATH
|
||||
m = mock_open()
|
||||
|
@ -35,8 +35,8 @@ def test_transform_create(b, user_sk, user_vk):
|
||||
tx = util.transform_create(tx)
|
||||
tx = util.sign_tx(tx, b.me_private)
|
||||
|
||||
assert tx['transaction']['fulfillments'][0]['current_owners'][0] == b.me
|
||||
assert tx['transaction']['conditions'][0]['new_owners'][0] == user_vk
|
||||
assert tx['transaction']['fulfillments'][0]['owners_before'][0] == b.me
|
||||
assert tx['transaction']['conditions'][0]['owners_after'][0] == user_vk
|
||||
assert util.validate_fulfillments(tx)
|
||||
|
||||
|
||||
@ -159,7 +159,7 @@ def test_create_tx_with_empty_inputs():
|
||||
assert 'data' in tx['transaction']
|
||||
assert len(tx['transaction']['fulfillments']) == 1
|
||||
assert tx['transaction']['fulfillments'][0] == {
|
||||
'current_owners': [], 'input': None, 'fulfillment': None, 'fid': 0}
|
||||
'owners_before': [], 'input': None, 'fulfillment': None, 'fid': 0}
|
||||
|
||||
|
||||
def test_fulfill_threshold_signature_fulfillment_pubkey_notfound(monkeypatch):
|
||||
@ -170,7 +170,7 @@ def test_fulfill_threshold_signature_fulfillment_pubkey_notfound(monkeypatch):
|
||||
'get_subcondition_from_vk',
|
||||
lambda x, y: []
|
||||
)
|
||||
fulfillment = {'current_owners': (None,)}
|
||||
fulfillment = {'owners_before': (None,)}
|
||||
parsed_fulfillment = ThresholdSha256Fulfillment()
|
||||
with pytest.raises(KeypairMismatchException):
|
||||
fulfill_threshold_signature_fulfillment(
|
||||
@ -185,7 +185,7 @@ def test_fulfill_threshold_signature_fulfillment_wrong_privkeys(monkeypatch):
|
||||
'get_subcondition_from_vk',
|
||||
lambda x, y: (None,)
|
||||
)
|
||||
fulfillment = {'current_owners': ('alice-pub-key',)}
|
||||
fulfillment = {'owners_before': ('alice-pub-key',)}
|
||||
parsed_fulfillment = ThresholdSha256Fulfillment()
|
||||
with pytest.raises(KeypairMismatchException):
|
||||
fulfill_threshold_signature_fulfillment(
|
||||
|
@ -14,6 +14,7 @@ def test_get_transaction_endpoint(b, client, user_vk):
|
||||
tx = b.get_transaction(input_tx['txid'])
|
||||
res = client.get(TX_ENDPOINT + input_tx['txid'])
|
||||
assert tx == res.json
|
||||
assert res.status_code == 200
|
||||
|
||||
|
||||
@pytest.mark.usefixtures('inputs')
|
||||
@ -30,23 +31,49 @@ def test_api_endpoint_shows_basic_info(client):
|
||||
|
||||
|
||||
def test_post_create_transaction_endpoint(b, client):
|
||||
keypair = crypto.generate_key_pair()
|
||||
sk, vk = crypto.generate_key_pair()
|
||||
|
||||
tx = util.create_and_sign_tx(keypair[0], keypair[1], keypair[1], None, 'CREATE')
|
||||
tx = util.create_and_sign_tx(sk, vk, vk, None, 'CREATE')
|
||||
|
||||
res = client.post(TX_ENDPOINT, data=json.dumps(tx))
|
||||
assert res.json['transaction']['fulfillments'][0]['current_owners'][0] == b.me
|
||||
assert res.json['transaction']['conditions'][0]['new_owners'][0] == keypair[1]
|
||||
assert res.json['transaction']['fulfillments'][0]['owners_before'][0] == b.me
|
||||
assert res.json['transaction']['conditions'][0]['owners_after'][0] == vk
|
||||
|
||||
|
||||
@pytest.mark.usefixtures('inputs')
|
||||
def test_post_transfer_transaction_endpoint(b, client, user_vk, user_sk):
|
||||
to_keypair = crypto.generate_key_pair()
|
||||
sk, vk = crypto.generate_key_pair()
|
||||
input_valid = b.get_owned_ids(user_vk).pop()
|
||||
|
||||
transfer = util.create_and_sign_tx(user_sk, user_vk, to_keypair[1], input_valid)
|
||||
transfer = util.create_and_sign_tx(user_sk, user_vk, vk, input_valid)
|
||||
res = client.post(TX_ENDPOINT, data=json.dumps(transfer))
|
||||
|
||||
assert res.json['transaction']['fulfillments'][0]['current_owners'][0] == user_vk
|
||||
assert res.json['transaction']['conditions'][0]['new_owners'][0] == to_keypair[1]
|
||||
assert res.json['transaction']['fulfillments'][0]['owners_before'][0] == user_vk
|
||||
assert res.json['transaction']['conditions'][0]['owners_after'][0] == vk
|
||||
|
||||
|
||||
@pytest.mark.usefixtures('inputs')
|
||||
def test_post_invalid_transfer_transaction_returns_400(b, client, user_vk, user_sk):
|
||||
sk, vk = crypto.generate_key_pair()
|
||||
input_valid = b.get_owned_ids(user_vk).pop()
|
||||
transfer = b.create_transaction(user_vk, vk, input_valid, 'TRANSFER')
|
||||
# transfer is not signed
|
||||
res = client.post(TX_ENDPOINT, data=json.dumps(transfer))
|
||||
|
||||
assert res.status_code == 400
|
||||
|
||||
|
||||
@pytest.mark.usefixtures('inputs')
|
||||
def test_get_transaction_status_endpoint(b, client, user_vk):
|
||||
input_tx = b.get_owned_ids(user_vk).pop()
|
||||
tx, status = b.get_transaction(input_tx['txid'], include_status=True)
|
||||
res = client.get(TX_ENDPOINT + input_tx['txid'] + "/status")
|
||||
assert status == res.json['status']
|
||||
assert res.status_code == 200
|
||||
|
||||
|
||||
@pytest.mark.usefixtures('inputs')
|
||||
def test_get_transaction_status_returns_404_if_not_found(client):
|
||||
res = client.get(TX_ENDPOINT + '123' + "/status")