Merge branch 'master' of github.com:bigchaindb/bigchaindb

This commit is contained in:
z-bowen 2018-08-08 10:48:35 +02:00
commit 20671155c8
75 changed files with 979 additions and 369 deletions

View File

@ -18,6 +18,18 @@ For reference, the possible headings are:
* **Known Issues** * **Known Issues**
* **Notes** * **Notes**
## [2.0 Beta 5] - 2018-08-01
Tag name: v2.0.0b5
### Changed
* Supported version of Tendermint `0.22.3` -> `0.22.8`. [Pull request #2429](https://github.com/bigchaindb/bigchaindb/pull/2429).
### Fixed
* Stateful validation raises a DoubleSpend exception if there is any other transaction that spends the same output(s) even if it has the same transaction ID. [Pull request #2422](https://github.com/bigchaindb/bigchaindb/pull/2422).
## [2.0 Beta 4] - 2018-07-30 ## [2.0 Beta 4] - 2018-07-30
Tag name: v2.0.0b4 Tag name: v2.0.0b4

View File

@ -7,7 +7,6 @@ RUN apt-get -qq update \
&& apt-get -y upgrade \ && apt-get -y upgrade \
&& apt-get install -y jq \ && apt-get install -y jq \
&& pip install --no-cache-dir --process-dependency-links . \ && pip install --no-cache-dir --process-dependency-links . \
&& pip install --no-cache-dir . \
&& apt-get autoremove \ && apt-get autoremove \
&& apt-get clean && apt-get clean

51
Dockerfile-all-in-one Normal file
View File

@ -0,0 +1,51 @@
FROM alpine:latest
LABEL maintainer "dev@bigchaindb.com"
ARG TM_VERSION=0.22.8
RUN mkdir -p /usr/src/app
ENV HOME /root
COPY . /usr/src/app/
WORKDIR /usr/src/app
RUN apk --update add sudo bash \
&& apk --update add python3 openssl ca-certificates git \
&& apk --update add --virtual build-dependencies python3-dev \
libffi-dev openssl-dev build-base jq \
&& apk add --no-cache libstdc++ dpkg gnupg \
&& pip3 install --upgrade pip cffi \
&& pip install --no-cache-dir --process-dependency-links -e . \
&& apk del build-dependencies \
&& rm -f /var/cache/apk/*
# Install mongodb and monit
RUN apk --update add mongodb monit
# Install Tendermint
RUN wget https://github.com/tendermint/tendermint/releases/download/v${TM_VERSION}-autodraft/tendermint_${TM_VERSION}_linux_amd64.zip \
&& unzip tendermint_${TM_VERSION}_linux_amd64.zip \
&& mv tendermint /usr/local/bin/ \
&& rm tendermint_${TM_VERSION}_linux_amd64.zip
ENV TMHOME=/tendermint
# Set permissions required for mongodb
RUN mkdir -p /data/db /data/configdb \
&& chown -R mongodb:mongodb /data/db /data/configdb
# BigchainDB enviroment variables
ENV BIGCHAINDB_DATABASE_PORT 27017
ENV BIGCHAINDB_DATABASE_BACKEND localmongodb
ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984
ENV BIGCHAINDB_WSSERVER_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_SCHEME ws
ENV BIGCHAINDB_WSSERVER_ADVERTISED_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME ws
ENV BIGCHAINDB_TENDERMINT_PORT 26657
VOLUME /data/db /data/configdb /tendermint
EXPOSE 27017 28017 9984 9985 26656 26657 26658
WORKDIR $HOME
ENTRYPOINT ["/usr/src/app/pkg/scripts/all-in-one.bash"]

View File

@ -15,7 +15,7 @@ For the licenses on all other BigchainDB-related code (i.e. in other repositorie
## Documentation Licenses ## Documentation Licenses
The official BigchainDB documentation, _except for the short code snippets embedded within it_, is licensed under a Creative Commons Attribution-ShareAlike 4.0 International license, the full text of which can be found at [http://creativecommons.org/licenses/by-sa/4.0/legalcode](http://creativecommons.org/licenses/by-sa/4.0/legalcode). The official BigchainDB documentation, _except for the short code snippets embedded within it_, is licensed under a Creative Commons Attribution 4.0 International license, the full text of which can be found at [http://creativecommons.org/licenses/by/4.0/legalcode](http://creativecommons.org/licenses/by/4.0/legalcode).
## Exceptions ## Exceptions

View File

@ -90,7 +90,9 @@ _config = copy.deepcopy(config)
from bigchaindb.common.transaction import Transaction # noqa from bigchaindb.common.transaction import Transaction # noqa
from bigchaindb import models # noqa from bigchaindb import models # noqa
from bigchaindb.upsert_validator import ValidatorElection # noqa from bigchaindb.upsert_validator import ValidatorElection # noqa
from bigchaindb.upsert_validator import ValidatorElectionVote # noqa
Transaction.register_type(Transaction.CREATE, models.Transaction) Transaction.register_type(Transaction.CREATE, models.Transaction)
Transaction.register_type(Transaction.TRANSFER, models.Transaction) Transaction.register_type(Transaction.TRANSFER, models.Transaction)
Transaction.register_type(ValidatorElection.VALIDATOR_ELECTION, ValidatorElection) Transaction.register_type(ValidatorElection.VALIDATOR_ELECTION, ValidatorElection)
Transaction.register_type(ValidatorElectionVote.VALIDATOR_ELECTION_VOTE, ValidatorElectionVote)

View File

@ -8,7 +8,6 @@ from bigchaindb.common.exceptions import MultipleValidatorOperationError
from bigchaindb.backend.utils import module_dispatch_registrar from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
from bigchaindb.common.transaction import Transaction from bigchaindb.common.transaction import Transaction
from bigchaindb.backend.query import VALIDATOR_UPDATE_ID
register_query = module_dispatch_registrar(backend.query) register_query = module_dispatch_registrar(backend.query)
@ -279,7 +278,7 @@ def get_pre_commit_state(conn, commit_id):
@register_query(LocalMongoDBConnection) @register_query(LocalMongoDBConnection)
def store_validator_update(conn, validator_update): def store_validator_set(conn, validator_update):
try: try:
return conn.run( return conn.run(
conn.collection('validators') conn.collection('validators')
@ -289,15 +288,16 @@ def store_validator_update(conn, validator_update):
@register_query(LocalMongoDBConnection) @register_query(LocalMongoDBConnection)
def get_validator_update(conn, update_id=VALIDATOR_UPDATE_ID): def get_validator_set(conn, height=None):
return conn.run( query = {}
conn.collection('validators') if height is not None:
.find_one({'update_id': update_id}, projection={'_id': False})) query = {'height': {'$lte': height}}
cursor = conn.run(
@register_query(LocalMongoDBConnection)
def delete_validator_update(conn, update_id=VALIDATOR_UPDATE_ID):
return conn.run(
conn.collection('validators') conn.collection('validators')
.delete_one({'update_id': update_id}) .find(query, projection={'_id': False})
.sort([('height', DESCENDING)])
.limit(1)
) )
return list(cursor)[0]

View File

@ -126,6 +126,6 @@ def create_pre_commit_secondary_index(conn, dbname):
def create_validators_secondary_index(conn, dbname): def create_validators_secondary_index(conn, dbname):
logger.info('Create `validators` secondary index.') logger.info('Create `validators` secondary index.')
conn.conn[dbname]['validators'].create_index('update_id', conn.conn[dbname]['validators'].create_index('height',
name='update_id', name='height',
unique=True,) unique=True,)

View File

@ -340,13 +340,6 @@ def store_pre_commit_state(connection, commit_id, state):
raise NotImplementedError raise NotImplementedError
@singledispatch
def store_validator_update(conn, validator_update):
"""Store a update for the validator set"""
raise NotImplementedError
@singledispatch @singledispatch
def get_pre_commit_state(connection, commit_id): def get_pre_commit_state(connection, commit_id):
"""Get pre-commit state where `id` is `commit_id`. """Get pre-commit state where `id` is `commit_id`.
@ -362,14 +355,15 @@ def get_pre_commit_state(connection, commit_id):
@singledispatch @singledispatch
def get_validator_update(conn): def store_validator_set(conn, validator_update):
"""Get validator updates which are not synced""" """Store updated validator set"""
raise NotImplementedError raise NotImplementedError
@singledispatch @singledispatch
def delete_validator_update(conn, id): def get_validator_set(conn, height):
"""Set the sync status for validator update documents""" """Get validator set for a given `height`, if `height` is not specified
then return the latest validator set"""
raise NotImplementedError raise NotImplementedError

View File

@ -34,6 +34,9 @@ _, TX_SCHEMA_TRANSFER = _load_schema('transaction_transfer_' +
_, TX_SCHEMA_VALIDATOR_ELECTION = _load_schema('transaction_validator_election_' + _, TX_SCHEMA_VALIDATOR_ELECTION = _load_schema('transaction_validator_election_' +
TX_SCHEMA_VERSION) TX_SCHEMA_VERSION)
_, TX_SCHEMA_VALIDATOR_ELECTION_VOTE = _load_schema('transaction_validator_election_vote_' +
TX_SCHEMA_VERSION)
def _validate_schema(schema, body): def _validate_schema(schema, body):
"""Validate data against a schema""" """Validate data against a schema"""

View File

@ -59,6 +59,7 @@ definitions:
- CREATE - CREATE
- TRANSFER - TRANSFER
- VALIDATOR_ELECTION - VALIDATOR_ELECTION
- VALIDATOR_ELECTION_VOTE
asset: asset:
type: object type: object
additionalProperties: false additionalProperties: false

View File

@ -0,0 +1,27 @@
---
"$schema": "http://json-schema.org/draft-04/schema#"
type: object
title: Validator Election Vote Schema - Vote on a validator set change
required:
- operation
- outputs
properties:
operation: "VALIDATOR_ELECTION_VOTE"
outputs:
type: array
items:
"$ref": "#/definitions/output"
definitions:
output:
type: object
properties:
condition:
type: object
required:
- uri
properties:
uri:
type: string
pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
(fpt=ed25519-sha-256(&)?|cost=[0-9]+(&)?|\
subtypes=ed25519-sha-256(&)?){2,3}$"

View File

@ -18,6 +18,7 @@ from sha3 import sha3_256
from bigchaindb.common.crypto import PrivateKey, hash_data from bigchaindb.common.crypto import PrivateKey, hash_data
from bigchaindb.common.exceptions import (KeypairMismatchException, from bigchaindb.common.exceptions import (KeypairMismatchException,
InputDoesNotExist, DoubleSpend,
InvalidHash, InvalidSignature, InvalidHash, InvalidSignature,
AmountError, AssetIdMismatch, AmountError, AssetIdMismatch,
ThresholdTooDeep) ThresholdTooDeep)
@ -523,11 +524,11 @@ class Transaction(object):
# Asset payloads for 'CREATE' operations must be None or # Asset payloads for 'CREATE' operations must be None or
# dicts holding a `data` property. Asset payloads for 'TRANSFER' # dicts holding a `data` property. Asset payloads for 'TRANSFER'
# operations must be dicts holding an `id` property. # operations must be dicts holding an `id` property.
if (operation == Transaction.CREATE and if (operation == self.CREATE and
asset is not None and not (isinstance(asset, dict) and 'data' in asset)): asset is not None and not (isinstance(asset, dict) and 'data' in asset)):
raise TypeError(('`asset` must be None or a dict holding a `data` ' raise TypeError(('`asset` must be None or a dict holding a `data` '
" property instance for '{}' Transactions".format(operation))) " property instance for '{}' Transactions".format(operation)))
elif (operation == Transaction.TRANSFER and elif (operation == self.TRANSFER and
not (isinstance(asset, dict) and 'id' in asset)): not (isinstance(asset, dict) and 'id' in asset)):
raise TypeError(('`asset` must be a dict holding an `id` property ' raise TypeError(('`asset` must be a dict holding an `id` property '
"for 'TRANSFER' Transactions".format(operation))) "for 'TRANSFER' Transactions".format(operation)))
@ -555,9 +556,9 @@ class Transaction(object):
structure containing relevant information for storing them in structure containing relevant information for storing them in
a UTXO set, and performing validation. a UTXO set, and performing validation.
""" """
if self.operation == Transaction.CREATE: if self.operation == self.CREATE:
self._asset_id = self._id self._asset_id = self._id
elif self.operation == Transaction.TRANSFER: elif self.operation == self.TRANSFER:
self._asset_id = self.asset['id'] self._asset_id = self.asset['id']
return (UnspentOutput( return (UnspentOutput(
transaction_id=self._id, transaction_id=self._id,
@ -649,6 +650,31 @@ class Transaction(object):
(inputs, outputs) = cls.validate_create(tx_signers, recipients, asset, metadata) (inputs, outputs) = cls.validate_create(tx_signers, recipients, asset, metadata)
return cls(cls.CREATE, {'data': asset}, inputs, outputs, metadata) return cls(cls.CREATE, {'data': asset}, inputs, outputs, metadata)
@classmethod
def validate_transfer(cls, inputs, recipients, asset_id, metadata):
if not isinstance(inputs, list):
raise TypeError('`inputs` must be a list instance')
if len(inputs) == 0:
raise ValueError('`inputs` must contain at least one item')
if not isinstance(recipients, list):
raise TypeError('`recipients` must be a list instance')
if len(recipients) == 0:
raise ValueError('`recipients` list cannot be empty')
outputs = []
for recipient in recipients:
if not isinstance(recipient, tuple) or len(recipient) != 2:
raise ValueError(('Each `recipient` in the list must be a'
' tuple of `([<list of public keys>],'
' <amount>)`'))
pub_keys, amount = recipient
outputs.append(Output.generate(pub_keys, amount))
if not isinstance(asset_id, str):
raise TypeError('`asset_id` must be a string')
return (deepcopy(inputs), outputs)
@classmethod @classmethod
def transfer(cls, inputs, recipients, asset_id, metadata=None): def transfer(cls, inputs, recipients, asset_id, metadata=None):
"""A simple way to generate a `TRANSFER` transaction. """A simple way to generate a `TRANSFER` transaction.
@ -688,28 +714,7 @@ class Transaction(object):
Returns: Returns:
:class:`~bigchaindb.common.transaction.Transaction` :class:`~bigchaindb.common.transaction.Transaction`
""" """
if not isinstance(inputs, list): (inputs, outputs) = cls.validate_transfer(inputs, recipients, asset_id, metadata)
raise TypeError('`inputs` must be a list instance')
if len(inputs) == 0:
raise ValueError('`inputs` must contain at least one item')
if not isinstance(recipients, list):
raise TypeError('`recipients` must be a list instance')
if len(recipients) == 0:
raise ValueError('`recipients` list cannot be empty')
outputs = []
for recipient in recipients:
if not isinstance(recipient, tuple) or len(recipient) != 2:
raise ValueError(('Each `recipient` in the list must be a'
' tuple of `([<list of public keys>],'
' <amount>)`'))
pub_keys, amount = recipient
outputs.append(Output.generate(pub_keys, amount))
if not isinstance(asset_id, str):
raise TypeError('`asset_id` must be a string')
inputs = deepcopy(inputs)
return cls(cls.TRANSFER, {'id': asset_id}, inputs, outputs, metadata) return cls(cls.TRANSFER, {'id': asset_id}, inputs, outputs, metadata)
def __eq__(self, other): def __eq__(self, other):
@ -954,7 +959,7 @@ class Transaction(object):
# greatly, as we do not have to check against `None` values. # greatly, as we do not have to check against `None` values.
return self._inputs_valid(['dummyvalue' return self._inputs_valid(['dummyvalue'
for _ in self.inputs]) for _ in self.inputs])
elif self.operation == Transaction.TRANSFER: elif self.operation == self.TRANSFER:
return self._inputs_valid([output.fulfillment.condition_uri return self._inputs_valid([output.fulfillment.condition_uri
for output in outputs]) for output in outputs])
else: else:
@ -1098,8 +1103,8 @@ class Transaction(object):
tx = Transaction._remove_signatures(self.to_dict()) tx = Transaction._remove_signatures(self.to_dict())
return Transaction._to_str(tx) return Transaction._to_str(tx)
@staticmethod @classmethod
def get_asset_id(transactions): def get_asset_id(cls, transactions):
"""Get the asset id from a list of :class:`~.Transactions`. """Get the asset id from a list of :class:`~.Transactions`.
This is useful when we want to check if the multiple inputs of a This is useful when we want to check if the multiple inputs of a
@ -1123,7 +1128,7 @@ class Transaction(object):
transactions = [transactions] transactions = [transactions]
# create a set of the transactions' asset ids # create a set of the transactions' asset ids
asset_ids = {tx.id if tx.operation == Transaction.CREATE asset_ids = {tx.id if tx.operation == tx.CREATE
else tx.asset['id'] else tx.asset['id']
for tx in transactions} for tx in transactions}
@ -1242,3 +1247,56 @@ class Transaction(object):
@classmethod @classmethod
def validate_schema(cls, tx): def validate_schema(cls, tx):
pass pass
def validate_transfer_inputs(self, bigchain, current_transactions=[]):
# store the inputs so that we can check if the asset ids match
input_txs = []
input_conditions = []
for input_ in self.inputs:
input_txid = input_.fulfills.txid
input_tx = bigchain.get_transaction(input_txid)
if input_tx is None:
for ctxn in current_transactions:
if ctxn.id == input_txid:
input_tx = ctxn
if input_tx is None:
raise InputDoesNotExist("input `{}` doesn't exist"
.format(input_txid))
spent = bigchain.get_spent(input_txid, input_.fulfills.output,
current_transactions)
if spent:
raise DoubleSpend('input `{}` was already spent'
.format(input_txid))
output = input_tx.outputs[input_.fulfills.output]
input_conditions.append(output)
input_txs.append(input_tx)
# Validate that all inputs are distinct
links = [i.fulfills.to_uri() for i in self.inputs]
if len(links) != len(set(links)):
raise DoubleSpend('tx "{}" spends inputs twice'.format(self.id))
# validate asset id
asset_id = self.get_asset_id(input_txs)
if asset_id != self.asset['id']:
raise AssetIdMismatch(('The asset id of the input does not'
' match the asset id of the'
' transaction'))
input_amount = sum([input_condition.amount for input_condition in input_conditions])
output_amount = sum([output_condition.amount for output_condition in self.outputs])
if output_amount != input_amount:
raise AmountError(('The amount used in the inputs `{}`'
' needs to be same as the amount used'
' in the outputs `{}`')
.format(input_amount, output_amount))
if not self.inputs_valid(input_conditions):
raise InvalidSignature('Transaction signature is invalid.')
return True

View File

@ -1,6 +1,7 @@
"""This module contains all the goodness to integrate BigchainDB """This module contains all the goodness to integrate BigchainDB
with Tendermint.""" with Tendermint."""
import logging import logging
import codecs
from abci.application import BaseApplication from abci.application import BaseApplication
from abci.types_pb2 import ( from abci.types_pb2 import (
@ -42,11 +43,13 @@ class App(BaseApplication):
self.validators = None self.validators = None
self.new_height = None self.new_height = None
def init_chain(self, validators): def init_chain(self, genesis):
"""Initialize chain with block of height 0""" """Initialize chain with block of height 0"""
validator_set = [decode_validator(v) for v in genesis.validators]
block = Block(app_hash='', height=0, transactions=[]) block = Block(app_hash='', height=0, transactions=[])
self.bigchaindb.store_block(block._asdict()) self.bigchaindb.store_block(block._asdict())
self.bigchaindb.store_validator_set(1, validator_set)
return ResponseInitChain() return ResponseInitChain()
def info(self, request): def info(self, request):
@ -129,11 +132,11 @@ class App(BaseApplication):
else: else:
self.block_txn_hash = block['app_hash'] self.block_txn_hash = block['app_hash']
validator_updates = self.bigchaindb.get_validator_update() # TODO: calculate if an election has concluded
validator_updates = [encode_validator(v) for v in validator_updates] # NOTE: ensure the local validator set is updated
# validator_updates = self.bigchaindb.get_validator_update()
# set sync status to true # validator_updates = [encode_validator(v) for v in validator_updates]
self.bigchaindb.delete_validator_update() validator_updates = []
# Store pre-commit state to recover in case there is a crash # Store pre-commit state to recover in case there is a crash
# during `commit` # during `commit`
@ -176,3 +179,10 @@ def encode_validator(v):
return Validator(pub_key=pub_key, return Validator(pub_key=pub_key,
address=b'', address=b'',
power=v['power']) power=v['power'])
def decode_validator(v):
return {'address': codecs.encode(v.address, 'hex').decode().upper().rstrip('\n'),
'pub_key': {'type': v.pub_key.type,
'data': codecs.encode(v.pub_key.data, 'base64').decode().rstrip('\n')},
'voting_power': v.power}

View File

@ -460,19 +460,13 @@ class BigchainDB(object):
def fastquery(self): def fastquery(self):
return fastquery.FastQuery(self.connection) return fastquery.FastQuery(self.connection)
def get_validators(self): def get_validators(self, height=None):
try: result = backend.query.get_validator_set(self.connection, height)
resp = requests.get('{}validators'.format(self.endpoint)) validators = result['validators']
validators = resp.json()['result']['validators'] for v in validators:
for v in validators: v.pop('address')
v.pop('accum')
v.pop('address')
return validators return validators
except requests.exceptions.RequestException as e:
logger.error('Error while connecting to Tendermint HTTP API')
raise e
def get_validator_update(self): def get_validator_update(self):
update = backend.query.get_validator_update(self.connection) update = backend.query.get_validator_update(self.connection)
@ -484,6 +478,14 @@ class BigchainDB(object):
def store_pre_commit_state(self, state): def store_pre_commit_state(self, state):
return backend.query.store_pre_commit_state(self.connection, state) return backend.query.store_pre_commit_state(self.connection, state)
def store_validator_set(self, height, validators):
"""Store validator set at a given `height`.
NOTE: If the validator set already exists at that `height` then an
exception will be raised.
"""
return backend.query.store_validator_set(self.connection, {'height': height,
'validators': validators})
Block = namedtuple('Block', ('app_hash', 'height', 'transactions')) Block = namedtuple('Block', ('app_hash', 'height', 'transactions'))

View File

@ -1,7 +1,4 @@
from bigchaindb.common.exceptions import (InvalidSignature, DoubleSpend, from bigchaindb.common.exceptions import (InvalidSignature,
InputDoesNotExist,
TransactionNotInValidBlock,
AssetIdMismatch, AmountError,
DuplicateTransaction) DuplicateTransaction)
from bigchaindb.common.transaction import Transaction from bigchaindb.common.transaction import Transaction
from bigchaindb.common.utils import (validate_txn_obj, validate_key) from bigchaindb.common.utils import (validate_txn_obj, validate_key)
@ -32,64 +29,12 @@ class Transaction(Transaction):
if bigchain.get_transaction(self.to_dict()['id']) or duplicates: if bigchain.get_transaction(self.to_dict()['id']) or duplicates:
raise DuplicateTransaction('transaction `{}` already exists' raise DuplicateTransaction('transaction `{}` already exists'
.format(self.id)) .format(self.id))
if not self.inputs_valid(input_conditions):
raise InvalidSignature('Transaction signature is invalid.')
elif self.operation == Transaction.TRANSFER: elif self.operation == Transaction.TRANSFER:
# store the inputs so that we can check if the asset ids match self.validate_transfer_inputs(bigchain, current_transactions)
input_txs = []
for input_ in self.inputs:
input_txid = input_.fulfills.txid
input_tx, status = bigchain.\
get_transaction(input_txid, include_status=True)
if input_tx is None:
for ctxn in current_transactions:
# assume that the status as valid for previously validated
# transactions in current round
if ctxn.id == input_txid:
input_tx = ctxn
status = bigchain.TX_VALID
if input_tx is None:
raise InputDoesNotExist("input `{}` doesn't exist"
.format(input_txid))
if status != bigchain.TX_VALID:
raise TransactionNotInValidBlock(
'input `{}` does not exist in a valid block'.format(
input_txid))
spent = bigchain.get_spent(input_txid, input_.fulfills.output,
current_transactions)
if spent and spent.id != self.id:
raise DoubleSpend('input `{}` was already spent'
.format(input_txid))
output = input_tx.outputs[input_.fulfills.output]
input_conditions.append(output)
input_txs.append(input_tx)
# Validate that all inputs are distinct
links = [i.fulfills.to_uri() for i in self.inputs]
if len(links) != len(set(links)):
raise DoubleSpend('tx "{}" spends inputs twice'.format(self.id))
# validate asset id
asset_id = Transaction.get_asset_id(input_txs)
if asset_id != self.asset['id']:
raise AssetIdMismatch(('The asset id of the input does not'
' match the asset id of the'
' transaction'))
input_amount = sum([input_condition.amount for input_condition in input_conditions])
output_amount = sum([output_condition.amount for output_condition in self.outputs])
if output_amount != input_amount:
raise AmountError(('The amount used in the inputs `{}`'
' needs to be same as the amount used'
' in the outputs `{}`')
.format(input_amount, output_amount))
if not self.inputs_valid(input_conditions):
raise InvalidSignature('Transaction signature is invalid.')
return self return self

View File

@ -1,2 +1,3 @@
from bigchaindb.upsert_validator.validator_election import ValidatorElection # noqa from bigchaindb.upsert_validator.validator_election import ValidatorElection # noqa
from bigchaindb.upsert_validator.validator_election_vote import ValidatorElectionVote # noqa

View File

@ -0,0 +1,65 @@
import base58
from bigchaindb.common.transaction import Transaction
from bigchaindb.common.schema import (_validate_schema,
TX_SCHEMA_COMMON,
TX_SCHEMA_TRANSFER,
TX_SCHEMA_VALIDATOR_ELECTION_VOTE)
class ValidatorElectionVote(Transaction):
VALIDATOR_ELECTION_VOTE = 'VALIDATOR_ELECTION_VOTE'
# NOTE: This class inherits TRANSFER txn type. The `TRANSFER` property is
# overriden to re-use methods from parent class
TRANSFER = VALIDATOR_ELECTION_VOTE
ALLOWED_OPERATIONS = (VALIDATOR_ELECTION_VOTE,)
def validate(self, bigchain, current_transactions=[]):
"""Validate election vote transaction
NOTE: There are no additional validity conditions on casting votes i.e.
a vote is just a valid TRANFER transaction
For more details refer BEP-21: https://github.com/bigchaindb/BEPs/tree/master/21
Args:
bigchain (BigchainDB): an instantiated bigchaindb.lib.BigchainDB object.
Returns:
`True` if the election vote is valid
Raises:
ValidationError: If the election vote is invalid
"""
self.validate_transfer_inputs(bigchain, current_transactions)
return self
@classmethod
def to_public_key(cls, election_id):
return base58.b58encode(bytes.fromhex(election_id))
@classmethod
def generate(cls, inputs, recipients, election_id, metadata=None):
(inputs, outputs) = cls.validate_transfer(inputs, recipients, election_id, metadata)
election_vote = cls(cls.VALIDATOR_ELECTION_VOTE, {'id': election_id}, inputs, outputs, metadata)
cls.validate_schema(election_vote.to_dict(), skip_id=True)
return election_vote
@classmethod
def validate_schema(cls, tx, skip_id=False):
"""Validate the validator election vote transaction. Since `VALIDATOR_ELECTION_VOTE` extends `TRANFER`
transaction, all the validations for `CREATE` transaction should be inherited
"""
if not skip_id:
cls.validate_id(tx)
_validate_schema(TX_SCHEMA_COMMON, tx)
_validate_schema(TX_SCHEMA_TRANSFER, tx)
_validate_schema(TX_SCHEMA_VALIDATOR_ELECTION_VOTE, tx)
@classmethod
def create(cls, tx_signers, recipients, metadata=None, asset=None):
raise NotImplementedError
@classmethod
def transfer(cls, tx_signers, recipients, metadata=None, asset=None):
raise NotImplementedError

View File

@ -1,2 +1,2 @@
__version__ = '2.0.0b4' __version__ = '2.0.0b5'
__short_version__ = '2.0b4' __short_version__ = '2.0b5'

View File

@ -44,7 +44,7 @@ services:
retries: 3 retries: 3
command: '.ci/entrypoint.sh' command: '.ci/entrypoint.sh'
tendermint: tendermint:
image: tendermint/tendermint:0.22.3 image: tendermint/tendermint:0.22.8
# volumes: # volumes:
# - ./tmdata:/tendermint # - ./tmdata:/tendermint
entrypoint: '' entrypoint: ''

View File

@ -32,7 +32,7 @@ $ curl -fOL https://raw.githubusercontent.com/bigchaindb/bigchaindb/${GIT_BRANCH
## Quick Start ## Quick Start
If you run `stack.sh` out of the box i.e. without any configuration changes, you will be able to deploy a 4 node If you run `stack.sh` out of the box i.e. without any configuration changes, you will be able to deploy a 4 node
BigchainDB network with Docker containers, created from `master` branch of `bigchaindb/bigchaindb` repo and Tendermint version `0.22.3`. BigchainDB network with Docker containers, created from `master` branch of `bigchaindb/bigchaindb` repo and Tendermint version `0.22.8`.
**Note**: Run `stack.sh` with either root or non-root user with sudo enabled. **Note**: Run `stack.sh` with either root or non-root user with sudo enabled.
@ -90,7 +90,7 @@ $ bash stack.sh -h
variable. (default: master) variable. (default: master)
ENV[TM_VERSION] ENV[TM_VERSION]
(Optional) Tendermint version to use for the setup. (default: 0.22.3) (Optional) Tendermint version to use for the setup. (default: 0.22.8)
ENV[MONGO_VERSION] ENV[MONGO_VERSION]
(Optional) MongoDB version to use with the setup. (default: 3.6) (Optional) MongoDB version to use with the setup. (default: 3.6)
@ -171,8 +171,8 @@ $ export STACK_REPO=bigchaindb/bigchaindb
# Default: master # Default: master
$ export STACK_BRANCH=master $ export STACK_BRANCH=master
#Optional, since 0.22.3 is the default tendermint version. #Optional, since 0.22.8 is the default tendermint version.
$ export TM_VERSION=0.22.3 $ export TM_VERSION=0.22.8
#Optional, since 3.6 is the default MongoDB version. #Optional, since 3.6 is the default MongoDB version.
$ export MONGO_VERSION=3.6 $ export MONGO_VERSION=3.6
@ -222,8 +222,8 @@ $ export STACK_REPO=bigchaindb/bigchaindb
# Default: master # Default: master
$ export STACK_BRANCH=master $ export STACK_BRANCH=master
#Optional, since 0.22.3 is the default tendermint version #Optional, since 0.22.8 is the default tendermint version
$ export TM_VERSION=0.22.3 $ export TM_VERSION=0.22.8
#Optional, since 3.6 is the default MongoDB version. #Optional, since 3.6 is the default MongoDB version.
$ export MONGO_VERSION=3.6 $ export MONGO_VERSION=3.6

View File

@ -19,13 +19,13 @@ After the installation of MongoDB is complete, run MongoDB using `sudo mongod`
### Installing a Tendermint Executable ### Installing a Tendermint Executable
Find [the version number of the latest Tendermint release](https://github.com/tendermint/tendermint/releases) and install it using the following, where 0.22.3 should be replaced by the latest released version number: Find [the version number of the latest Tendermint release](https://github.com/tendermint/tendermint/releases) and install it using the following, where 0.22.8 should be replaced by the latest released version number:
```bash ```bash
$ sudo apt install -y unzip $ sudo apt install -y unzip
$ wget https://github.com/tendermint/tendermint/releases/download/v0.22.3/tendermint_0.22.3_linux_amd64.zip $ wget https://github.com/tendermint/tendermint/releases/download/v0.22.8-autodraft/tendermint_0.22.8_linux_amd64.zip
$ unzip tendermint_0.22.3_linux_amd64.zip $ unzip tendermint_0.22.8_linux_amd64.zip
$ rm tendermint_0.22.3_linux_amd64.zip $ rm tendermint_0.22.8_linux_amd64.zip
$ sudo mv tendermint /usr/local/bin $ sudo mv tendermint /usr/local/bin
``` ```

View File

@ -91,4 +91,5 @@ More About BigchainDB
transaction-concepts transaction-concepts
store-files store-files
permissions permissions
private-data
Data Models <https://docs.bigchaindb.com/projects/server/en/latest/data-models/index.html> Data Models <https://docs.bigchaindb.com/projects/server/en/latest/data-models/index.html>

View File

@ -53,20 +53,7 @@ You could do more elaborate things too. As one example, each time someone writes
Read Permissions Read Permissions
================ ================
All the data stored in a BigchainDB network can be read by anyone with access to that network. One *can* store encrypted data, but if the decryption key ever leaks out, then the encrypted data can be read, decrypted, and leak out too. (Deleting the encrypted data is :doc:`not an option <immutable>`.) See the page titled, :doc:`BigchainDB, Privacy and Private Data <private-data>`.
The permission to read some specific information (e.g. a music file) can be thought of as an *asset*. (In many countries, that permission or "right" is a kind of intellectual property.)
BigchainDB can be used to register that asset and transfer it from owner to owner.
Today, BigchainDB does not have a way to restrict read access of data stored in a BigchainDB network, but many third-party services do offer that (e.g. Google Docs, Dropbox).
In principle, a third party service could ask a BigchainDB network to determine if a particular user has permission to read some particular data. Indeed they could use BigchainDB to keep track of *all* the rights a user has for some data (not just the right to read it).
That third party could also use BigchainDB to store audit logs, i.e. records of every read, write or other operation on stored data.
BigchainDB can be used in other ways to help parties exchange private data:
- It can be used to publicly disclose the *availability* of some private data (stored elsewhere). For example, there might be a description of the data and a price.
- It can be used to record the TLS handshakes which two parties sent to each other to establish an encrypted and authenticated TLS connection, which they could use to exchange private data with each other. (The stored handshake information wouldn't be enough, by itself, to decrypt the data.) It would be a "proof of TLS handshake."
- See the BigchainDB `Privacy Protocols repository <https://github.com/bigchaindb/privacy-protocols>`_ for more techniques.
Role-Based Access Control (RBAC) Role-Based Access Control (RBAC)
================================ ================================

View File

@ -0,0 +1,100 @@
BigchainDB, Privacy and Private Data
------------------------------------
Basic Facts
===========
#. One can store arbitrary data (including encrypted data) in a BigchainDB network, within limits: theres a maximum transaction size. Every transaction has a ``metadata`` section which can store almost any Unicode string (up to some maximum length). Similarly, every CREATE transaction has an ``asset.data`` section which can store almost any Unicode string.
#. The data stored in certain BigchainDB transaction fields must not be encrypted, e.g. public keys and amounts. BigchainDB doesnt offer private transactions akin to Zcoin.
#. Once data has been stored in a BigchainDB network, its best to assume it cant be changed or deleted.
#. Every node in a BigchainDB network has a full copy of all the stored data.
#. Every node in a BigchainDB network can read all the stored data.
#. Everyone with full access to a BigchainDB node (e.g. the sysadmin of a node) can read all the data stored on that node.
#. Everyone given access to a node via the BigchainDB HTTP API can find and read all the data stored by BigchainDB. The list of people with access might be quite short.
#. If the connection between an external user and a BigchainDB node isnt encrypted (using HTTPS, for example), then a wiretapper can read all HTTP requests and responses in transit.
#. If someone gets access to plaintext (regardless of where they got it), then they can (in principle) share it with the whole world. One can make it difficult for them to do that, e.g. if it is a lot of data and they only get access inside a secure room where they are searched as they leave the room.
Storing Private Data Off-Chain
==============================
A system could store data off-chain, e.g. in a third-party database, document store, or content management system (CMS) and it could use BigchainDB to:
- Keep track of who has read permissions (or other permissions) in a third-party system. An example of how this could be done is described below.
- Keep a permanent record of all requests made to the third-party system.
- Store hashes of documents-stored-elsewhere, so that a change in any document can be detected.
- Record all handshake-establishing requests and responses between two off-chain parties (e.g. a Diffie-Hellman key exchange), so as to prove that they established an encrypted tunnel (without giving readers access to that tunnel). There are more details about this idea in `the BigchainDB Privacy Protocols repository <https://github.com/bigchaindb/privacy-protocols>`_.
A simple way to record who has read permission on a particular document would be for the third-party system (“DocPile”) to store a CREATE transaction in a BigchainDB network for every document+user pair, to indicate that that user has read permissions for that document. The transaction could be signed by DocPile (or maybe by a document owner, as a variation). The asset data field would contain 1) the unique ID of the user and 2) the unique ID of the document. The one output on the CREATE transaction would only be transferable/spendable by DocPile (or, again, a document owner).
To revoke the read permission, DocPile could create a TRANSFER transaction, to spend the one output on the original CREATE transaction, with a metadata field to say that the user in question no longer has read permission on that document.
This can be carried on indefinitely, i.e. another TRANSFER transaction could be created by DocPile to indicate that the user now has read permissions again.
DocPile can figure out if a given user has read permissions on a given document by reading the last transaction in the CREATE → TRANSFER → TRANSFER → etc. chain for that user+document pair.
There are other ways to accomplish the same thing. The above is just one example.
You might have noticed that the above example didnt treat the “read permission” as an asset owned (controlled) by a user because if the permission asset is given to (transferred to or created by) the user then it cannot be controlled any further (by DocPile) until the user transfers it back to DocPile. Moreover, the user could transfer the asset to someone else, which might be problematic.
Storing Private Data On-Chain, Encrypted
========================================
There are many ways to store private data on-chain, encrypted. Every use case has its own objectives and constraints, and the best solution depends on the use case. `The BigchainDB consulting team <https://www.bigchaindb.com/services/>`_, along with our partners, can help you design the best solution for your use case.
Below we describe some example system setups, using various crypto primitives, to give a sense of whats possible.
Please note:
- Ed25519 keypairs are designed for signing and verifying cryptographic signatures, `not for encrypting and decrypting messages <https://crypto.stackexchange.com/questions/27866/why-curve25519-for-encryption-but-ed25519-for-signatures>`_. For encryption, you should use keypairs designed for encryption, such as X25519.
- If someone (or some group) publishes how to decrypt some encrypted data on-chain, then anyone with access to that encrypted data will be able to get the plaintext. The data cant be deleted.
- Encrypted data cant be indexed or searched by MongoDB. (It can index and search the ciphertext, but thats not very useful.) One might use homomorphic encryption to index and search encrypted data, but MongoDB doesnt have any plans to support that any time soon. If there is indexing or keyword search needed, then some fields of the ``asset.data`` or ``metadata`` objects can be left as plain text and the sensitive information can be stored in an encrypted child-object.
System Example 1
~~~~~~~~~~~~~~~~
Encrypt the data with a symmetric key and store the ciphertext on-chain (in ``metadata`` or ``asset.data``). To communicate the key to a third party, use their public key to encrypt the symmetric key and send them that. They can decrypt the symmetric key with their private key, and then use that symmetric key to decrypt the on-chain ciphertext.
The reason for using a symmetric key along with public/private keypairs is so the ciphertext only has to be stored once.
System Example 2
~~~~~~~~~~~~~~~~
This example uses `proxy re-encryption <https://en.wikipedia.org/wiki/Proxy_re-encryption>`_:
#. MegaCorp encrypts some data using its own public key, then stores that encrypted data (ciphertext 1) in a BigchainDB network.
#. MegaCorp wants to let others read that encrypted data, but without ever sharing their private key and without having to re-encrypt themselves for every new recipient. Instead, they find a “proxy” named Moxie, to provide proxy re-encryption services.
#. Zorban contacts MegaCorp and asks for permission to read the data.
#. MegaCorp asks Zorban for his public key.
#. MegaCorp generates a “re-encryption key” and sends it to their proxy, Moxie.
#. Moxie (the proxy) uses the re-encryption key to re-encrypt ciphertext 1, creating ciphertext 2.
#. Moxie sends ciphertext 2 to Zorban (or to MegaCorp who forwards it to Zorban).
#. Zorban uses his private key to decrypt ciphertext 2, getting the original un-encrypted data.
Note:
- The proxy only ever sees ciphertext. They never see any un-encrypted data.
- Zorban never got the ability to decrypt ciphertext 1, i.e. the on-chain data.
- There are variations on the above flow.
System Example 3
~~~~~~~~~~~~~~~~
This example uses `erasure coding <https://en.wikipedia.org/wiki/Erasure_code>`_:
#. Erasure-code the data into n pieces.
#. Encrypt each of the n pieces with a different encryption key.
#. Store the n encrypted pieces on-chain, e.g. in n separate transactions.
#. Share each of the n decryption keys with a different party.
If k < n of the key-holders get and decrypt k of the pieces, they can reconstruct the original plaintext. Fewer than k would not be enough.
System Example 4
~~~~~~~~~~~~~~~~
This setup could be used in an enterprise blockchain scenario where a special node should be able to see parts of the data, but the others should not.
- The special node generates an X25519 keypair (or similar asymmetric *encryption* keypair).
- A BigchainDB end user finds out the X25519 public key (encryption key) of the special node.
- The end user creates a valid BigchainDB transaction, with either the asset.data or the metadata (or both) encrypted using the above-mentioned public key.
- This is only done for transactions where the contents of asset.data or metadata don't matter for validation, so all node operators can validate the transaction.
- The special node is able to decrypt the encrypted data, but the other node operators can't, and nor can any other end user.

View File

@ -0,0 +1,85 @@
# Run BigchainDB with all-in-one Docker
For those who like using Docker and wish to experiment with BigchainDB in
non-production environments, we currently maintain a BigchainDB all-in-one
Docker image and a
`Dockerfile-all-in-one` that can be used to build an image for `bigchaindb`.
This image contains all the services required for a BigchainDB node i.e.
- BigchainDB Server
- MongoDB
- Tendermint
**Note:** **NOT for Production Use:** *This is a single-node opinionated image not well suited for a network deployment.*
*This image is to help quick deployment for early adopters, for a more standard approach please refer to one of our deployment guides:*
- [BigchainDB developer setup guides](https://docs.bigchaindb.com/projects/contributing/en/latest/dev-setup-coding-and-contribution-process/index.html).
- [BigchainDB with Kubernetes](http://docs.bigchaindb.com/projects/server/en/latest/k8s-deployment-template/index.html).
## Prerequisite(s)
- [Docker](https://docs.docker.com/engine/installation/)
## Pull and Run the Image from Docker Hub
With Docker installed, you can proceed as follows.
In a terminal shell, pull the latest version of the BigchainDB all-in-one Docker image using:
```text
$ docker pull bigchaindb/bigchaindb:all-in-one
$ docker run \
--detach \
--name bigchaindb \
--publish 9984:9984 \
--publish 9985:9985 \
--publish 27017:27017 \
--publish 26657:26657 \
--volume $HOME/bigchaindb_docker/mongodb/data/db:/data/db \
--volume $HOME/bigchaindb_docker/mongodb/data/configdb:/data/configdb \
--volume $HOME/bigchaindb_docker/tendermint:/tendermint \
bigchaindb/bigchaindb:all-in-one
```
Let's analyze that command:
* `docker run` tells Docker to run some image
* `--detach` run the container in the background
* `--publish 9984:9984` map the host port `9984` to the container port `9984`
(the BigchainDB API server)
* `9985` BigchainDB Websocket server
* `27017` Default port for MongoDB
* `26657` Tendermint RPC server
* `--volume $HOME/bigchaindb_docker/mongodb/data/db:/data/db` map the host directory
`$HOME/bigchaindb_docker/mongodb/data/db` to the container directory `/data/db`;
this allows us to have the data persisted on the host machine,
you can read more in the [official Docker
documentation](https://docs.docker.com/engine/tutorials/dockervolumes)
* `$HOME/bigchaindb_docker/tendermint:/tendermint` to persist Tendermint data.
* `bigchaindb/bigchaindb:all-in-one` the image to use. All the options after the container name are passed on to the entrypoint inside the container.
## Verify
```text
$ docker ps | grep bigchaindb
```
Send your first transaction using [BigchainDB drivers](../drivers-clients/index.html).
## Building Your Own Image
Assuming you have Docker installed, you would proceed as follows.
In a terminal shell:
```text
git clone git@github.com:bigchaindb/bigchaindb.git
cd bigchaindb/
```
Build the Docker image:
```text
docker build --file Dockerfile-all-in-one --tag <tag/name:latest> .
```
Now you can use your own image to run BigchainDB all-in-one container.

View File

@ -4,7 +4,6 @@ Appendices
.. toctree:: .. toctree::
:maxdepth: 1 :maxdepth: 1
install-os-level-deps
json-serialization json-serialization
cryptography cryptography
the-bigchaindb-class the-bigchaindb-class
@ -15,3 +14,4 @@ Appendices
firewall-notes firewall-notes
ntp-notes ntp-notes
licenses licenses
all-in-one-bigchaindb

View File

@ -1,17 +0,0 @@
# How to Install OS-Level Dependencies
BigchainDB Server has some OS-level dependencies that must be installed.
On Ubuntu 16.04, we found that the following was enough:
```text
sudo apt-get update
sudo apt-get install libffi-dev libssl-dev
```
On Fedora 23–25, we found that the following was enough:
```text
sudo dnf update
sudo dnf install gcc-c++ redhat-rpm-config python3-devel libffi-devel
```
(If you're using a version of Fedora before version 22, you may have to use `yum` instead of `dnf`.)

View File

@ -2,7 +2,6 @@
A **BigchainDB Cluster** is a set of connected **BigchainDB Nodes**, managed by a **BigchainDB Consortium** (i.e. an organization). Those terms are defined in the [BigchainDB Terminology page](https://docs.bigchaindb.com/en/latest/terminology.html). A **BigchainDB Cluster** is a set of connected **BigchainDB Nodes**, managed by a **BigchainDB Consortium** (i.e. an organization). Those terms are defined in the [BigchainDB Terminology page](https://docs.bigchaindb.com/en/latest/terminology.html).
## Consortium Structure & Governance ## Consortium Structure & Governance
The consortium might be a company, a foundation, a cooperative, or [some other form of organization](https://en.wikipedia.org/wiki/Organizational_structure). The consortium might be a company, a foundation, a cooperative, or [some other form of organization](https://en.wikipedia.org/wiki/Organizational_structure).
@ -13,13 +12,6 @@ This documentation doesn't explain how to create a consortium, nor does it outli
It's worth noting that the decentralization of a BigchainDB cluster depends, It's worth noting that the decentralization of a BigchainDB cluster depends,
to some extent, on the decentralization of the associated consortium. See the pages about [decentralization](https://docs.bigchaindb.com/en/latest/decentralized.html) and [node diversity](https://docs.bigchaindb.com/en/latest/diversity.html). to some extent, on the decentralization of the associated consortium. See the pages about [decentralization](https://docs.bigchaindb.com/en/latest/decentralized.html) and [node diversity](https://docs.bigchaindb.com/en/latest/diversity.html).
## Relevant Technical Documentation
Anyone building or managing a BigchainDB cluster may be interested
in [our production deployment template](production-deployment-template/index.html).
## Cluster DNS Records and SSL Certificates ## Cluster DNS Records and SSL Certificates
We now describe how *we* set up the external (public-facing) DNS records for a BigchainDB cluster. Your consortium may opt to do it differently. We now describe how *we* set up the external (public-facing) DNS records for a BigchainDB cluster. Your consortium may opt to do it differently.
@ -30,14 +22,12 @@ There were several goals:
* There should be no sharing of SSL certificates among BigchainDB node operators. * There should be no sharing of SSL certificates among BigchainDB node operators.
* Optional: Allow clients to connect to a "random" BigchainDB node in the cluster at one particular domain (or subdomain). * Optional: Allow clients to connect to a "random" BigchainDB node in the cluster at one particular domain (or subdomain).
### Node Operator Responsibilities ### Node Operator Responsibilities
1. Register a domain (or use one that you already have) for your BigchainDB node. You can use a subdomain if you like. For example, you might opt to use `abc-org73.net`, `api.dynabob8.io` or `figmentdb3.ninja`. 1. Register a domain (or use one that you already have) for your BigchainDB node. You can use a subdomain if you like. For example, you might opt to use `abc-org73.net`, `api.dynabob8.io` or `figmentdb3.ninja`.
2. Get an SSL certificate for your domain or subdomain, and properly install it in your node (e.g. in your NGINX instance). 2. Get an SSL certificate for your domain or subdomain, and properly install it in your node (e.g. in your NGINX instance).
3. Create a DNS A Record mapping your domain or subdomain to the public IP address of your node (i.e. the one that serves the BigchainDB HTTP API). 3. Create a DNS A Record mapping your domain or subdomain to the public IP address of your node (i.e. the one that serves the BigchainDB HTTP API).
### Consortium Responsibilities ### Consortium Responsibilities
Optional: The consortium managing the BigchainDB cluster could register a domain name and set up CNAME records mapping that domain name (or one of its subdomains) to each of the nodes in the cluster. For example, if the consortium registered `bdbcluster.io`, they could set up CNAME records like the following: Optional: The consortium managing the BigchainDB cluster could register a domain name and set up CNAME records mapping that domain name (or one of its subdomains) to each of the nodes in the cluster. For example, if the consortium registered `bdbcluster.io`, they could set up CNAME records like the following:

View File

@ -6,6 +6,7 @@ Libraries and Tools Maintained by the BigchainDB Team
* `Python Driver <https://docs.bigchaindb.com/projects/py-driver/en/latest/index.html>`_ * `Python Driver <https://docs.bigchaindb.com/projects/py-driver/en/latest/index.html>`_
* `JavaScript / Node.js Driver <https://github.com/bigchaindb/js-bigchaindb-driver>`_ * `JavaScript / Node.js Driver <https://github.com/bigchaindb/js-bigchaindb-driver>`_
* `Java driver <https://github.com/bigchaindb/java-bigchaindb-driver>`_
Community-Driven Libraries and Tools Community-Driven Libraries and Tools
------------------------------------ ------------------------------------
@ -17,6 +18,5 @@ Community-Driven Libraries and Tools
* `Haskell transaction builder <https://github.com/bigchaindb/bigchaindb-hs>`_ * `Haskell transaction builder <https://github.com/bigchaindb/bigchaindb-hs>`_
* `Go driver <https://github.com/zbo14/envoke/blob/master/bigchain/bigchain.go>`_ * `Go driver <https://github.com/zbo14/envoke/blob/master/bigchain/bigchain.go>`_
* `Java driver <https://github.com/authenteq/java-bigchaindb-driver>`_
* `Ruby driver <https://github.com/LicenseRocks/bigchaindb_ruby>`_ * `Ruby driver <https://github.com/LicenseRocks/bigchaindb_ruby>`_
* `Ruby library for preparing/signing transactions and submitting them or querying a BigchainDB node (MIT licensed) <https://rubygems.org/gems/bigchaindb>`_ * `Ruby library for preparing/signing transactions and submitting them or querying a BigchainDB node (MIT licensed) <https://rubygems.org/gems/bigchaindb>`_

View File

@ -10,13 +10,13 @@ BigchainDB Server Documentation
simple-network-setup simple-network-setup
production-nodes/index production-nodes/index
clusters clusters
production-deployment-template/index
dev-and-test/index dev-and-test/index
server-reference/index server-reference/index
http-client-server-api http-client-server-api
events/index events/index
drivers-clients/index drivers-clients/index
data-models/index data-models/index
k8s-deployment-template/index
release-notes release-notes
glossary glossary
appendices/index appendices/index

View File

@ -1,13 +1,25 @@
Architecture of a BigchainDB Node Architecture of a BigchainDB Node Running in a Kubernetes Cluster
================================== =================================================================
A BigchainDB Production deployment is hosted on a Kubernetes cluster and includes: .. note::
A highly-available Kubernetes cluster requires at least five virtual machines
(three for the master and two for your app's containers).
Therefore we don't recommend using Kubernetes to run a BigchainDB node
if that's the only thing the Kubernetes cluster will be running.
Instead, see **How to Set Up a BigchainDB Network**.
If your organization already *has* a big Kubernetes cluster running many containers,
and your organization has people who know Kubernetes,
then this Kubernetes deployment template might be helpful.
If you deploy a BigchainDB node into a Kubernetes cluster
as described in these docs, it will include:
* NGINX, OpenResty, BigchainDB, MongoDB and Tendermint * NGINX, OpenResty, BigchainDB, MongoDB and Tendermint
`Kubernetes Services <https://kubernetes.io/docs/concepts/services-networking/service/>`_. `Kubernetes Services <https://kubernetes.io/docs/concepts/services-networking/service/>`_.
* NGINX, OpenResty, BigchainDB and MongoDB Monitoring Agent. * NGINX, OpenResty, BigchainDB and MongoDB Monitoring Agent
`Kubernetes Deployments <https://kubernetes.io/docs/concepts/workloads/controllers/deployment/>`_. `Kubernetes Deployments <https://kubernetes.io/docs/concepts/workloads/controllers/deployment/>`_.
* MongoDB and Tendermint `Kubernetes StatefulSet <https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/>`_. * MongoDB and Tendermint `Kubernetes StatefulSets <https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/>`_.
* Third party services like `3scale <https://3scale.net>`_, * Third party services like `3scale <https://3scale.net>`_,
`MongoDB Cloud Manager <https://cloud.mongodb.com>`_ and the `MongoDB Cloud Manager <https://cloud.mongodb.com>`_ and the
`Azure Operations Management Suite `Azure Operations Management Suite

View File

@ -3,6 +3,17 @@
Kubernetes Template: Deploying a BigchainDB network Kubernetes Template: Deploying a BigchainDB network
=================================================== ===================================================
.. note::
A highly-available Kubernetes cluster requires at least five virtual machines
(three for the master and two for your app's containers).
Therefore we don't recommend using Kubernetes to run a BigchainDB node
if that's the only thing the Kubernetes cluster will be running.
Instead, see **How to Set Up a BigchainDB Network**.
If your organization already *has* a big Kubernetes cluster running many containers,
and your organization has people who know Kubernetes,
then this Kubernetes deployment template might be helpful.
This page describes how to deploy a static BigchainDB + Tendermint network. This page describes how to deploy a static BigchainDB + Tendermint network.
If you want to deploy a stand-alone BigchainDB node in a BigchainDB cluster, If you want to deploy a stand-alone BigchainDB node in a BigchainDB cluster,

View File

@ -41,7 +41,7 @@ Configure MongoDB Cloud Manager for Monitoring
* If you have authentication enabled, select the option to enable * If you have authentication enabled, select the option to enable
authentication and specify the authentication mechanism as per your authentication and specify the authentication mechanism as per your
deployment. The default BigchainDB production deployment currently deployment. The default BigchainDB Kubernetes deployment template currently
supports ``X.509 Client Certificate`` as the authentication mechanism. supports ``X.509 Client Certificate`` as the authentication mechanism.
* If you have TLS enabled, select the option to enable TLS/SSL for MongoDB * If you have TLS enabled, select the option to enable TLS/SSL for MongoDB

View File

@ -0,0 +1,40 @@
Kubernetes Deployment Template
==============================
.. note::
A highly-available Kubernetes cluster requires at least five virtual machines
(three for the master and two for your app's containers).
Therefore we don't recommend using Kubernetes to run a BigchainDB node
if that's the only thing the Kubernetes cluster will be running.
Instead, see **How to Set Up a BigchainDB Network**.
If your organization already *has* a big Kubernetes cluster running many containers,
and your organization has people who know Kubernetes,
then this Kubernetes deployment template might be helpful.
This section outlines a way to deploy a BigchainDB node (or BigchainDB cluster)
on Microsoft Azure using Kubernetes.
You may choose to use it as a template or reference for your own deployment,
but *we make no claim that it is suitable for your purposes*.
Feel free change things to suit your needs or preferences.
.. toctree::
:maxdepth: 1
workflow
ca-installation
server-tls-certificate
client-tls-certificate
revoke-tls-certificate
template-kubernetes-azure
node-on-kubernetes
node-config-map-and-secrets
log-analytics
cloud-manager
easy-rsa
upgrade-on-kubernetes
bigchaindb-network-on-kubernetes
tectonic-azure
troubleshoot
architecture

View File

@ -3,6 +3,17 @@
How to Configure a BigchainDB Node How to Configure a BigchainDB Node
================================== ==================================
.. note::
A highly-available Kubernetes cluster requires at least five virtual machines
(three for the master and two for your app's containers).
Therefore we don't recommend using Kubernetes to run a BigchainDB node
if that's the only thing the Kubernetes cluster will be running.
Instead, see **How to Set Up a BigchainDB Network**.
If your organization already *has* a big Kubernetes cluster running many containers,
and your organization has people who know Kubernetes,
then this Kubernetes deployment template might be helpful.
This page outlines the steps to set a bunch of configuration settings This page outlines the steps to set a bunch of configuration settings
in your BigchainDB node. in your BigchainDB node.
They are pushed to the Kubernetes cluster in two files, They are pushed to the Kubernetes cluster in two files,

View File

@ -3,7 +3,18 @@
Kubernetes Template: Deploy a Single BigchainDB Node Kubernetes Template: Deploy a Single BigchainDB Node
==================================================== ====================================================
This page describes how to deploy a BigchainDB + Tendermint node .. note::
A highly-available Kubernetes cluster requires at least five virtual machines
(three for the master and two for your app's containers).
Therefore we don't recommend using Kubernetes to run a BigchainDB node
if that's the only thing the Kubernetes cluster will be running.
Instead, see **How to Set Up a BigchainDB Network**.
If your organization already *has* a big Kubernetes cluster running many containers,
and your organization has people who know Kubernetes,
then this Kubernetes deployment template might be helpful.
This page describes how to deploy a BigchainDB node
using `Kubernetes <https://kubernetes.io/>`_. using `Kubernetes <https://kubernetes.io/>`_.
It assumes you already have a running Kubernetes cluster. It assumes you already have a running Kubernetes cluster.
@ -29,7 +40,7 @@ If you don't have that file, then you need to get it.
**Azure.** If you deployed your Kubernetes cluster on Azure **Azure.** If you deployed your Kubernetes cluster on Azure
using the Azure CLI 2.0 (as per :doc:`our template using the Azure CLI 2.0 (as per :doc:`our template
<../production-deployment-template/template-kubernetes-azure>`), <../k8s-deployment-template/template-kubernetes-azure>`),
then you can get the ``~/.kube/config`` file using: then you can get the ``~/.kube/config`` file using:
.. code:: bash .. code:: bash
@ -277,7 +288,7 @@ The first thing to do is create the Kubernetes storage classes.
First, you need an Azure storage account. First, you need an Azure storage account.
If you deployed your Kubernetes cluster on Azure If you deployed your Kubernetes cluster on Azure
using the Azure CLI 2.0 using the Azure CLI 2.0
(as per :doc:`our template <../production-deployment-template/template-kubernetes-azure>`), (as per :doc:`our template <../k8s-deployment-template/template-kubernetes-azure>`),
then the `az acs create` command already created a then the `az acs create` command already created a
storage account in the same location and resource group storage account in the same location and resource group
as your Kubernetes cluster. as your Kubernetes cluster.
@ -289,7 +300,7 @@ in the same data center.
Premium storage is higher-cost and higher-performance. Premium storage is higher-cost and higher-performance.
It uses solid state drives (SSD). It uses solid state drives (SSD).
We recommend using Premium storage for our production template. We recommend using Premium storage with our Kubernetes deployment template.
Create a `storage account <https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account>`_ Create a `storage account <https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account>`_
for Premium storage and associate it with your Azure resource group. for Premium storage and associate it with your Azure resource group.
For future reference, the command to create a storage account is For future reference, the command to create a storage account is
@ -372,7 +383,7 @@ but it should become "Bound" fairly quickly.
$ kubectl patch pv <pv-name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' $ kubectl patch pv <pv-name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
For notes on recreating a private volume from a released Azure disk resource consult For notes on recreating a private volume from a released Azure disk resource consult
:doc:`the page about cluster troubleshooting <../production-deployment-template/troubleshoot>`. :doc:`the page about cluster troubleshooting <../k8s-deployment-template/troubleshoot>`.
.. _start-kubernetes-stateful-set-mongodb: .. _start-kubernetes-stateful-set-mongodb:
@ -569,7 +580,7 @@ Step 19(Optional): Configure the MongoDB Cloud Manager
------------------------------------------------------ ------------------------------------------------------
Refer to the Refer to the
:doc:`documentation <../production-deployment-template/cloud-manager>` :doc:`documentation <../k8s-deployment-template/cloud-manager>`
for details on how to configure the MongoDB Cloud Manager to enable for details on how to configure the MongoDB Cloud Manager to enable
monitoring and backup. monitoring and backup.
@ -749,4 +760,4 @@ verify that your node or cluster works as expected.
Next, you can set up log analytics and monitoring, by following our templates: Next, you can set up log analytics and monitoring, by following our templates:
* :doc:`../production-deployment-template/log-analytics`. * :doc:`../k8s-deployment-template/log-analytics`.

View File

@ -1,6 +1,17 @@
Walkthrough: Deploy a Kubernetes Cluster on Azure using Tectonic by CoreOS Walkthrough: Deploy a Kubernetes Cluster on Azure using Tectonic by CoreOS
========================================================================== ==========================================================================
.. note::
A highly-available Kubernetes cluster requires at least five virtual machines
(three for the master and two for your app's containers).
Therefore we don't recommend using Kubernetes to run a BigchainDB node
if that's the only thing the Kubernetes cluster will be running.
Instead, see **How to Set Up a BigchainDB Network**.
If your organization already *has* a big Kubernetes cluster running many containers,
and your organization has people who know Kubernetes,
then this Kubernetes deployment template might be helpful.
A BigchainDB node can be run inside a `Kubernetes <https://kubernetes.io/>`_ A BigchainDB node can be run inside a `Kubernetes <https://kubernetes.io/>`_
cluster. cluster.
This page describes one way to deploy a Kubernetes cluster on Azure using Tectonic. This page describes one way to deploy a Kubernetes cluster on Azure using Tectonic.

View File

@ -1,6 +1,17 @@
Template: Deploy a Kubernetes Cluster on Azure Template: Deploy a Kubernetes Cluster on Azure
============================================== ==============================================
.. note::
A highly-available Kubernetes cluster requires at least five virtual machines
(three for the master and two for your app's containers).
Therefore we don't recommend using Kubernetes to run a BigchainDB node
if that's the only thing the Kubernetes cluster will be running.
Instead, see **How to Set Up a BigchainDB Network**.
If your organization already *has* a big Kubernetes cluster running many containers,
and your organization has people who know Kubernetes,
then this Kubernetes deployment template might be helpful.
A BigchainDB node can be run inside a `Kubernetes <https://kubernetes.io/>`_ A BigchainDB node can be run inside a `Kubernetes <https://kubernetes.io/>`_
cluster. cluster.
This page describes one way to deploy a Kubernetes cluster on Azure. This page describes one way to deploy a Kubernetes cluster on Azure.

View File

@ -1,6 +1,17 @@
Kubernetes Template: Upgrade all Software in a BigchainDB Node Kubernetes Template: Upgrade all Software in a BigchainDB Node
============================================================== ==============================================================
.. note::
A highly-available Kubernetes cluster requires at least five virtual machines
(three for the master and two for your app's containers).
Therefore we don't recommend using Kubernetes to run a BigchainDB node
if that's the only thing the Kubernetes cluster will be running.
Instead, see **How to Set Up a BigchainDB Network**.
If your organization already *has* a big Kubernetes cluster running many containers,
and your organization has people who know Kubernetes,
then this Kubernetes deployment template might be helpful.
This page outlines how to upgrade all the software associated This page outlines how to upgrade all the software associated
with a BigchainDB node running on Kubernetes, with a BigchainDB node running on Kubernetes,
including host operating systems, Docker, Kubernetes, including host operating systems, Docker, Kubernetes,

View File

@ -3,9 +3,19 @@
Overview Overview
======== ========
This page summarizes the steps *we* go through .. note::
to set up a production BigchainDB cluster.
We are constantly improving them. A highly-available Kubernetes cluster requires at least five virtual machines
(three for the master and two for your app's containers).
Therefore we don't recommend using Kubernetes to run a BigchainDB node
if that's the only thing the Kubernetes cluster will be running.
Instead, see **How to Set Up a BigchainDB Network**.
If your organization already *has* a big Kubernetes cluster running many containers,
and your organization has people who know Kubernetes,
then this Kubernetes deployment template might be helpful.
This page summarizes some steps to go through
to set up a BigchainDB cluster.
You can modify them to suit your needs. You can modify them to suit your needs.
.. _generate-the-blockchain-id-and-genesis-time: .. _generate-the-blockchain-id-and-genesis-time:
@ -44,7 +54,7 @@ you can do this:
.. code:: .. code::
$ mkdir $(pwd)/tmdata $ mkdir $(pwd)/tmdata
$ docker run --rm -v $(pwd)/tmdata:/tendermint/config tendermint/tendermint:0.22.3 init $ docker run --rm -v $(pwd)/tmdata:/tendermint/config tendermint/tendermint:0.22.8 init
$ cat $(pwd)/tmdata/genesis.json $ cat $(pwd)/tmdata/genesis.json
You should see something that looks like: You should see something that looks like:
@ -113,13 +123,13 @@ and set it equal to your secret token, e.g.
3. Deploy a Kubernetes cluster for your BigchainDB node. We have some instructions for how to 3. Deploy a Kubernetes cluster for your BigchainDB node. We have some instructions for how to
:doc:`Deploy a Kubernetes cluster on Azure <../production-deployment-template/template-kubernetes-azure>`. :doc:`Deploy a Kubernetes cluster on Azure <../k8s-deployment-template/template-kubernetes-azure>`.
.. warning:: .. warning::
In theory, you can deploy your BigchainDB node to any Kubernetes cluster, but there can be differences In theory, you can deploy your BigchainDB node to any Kubernetes cluster, but there can be differences
between different Kubernetes clusters, especially if they are running different versions of Kubernetes. between different Kubernetes clusters, especially if they are running different versions of Kubernetes.
We tested this Production Deployment Template on Azure ACS in February 2018 and at that time We tested this Kubernetes Deployment Template on Azure ACS in February 2018 and at that time
ACS was deploying a **Kubernetes 1.7.7** cluster. If you can force your cluster to have that version of Kubernetes, ACS was deploying a **Kubernetes 1.7.7** cluster. If you can force your cluster to have that version of Kubernetes,
then you'll increase the likelihood that everything will work in your cluster. then you'll increase the likelihood that everything will work in your cluster.

View File

@ -1,31 +0,0 @@
Production Deployment Template
==============================
This section outlines how *we* deploy production BigchainDB,
integrated with Tendermint(backend for BFT consensus),
clusters on Microsoft Azure using
Kubernetes. We improve it constantly.
You may choose to use it as a template or reference for your own deployment,
but *we make no claim that it is suitable for your purposes*.
Feel free change things to suit your needs or preferences.
.. toctree::
:maxdepth: 1
workflow
ca-installation
server-tls-certificate
client-tls-certificate
revoke-tls-certificate
template-kubernetes-azure
node-on-kubernetes
node-config-map-and-secrets
log-analytics
cloud-manager
easy-rsa
upgrade-on-kubernetes
bigchaindb-network-on-kubernetes
tectonic-azure
troubleshoot
architecture

View File

@ -4,7 +4,8 @@ Production Nodes
.. toctree:: .. toctree::
:maxdepth: 1 :maxdepth: 1
node-requirements
node-assumptions node-assumptions
node-components node-components
node-requirements node-security-and-privacy
reverse-proxy-notes reverse-proxy-notes

View File

@ -10,5 +10,3 @@ We make some assumptions about production nodes:
1. Production nodes use MongoDB (not RethinkDB, PostgreSQL, Couchbase or whatever). 1. Production nodes use MongoDB (not RethinkDB, PostgreSQL, Couchbase or whatever).
1. Each production node is set up and managed by an experienced professional system administrator or a team of them. 1. Each production node is set up and managed by an experienced professional system administrator or a team of them.
1. Each production node in a cluster is managed by a different person or team. 1. Each production node in a cluster is managed by a different person or team.
We don't provide a detailed cookbook explaining how to secure a server, or other things that a sysadmin should know. We do provide some templates, but those are just starting points.

View File

@ -0,0 +1,11 @@
# Production Node Security & Privacy
Here are some references about how to secure an Ubuntu 18.04 server:
- [Ubuntu 18.04 - Ubuntu Server Guide - Security](https://help.ubuntu.com/lts/serverguide/security.html.en)
- [Ubuntu Blog: National Cyber Security Centre publish Ubuntu 18.04 LTS Security Guide](https://blog.ubuntu.com/2018/07/30/national-cyber-security-centre-publish-ubuntu-18-04-lts-security-guide)
Also, here are some recommendations a node operator can follow to enhance the privacy of the data coming to, stored on, and leaving their node:
- Ensure that all data stored on a node is encrypted at rest, e.g. using full disk encryption. This can be provided as a service by the operating system, transparently to BigchainDB, MongoDB and Tendermint.
- Ensure that all data is encrypted in transit, i.e. enforce using HTTPS for the HTTP API and the Websocket API. This can be done using NGINX or similar, as we do with the BigchainDB Testnet.

View File

@ -16,7 +16,9 @@ A Network will stop working if more than one third of the Nodes are down or faul
## Before We Start ## Before We Start
This tutorial assumes you have basic knowledge on how to manage a GNU/Linux machine. The commands are tailored for an up-to-date *Debian-like* distribution. (We use an **Ubuntu 18.04 LTS** Virtual Machine on Microsoft Azure.) If you are on a different Linux distribution then you might need to adapt the names of the packages installed. This tutorial assumes you have basic knowledge on how to manage a GNU/Linux machine.
**Please note: The commands on this page work on Ubuntu 18.04. Similar commands will work on other versions of Ubuntu, and other recent Debian-like Linux distros, but you may have to change the names of the packages, or install more packages.**
We don't make any assumptions about **where** you run the Node. We don't make any assumptions about **where** you run the Node.
You can run BigchainDB Server on a Virtual Machine on the cloud, on a machine in your data center, or even on a Raspberry Pi. Just make sure that your Node is reachable by the other Nodes. Here's a **non-exhaustive list of examples**: You can run BigchainDB Server on a Virtual Machine on the cloud, on a machine in your data center, or even on a Raspberry Pi. Just make sure that your Node is reachable by the other Nodes. Here's a **non-exhaustive list of examples**:
@ -49,7 +51,9 @@ sudo apt full-upgrade
BigchainDB Server requires **Python 3.6+**, so make sure your system has it. Install the required packages: BigchainDB Server requires **Python 3.6+**, so make sure your system has it. Install the required packages:
``` ```
# For Ubuntu 18.04:
sudo apt install -y python3-pip libssl-dev sudo apt install -y python3-pip libssl-dev
# Ubuntu 16.04, and other Linux distros, may require other packages or more packages
``` ```
Now install the latest version of BigchainDB. You can find the latest version by going to the [BigchainDB project release history page on PyPI][bdb:pypi]. For example, to install version 2.0.0b3, you would do: Now install the latest version of BigchainDB. You can find the latest version by going to the [BigchainDB project release history page on PyPI][bdb:pypi]. For example, to install version 2.0.0b3, you would do:
@ -75,13 +79,13 @@ Note: The `mongodb` package is _not_ the official MongoDB package from MongoDB t
#### Install Tendermint #### Install Tendermint
Install a [recent version of Tendermint][tendermint:releases]. BigchainDB Server requires version 0.22.3 or newer. Install a [recent version of Tendermint][tendermint:releases]. BigchainDB Server requires version 0.22.8 or newer.
``` ```
sudo apt install -y unzip sudo apt install -y unzip
wget https://github.com/tendermint/tendermint/releases/download/v0.22.3/tendermint_0.22.3_linux_amd64.zip wget https://github.com/tendermint/tendermint/releases/download/v0.22.8/tendermint_0.22.8_linux_amd64.zip
unzip tendermint_0.22.3_linux_amd64.zip unzip tendermint_0.22.8_linux_amd64.zip
rm tendermint_0.22.3_linux_amd64.zip rm tendermint_0.22.8_linux_amd64.zip
sudo mv tendermint /usr/local/bin sudo mv tendermint /usr/local/bin
``` ```
@ -159,42 +163,64 @@ Share the `node_id`, `pub_key.value` and hostname of your Node with all other Me
At this point the Coordinator should have received the data from all the Members, and should combine them in the `.tendermint/config/genesis.json` file: At this point the Coordinator should have received the data from all the Members, and should combine them in the `.tendermint/config/genesis.json` file:
```json ```json
{ {
"genesis_time": "0001-01-01T00:00:00Z", "genesis_time":"0001-01-01T00:00:00Z",
"chain_id": "test-chain-la6HSr", "chain_id":"test-chain-la6HSr",
"validators": [ "consensus_params":{
{ "block_size_params":{
"pub_key": { "max_bytes":"22020096",
"type": "AC26791624DE60", "max_txs":"10000",
"value": "<Member 1 public key>" "max_gas":"-1"
}, },
"power": 10, "tx_size_params":{
"name": "<Member 1 name>" "max_bytes":"10240",
}, "max_gas":"-1"
{
"pub_key": {
"type": "AC26791624DE60",
"value": "<Member 2 public key>"
}, },
"power": 10, "block_gossip_params":{
"name": "<Member 2 name>" "block_part_size_bytes":"65536"
}, },
{ "evidence_params":{
"...": { }, "max_age":"100000"
}, }
{ },
"pub_key": { "validators":[
"type": "AC26791624DE60", {
"value": "<Member N public key>" "pub_key":{
}, "type":"AC26791624DE60",
"power": 10, "value":"<Member 1 public key>"
"name": "<Member N name>" },
} "power":10,
], "name":"<Member 1 name>"
"app_hash": "" },
{
"pub_key":{
"type":"AC26791624DE60",
"value":"<Member 2 public key>"
},
"power":10,
"name":"<Member 2 name>"
},
{
"...":{
},
},
{
"pub_key":{
"type":"AC26791624DE60",
"value":"<Member N public key>"
},
"power":10,
"name":"<Member N name>"
}
],
"app_hash":""
} }
``` ```
**Note:** `consensus_params` in the `genesis.json` are default values for Tendermint consensus.
The new `genesis.json` file contains the data that describes the Network. The key `name` is the Member's moniker; it can be any valid string, but put something human-readable like `"Alice's Node Shop"`. The new `genesis.json` file contains the data that describes the Network. The key `name` is the Member's moniker; it can be any valid string, but put something human-readable like `"Alice's Node Shop"`.
At this point, the Coordinator must share the new `genesis.json` file with all Members. At this point, the Coordinator must share the new `genesis.json` file with all Members.

View File

@ -154,7 +154,7 @@ spec:
timeoutSeconds: 15 timeoutSeconds: 15
# BigchainDB container # BigchainDB container
- name: bigchaindb - name: bigchaindb
image: bigchaindb/bigchaindb:2.0.0-beta4 image: bigchaindb/bigchaindb:2.0.0-beta5
imagePullPolicy: Always imagePullPolicy: Always
args: args:
- start - start

View File

@ -1,4 +1,4 @@
FROM tendermint/tendermint:0.22.3 FROM tendermint/tendermint:0.22.8
LABEL maintainer "dev@bigchaindb.com" LABEL maintainer "dev@bigchaindb.com"
WORKDIR / WORKDIR /
USER root USER root

View File

@ -34,7 +34,7 @@ spec:
terminationGracePeriodSeconds: 10 terminationGracePeriodSeconds: 10
containers: containers:
- name: bigchaindb - name: bigchaindb
image: bigchaindb/bigchaindb:2.0.0-beta4 image: bigchaindb/bigchaindb:2.0.0-beta5
imagePullPolicy: Always imagePullPolicy: Always
args: args:
- start - start

View File

@ -15,13 +15,11 @@ The derived files (`nginx.conf.template` and `nginx.lua.template`), along with
the other files in this directory, are _also_ licensed under an MIT License, the other files in this directory, are _also_ licensed under an MIT License,
the text of which can be found below. the text of which can be found below.
## Documentation Licenses
# Documentation Licenses The documentation in this directory is licensed under a Creative Commons Attribution
The documentation in this directory is licensed under a Creative Commons Attribution-ShareAlike
4.0 International license, the full text of which can be found at 4.0 International license, the full text of which can be found at
[http://creativecommons.org/licenses/by-sa/4.0/legalcode](http://creativecommons.org/licenses/by-sa/4.0/legalcode). [http://creativecommons.org/licenses/by/4.0/legalcode](http://creativecommons.org/licenses/by/4.0/legalcode).
<hr> <hr>
@ -47,7 +45,6 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE. THE SOFTWARE.
<hr> <hr>
The MIT License The MIT License
@ -71,4 +68,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE. THE SOFTWARE.

View File

@ -1,4 +1,4 @@
ARG tm_version=0.22.3 ARG tm_version=0.22.8
FROM tendermint/tendermint:${tm_version} FROM tendermint/tendermint:${tm_version}
LABEL maintainer "dev@bigchaindb.com" LABEL maintainer "dev@bigchaindb.com"
WORKDIR / WORKDIR /

14
pkg/scripts/all-in-one.bash Executable file
View File

@ -0,0 +1,14 @@
#!/bin/bash
# MongoDB configuration
[ "$(stat -c %U /data/db)" = mongodb ] || chown -R mongodb /data/db
# BigchainDB configuration
bigchaindb-monit-config
nohup mongod > "$HOME/.bigchaindb-monit/logs/mongodb_log_$(date +%Y%m%d_%H%M%S)" 2>&1 &
# Tendermint configuration
tendermint init
monit -d 5 -I -B

View File

@ -93,7 +93,7 @@ case \$1 in
start_bigchaindb) start_bigchaindb)
pushd \$4 pushd \$4
nohup bigchaindb start >> \$3/bigchaindb.out.log 2>> \$3/bigchaindb.err.log & nohup bigchaindb -l DEBUG start >> \$3/bigchaindb.out.log 2>> \$3/bigchaindb.err.log &
echo \$! > \$2 echo \$! > \$2
popd popd

View File

@ -11,7 +11,7 @@ stack_repo=${STACK_REPO:="bigchaindb/bigchaindb"}
stack_size=${STACK_SIZE:=4} stack_size=${STACK_SIZE:=4}
stack_type=${STACK_TYPE:="docker"} stack_type=${STACK_TYPE:="docker"}
stack_type_provider=${STACK_TYPE_PROVIDER:=""} stack_type_provider=${STACK_TYPE_PROVIDER:=""}
tm_version=${TM_VERSION:="0.22.3"} tm_version=${TM_VERSION:="0.22.8"}
mongo_version=${MONGO_VERSION:="3.6"} mongo_version=${MONGO_VERSION:="3.6"}
stack_vm_memory=${STACK_VM_MEMORY:=2048} stack_vm_memory=${STACK_VM_MEMORY:=2048}
stack_vm_cpus=${STACK_VM_CPUS:=2} stack_vm_cpus=${STACK_VM_CPUS:=2}

View File

@ -11,7 +11,7 @@ stack_repo=${STACK_REPO:="bigchaindb/bigchaindb"}
stack_size=${STACK_SIZE:=4} stack_size=${STACK_SIZE:=4}
stack_type=${STACK_TYPE:="docker"} stack_type=${STACK_TYPE:="docker"}
stack_type_provider=${STACK_TYPE_PROVIDER:=""} stack_type_provider=${STACK_TYPE_PROVIDER:=""}
tm_version=${TM_VERSION:="0.22.3"} tm_version=${TM_VERSION:="0.22.8"}
mongo_version=${MONGO_VERSION:="3.6"} mongo_version=${MONGO_VERSION:="3.6"}
stack_vm_memory=${STACK_VM_MEMORY:=2048} stack_vm_memory=${STACK_VM_MEMORY:=2048}
stack_vm_cpus=${STACK_VM_CPUS:=2} stack_vm_cpus=${STACK_VM_CPUS:=2}

View File

@ -56,7 +56,7 @@ tests_require = [
'flake8-quotes==0.8.1', 'flake8-quotes==0.8.1',
'hypothesis~=3.18.5', 'hypothesis~=3.18.5',
'hypothesis-regex', 'hypothesis-regex',
'pylint', # Removed pylint because its GPL license isn't Apache2-compatible
'pytest>=3.0.0', 'pytest>=3.0.0',
'pytest-cov>=2.2.1', 'pytest-cov>=2.2.1',
'pytest-mock', 'pytest-mock',

View File

@ -12,7 +12,7 @@ def test_asset_transfer(b, signed_create_tx, user_pk, user_sk):
signed_create_tx.id) signed_create_tx.id)
tx_transfer_signed = tx_transfer.sign([user_sk]) tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([signed_create_tx, tx_transfer]) b.store_bulk_transactions([signed_create_tx])
assert tx_transfer_signed.validate(b) == tx_transfer_signed assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert tx_transfer_signed.asset['id'] == signed_create_tx.id assert tx_transfer_signed.asset['id'] == signed_create_tx.id
@ -27,7 +27,7 @@ def test_validate_transfer_asset_id_mismatch(b, signed_create_tx, user_pk, user_
tx_transfer.asset['id'] = 'a' * 64 tx_transfer.asset['id'] = 'a' * 64
tx_transfer_signed = tx_transfer.sign([user_sk]) tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([signed_create_tx, tx_transfer_signed]) b.store_bulk_transactions([signed_create_tx])
with pytest.raises(AssetIdMismatch): with pytest.raises(AssetIdMismatch):
tx_transfer_signed.validate(b) tx_transfer_signed.validate(b)

View File

@ -1,6 +1,8 @@
import pytest import pytest
import random import random
from bigchaindb.common.exceptions import DoubleSpend
pytestmark = pytest.mark.tendermint pytestmark = pytest.mark.tendermint
@ -127,7 +129,7 @@ def test_single_in_single_own_single_out_single_own_transfer(alice, b, user_pk,
asset_id=tx_create.id) asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk]) tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) assert tx_transfer_signed.validate(b)
assert len(tx_transfer_signed.outputs) == 1 assert len(tx_transfer_signed.outputs) == 1
@ -154,7 +156,7 @@ def test_single_in_single_own_multiple_out_single_own_transfer(alice, b, user_pk
asset_id=tx_create.id) asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk]) tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 2 assert len(tx_transfer_signed.outputs) == 2
@ -182,7 +184,7 @@ def test_single_in_single_own_single_out_multiple_own_transfer(alice, b, user_pk
asset_id=tx_create.id) asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk]) tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 1 assert len(tx_transfer_signed.outputs) == 1
@ -194,6 +196,10 @@ def test_single_in_single_own_single_out_multiple_own_transfer(alice, b, user_pk
assert len(tx_transfer_signed.inputs) == 1 assert len(tx_transfer_signed.inputs) == 1
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset # TRANSFER divisible asset
# Single input # Single input
@ -215,7 +221,7 @@ def test_single_in_single_own_multiple_out_mix_own_transfer(alice, b, user_pk,
asset_id=tx_create.id) asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk]) tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 2 assert len(tx_transfer_signed.outputs) == 2
@ -228,6 +234,10 @@ def test_single_in_single_own_multiple_out_mix_own_transfer(alice, b, user_pk,
assert len(tx_transfer_signed.inputs) == 1 assert len(tx_transfer_signed.inputs) == 1
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset # TRANSFER divisible asset
# Single input # Single input
@ -249,7 +259,7 @@ def test_single_in_multiple_own_single_out_single_own_transfer(alice, b, user_pk
asset_id=tx_create.id) asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk]) tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 1 assert len(tx_transfer_signed.outputs) == 1
@ -260,6 +270,10 @@ def test_single_in_multiple_own_single_out_single_own_transfer(alice, b, user_pk
assert 'subconditions' in ffill assert 'subconditions' in ffill
assert len(ffill['subconditions']) == 2 assert len(ffill['subconditions']) == 2
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset # TRANSFER divisible asset
# Multiple inputs # Multiple inputs
@ -280,13 +294,17 @@ def test_multiple_in_single_own_single_out_single_own_transfer(alice, b, user_pk
asset_id=tx_create.id) asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk]) tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) assert tx_transfer_signed.validate(b)
assert len(tx_transfer_signed.outputs) == 1 assert len(tx_transfer_signed.outputs) == 1
assert tx_transfer_signed.outputs[0].amount == 100 assert tx_transfer_signed.outputs[0].amount == 100
assert len(tx_transfer_signed.inputs) == 2 assert len(tx_transfer_signed.inputs) == 2
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset # TRANSFER divisible asset
# Multiple inputs # Multiple inputs
@ -309,9 +327,9 @@ def test_multiple_in_multiple_own_single_out_single_own_transfer(alice, b, user_
asset_id=tx_create.id) asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk]) tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 1 assert len(tx_transfer_signed.outputs) == 1
assert tx_transfer_signed.outputs[0].amount == 100 assert tx_transfer_signed.outputs[0].amount == 100
assert len(tx_transfer_signed.inputs) == 2 assert len(tx_transfer_signed.inputs) == 2
@ -323,6 +341,10 @@ def test_multiple_in_multiple_own_single_out_single_own_transfer(alice, b, user_
assert len(ffill_fid0['subconditions']) == 2 assert len(ffill_fid0['subconditions']) == 2
assert len(ffill_fid1['subconditions']) == 2 assert len(ffill_fid1['subconditions']) == 2
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset # TRANSFER divisible asset
# Multiple inputs # Multiple inputs
@ -345,7 +367,7 @@ def test_muiltiple_in_mix_own_multiple_out_single_own_transfer(alice, b, user_pk
asset_id=tx_create.id) asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk]) tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 1 assert len(tx_transfer_signed.outputs) == 1
@ -358,6 +380,10 @@ def test_muiltiple_in_mix_own_multiple_out_single_own_transfer(alice, b, user_pk
assert 'subconditions' in ffill_fid1 assert 'subconditions' in ffill_fid1
assert len(ffill_fid1['subconditions']) == 2 assert len(ffill_fid1['subconditions']) == 2
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset # TRANSFER divisible asset
# Multiple inputs # Multiple inputs
@ -382,7 +408,7 @@ def test_muiltiple_in_mix_own_multiple_out_mix_own_transfer(alice, b, user_pk,
asset_id=tx_create.id) asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk]) tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 2 assert len(tx_transfer_signed.outputs) == 2
@ -402,6 +428,10 @@ def test_muiltiple_in_mix_own_multiple_out_mix_own_transfer(alice, b, user_pk,
assert 'subconditions' in ffill_fid1 assert 'subconditions' in ffill_fid1
assert len(ffill_fid1['subconditions']) == 2 assert len(ffill_fid1['subconditions']) == 2
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
# TRANSFER divisible asset # TRANSFER divisible asset
# Multiple inputs from different transactions # Multiple inputs from different transactions
@ -436,7 +466,7 @@ def test_multiple_in_different_transactions(alice, b, user_pk, user_sk):
asset_id=tx_create.id) asset_id=tx_create.id)
tx_transfer2_signed = tx_transfer2.sign([user_sk]) tx_transfer2_signed = tx_transfer2.sign([user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer1_signed, tx_transfer2_signed]) b.store_bulk_transactions([tx_create_signed, tx_transfer1_signed])
assert tx_transfer2_signed.validate(b) == tx_transfer2_signed assert tx_transfer2_signed.validate(b) == tx_transfer2_signed
assert len(tx_transfer2_signed.outputs) == 1 assert len(tx_transfer2_signed.outputs) == 1
@ -501,10 +531,14 @@ def test_threshold_same_public_key(alice, b, user_pk, user_sk):
asset_id=tx_create.id) asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk, user_sk]) tx_transfer_signed = tx_transfer.sign([user_sk, user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed assert tx_transfer_signed.validate(b) == tx_transfer_signed
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
def test_sum_amount(alice, b, user_pk, user_sk): def test_sum_amount(alice, b, user_pk, user_sk):
from bigchaindb.models import Transaction from bigchaindb.models import Transaction
@ -520,12 +554,16 @@ def test_sum_amount(alice, b, user_pk, user_sk):
asset_id=tx_create.id) asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk]) tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 1 assert len(tx_transfer_signed.outputs) == 1
assert tx_transfer_signed.outputs[0].amount == 3 assert tx_transfer_signed.outputs[0].amount == 3
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)
def test_divide(alice, b, user_pk, user_sk): def test_divide(alice, b, user_pk, user_sk):
from bigchaindb.models import Transaction from bigchaindb.models import Transaction
@ -541,9 +579,13 @@ def test_divide(alice, b, user_pk, user_sk):
asset_id=tx_create.id) asset_id=tx_create.id)
tx_transfer_signed = tx_transfer.sign([user_sk]) tx_transfer_signed = tx_transfer.sign([user_sk])
b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) b.store_bulk_transactions([tx_create_signed])
assert tx_transfer_signed.validate(b) == tx_transfer_signed assert tx_transfer_signed.validate(b) == tx_transfer_signed
assert len(tx_transfer_signed.outputs) == 3 assert len(tx_transfer_signed.outputs) == 3
for output in tx_transfer_signed.outputs: for output in tx_transfer_signed.outputs:
assert output.amount == 1 assert output.amount == 1
b.store_bulk_transactions([tx_transfer_signed])
with pytest.raises(DoubleSpend):
tx_transfer_signed.validate(b)

View File

@ -370,22 +370,23 @@ def test_get_pre_commit_state(db_context):
assert resp == state._asdict() assert resp == state._asdict()
def test_store_validator_update(): def test_validator_update():
from bigchaindb.backend import connect, query from bigchaindb.backend import connect, query
from bigchaindb.backend.query import VALIDATOR_UPDATE_ID
from bigchaindb.common.exceptions import MultipleValidatorOperationError
conn = connect() conn = connect()
validator_update = {'validator': {'key': 'value'}, def gen_validator_update(height):
'update_id': VALIDATOR_UPDATE_ID} return {'data': 'somedata', 'height': height}
query.store_validator_update(conn, deepcopy(validator_update))
with pytest.raises(MultipleValidatorOperationError): for i in range(1, 100, 10):
query.store_validator_update(conn, deepcopy(validator_update)) value = gen_validator_update(i)
query.store_validator_set(conn, value)
resp = query.get_validator_update(conn, VALIDATOR_UPDATE_ID) v1 = query.get_validator_set(conn, 8)
assert v1['height'] == 1
assert resp == validator_update v41 = query.get_validator_set(conn, 50)
assert query.delete_validator_update(conn, VALIDATOR_UPDATE_ID) assert v41['height'] == 41
assert not query.get_validator_update(conn, VALIDATOR_UPDATE_ID)
v91 = query.get_validator_set(conn)
assert v91['height'] == 91

View File

@ -40,7 +40,7 @@ def test_init_creates_db_tables_and_indexes():
assert set(indexes) == {'_id_', 'pre_commit_id'} assert set(indexes) == {'_id_', 'pre_commit_id'}
indexes = conn.conn[dbname]['validators'].index_information().keys() indexes = conn.conn[dbname]['validators'].index_information().keys()
assert set(indexes) == {'_id_', 'update_id'} assert set(indexes) == {'_id_', 'height'}
def test_init_database_fails_if_db_exists(): def test_init_database_fails_if_db_exists():

View File

@ -341,6 +341,7 @@ class MockResponse():
return {'result': {'latest_block_height': self.height}} return {'result': {'latest_block_height': self.height}}
@pytest.mark.skip
@patch('bigchaindb.config_utils.autoconfigure') @patch('bigchaindb.config_utils.autoconfigure')
@patch('bigchaindb.backend.query.store_validator_update') @patch('bigchaindb.backend.query.store_validator_update')
@pytest.mark.tendermint @pytest.mark.tendermint

View File

@ -647,6 +647,17 @@ def node_key(node_keys):
return key_pair_from_ed25519_key(key_from_base64(priv)) return key_pair_from_ed25519_key(key_from_base64(priv))
@pytest.fixture
def ed25519_node_keys(node_keys):
(pub, priv) = list(node_keys.items())[0]
node_keys_dict = {}
for pub, priv in node_keys.items():
key = key_pair_from_ed25519_key(key_from_base64(priv))
node_keys_dict[key.public_key] = key
return node_keys_dict
@pytest.fixture(scope='session') @pytest.fixture(scope='session')
def node_keys(): def node_keys():
return {'zL/DasvKulXZzhSNFwx4cLRXKkSM9GPK7Y0nZ4FEylM=': return {'zL/DasvKulXZzhSNFwx4cLRXKkSM9GPK7Y0nZ4FEylM=':

View File

@ -1,4 +1,7 @@
import pytest import pytest
import codecs
import abci.types_pb2 as types
@pytest.fixture @pytest.fixture
@ -10,3 +13,13 @@ def b():
@pytest.fixture @pytest.fixture
def validator_pub_key(): def validator_pub_key():
return 'B0E42D2589A455EAD339A035D6CE1C8C3E25863F268120AA0162AD7D003A4014' return 'B0E42D2589A455EAD339A035D6CE1C8C3E25863F268120AA0162AD7D003A4014'
@pytest.fixture
def init_chain_request():
addr = codecs.decode(b'9FD479C869C7D7E7605BF99293457AA5D80C3033', 'hex')
pk = codecs.decode(b'VAgFZtYw8bNR5TMZHFOBDWk9cAmEu3/c6JgRBmddbbI=', 'base64')
val_a = types.Validator(address=addr, power=10,
pub_key=types.PubKey(type='ed25519', data=pk))
return types.RequestInitChain(validators=[val_a])

View File

@ -50,7 +50,7 @@ def test_check_tx__unsigned_create_is_error(b):
@pytest.mark.bdb @pytest.mark.bdb
def test_deliver_tx__valid_create_updates_db(b): def test_deliver_tx__valid_create_updates_db(b, init_chain_request):
from bigchaindb import App from bigchaindb import App
from bigchaindb.models import Transaction from bigchaindb.models import Transaction
from bigchaindb.common.crypto import generate_key_pair from bigchaindb.common.crypto import generate_key_pair
@ -64,8 +64,9 @@ def test_deliver_tx__valid_create_updates_db(b):
app = App(b) app = App(b)
app.init_chain(init_chain_request)
begin_block = RequestBeginBlock() begin_block = RequestBeginBlock()
app.init_chain(['ignore'])
app.begin_block(begin_block) app.begin_block(begin_block)
result = app.deliver_tx(encode_tx_to_bytes(tx)) result = app.deliver_tx(encode_tx_to_bytes(tx))
@ -83,7 +84,7 @@ def test_deliver_tx__valid_create_updates_db(b):
# next(unspent_outputs) # next(unspent_outputs)
def test_deliver_tx__double_spend_fails(b): def test_deliver_tx__double_spend_fails(b, init_chain_request):
from bigchaindb import App from bigchaindb import App
from bigchaindb.models import Transaction from bigchaindb.models import Transaction
from bigchaindb.common.crypto import generate_key_pair from bigchaindb.common.crypto import generate_key_pair
@ -96,7 +97,7 @@ def test_deliver_tx__double_spend_fails(b):
.sign([alice.private_key]) .sign([alice.private_key])
app = App(b) app = App(b)
app.init_chain(['ignore']) app.init_chain(init_chain_request)
begin_block = RequestBeginBlock() begin_block = RequestBeginBlock()
app.begin_block(begin_block) app.begin_block(begin_block)
@ -112,13 +113,13 @@ def test_deliver_tx__double_spend_fails(b):
assert result.code == CodeTypeError assert result.code == CodeTypeError
def test_deliver_transfer_tx__double_spend_fails(b): def test_deliver_transfer_tx__double_spend_fails(b, init_chain_request):
from bigchaindb import App from bigchaindb import App
from bigchaindb.models import Transaction from bigchaindb.models import Transaction
from bigchaindb.common.crypto import generate_key_pair from bigchaindb.common.crypto import generate_key_pair
app = App(b) app = App(b)
app.init_chain(['ignore']) app.init_chain(init_chain_request)
begin_block = RequestBeginBlock() begin_block = RequestBeginBlock()
app.begin_block(begin_block) app.begin_block(begin_block)
@ -156,14 +157,16 @@ def test_deliver_transfer_tx__double_spend_fails(b):
assert result.code == CodeTypeError assert result.code == CodeTypeError
def test_end_block_return_validator_updates(b): # The test below has to re-written one election conclusion logic has been implemented
@pytest.mark.skip
def test_end_block_return_validator_updates(b, init_chain_request):
from bigchaindb import App from bigchaindb import App
from bigchaindb.backend import query from bigchaindb.backend import query
from bigchaindb.core import encode_validator from bigchaindb.core import encode_validator
from bigchaindb.backend.query import VALIDATOR_UPDATE_ID from bigchaindb.backend.query import VALIDATOR_UPDATE_ID
app = App(b) app = App(b)
app.init_chain(['ignore']) app.init_chain(init_chain_request)
begin_block = RequestBeginBlock() begin_block = RequestBeginBlock()
app.begin_block(begin_block) app.begin_block(begin_block)
@ -182,7 +185,7 @@ def test_end_block_return_validator_updates(b):
assert updates == [] assert updates == []
def test_store_pre_commit_state_in_end_block(b, alice): def test_store_pre_commit_state_in_end_block(b, alice, init_chain_request):
from bigchaindb import App from bigchaindb import App
from bigchaindb.backend import query from bigchaindb.backend import query
from bigchaindb.models import Transaction from bigchaindb.models import Transaction
@ -194,7 +197,7 @@ def test_store_pre_commit_state_in_end_block(b, alice):
.sign([alice.private_key]) .sign([alice.private_key])
app = App(b) app = App(b)
app.init_chain(['ignore']) app.init_chain(init_chain_request)
begin_block = RequestBeginBlock() begin_block = RequestBeginBlock()
app.begin_block(begin_block) app.begin_block(begin_block)

View File

@ -1,3 +1,5 @@
import codecs
import abci.types_pb2 as types import abci.types_pb2 as types
import json import json
import pytest import pytest
@ -11,7 +13,7 @@ from io import BytesIO
@pytest.mark.tendermint @pytest.mark.tendermint
@pytest.mark.bdb @pytest.mark.bdb
def test_app(tb): def test_app(tb, init_chain_request):
from bigchaindb import App from bigchaindb import App
from bigchaindb.tendermint_utils import calculate_hash from bigchaindb.tendermint_utils import calculate_hash
from bigchaindb.common.crypto import generate_key_pair from bigchaindb.common.crypto import generate_key_pair
@ -28,12 +30,17 @@ def test_app(tb):
assert res.info.last_block_height == 0 assert res.info.last_block_height == 0
assert not b.get_latest_block() assert not b.get_latest_block()
p.process('init_chain', types.Request(init_chain=types.RequestInitChain())) p.process('init_chain', types.Request(init_chain=init_chain_request))
block0 = b.get_latest_block() block0 = b.get_latest_block()
assert block0 assert block0
assert block0['height'] == 0 assert block0['height'] == 0
assert block0['app_hash'] == '' assert block0['app_hash'] == ''
pk = codecs.encode(init_chain_request.validators[0].pub_key.data, 'base64').decode().strip('\n')
[validator] = b.get_validators(height=1)
assert validator['pub_key']['data'] == pk
assert validator['voting_power'] == 10
alice = generate_key_pair() alice = generate_key_pair()
bob = generate_key_pair() bob = generate_key_pair()
tx = Transaction.create([alice.public_key], tx = Transaction.create([alice.public_key],
@ -98,6 +105,7 @@ def test_app(tb):
assert block0['app_hash'] == new_block_hash assert block0['app_hash'] == new_block_hash
@pytest.mark.skip
@pytest.mark.abci @pytest.mark.abci
def test_upsert_validator(b, alice): def test_upsert_validator(b, alice):
from bigchaindb.backend.query import VALIDATOR_UPDATE_ID from bigchaindb.backend.query import VALIDATOR_UPDATE_ID

View File

@ -139,6 +139,7 @@ def test_post_transaction_invalid_mode(b):
b.write_transaction(tx, 'nope') b.write_transaction(tx, 'nope')
@pytest.mark.skip
@pytest.mark.bdb @pytest.mark.bdb
def test_validator_updates(b, validator_pub_key): def test_validator_updates(b, validator_pub_key):
from bigchaindb.backend import query from bigchaindb.backend import query
@ -382,8 +383,16 @@ def test_get_spent_transaction_critical_double_spend(b, alice, bob, carol):
asset_id=tx.id)\ asset_id=tx.id)\
.sign([alice.private_key]) .sign([alice.private_key])
same_input_double_spend = Transaction.transfer(tx.to_inputs() + tx.to_inputs(),
[([bob.public_key], 1)],
asset_id=tx.id)\
.sign([alice.private_key])
b.store_bulk_transactions([tx]) b.store_bulk_transactions([tx])
with pytest.raises(DoubleSpend):
same_input_double_spend.validate(b)
assert b.get_spent(tx.id, tx_transfer.inputs[0].fulfills.output, [tx_transfer]) assert b.get_spent(tx.id, tx_transfer.inputs[0].fulfills.output, [tx_transfer])
with pytest.raises(DoubleSpend): with pytest.raises(DoubleSpend):

View File

@ -1,5 +1,7 @@
import pytest import pytest
from bigchaindb.upsert_validator import ValidatorElection
@pytest.fixture @pytest.fixture
def b_mock(b, network_validators): def b_mock(b, network_validators):
@ -30,3 +32,11 @@ def mock_get_validators(network_validators):
return validators return validators
return validator_set return validator_set
@pytest.fixture
def valid_election(b_mock, node_key, new_validator):
voters = ValidatorElection.recipients(b_mock)
return ValidatorElection.generate([node_key.public_key],
voters,
new_validator, None).sign([node_key.private_key])

View File

@ -0,0 +1,80 @@
import pytest
from bigchaindb.upsert_validator import ValidatorElectionVote
from bigchaindb.common.exceptions import AmountError
pytestmark = [pytest.mark.tendermint, pytest.mark.bdb]
def test_upsert_validator_valid_election_vote(b_mock, valid_election, ed25519_node_keys):
b_mock.store_bulk_transactions([valid_election])
input0 = valid_election.to_inputs()[0]
votes = valid_election.outputs[0].amount
public_key0 = input0.owners_before[0]
key0 = ed25519_node_keys[public_key0]
election_pub_key = ValidatorElectionVote.to_public_key(valid_election.id)
vote = ValidatorElectionVote.generate([input0],
[([election_pub_key], votes)],
election_id=valid_election.id)\
.sign([key0.private_key])
assert vote.validate(b_mock)
def test_upsert_validator_delegate_election_vote(b_mock, valid_election, ed25519_node_keys):
from bigchaindb.common.crypto import generate_key_pair
alice = generate_key_pair()
b_mock.store_bulk_transactions([valid_election])
input0 = valid_election.to_inputs()[0]
votes = valid_election.outputs[0].amount
public_key0 = input0.owners_before[0]
key0 = ed25519_node_keys[public_key0]
delegate_vote = ValidatorElectionVote.generate([input0],
[([alice.public_key], 3), ([key0.public_key], votes-3)],
election_id=valid_election.id)\
.sign([key0.private_key])
assert delegate_vote.validate(b_mock)
b_mock.store_bulk_transactions([delegate_vote])
election_pub_key = ValidatorElectionVote.to_public_key(valid_election.id)
alice_votes = delegate_vote.to_inputs()[0]
alice_casted_vote = ValidatorElectionVote.generate([alice_votes],
[([election_pub_key], 3)],
election_id=valid_election.id)\
.sign([alice.private_key])
assert alice_casted_vote.validate(b_mock)
key0_votes = delegate_vote.to_inputs()[1]
key0_casted_vote = ValidatorElectionVote.generate([key0_votes],
[([election_pub_key], votes-3)],
election_id=valid_election.id)\
.sign([key0.private_key])
assert key0_casted_vote.validate(b_mock)
def test_upsert_validator_invalid_election_vote(b_mock, valid_election, ed25519_node_keys):
b_mock.store_bulk_transactions([valid_election])
input0 = valid_election.to_inputs()[0]
votes = valid_election.outputs[0].amount
public_key0 = input0.owners_before[0]
key0 = ed25519_node_keys[public_key0]
election_pub_key = ValidatorElectionVote.to_public_key(valid_election.id)
vote = ValidatorElectionVote.generate([input0],
[([election_pub_key], votes+1)],
election_id=valid_election.id)\
.sign([key0.private_key])
with pytest.raises(AmountError):
assert vote.validate(b_mock)

View File

@ -1,49 +1,22 @@
import pytest import pytest
from requests.exceptions import RequestException
pytestmark = pytest.mark.tendermint pytestmark = pytest.mark.tendermint
VALIDATORS_ENDPOINT = '/api/v1/validators/' VALIDATORS_ENDPOINT = '/api/v1/validators/'
def test_get_validators_endpoint(b, client, monkeypatch): def test_get_validators_endpoint(b, client, monkeypatch):
validator_set = [{'address': 'F5426F0980E36E03044F74DD414248D29ABCBDB2',
def mock_get(uri): 'pub_key': {'data': '4E2685D9016126864733225BE00F005515200727FBAB1312FC78C8B76831255A',
return MockResponse() 'type': 'ed25519'},
monkeypatch.setattr('requests.get', mock_get) 'voting_power': 10}]
b.store_validator_set(23, validator_set)
res = client.get(VALIDATORS_ENDPOINT) res = client.get(VALIDATORS_ENDPOINT)
assert is_validator(res.json[0]) assert is_validator(res.json[0])
assert res.status_code == 200 assert res.status_code == 200
def test_get_validators_500_endpoint(b, client, monkeypatch):
def mock_get(uri):
raise RequestException
monkeypatch.setattr('requests.get', mock_get)
with pytest.raises(RequestException):
client.get(VALIDATORS_ENDPOINT)
# Helper # Helper
def is_validator(v): def is_validator(v):
return ('pub_key' in v) and ('voting_power' in v) return ('pub_key' in v) and ('voting_power' in v)
class MockResponse():
def json(self):
return {'id': '',
'jsonrpc': '2.0',
'result':
{'block_height': 5,
'validators': [
{'accum': 0,
'address': 'F5426F0980E36E03044F74DD414248D29ABCBDB2',
'pub_key': {'data': '4E2685D9016126864733225BE00F005515200727FBAB1312FC78C8B76831255A',
'type': 'ed25519'},
'voting_power': 10}]}}