diff --git a/CHANGELOG.md b/CHANGELOG.md index 69b752a5..5c531b83 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,18 @@ For reference, the possible headings are: * **Known Issues** * **Notes** +## [2.0 Beta 5] - 2018-08-01 + +Tag name: v2.0.0b5 + +### Changed + +* Supported version of Tendermint `0.22.3` -> `0.22.8`. [Pull request #2429](https://github.com/bigchaindb/bigchaindb/pull/2429). + +### Fixed + +* Stateful validation raises a DoubleSpend exception if there is any other transaction that spends the same output(s) even if it has the same transaction ID. [Pull request #2422](https://github.com/bigchaindb/bigchaindb/pull/2422). + ## [2.0 Beta 4] - 2018-07-30 Tag name: v2.0.0b4 diff --git a/Dockerfile b/Dockerfile index 1c050dc3..2fb0781e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,6 @@ RUN apt-get -qq update \ && apt-get -y upgrade \ && apt-get install -y jq \ && pip install --no-cache-dir --process-dependency-links . \ - && pip install --no-cache-dir . \ && apt-get autoremove \ && apt-get clean diff --git a/Dockerfile-all-in-one b/Dockerfile-all-in-one new file mode 100644 index 00000000..6c50d971 --- /dev/null +++ b/Dockerfile-all-in-one @@ -0,0 +1,51 @@ +FROM alpine:latest +LABEL maintainer "dev@bigchaindb.com" + +ARG TM_VERSION=0.22.8 +RUN mkdir -p /usr/src/app +ENV HOME /root +COPY . /usr/src/app/ +WORKDIR /usr/src/app + +RUN apk --update add sudo bash \ + && apk --update add python3 openssl ca-certificates git \ + && apk --update add --virtual build-dependencies python3-dev \ + libffi-dev openssl-dev build-base jq \ + && apk add --no-cache libstdc++ dpkg gnupg \ + && pip3 install --upgrade pip cffi \ + && pip install --no-cache-dir --process-dependency-links -e . \ + && apk del build-dependencies \ + && rm -f /var/cache/apk/* + +# Install mongodb and monit +RUN apk --update add mongodb monit + +# Install Tendermint +RUN wget https://github.com/tendermint/tendermint/releases/download/v${TM_VERSION}-autodraft/tendermint_${TM_VERSION}_linux_amd64.zip \ + && unzip tendermint_${TM_VERSION}_linux_amd64.zip \ + && mv tendermint /usr/local/bin/ \ + && rm tendermint_${TM_VERSION}_linux_amd64.zip + +ENV TMHOME=/tendermint + +# Set permissions required for mongodb +RUN mkdir -p /data/db /data/configdb \ + && chown -R mongodb:mongodb /data/db /data/configdb + +# BigchainDB enviroment variables +ENV BIGCHAINDB_DATABASE_PORT 27017 +ENV BIGCHAINDB_DATABASE_BACKEND localmongodb +ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984 +ENV BIGCHAINDB_WSSERVER_HOST 0.0.0.0 +ENV BIGCHAINDB_WSSERVER_SCHEME ws + +ENV BIGCHAINDB_WSSERVER_ADVERTISED_HOST 0.0.0.0 +ENV BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME ws +ENV BIGCHAINDB_TENDERMINT_PORT 26657 + +VOLUME /data/db /data/configdb /tendermint + +EXPOSE 27017 28017 9984 9985 26656 26657 26658 + +WORKDIR $HOME +ENTRYPOINT ["/usr/src/app/pkg/scripts/all-in-one.bash"] diff --git a/LICENSES.md b/LICENSES.md index c5b2a5d9..6e817636 100644 --- a/LICENSES.md +++ b/LICENSES.md @@ -15,7 +15,7 @@ For the licenses on all other BigchainDB-related code (i.e. in other repositorie ## Documentation Licenses -The official BigchainDB documentation, _except for the short code snippets embedded within it_, is licensed under a Creative Commons Attribution-ShareAlike 4.0 International license, the full text of which can be found at [http://creativecommons.org/licenses/by-sa/4.0/legalcode](http://creativecommons.org/licenses/by-sa/4.0/legalcode). 
+The official BigchainDB documentation, _except for the short code snippets embedded within it_, is licensed under a Creative Commons Attribution 4.0 International license, the full text of which can be found at [http://creativecommons.org/licenses/by/4.0/legalcode](http://creativecommons.org/licenses/by/4.0/legalcode). ## Exceptions diff --git a/bigchaindb/__init__.py b/bigchaindb/__init__.py index 2999b42e..f0abc7fb 100644 --- a/bigchaindb/__init__.py +++ b/bigchaindb/__init__.py @@ -90,7 +90,9 @@ _config = copy.deepcopy(config) from bigchaindb.common.transaction import Transaction # noqa from bigchaindb import models # noqa from bigchaindb.upsert_validator import ValidatorElection # noqa +from bigchaindb.upsert_validator import ValidatorElectionVote # noqa Transaction.register_type(Transaction.CREATE, models.Transaction) Transaction.register_type(Transaction.TRANSFER, models.Transaction) Transaction.register_type(ValidatorElection.VALIDATOR_ELECTION, ValidatorElection) +Transaction.register_type(ValidatorElectionVote.VALIDATOR_ELECTION_VOTE, ValidatorElectionVote) diff --git a/bigchaindb/backend/localmongodb/query.py b/bigchaindb/backend/localmongodb/query.py index b2b381c4..4f56dd73 100644 --- a/bigchaindb/backend/localmongodb/query.py +++ b/bigchaindb/backend/localmongodb/query.py @@ -8,7 +8,6 @@ from bigchaindb.common.exceptions import MultipleValidatorOperationError from bigchaindb.backend.utils import module_dispatch_registrar from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection from bigchaindb.common.transaction import Transaction -from bigchaindb.backend.query import VALIDATOR_UPDATE_ID register_query = module_dispatch_registrar(backend.query) @@ -279,7 +278,7 @@ def get_pre_commit_state(conn, commit_id): @register_query(LocalMongoDBConnection) -def store_validator_update(conn, validator_update): +def store_validator_set(conn, validator_update): try: return conn.run( conn.collection('validators') @@ -289,15 +288,16 @@ def store_validator_update(conn, validator_update): @register_query(LocalMongoDBConnection) -def get_validator_update(conn, update_id=VALIDATOR_UPDATE_ID): - return conn.run( - conn.collection('validators') - .find_one({'update_id': update_id}, projection={'_id': False})) +def get_validator_set(conn, height=None): + query = {} + if height is not None: + query = {'height': {'$lte': height}} - -@register_query(LocalMongoDBConnection) -def delete_validator_update(conn, update_id=VALIDATOR_UPDATE_ID): - return conn.run( + cursor = conn.run( conn.collection('validators') - .delete_one({'update_id': update_id}) + .find(query, projection={'_id': False}) + .sort([('height', DESCENDING)]) + .limit(1) ) + + return list(cursor)[0] diff --git a/bigchaindb/backend/localmongodb/schema.py b/bigchaindb/backend/localmongodb/schema.py index 58ee582f..1e7f3614 100644 --- a/bigchaindb/backend/localmongodb/schema.py +++ b/bigchaindb/backend/localmongodb/schema.py @@ -126,6 +126,6 @@ def create_pre_commit_secondary_index(conn, dbname): def create_validators_secondary_index(conn, dbname): logger.info('Create `validators` secondary index.') - conn.conn[dbname]['validators'].create_index('update_id', - name='update_id', + conn.conn[dbname]['validators'].create_index('height', + name='height', unique=True,) diff --git a/bigchaindb/backend/query.py b/bigchaindb/backend/query.py index 5ac6058f..cd4646f5 100644 --- a/bigchaindb/backend/query.py +++ b/bigchaindb/backend/query.py @@ -340,13 +340,6 @@ def store_pre_commit_state(connection, commit_id, state): raise 
NotImplementedError -@singledispatch -def store_validator_update(conn, validator_update): - """Store a update for the validator set""" - - raise NotImplementedError - - @singledispatch def get_pre_commit_state(connection, commit_id): """Get pre-commit state where `id` is `commit_id`. @@ -362,14 +355,15 @@ def get_pre_commit_state(connection, commit_id): @singledispatch -def get_validator_update(conn): - """Get validator updates which are not synced""" +def store_validator_set(conn, validator_update): + """Store updated validator set""" raise NotImplementedError @singledispatch -def delete_validator_update(conn, id): - """Set the sync status for validator update documents""" +def get_validator_set(conn, height): + """Get validator set for a given `height`, if `height` is not specified + then return the latest validator set""" raise NotImplementedError diff --git a/bigchaindb/common/schema/__init__.py b/bigchaindb/common/schema/__init__.py index 59fac431..2e44b7ae 100644 --- a/bigchaindb/common/schema/__init__.py +++ b/bigchaindb/common/schema/__init__.py @@ -34,6 +34,9 @@ _, TX_SCHEMA_TRANSFER = _load_schema('transaction_transfer_' + _, TX_SCHEMA_VALIDATOR_ELECTION = _load_schema('transaction_validator_election_' + TX_SCHEMA_VERSION) +_, TX_SCHEMA_VALIDATOR_ELECTION_VOTE = _load_schema('transaction_validator_election_vote_' + + TX_SCHEMA_VERSION) + def _validate_schema(schema, body): """Validate data against a schema""" diff --git a/bigchaindb/common/schema/transaction_v2.0.yaml b/bigchaindb/common/schema/transaction_v2.0.yaml index c727b278..6f7deea9 100644 --- a/bigchaindb/common/schema/transaction_v2.0.yaml +++ b/bigchaindb/common/schema/transaction_v2.0.yaml @@ -59,6 +59,7 @@ definitions: - CREATE - TRANSFER - VALIDATOR_ELECTION + - VALIDATOR_ELECTION_VOTE asset: type: object additionalProperties: false diff --git a/bigchaindb/common/schema/transaction_validator_election_vote_v2.0.yaml b/bigchaindb/common/schema/transaction_validator_election_vote_v2.0.yaml new file mode 100644 index 00000000..714cfe3e --- /dev/null +++ b/bigchaindb/common/schema/transaction_validator_election_vote_v2.0.yaml @@ -0,0 +1,27 @@ +--- +"$schema": "http://json-schema.org/draft-04/schema#" +type: object +title: Validator Election Vote Schema - Vote on a validator set change +required: +- operation +- outputs +properties: + operation: "VALIDATOR_ELECTION_VOTE" + outputs: + type: array + items: + "$ref": "#/definitions/output" +definitions: + output: + type: object + properties: + condition: + type: object + required: + - uri + properties: + uri: + type: string + pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\ + (fpt=ed25519-sha-256(&)?|cost=[0-9]+(&)?|\ + subtypes=ed25519-sha-256(&)?){2,3}$" diff --git a/bigchaindb/common/transaction.py b/bigchaindb/common/transaction.py index 7098d366..f232eb81 100644 --- a/bigchaindb/common/transaction.py +++ b/bigchaindb/common/transaction.py @@ -18,6 +18,7 @@ from sha3 import sha3_256 from bigchaindb.common.crypto import PrivateKey, hash_data from bigchaindb.common.exceptions import (KeypairMismatchException, + InputDoesNotExist, DoubleSpend, InvalidHash, InvalidSignature, AmountError, AssetIdMismatch, ThresholdTooDeep) @@ -523,11 +524,11 @@ class Transaction(object): # Asset payloads for 'CREATE' operations must be None or # dicts holding a `data` property. Asset payloads for 'TRANSFER' # operations must be dicts holding an `id` property. 
- if (operation == Transaction.CREATE and + if (operation == self.CREATE and asset is not None and not (isinstance(asset, dict) and 'data' in asset)): raise TypeError(('`asset` must be None or a dict holding a `data` ' " property instance for '{}' Transactions".format(operation))) - elif (operation == Transaction.TRANSFER and + elif (operation == self.TRANSFER and not (isinstance(asset, dict) and 'id' in asset)): raise TypeError(('`asset` must be a dict holding an `id` property ' "for 'TRANSFER' Transactions".format(operation))) @@ -555,9 +556,9 @@ class Transaction(object): structure containing relevant information for storing them in a UTXO set, and performing validation. """ - if self.operation == Transaction.CREATE: + if self.operation == self.CREATE: self._asset_id = self._id - elif self.operation == Transaction.TRANSFER: + elif self.operation == self.TRANSFER: self._asset_id = self.asset['id'] return (UnspentOutput( transaction_id=self._id, @@ -649,6 +650,31 @@ class Transaction(object): (inputs, outputs) = cls.validate_create(tx_signers, recipients, asset, metadata) return cls(cls.CREATE, {'data': asset}, inputs, outputs, metadata) + @classmethod + def validate_transfer(cls, inputs, recipients, asset_id, metadata): + if not isinstance(inputs, list): + raise TypeError('`inputs` must be a list instance') + if len(inputs) == 0: + raise ValueError('`inputs` must contain at least one item') + if not isinstance(recipients, list): + raise TypeError('`recipients` must be a list instance') + if len(recipients) == 0: + raise ValueError('`recipients` list cannot be empty') + + outputs = [] + for recipient in recipients: + if not isinstance(recipient, tuple) or len(recipient) != 2: + raise ValueError(('Each `recipient` in the list must be a' + ' tuple of `([],' + ' )`')) + pub_keys, amount = recipient + outputs.append(Output.generate(pub_keys, amount)) + + if not isinstance(asset_id, str): + raise TypeError('`asset_id` must be a string') + + return (deepcopy(inputs), outputs) + @classmethod def transfer(cls, inputs, recipients, asset_id, metadata=None): """A simple way to generate a `TRANSFER` transaction. @@ -688,28 +714,7 @@ class Transaction(object): Returns: :class:`~bigchaindb.common.transaction.Transaction` """ - if not isinstance(inputs, list): - raise TypeError('`inputs` must be a list instance') - if len(inputs) == 0: - raise ValueError('`inputs` must contain at least one item') - if not isinstance(recipients, list): - raise TypeError('`recipients` must be a list instance') - if len(recipients) == 0: - raise ValueError('`recipients` list cannot be empty') - - outputs = [] - for recipient in recipients: - if not isinstance(recipient, tuple) or len(recipient) != 2: - raise ValueError(('Each `recipient` in the list must be a' - ' tuple of `([],' - ' )`')) - pub_keys, amount = recipient - outputs.append(Output.generate(pub_keys, amount)) - - if not isinstance(asset_id, str): - raise TypeError('`asset_id` must be a string') - - inputs = deepcopy(inputs) + (inputs, outputs) = cls.validate_transfer(inputs, recipients, asset_id, metadata) return cls(cls.TRANSFER, {'id': asset_id}, inputs, outputs, metadata) def __eq__(self, other): @@ -954,7 +959,7 @@ class Transaction(object): # greatly, as we do not have to check against `None` values. 
return self._inputs_valid(['dummyvalue' for _ in self.inputs]) - elif self.operation == Transaction.TRANSFER: + elif self.operation == self.TRANSFER: return self._inputs_valid([output.fulfillment.condition_uri for output in outputs]) else: @@ -1098,8 +1103,8 @@ class Transaction(object): tx = Transaction._remove_signatures(self.to_dict()) return Transaction._to_str(tx) - @staticmethod - def get_asset_id(transactions): + @classmethod + def get_asset_id(cls, transactions): """Get the asset id from a list of :class:`~.Transactions`. This is useful when we want to check if the multiple inputs of a @@ -1123,7 +1128,7 @@ class Transaction(object): transactions = [transactions] # create a set of the transactions' asset ids - asset_ids = {tx.id if tx.operation == Transaction.CREATE + asset_ids = {tx.id if tx.operation == tx.CREATE else tx.asset['id'] for tx in transactions} @@ -1242,3 +1247,56 @@ class Transaction(object): @classmethod def validate_schema(cls, tx): pass + + def validate_transfer_inputs(self, bigchain, current_transactions=[]): + # store the inputs so that we can check if the asset ids match + input_txs = [] + input_conditions = [] + for input_ in self.inputs: + input_txid = input_.fulfills.txid + input_tx = bigchain.get_transaction(input_txid) + + if input_tx is None: + for ctxn in current_transactions: + if ctxn.id == input_txid: + input_tx = ctxn + + if input_tx is None: + raise InputDoesNotExist("input `{}` doesn't exist" + .format(input_txid)) + + spent = bigchain.get_spent(input_txid, input_.fulfills.output, + current_transactions) + if spent: + raise DoubleSpend('input `{}` was already spent' + .format(input_txid)) + + output = input_tx.outputs[input_.fulfills.output] + input_conditions.append(output) + input_txs.append(input_tx) + + # Validate that all inputs are distinct + links = [i.fulfills.to_uri() for i in self.inputs] + if len(links) != len(set(links)): + raise DoubleSpend('tx "{}" spends inputs twice'.format(self.id)) + + # validate asset id + asset_id = self.get_asset_id(input_txs) + if asset_id != self.asset['id']: + raise AssetIdMismatch(('The asset id of the input does not' + ' match the asset id of the' + ' transaction')) + + input_amount = sum([input_condition.amount for input_condition in input_conditions]) + output_amount = sum([output_condition.amount for output_condition in self.outputs]) + + if output_amount != input_amount: + raise AmountError(('The amount used in the inputs `{}`' + ' needs to be same as the amount used' + ' in the outputs `{}`') + .format(input_amount, output_amount)) + + if not self.inputs_valid(input_conditions): + raise InvalidSignature('Transaction signature is invalid.') + + return True diff --git a/bigchaindb/core.py b/bigchaindb/core.py index 354a39a0..f9613fe0 100644 --- a/bigchaindb/core.py +++ b/bigchaindb/core.py @@ -1,6 +1,7 @@ """This module contains all the goodness to integrate BigchainDB with Tendermint.""" import logging +import codecs from abci.application import BaseApplication from abci.types_pb2 import ( @@ -42,11 +43,13 @@ class App(BaseApplication): self.validators = None self.new_height = None - def init_chain(self, validators): + def init_chain(self, genesis): """Initialize chain with block of height 0""" + validator_set = [decode_validator(v) for v in genesis.validators] block = Block(app_hash='', height=0, transactions=[]) self.bigchaindb.store_block(block._asdict()) + self.bigchaindb.store_validator_set(1, validator_set) return ResponseInitChain() def info(self, request): @@ -129,11 +132,11 @@ class 
App(BaseApplication): else: self.block_txn_hash = block['app_hash'] - validator_updates = self.bigchaindb.get_validator_update() - validator_updates = [encode_validator(v) for v in validator_updates] - - # set sync status to true - self.bigchaindb.delete_validator_update() + # TODO: calculate if an election has concluded + # NOTE: ensure the local validator set is updated + # validator_updates = self.bigchaindb.get_validator_update() + # validator_updates = [encode_validator(v) for v in validator_updates] + validator_updates = [] # Store pre-commit state to recover in case there is a crash # during `commit` @@ -176,3 +179,10 @@ def encode_validator(v): return Validator(pub_key=pub_key, address=b'', power=v['power']) + + +def decode_validator(v): + return {'address': codecs.encode(v.address, 'hex').decode().upper().rstrip('\n'), + 'pub_key': {'type': v.pub_key.type, + 'data': codecs.encode(v.pub_key.data, 'base64').decode().rstrip('\n')}, + 'voting_power': v.power} diff --git a/bigchaindb/lib.py b/bigchaindb/lib.py index b0545d83..13d1ab71 100644 --- a/bigchaindb/lib.py +++ b/bigchaindb/lib.py @@ -460,19 +460,13 @@ class BigchainDB(object): def fastquery(self): return fastquery.FastQuery(self.connection) - def get_validators(self): - try: - resp = requests.get('{}validators'.format(self.endpoint)) - validators = resp.json()['result']['validators'] - for v in validators: - v.pop('accum') - v.pop('address') + def get_validators(self, height=None): + result = backend.query.get_validator_set(self.connection, height) + validators = result['validators'] + for v in validators: + v.pop('address') - return validators - - except requests.exceptions.RequestException as e: - logger.error('Error while connecting to Tendermint HTTP API') - raise e + return validators def get_validator_update(self): update = backend.query.get_validator_update(self.connection) @@ -484,6 +478,14 @@ class BigchainDB(object): def store_pre_commit_state(self, state): return backend.query.store_pre_commit_state(self.connection, state) + def store_validator_set(self, height, validators): + """Store validator set at a given `height`. + NOTE: If the validator set already exists at that `height` then an + exception will be raised. 
+ """ + return backend.query.store_validator_set(self.connection, {'height': height, + 'validators': validators}) + Block = namedtuple('Block', ('app_hash', 'height', 'transactions')) diff --git a/bigchaindb/models.py b/bigchaindb/models.py index 1ab0dd0e..dd9a210a 100644 --- a/bigchaindb/models.py +++ b/bigchaindb/models.py @@ -1,7 +1,4 @@ -from bigchaindb.common.exceptions import (InvalidSignature, DoubleSpend, - InputDoesNotExist, - TransactionNotInValidBlock, - AssetIdMismatch, AmountError, +from bigchaindb.common.exceptions import (InvalidSignature, DuplicateTransaction) from bigchaindb.common.transaction import Transaction from bigchaindb.common.utils import (validate_txn_obj, validate_key) @@ -32,64 +29,12 @@ class Transaction(Transaction): if bigchain.get_transaction(self.to_dict()['id']) or duplicates: raise DuplicateTransaction('transaction `{}` already exists' .format(self.id)) + + if not self.inputs_valid(input_conditions): + raise InvalidSignature('Transaction signature is invalid.') + elif self.operation == Transaction.TRANSFER: - # store the inputs so that we can check if the asset ids match - input_txs = [] - for input_ in self.inputs: - input_txid = input_.fulfills.txid - input_tx, status = bigchain.\ - get_transaction(input_txid, include_status=True) - - if input_tx is None: - for ctxn in current_transactions: - # assume that the status as valid for previously validated - # transactions in current round - if ctxn.id == input_txid: - input_tx = ctxn - status = bigchain.TX_VALID - - if input_tx is None: - raise InputDoesNotExist("input `{}` doesn't exist" - .format(input_txid)) - - if status != bigchain.TX_VALID: - raise TransactionNotInValidBlock( - 'input `{}` does not exist in a valid block'.format( - input_txid)) - - spent = bigchain.get_spent(input_txid, input_.fulfills.output, - current_transactions) - if spent and spent.id != self.id: - raise DoubleSpend('input `{}` was already spent' - .format(input_txid)) - - output = input_tx.outputs[input_.fulfills.output] - input_conditions.append(output) - input_txs.append(input_tx) - - # Validate that all inputs are distinct - links = [i.fulfills.to_uri() for i in self.inputs] - if len(links) != len(set(links)): - raise DoubleSpend('tx "{}" spends inputs twice'.format(self.id)) - - # validate asset id - asset_id = Transaction.get_asset_id(input_txs) - if asset_id != self.asset['id']: - raise AssetIdMismatch(('The asset id of the input does not' - ' match the asset id of the' - ' transaction')) - - input_amount = sum([input_condition.amount for input_condition in input_conditions]) - output_amount = sum([output_condition.amount for output_condition in self.outputs]) - - if output_amount != input_amount: - raise AmountError(('The amount used in the inputs `{}`' - ' needs to be same as the amount used' - ' in the outputs `{}`') - .format(input_amount, output_amount)) - - if not self.inputs_valid(input_conditions): - raise InvalidSignature('Transaction signature is invalid.') + self.validate_transfer_inputs(bigchain, current_transactions) return self diff --git a/bigchaindb/upsert_validator/__init__.py b/bigchaindb/upsert_validator/__init__.py index 2d415220..a318e861 100644 --- a/bigchaindb/upsert_validator/__init__.py +++ b/bigchaindb/upsert_validator/__init__.py @@ -1,2 +1,3 @@ from bigchaindb.upsert_validator.validator_election import ValidatorElection # noqa +from bigchaindb.upsert_validator.validator_election_vote import ValidatorElectionVote # noqa diff --git a/bigchaindb/upsert_validator/validator_election_vote.py 
b/bigchaindb/upsert_validator/validator_election_vote.py new file mode 100644 index 00000000..1405ff8f --- /dev/null +++ b/bigchaindb/upsert_validator/validator_election_vote.py @@ -0,0 +1,65 @@ +import base58 + +from bigchaindb.common.transaction import Transaction +from bigchaindb.common.schema import (_validate_schema, + TX_SCHEMA_COMMON, + TX_SCHEMA_TRANSFER, + TX_SCHEMA_VALIDATOR_ELECTION_VOTE) + + +class ValidatorElectionVote(Transaction): + + VALIDATOR_ELECTION_VOTE = 'VALIDATOR_ELECTION_VOTE' + # NOTE: This class inherits the TRANSFER txn type. The `TRANSFER` property is + # overridden to re-use methods from the parent class + TRANSFER = VALIDATOR_ELECTION_VOTE + ALLOWED_OPERATIONS = (VALIDATOR_ELECTION_VOTE,) + + def validate(self, bigchain, current_transactions=[]): + """Validate election vote transaction + NOTE: There are no additional validity conditions on casting votes, i.e. + a vote is just a valid TRANSFER transaction + + For more details refer to BEP-21: https://github.com/bigchaindb/BEPs/tree/master/21 + + Args: + bigchain (BigchainDB): an instantiated bigchaindb.lib.BigchainDB object. + + Returns: + `True` if the election vote is valid + + Raises: + ValidationError: If the election vote is invalid + """ + self.validate_transfer_inputs(bigchain, current_transactions) + return self + + @classmethod + def to_public_key(cls, election_id): + return base58.b58encode(bytes.fromhex(election_id)) + + @classmethod + def generate(cls, inputs, recipients, election_id, metadata=None): + (inputs, outputs) = cls.validate_transfer(inputs, recipients, election_id, metadata) + election_vote = cls(cls.VALIDATOR_ELECTION_VOTE, {'id': election_id}, inputs, outputs, metadata) + cls.validate_schema(election_vote.to_dict(), skip_id=True) + return election_vote + + @classmethod + def validate_schema(cls, tx, skip_id=False): + """Validate the validator election vote transaction.
Since `VALIDATOR_ELECTION_VOTE` extends `TRANSFER` + transaction, all the validations for `TRANSFER` transactions should be inherited + """ + if not skip_id: + cls.validate_id(tx) + _validate_schema(TX_SCHEMA_COMMON, tx) + _validate_schema(TX_SCHEMA_TRANSFER, tx) + _validate_schema(TX_SCHEMA_VALIDATOR_ELECTION_VOTE, tx) + + @classmethod + def create(cls, tx_signers, recipients, metadata=None, asset=None): + raise NotImplementedError + + @classmethod + def transfer(cls, tx_signers, recipients, metadata=None, asset=None): + raise NotImplementedError diff --git a/bigchaindb/version.py b/bigchaindb/version.py index a48ee6ed..70929438 100644 --- a/bigchaindb/version.py +++ b/bigchaindb/version.py @@ -1,2 +1,2 @@ -__version__ = '2.0.0b4' -__short_version__ = '2.0b4' +__version__ = '2.0.0b5' +__short_version__ = '2.0b5' diff --git a/docker-compose.yml b/docker-compose.yml index 9201e12c..9c8fa3a7 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -44,7 +44,7 @@ services: retries: 3 command: '.ci/entrypoint.sh' tendermint: - image: tendermint/tendermint:0.22.3 + image: tendermint/tendermint:0.22.8 # volumes: # - ./tmdata:/tendermint entrypoint: '' diff --git a/docs/contributing/source/dev-setup-coding-and-contribution-process/run-dev-network-stack.md b/docs/contributing/source/dev-setup-coding-and-contribution-process/run-dev-network-stack.md index cb8555f3..b4719128 100644 --- a/docs/contributing/source/dev-setup-coding-and-contribution-process/run-dev-network-stack.md +++ b/docs/contributing/source/dev-setup-coding-and-contribution-process/run-dev-network-stack.md @@ -32,7 +32,7 @@ $ curl -fOL https://raw.githubusercontent.com/bigchaindb/bigchaindb/${GIT_BRANCH ## Quick Start If you run `stack.sh` out of the box i.e. without any configuration changes, you will be able to deploy a 4 node -BigchainDB network with Docker containers, created from `master` branch of `bigchaindb/bigchaindb` repo and Tendermint version `0.22.3`. +BigchainDB network with Docker containers, created from `master` branch of `bigchaindb/bigchaindb` repo and Tendermint version `0.22.8`. **Note**: Run `stack.sh` with either root or non-root user with sudo enabled. @@ -90,7 +90,7 @@ $ bash stack.sh -h variable. (default: master) ENV[TM_VERSION] - (Optional) Tendermint version to use for the setup. (default: 0.22.3) + (Optional) Tendermint version to use for the setup. (default: 0.22.8) ENV[MONGO_VERSION] (Optional) MongoDB version to use with the setup. (default: 3.6) @@ -171,8 +171,8 @@ $ export STACK_REPO=bigchaindb/bigchaindb # Default: master $ export STACK_BRANCH=master -#Optional, since 0.22.3 is the default tendermint version. -$ export TM_VERSION=0.22.3 +#Optional, since 0.22.8 is the default tendermint version. +$ export TM_VERSION=0.22.8 #Optional, since 3.6 is the default MongoDB version. $ export MONGO_VERSION=3.6 @@ -222,8 +222,8 @@ $ export STACK_REPO=bigchaindb/bigchaindb # Default: master $ export STACK_BRANCH=master -#Optional, since 0.22.3 is the default tendermint version -$ export TM_VERSION=0.22.3 +#Optional, since 0.22.8 is the default tendermint version +$ export TM_VERSION=0.22.8 #Optional, since 3.6 is the default MongoDB version.
$ export MONGO_VERSION=3.6 diff --git a/docs/contributing/source/dev-setup-coding-and-contribution-process/run-node-as-processes.md b/docs/contributing/source/dev-setup-coding-and-contribution-process/run-node-as-processes.md index 2c739555..4c98578a 100644 --- a/docs/contributing/source/dev-setup-coding-and-contribution-process/run-node-as-processes.md +++ b/docs/contributing/source/dev-setup-coding-and-contribution-process/run-node-as-processes.md @@ -19,13 +19,13 @@ After the installation of MongoDB is complete, run MongoDB using `sudo mongod` ### Installing a Tendermint Executable -Find [the version number of the latest Tendermint release](https://github.com/tendermint/tendermint/releases) and install it using the following, where 0.22.3 should be replaced by the latest released version number: +Find [the version number of the latest Tendermint release](https://github.com/tendermint/tendermint/releases) and install it using the following, where 0.22.8 should be replaced by the latest released version number: ```bash $ sudo apt install -y unzip -$ wget https://github.com/tendermint/tendermint/releases/download/v0.22.3/tendermint_0.22.3_linux_amd64.zip -$ unzip tendermint_0.22.3_linux_amd64.zip -$ rm tendermint_0.22.3_linux_amd64.zip +$ wget https://github.com/tendermint/tendermint/releases/download/v0.22.8-autodraft/tendermint_0.22.8_linux_amd64.zip +$ unzip tendermint_0.22.8_linux_amd64.zip +$ rm tendermint_0.22.8_linux_amd64.zip $ sudo mv tendermint /usr/local/bin ``` diff --git a/docs/root/source/index.rst b/docs/root/source/index.rst index 69030496..b6f478f8 100644 --- a/docs/root/source/index.rst +++ b/docs/root/source/index.rst @@ -91,4 +91,5 @@ More About BigchainDB transaction-concepts store-files permissions + private-data Data Models diff --git a/docs/root/source/permissions.rst b/docs/root/source/permissions.rst index 16b339eb..0c31d1dc 100644 --- a/docs/root/source/permissions.rst +++ b/docs/root/source/permissions.rst @@ -53,20 +53,7 @@ You could do more elaborate things too. As one example, each time someone writes Read Permissions ================ -All the data stored in a BigchainDB network can be read by anyone with access to that network. One *can* store encrypted data, but if the decryption key ever leaks out, then the encrypted data can be read, decrypted, and leak out too. (Deleting the encrypted data is :doc:`not an option `.) - -The permission to read some specific information (e.g. a music file) can be thought of as an *asset*. (In many countries, that permission or "right" is a kind of intellectual property.) -BigchainDB can be used to register that asset and transfer it from owner to owner. -Today, BigchainDB does not have a way to restrict read access of data stored in a BigchainDB network, but many third-party services do offer that (e.g. Google Docs, Dropbox). -In principle, a third party service could ask a BigchainDB network to determine if a particular user has permission to read some particular data. Indeed they could use BigchainDB to keep track of *all* the rights a user has for some data (not just the right to read it). -That third party could also use BigchainDB to store audit logs, i.e. records of every read, write or other operation on stored data. - -BigchainDB can be used in other ways to help parties exchange private data: - -- It can be used to publicly disclose the *availability* of some private data (stored elsewhere). For example, there might be a description of the data and a price. 
-It can be used to record the TLS handshakes which two parties sent to each other to establish an encrypted and authenticated TLS connection, which they could use to exchange private data with each other. (The stored handshake information wouldn't be enough, by itself, to decrypt the data.) It would be a "proof of TLS handshake." -- See the BigchainDB `Privacy Protocols repository `_ for more techniques. - +See the page titled :doc:`BigchainDB, Privacy and Private Data <private-data>`. Role-Based Access Control (RBAC) ================================ diff --git a/docs/root/source/private-data.rst b/docs/root/source/private-data.rst new file mode 100644 index 00000000..7088931f --- /dev/null +++ b/docs/root/source/private-data.rst @@ -0,0 +1,100 @@ +BigchainDB, Privacy and Private Data +------------------------------------ + +Basic Facts +=========== + +#. One can store arbitrary data (including encrypted data) in a BigchainDB network, within limits: there’s a maximum transaction size. Every transaction has a ``metadata`` section which can store almost any Unicode string (up to some maximum length). Similarly, every CREATE transaction has an ``asset.data`` section which can store almost any Unicode string. +#. The data stored in certain BigchainDB transaction fields must not be encrypted, e.g. public keys and amounts. BigchainDB doesn’t offer private transactions akin to Zcoin. +#. Once data has been stored in a BigchainDB network, it’s best to assume it can’t be changed or deleted. +#. Every node in a BigchainDB network has a full copy of all the stored data. +#. Every node in a BigchainDB network can read all the stored data. +#. Everyone with full access to a BigchainDB node (e.g. the sysadmin of a node) can read all the data stored on that node. +#. Everyone given access to a node via the BigchainDB HTTP API can find and read all the data stored by BigchainDB. The list of people with access might be quite short. +#. If the connection between an external user and a BigchainDB node isn’t encrypted (using HTTPS, for example), then a wiretapper can read all HTTP requests and responses in transit. +#. If someone gets access to plaintext (regardless of where they got it), then they can (in principle) share it with the whole world. One can make it difficult for them to do that, e.g. if it is a lot of data and they only get access inside a secure room where they are searched as they leave the room. + +Storing Private Data Off-Chain +============================== + +A system could store data off-chain, e.g. in a third-party database, document store, or content management system (CMS), and it could use BigchainDB to: + +- Keep track of who has read permissions (or other permissions) in a third-party system. An example of how this could be done is described below. +- Keep a permanent record of all requests made to the third-party system. +- Store hashes of documents-stored-elsewhere, so that a change in any document can be detected. +- Record all handshake-establishing requests and responses between two off-chain parties (e.g. a Diffie-Hellman key exchange), so as to prove that they established an encrypted tunnel (without giving readers access to that tunnel). There are more details about this idea in `the BigchainDB Privacy Protocols repository `_.
+ +A simple way to record who has read permission on a particular document would be for the third-party system (“DocPile”) to store a CREATE transaction in a BigchainDB network for every document+user pair, to indicate that that user has read permissions for that document. The transaction could be signed by DocPile (or maybe by a document owner, as a variation). The asset data field would contain 1) the unique ID of the user and 2) the unique ID of the document. The one output on the CREATE transaction would only be transferable/spendable by DocPile (or, again, a document owner). + +To revoke the read permission, DocPile could create a TRANSFER transaction, to spend the one output on the original CREATE transaction, with a metadata field to say that the user in question no longer has read permission on that document. + +This can be carried on indefinitely, i.e. another TRANSFER transaction could be created by DocPile to indicate that the user now has read permissions again. + +DocPile can figure out if a given user has read permissions on a given document by reading the last transaction in the CREATE → TRANSFER → TRANSFER → etc. chain for that user+document pair. + +There are other ways to accomplish the same thing. The above is just one example. + +You might have noticed that the above example didn’t treat the “read permission” as an asset owned (controlled) by a user because if the permission asset is given to (transferred to or created by) the user then it cannot be controlled any further (by DocPile) until the user transfers it back to DocPile. Moreover, the user could transfer the asset to someone else, which might be problematic. + +Storing Private Data On-Chain, Encrypted +======================================== + +There are many ways to store private data on-chain, encrypted. Every use case has its own objectives and constraints, and the best solution depends on the use case. `The BigchainDB consulting team `_, along with our partners, can help you design the best solution for your use case. + +Below we describe some example system setups, using various crypto primitives, to give a sense of what’s possible. + +Please note: + +- Ed25519 keypairs are designed for signing and verifying cryptographic signatures, `not for encrypting and decrypting messages `_. For encryption, you should use keypairs designed for encryption, such as X25519. +- If someone (or some group) publishes how to decrypt some encrypted data on-chain, then anyone with access to that encrypted data will be able to get the plaintext. The data can’t be deleted. +- Encrypted data can’t be indexed or searched by MongoDB. (It can index and search the ciphertext, but that’s not very useful.) One might use homomorphic encryption to index and search encrypted data, but MongoDB doesn’t have any plans to support that any time soon. If there is indexing or keyword search needed, then some fields of the ``asset.data`` or ``metadata`` objects can be left as plain text and the sensitive information can be stored in an encrypted child-object. + +System Example 1 +~~~~~~~~~~~~~~~~ + +Encrypt the data with a symmetric key and store the ciphertext on-chain (in ``metadata`` or ``asset.data``). To communicate the key to a third party, use their public key to encrypt the symmetric key and send them that. They can decrypt the symmetric key with their private key, and then use that symmetric key to decrypt the on-chain ciphertext. 
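+
+A minimal sketch of that flow, assuming the PyNaCl library (an assumption: PyNaCl is not bundled with BigchainDB, and any comparable X25519 / secret-key library would work), might look like this:
+
+.. code-block:: python
+
+    # Sketch only: PyNaCl is an assumed third-party library, not a BigchainDB dependency.
+    import nacl.utils
+    import nacl.secret
+    import nacl.public
+
+    # Symmetric key and ciphertext; the ciphertext is what would go on-chain,
+    # suitably encoded, in metadata or asset.data.
+    sym_key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)
+    ciphertext = nacl.secret.SecretBox(sym_key).encrypt(b'private payload')
+
+    # The third party has an X25519 *encryption* keypair (not an Ed25519
+    # signing keypair).
+    third_party = nacl.public.PrivateKey.generate()
+
+    # Encrypt the symmetric key to the third party's public key and send the
+    # result to them off-chain.
+    wrapped_key = nacl.public.SealedBox(third_party.public_key).encrypt(sym_key)
+
+    # The third party recovers the symmetric key with their private key,
+    # then decrypts the on-chain ciphertext.
+    recovered_key = nacl.public.SealedBox(third_party).decrypt(wrapped_key)
+    plaintext = nacl.secret.SecretBox(recovered_key).decrypt(ciphertext)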
+ +The reason for using a symmetric key along with public/private keypairs is so the ciphertext only has to be stored once. + +System Example 2 +~~~~~~~~~~~~~~~~ + +This example uses `proxy re-encryption `_: + +#. MegaCorp encrypts some data using its own public key, then stores that encrypted data (ciphertext 1) in a BigchainDB network. +#. MegaCorp wants to let others read that encrypted data, but without ever sharing their private key and without having to re-encrypt the data themselves for every new recipient. Instead, they find a “proxy” named Moxie to provide proxy re-encryption services. +#. Zorban contacts MegaCorp and asks for permission to read the data. +#. MegaCorp asks Zorban for his public key. +#. MegaCorp generates a “re-encryption key” and sends it to their proxy, Moxie. +#. Moxie (the proxy) uses the re-encryption key to encrypt ciphertext 1, creating ciphertext 2. +#. Moxie sends ciphertext 2 to Zorban (or to MegaCorp who forwards it to Zorban). +#. Zorban uses his private key to decrypt ciphertext 2, getting the original un-encrypted data. + +Note: + +- The proxy only ever sees ciphertext. They never see any un-encrypted data. +- Zorban never got the ability to decrypt ciphertext 1, i.e. the on-chain data. +- There are variations on the above flow. + +System Example 3 +~~~~~~~~~~~~~~~~ + +This example uses `erasure coding `_: + +#. Erasure-code the data into n pieces. +#. Encrypt each of the n pieces with a different encryption key. +#. Store the n encrypted pieces on-chain, e.g. in n separate transactions. +#. Share each of the n decryption keys with a different party. + +If k < n of the key-holders get and decrypt k of the pieces, they can reconstruct the original plaintext. Fewer than k would not be enough. + +System Example 4 +~~~~~~~~~~~~~~~~ + +This setup could be used in an enterprise blockchain scenario where a special node should be able to see parts of the data, but the others should not. + +- The special node generates an X25519 keypair (or similar asymmetric *encryption* keypair). +- A BigchainDB end user finds out the X25519 public key (encryption key) of the special node. +- The end user creates a valid BigchainDB transaction, with either the asset.data or the metadata (or both) encrypted using the above-mentioned public key. +- This is only done for transactions where the contents of asset.data or metadata don't matter for validation, so all node operators can validate the transaction. +- The special node is able to decrypt the encrypted data, but the other node operators can't, and nor can any other end user. diff --git a/docs/server/source/appendices/all-in-one-bigchaindb.md b/docs/server/source/appendices/all-in-one-bigchaindb.md new file mode 100644 index 00000000..246b4e50 --- /dev/null +++ b/docs/server/source/appendices/all-in-one-bigchaindb.md @@ -0,0 +1,85 @@ +# Run BigchainDB with all-in-one Docker + +For those who like using Docker and wish to experiment with BigchainDB in +non-production environments, we currently maintain a BigchainDB all-in-one +Docker image and a +`Dockerfile-all-in-one` that can be used to build an image for `bigchaindb`. + +This image contains all the services required for a BigchainDB node, i.e.
+ +- BigchainDB Server +- MongoDB +- Tendermint + +**Note:** **NOT for Production Use:** *This is a single-node, opinionated image not well suited for a network deployment.* +*This image is to help quick deployment for early adopters; for a more standard approach, please refer to one of our deployment guides:* + +- [BigchainDB developer setup guides](https://docs.bigchaindb.com/projects/contributing/en/latest/dev-setup-coding-and-contribution-process/index.html). +- [BigchainDB with Kubernetes](http://docs.bigchaindb.com/projects/server/en/latest/k8s-deployment-template/index.html). + +## Prerequisite(s) +- [Docker](https://docs.docker.com/engine/installation/) + +## Pull and Run the Image from Docker Hub + +With Docker installed, you can proceed as follows. + +In a terminal shell, pull the latest version of the BigchainDB all-in-one Docker image using: +```text +$ docker pull bigchaindb/bigchaindb:all-in-one + +$ docker run \ + --detach \ + --name bigchaindb \ + --publish 9984:9984 \ + --publish 9985:9985 \ + --publish 27017:27017 \ + --publish 26657:26657 \ + --volume $HOME/bigchaindb_docker/mongodb/data/db:/data/db \ + --volume $HOME/bigchaindb_docker/mongodb/data/configdb:/data/configdb \ + --volume $HOME/bigchaindb_docker/tendermint:/tendermint \ + bigchaindb/bigchaindb:all-in-one +``` + +Let's analyze that command: + +* `docker run` tells Docker to run some image +* `--detach` run the container in the background +* `--publish 9984:9984` map the host port `9984` to the container port `9984` + (the BigchainDB API server) + * `9985` BigchainDB Websocket server + * `27017` Default port for MongoDB + * `26657` Tendermint RPC server +* `--volume "$HOME/bigchaindb_docker/mongodb:/data"` map the host directory + `$HOME/bigchaindb_docker/mongodb` to the container directory `/data`; + this allows us to have the data persisted on the host machine; + you can read more in the [official Docker + documentation](https://docs.docker.com/engine/tutorials/dockervolumes) + * `$HOME/bigchaindb_docker/tendermint:/tendermint` to persist Tendermint data. +* `bigchaindb/bigchaindb:all-in-one` the image to use. All the options after the container name are passed on to the entrypoint inside the container. + +## Verify + +```text +$ docker ps | grep bigchaindb +``` + +Send your first transaction using [BigchainDB drivers](../drivers-clients/index.html). + + +## Building Your Own Image + +Assuming you have Docker installed, you would proceed as follows. + +In a terminal shell: +```text +git clone git@github.com:bigchaindb/bigchaindb.git +cd bigchaindb/ +``` + +Build the Docker image: +```text +docker build --file Dockerfile-all-in-one --tag <tag/name:latest> . +``` + +Now you can use your own image to run a BigchainDB all-in-one container. diff --git a/docs/server/source/appendices/index.rst b/docs/server/source/appendices/index.rst index 033a84d2..18e12ba2 100755 --- a/docs/server/source/appendices/index.rst +++ b/docs/server/source/appendices/index.rst @@ -4,7 +4,6 @@ Appendices ..
toctree:: :maxdepth: 1 - install-os-level-deps json-serialization cryptography the-bigchaindb-class @@ -15,3 +14,4 @@ Appendices firewall-notes ntp-notes licenses + all-in-one-bigchaindb diff --git a/docs/server/source/appendices/install-os-level-deps.md b/docs/server/source/appendices/install-os-level-deps.md deleted file mode 100644 index 4aa65c67..00000000 --- a/docs/server/source/appendices/install-os-level-deps.md +++ /dev/null @@ -1,17 +0,0 @@ -# How to Install OS-Level Dependencies - -BigchainDB Server has some OS-level dependencies that must be installed. - -On Ubuntu 16.04, we found that the following was enough: -```text -sudo apt-get update -sudo apt-get install libffi-dev libssl-dev -``` - -On Fedora 23–25, we found that the following was enough: -```text -sudo dnf update -sudo dnf install gcc-c++ redhat-rpm-config python3-devel libffi-devel -``` - -(If you're using a version of Fedora before version 22, you may have to use `yum` instead of `dnf`.) diff --git a/docs/server/source/clusters.md b/docs/server/source/clusters.md index be1d1c9c..23cbf1f3 100644 --- a/docs/server/source/clusters.md +++ b/docs/server/source/clusters.md @@ -2,7 +2,6 @@ A **BigchainDB Cluster** is a set of connected **BigchainDB Nodes**, managed by a **BigchainDB Consortium** (i.e. an organization). Those terms are defined in the [BigchainDB Terminology page](https://docs.bigchaindb.com/en/latest/terminology.html). - ## Consortium Structure & Governance The consortium might be a company, a foundation, a cooperative, or [some other form of organization](https://en.wikipedia.org/wiki/Organizational_structure). @@ -13,13 +12,6 @@ This documentation doesn't explain how to create a consortium, nor does it outli It's worth noting that the decentralization of a BigchainDB cluster depends, to some extent, on the decentralization of the associated consortium. See the pages about [decentralization](https://docs.bigchaindb.com/en/latest/decentralized.html) and [node diversity](https://docs.bigchaindb.com/en/latest/diversity.html). - -## Relevant Technical Documentation - -Anyone building or managing a BigchainDB cluster may be interested -in [our production deployment template](production-deployment-template/index.html). - - ## Cluster DNS Records and SSL Certificates We now describe how *we* set up the external (public-facing) DNS records for a BigchainDB cluster. Your consortium may opt to do it differently. @@ -30,14 +22,12 @@ There were several goals: * There should be no sharing of SSL certificates among BigchainDB node operators. * Optional: Allow clients to connect to a "random" BigchainDB node in the cluster at one particular domain (or subdomain). - ### Node Operator Responsibilities 1. Register a domain (or use one that you already have) for your BigchainDB node. You can use a subdomain if you like. For example, you might opt to use `abc-org73.net`, `api.dynabob8.io` or `figmentdb3.ninja`. 2. Get an SSL certificate for your domain or subdomain, and properly install it in your node (e.g. in your NGINX instance). 3. Create a DNS A Record mapping your domain or subdomain to the public IP address of your node (i.e. the one that serves the BigchainDB HTTP API). - ### Consortium Responsibilities Optional: The consortium managing the BigchainDB cluster could register a domain name and set up CNAME records mapping that domain name (or one of its subdomains) to each of the nodes in the cluster. 
For example, if the consortium registered `bdbcluster.io`, they could set up CNAME records like the following: diff --git a/docs/server/source/drivers-clients/index.rst b/docs/server/source/drivers-clients/index.rst index b124a598..3cf4d0fa 100644 --- a/docs/server/source/drivers-clients/index.rst +++ b/docs/server/source/drivers-clients/index.rst @@ -6,6 +6,7 @@ Libraries and Tools Maintained by the BigchainDB Team * `Python Driver `_ * `JavaScript / Node.js Driver `_ +* `Java driver `_ Community-Driven Libraries and Tools ------------------------------------ @@ -17,6 +18,5 @@ Community-Driven Libraries and Tools * `Haskell transaction builder `_ * `Go driver `_ -* `Java driver `_ * `Ruby driver `_ * `Ruby library for preparing/signing transactions and submitting them or querying a BigchainDB node (MIT licensed) `_ diff --git a/docs/server/source/index.rst b/docs/server/source/index.rst index c8f16d02..be9d1a10 100644 --- a/docs/server/source/index.rst +++ b/docs/server/source/index.rst @@ -10,13 +10,13 @@ BigchainDB Server Documentation simple-network-setup production-nodes/index clusters - production-deployment-template/index dev-and-test/index server-reference/index http-client-server-api events/index drivers-clients/index data-models/index + k8s-deployment-template/index release-notes glossary appendices/index diff --git a/docs/server/source/production-deployment-template/architecture.rst b/docs/server/source/k8s-deployment-template/architecture.rst similarity index 94% rename from docs/server/source/production-deployment-template/architecture.rst rename to docs/server/source/k8s-deployment-template/architecture.rst index 4d029f5c..2bfc9e4c 100644 --- a/docs/server/source/production-deployment-template/architecture.rst +++ b/docs/server/source/k8s-deployment-template/architecture.rst @@ -1,13 +1,25 @@ -Architecture of a BigchainDB Node -================================== +Architecture of a BigchainDB Node Running in a Kubernetes Cluster +================================================================= -A BigchainDB Production deployment is hosted on a Kubernetes cluster and includes: +.. note:: + + A highly-available Kubernetes cluster requires at least five virtual machines + (three for the master and two for your app's containers). + Therefore we don't recommend using Kubernetes to run a BigchainDB node + if that's the only thing the Kubernetes cluster will be running. + Instead, see **How to Set Up a BigchainDB Network**. + If your organization already *has* a big Kubernetes cluster running many containers, + and your organization has people who know Kubernetes, + then this Kubernetes deployment template might be helpful. + +If you deploy a BigchainDB node into a Kubernetes cluster +as described in these docs, it will include: * NGINX, OpenResty, BigchainDB, MongoDB and Tendermint `Kubernetes Services `_. -* NGINX, OpenResty, BigchainDB and MongoDB Monitoring Agent. +* NGINX, OpenResty, BigchainDB and MongoDB Monitoring Agent `Kubernetes Deployments `_. -* MongoDB and Tendermint `Kubernetes StatefulSet `_. +* MongoDB and Tendermint `Kubernetes StatefulSets `_. 
* Third party services like `3scale `_, `MongoDB Cloud Manager `_ and the `Azure Operations Management Suite diff --git a/docs/server/source/production-deployment-template/bigchaindb-network-on-kubernetes.rst b/docs/server/source/k8s-deployment-template/bigchaindb-network-on-kubernetes.rst similarity index 97% rename from docs/server/source/production-deployment-template/bigchaindb-network-on-kubernetes.rst rename to docs/server/source/k8s-deployment-template/bigchaindb-network-on-kubernetes.rst index 100dafb5..d3caf6a3 100644 --- a/docs/server/source/production-deployment-template/bigchaindb-network-on-kubernetes.rst +++ b/docs/server/source/k8s-deployment-template/bigchaindb-network-on-kubernetes.rst @@ -3,6 +3,17 @@ Kubernetes Template: Deploying a BigchainDB network =================================================== +.. note:: + + A highly-available Kubernetes cluster requires at least five virtual machines + (three for the master and two for your app's containers). + Therefore we don't recommend using Kubernetes to run a BigchainDB node + if that's the only thing the Kubernetes cluster will be running. + Instead, see **How to Set Up a BigchainDB Network**. + If your organization already *has* a big Kubernetes cluster running many containers, + and your organization has people who know Kubernetes, + then this Kubernetes deployment template might be helpful. + This page describes how to deploy a static BigchainDB + Tendermint network. If you want to deploy a stand-alone BigchainDB node in a BigchainDB cluster, diff --git a/docs/server/source/production-deployment-template/ca-installation.rst b/docs/server/source/k8s-deployment-template/ca-installation.rst similarity index 100% rename from docs/server/source/production-deployment-template/ca-installation.rst rename to docs/server/source/k8s-deployment-template/ca-installation.rst diff --git a/docs/server/source/production-deployment-template/client-tls-certificate.rst b/docs/server/source/k8s-deployment-template/client-tls-certificate.rst similarity index 100% rename from docs/server/source/production-deployment-template/client-tls-certificate.rst rename to docs/server/source/k8s-deployment-template/client-tls-certificate.rst diff --git a/docs/server/source/production-deployment-template/cloud-manager.rst b/docs/server/source/k8s-deployment-template/cloud-manager.rst similarity index 96% rename from docs/server/source/production-deployment-template/cloud-manager.rst rename to docs/server/source/k8s-deployment-template/cloud-manager.rst index fb0512df..458a8eeb 100644 --- a/docs/server/source/production-deployment-template/cloud-manager.rst +++ b/docs/server/source/k8s-deployment-template/cloud-manager.rst @@ -41,7 +41,7 @@ Configure MongoDB Cloud Manager for Monitoring * If you have authentication enabled, select the option to enable authentication and specify the authentication mechanism as per your - deployment. The default BigchainDB production deployment currently + deployment. The default BigchainDB Kubernetes deployment template currently supports ``X.509 Client Certificate`` as the authentication mechanism. 
* If you have TLS enabled, select the option to enable TLS/SSL for MongoDB diff --git a/docs/server/source/production-deployment-template/easy-rsa.rst b/docs/server/source/k8s-deployment-template/easy-rsa.rst similarity index 100% rename from docs/server/source/production-deployment-template/easy-rsa.rst rename to docs/server/source/k8s-deployment-template/easy-rsa.rst diff --git a/docs/server/source/k8s-deployment-template/index.rst b/docs/server/source/k8s-deployment-template/index.rst new file mode 100644 index 00000000..44a3fc07 --- /dev/null +++ b/docs/server/source/k8s-deployment-template/index.rst @@ -0,0 +1,40 @@ +Kubernetes Deployment Template +============================== + +.. note:: + + A highly-available Kubernetes cluster requires at least five virtual machines + (three for the master and two for your app's containers). + Therefore we don't recommend using Kubernetes to run a BigchainDB node + if that's the only thing the Kubernetes cluster will be running. + Instead, see **How to Set Up a BigchainDB Network**. + If your organization already *has* a big Kubernetes cluster running many containers, + and your organization has people who know Kubernetes, + then this Kubernetes deployment template might be helpful. + +This section outlines a way to deploy a BigchainDB node (or BigchainDB cluster) +on Microsoft Azure using Kubernetes. +You may choose to use it as a template or reference for your own deployment, +but *we make no claim that it is suitable for your purposes*. +Feel free change things to suit your needs or preferences. + + +.. toctree:: + :maxdepth: 1 + + workflow + ca-installation + server-tls-certificate + client-tls-certificate + revoke-tls-certificate + template-kubernetes-azure + node-on-kubernetes + node-config-map-and-secrets + log-analytics + cloud-manager + easy-rsa + upgrade-on-kubernetes + bigchaindb-network-on-kubernetes + tectonic-azure + troubleshoot + architecture diff --git a/docs/server/source/production-deployment-template/log-analytics.rst b/docs/server/source/k8s-deployment-template/log-analytics.rst similarity index 100% rename from docs/server/source/production-deployment-template/log-analytics.rst rename to docs/server/source/k8s-deployment-template/log-analytics.rst diff --git a/docs/server/source/production-deployment-template/node-config-map-and-secrets.rst b/docs/server/source/k8s-deployment-template/node-config-map-and-secrets.rst similarity index 86% rename from docs/server/source/production-deployment-template/node-config-map-and-secrets.rst rename to docs/server/source/k8s-deployment-template/node-config-map-and-secrets.rst index 7bf8d0de..d308bc11 100644 --- a/docs/server/source/production-deployment-template/node-config-map-and-secrets.rst +++ b/docs/server/source/k8s-deployment-template/node-config-map-and-secrets.rst @@ -3,6 +3,17 @@ How to Configure a BigchainDB Node ================================== +.. note:: + + A highly-available Kubernetes cluster requires at least five virtual machines + (three for the master and two for your app's containers). + Therefore we don't recommend using Kubernetes to run a BigchainDB node + if that's the only thing the Kubernetes cluster will be running. + Instead, see **How to Set Up a BigchainDB Network**. + If your organization already *has* a big Kubernetes cluster running many containers, + and your organization has people who know Kubernetes, + then this Kubernetes deployment template might be helpful. 
+ This page outlines the steps to set a bunch of configuration settings in your BigchainDB node. They are pushed to the Kubernetes cluster in two files, diff --git a/docs/server/source/production-deployment-template/node-on-kubernetes.rst b/docs/server/source/k8s-deployment-template/node-on-kubernetes.rst similarity index 96% rename from docs/server/source/production-deployment-template/node-on-kubernetes.rst rename to docs/server/source/k8s-deployment-template/node-on-kubernetes.rst index b6fbeedd..e9032ec4 100644 --- a/docs/server/source/production-deployment-template/node-on-kubernetes.rst +++ b/docs/server/source/k8s-deployment-template/node-on-kubernetes.rst @@ -3,7 +3,18 @@ Kubernetes Template: Deploy a Single BigchainDB Node ==================================================== -This page describes how to deploy a BigchainDB + Tendermint node +.. note:: + + A highly-available Kubernetes cluster requires at least five virtual machines + (three for the master and two for your app's containers). + Therefore we don't recommend using Kubernetes to run a BigchainDB node + if that's the only thing the Kubernetes cluster will be running. + Instead, see **How to Set Up a BigchainDB Network**. + If your organization already *has* a big Kubernetes cluster running many containers, + and your organization has people who know Kubernetes, + then this Kubernetes deployment template might be helpful. + +This page describes how to deploy a BigchainDB node using `Kubernetes `_. It assumes you already have a running Kubernetes cluster. @@ -29,7 +40,7 @@ If you don't have that file, then you need to get it. **Azure.** If you deployed your Kubernetes cluster on Azure using the Azure CLI 2.0 (as per :doc:`our template -<../production-deployment-template/template-kubernetes-azure>`), +<../k8s-deployment-template/template-kubernetes-azure>`), then you can get the ``~/.kube/config`` file using: .. code:: bash @@ -277,7 +288,7 @@ The first thing to do is create the Kubernetes storage classes. First, you need an Azure storage account. If you deployed your Kubernetes cluster on Azure using the Azure CLI 2.0 -(as per :doc:`our template <../production-deployment-template/template-kubernetes-azure>`), +(as per :doc:`our template <../k8s-deployment-template/template-kubernetes-azure>`), then the `az acs create` command already created a storage account in the same location and resource group as your Kubernetes cluster. @@ -289,7 +300,7 @@ in the same data center. Premium storage is higher-cost and higher-performance. It uses solid state drives (SSD). -We recommend using Premium storage for our production template. +We recommend using Premium storage with our Kubernetes deployment template. Create a `storage account `_ for Premium storage and associate it with your Azure resource group. For future reference, the command to create a storage account is @@ -372,7 +383,7 @@ but it should become "Bound" fairly quickly. $ kubectl patch pv -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' For notes on recreating a private volume form a released Azure disk resource consult - :doc:`the page about cluster troubleshooting <../production-deployment-template/troubleshoot>`. + :doc:`the page about cluster troubleshooting <../k8s-deployment-template/troubleshoot>`. .. 
_start-kubernetes-stateful-set-mongodb: @@ -569,7 +580,7 @@ Step 19(Optional): Configure the MongoDB Cloud Manager ------------------------------------------------------ Refer to the -:doc:`documentation <../production-deployment-template/cloud-manager>` +:doc:`documentation <../k8s-deployment-template/cloud-manager>` for details on how to configure the MongoDB Cloud Manager to enable monitoring and backup. @@ -749,4 +760,4 @@ verify that your node or cluster works as expected. Next, you can set up log analytics and monitoring, by following our templates: -* :doc:`../production-deployment-template/log-analytics`. +* :doc:`../k8s-deployment-template/log-analytics`. diff --git a/docs/server/source/production-deployment-template/revoke-tls-certificate.rst b/docs/server/source/k8s-deployment-template/revoke-tls-certificate.rst similarity index 100% rename from docs/server/source/production-deployment-template/revoke-tls-certificate.rst rename to docs/server/source/k8s-deployment-template/revoke-tls-certificate.rst diff --git a/docs/server/source/production-deployment-template/server-tls-certificate.rst b/docs/server/source/k8s-deployment-template/server-tls-certificate.rst similarity index 100% rename from docs/server/source/production-deployment-template/server-tls-certificate.rst rename to docs/server/source/k8s-deployment-template/server-tls-certificate.rst diff --git a/docs/server/source/production-deployment-template/tectonic-azure.rst b/docs/server/source/k8s-deployment-template/tectonic-azure.rst similarity index 90% rename from docs/server/source/production-deployment-template/tectonic-azure.rst rename to docs/server/source/k8s-deployment-template/tectonic-azure.rst index f9d58074..03cfb433 100644 --- a/docs/server/source/production-deployment-template/tectonic-azure.rst +++ b/docs/server/source/k8s-deployment-template/tectonic-azure.rst @@ -1,6 +1,17 @@ Walkthrough: Deploy a Kubernetes Cluster on Azure using Tectonic by CoreOS ========================================================================== +.. note:: + + A highly-available Kubernetes cluster requires at least five virtual machines + (three for the master and two for your app's containers). + Therefore we don't recommend using Kubernetes to run a BigchainDB node + if that's the only thing the Kubernetes cluster will be running. + Instead, see **How to Set Up a BigchainDB Network**. + If your organization already *has* a big Kubernetes cluster running many containers, + and your organization has people who know Kubernetes, + then this Kubernetes deployment template might be helpful. + A BigchainDB node can be run inside a `Kubernetes `_ cluster. This page describes one way to deploy a Kubernetes cluster on Azure using Tectonic. diff --git a/docs/server/source/production-deployment-template/template-kubernetes-azure.rst b/docs/server/source/k8s-deployment-template/template-kubernetes-azure.rst similarity index 93% rename from docs/server/source/production-deployment-template/template-kubernetes-azure.rst rename to docs/server/source/k8s-deployment-template/template-kubernetes-azure.rst index 7b6b4e2c..9ff10cac 100644 --- a/docs/server/source/production-deployment-template/template-kubernetes-azure.rst +++ b/docs/server/source/k8s-deployment-template/template-kubernetes-azure.rst @@ -1,6 +1,17 @@ Template: Deploy a Kubernetes Cluster on Azure ============================================== +.. 
note:: + + A highly-available Kubernetes cluster requires at least five virtual machines + (three for the master and two for your app's containers). + Therefore we don't recommend using Kubernetes to run a BigchainDB node + if that's the only thing the Kubernetes cluster will be running. + Instead, see **How to Set Up a BigchainDB Network**. + If your organization already *has* a big Kubernetes cluster running many containers, + and your organization has people who know Kubernetes, + then this Kubernetes deployment template might be helpful. + A BigchainDB node can be run inside a `Kubernetes `_ cluster. This page describes one way to deploy a Kubernetes cluster on Azure. diff --git a/docs/server/source/production-deployment-template/troubleshoot.rst b/docs/server/source/k8s-deployment-template/troubleshoot.rst similarity index 100% rename from docs/server/source/production-deployment-template/troubleshoot.rst rename to docs/server/source/k8s-deployment-template/troubleshoot.rst diff --git a/docs/server/source/production-deployment-template/upgrade-on-kubernetes.rst b/docs/server/source/k8s-deployment-template/upgrade-on-kubernetes.rst similarity index 87% rename from docs/server/source/production-deployment-template/upgrade-on-kubernetes.rst rename to docs/server/source/k8s-deployment-template/upgrade-on-kubernetes.rst index 07d63f7b..8d836564 100644 --- a/docs/server/source/production-deployment-template/upgrade-on-kubernetes.rst +++ b/docs/server/source/k8s-deployment-template/upgrade-on-kubernetes.rst @@ -1,6 +1,17 @@ Kubernetes Template: Upgrade all Software in a BigchainDB Node ============================================================== +.. note:: + + A highly-available Kubernetes cluster requires at least five virtual machines + (three for the master and two for your app's containers). + Therefore we don't recommend using Kubernetes to run a BigchainDB node + if that's the only thing the Kubernetes cluster will be running. + Instead, see **How to Set Up a BigchainDB Network**. + If your organization already *has* a big Kubernetes cluster running many containers, + and your organization has people who know Kubernetes, + then this Kubernetes deployment template might be helpful. + This page outlines how to upgrade all the software associated with a BigchainDB node running on Kubernetes, including host operating systems, Docker, Kubernetes, diff --git a/docs/server/source/production-deployment-template/workflow.rst b/docs/server/source/k8s-deployment-template/workflow.rst similarity index 85% rename from docs/server/source/production-deployment-template/workflow.rst rename to docs/server/source/k8s-deployment-template/workflow.rst index 197cd231..668f1a3c 100644 --- a/docs/server/source/production-deployment-template/workflow.rst +++ b/docs/server/source/k8s-deployment-template/workflow.rst @@ -3,9 +3,19 @@ Overview ======== -This page summarizes the steps *we* go through -to set up a production BigchainDB cluster. -We are constantly improving them. +.. note:: + + A highly-available Kubernetes cluster requires at least five virtual machines + (three for the master and two for your app's containers). + Therefore we don't recommend using Kubernetes to run a BigchainDB node + if that's the only thing the Kubernetes cluster will be running. + Instead, see **How to Set Up a BigchainDB Network**. 
+ If your organization already *has* a big Kubernetes cluster running many containers, + and your organization has people who know Kubernetes, + then this Kubernetes deployment template might be helpful. + +This page summarizes some steps to go through +to set up a BigchainDB cluster. You can modify them to suit your needs. .. _generate-the-blockchain-id-and-genesis-time: @@ -44,7 +54,7 @@ you can do this: .. code:: $ mkdir $(pwd)/tmdata - $ docker run --rm -v $(pwd)/tmdata:/tendermint/config tendermint/tendermint:0.22.3 init + $ docker run --rm -v $(pwd)/tmdata:/tendermint/config tendermint/tendermint:0.22.8 init $ cat $(pwd)/tmdata/genesis.json You should see something that looks like: @@ -113,13 +123,13 @@ and set it equal to your secret token, e.g. 3. Deploy a Kubernetes cluster for your BigchainDB node. We have some instructions for how to -:doc:`Deploy a Kubernetes cluster on Azure <../production-deployment-template/template-kubernetes-azure>`. +:doc:`Deploy a Kubernetes cluster on Azure <../k8s-deployment-template/template-kubernetes-azure>`. .. warning:: In theory, you can deploy your BigchainDB node to any Kubernetes cluster, but there can be differences between different Kubernetes clusters, especially if they are running different versions of Kubernetes. - We tested this Production Deployment Template on Azure ACS in February 2018 and at that time + We tested this Kubernetes Deployment Template on Azure ACS in February 2018 and at that time ACS was deploying a **Kubernetes 1.7.7** cluster. If you can force your cluster to have that version of Kubernetes, then you'll increase the likelihood that everything will work in your cluster. diff --git a/docs/server/source/production-deployment-template/index.rst b/docs/server/source/production-deployment-template/index.rst deleted file mode 100644 index 64a834db..00000000 --- a/docs/server/source/production-deployment-template/index.rst +++ /dev/null @@ -1,31 +0,0 @@ -Production Deployment Template -============================== - -This section outlines how *we* deploy production BigchainDB, -integrated with Tendermint(backend for BFT consensus), -clusters on Microsoft Azure using -Kubernetes. We improve it constantly. -You may choose to use it as a template or reference for your own deployment, -but *we make no claim that it is suitable for your purposes*. -Feel free change things to suit your needs or preferences. - - -.. toctree:: - :maxdepth: 1 - - workflow - ca-installation - server-tls-certificate - client-tls-certificate - revoke-tls-certificate - template-kubernetes-azure - node-on-kubernetes - node-config-map-and-secrets - log-analytics - cloud-manager - easy-rsa - upgrade-on-kubernetes - bigchaindb-network-on-kubernetes - tectonic-azure - troubleshoot - architecture diff --git a/docs/server/source/production-nodes/index.rst b/docs/server/source/production-nodes/index.rst index c669827e..6ada923e 100644 --- a/docs/server/source/production-nodes/index.rst +++ b/docs/server/source/production-nodes/index.rst @@ -4,7 +4,8 @@ Production Nodes .. toctree:: :maxdepth: 1 + node-requirements node-assumptions node-components - node-requirements + node-security-and-privacy reverse-proxy-notes diff --git a/docs/server/source/production-nodes/node-assumptions.md b/docs/server/source/production-nodes/node-assumptions.md index 3335804c..d8d74f0e 100644 --- a/docs/server/source/production-nodes/node-assumptions.md +++ b/docs/server/source/production-nodes/node-assumptions.md @@ -10,5 +10,3 @@ We make some assumptions about production nodes: 1. 
Production nodes use MongoDB (not RethinkDB, PostgreSQL, Couchbase or whatever). 1. Each production node is set up and managed by an experienced professional system administrator or a team of them. 1. Each production node in a cluster is managed by a different person or team. - -We don't provide a detailed cookbook explaining how to secure a server, or other things that a sysadmin should know. We do provide some templates, but those are just starting points. diff --git a/docs/server/source/production-nodes/node-security-and-privacy.md b/docs/server/source/production-nodes/node-security-and-privacy.md new file mode 100644 index 00000000..3b26ddbb --- /dev/null +++ b/docs/server/source/production-nodes/node-security-and-privacy.md @@ -0,0 +1,11 @@ +# Production Node Security & Privacy + +Here are some references about how to secure an Ubuntu 18.04 server: + +- [Ubuntu 18.04 - Ubuntu Server Guide - Security](https://help.ubuntu.com/lts/serverguide/security.html.en) +- [Ubuntu Blog: National Cyber Security Centre publish Ubuntu 18.04 LTS Security Guide](https://blog.ubuntu.com/2018/07/30/national-cyber-security-centre-publish-ubuntu-18-04-lts-security-guide) + +Also, here are some recommendations a node operator can follow to enhance the privacy of the data coming to, stored on, and leaving their node: + +- Ensure that all data stored on a node is encrypted at rest, e.g. using full disk encryption. This can be provided as a service by the operating system, transparently to BigchainDB, MongoDB and Tendermint. +- Ensure that all data is encrypted in transit, i.e. enforce using HTTPS for the HTTP API and the Websocket API. This can be done using NGINX or similar, as we do with the BigchainDB Testnet. diff --git a/docs/server/source/simple-network-setup.md b/docs/server/source/simple-network-setup.md index d1403a08..a1f07172 100644 --- a/docs/server/source/simple-network-setup.md +++ b/docs/server/source/simple-network-setup.md @@ -16,7 +16,9 @@ A Network will stop working if more than one third of the Nodes are down or faul ## Before We Start -This tutorial assumes you have basic knowledge on how to manage a GNU/Linux machine. The commands are tailored for an up-to-date *Debian-like* distribution. (We use an **Ubuntu 18.04 LTS** Virtual Machine on Microsoft Azure.) If you are on a different Linux distribution then you might need to adapt the names of the packages installed. +This tutorial assumes you have basic knowledge on how to manage a GNU/Linux machine. + +**Please note: The commands on this page work on Ubuntu 18.04. Similar commands will work on other versions of Ubuntu, and other recent Debian-like Linux distros, but you may have to change the names of the packages, or install more packages.** We don't make any assumptions about **where** you run the Node. You can run BigchainDB Server on a Virtual Machine on the cloud, on a machine in your data center, or even on a Raspberry Pi. Just make sure that your Node is reachable by the other Nodes. Here's a **non-exhaustive list of examples**: @@ -49,7 +51,9 @@ sudo apt full-upgrade BigchainDB Server requires **Python 3.6+**, so make sure your system has it. Install the required packages: ``` +# For Ubuntu 18.04: sudo apt install -y python3-pip libssl-dev +# Ubuntu 16.04, and other Linux distros, may require other packages or more packages ``` Now install the latest version of BigchainDB. You can find the latest version by going to the [BigchainDB project release history page on PyPI][bdb:pypi]. 
For example, to install version 2.0.0b3, you would do: @@ -75,13 +79,13 @@ Note: The `mongodb` package is _not_ the official MongoDB package from MongoDB t #### Install Tendermint -Install a [recent version of Tendermint][tendermint:releases]. BigchainDB Server requires version 0.22.3 or newer. +Install a [recent version of Tendermint][tendermint:releases]. BigchainDB Server requires version 0.22.8 or newer. ``` sudo apt install -y unzip -wget https://github.com/tendermint/tendermint/releases/download/v0.22.3/tendermint_0.22.3_linux_amd64.zip -unzip tendermint_0.22.3_linux_amd64.zip -rm tendermint_0.22.3_linux_amd64.zip +wget https://github.com/tendermint/tendermint/releases/download/v0.22.8/tendermint_0.22.8_linux_amd64.zip +unzip tendermint_0.22.8_linux_amd64.zip +rm tendermint_0.22.8_linux_amd64.zip sudo mv tendermint /usr/local/bin ``` @@ -159,42 +163,64 @@ Share the `node_id`, `pub_key.value` and hostname of your Node with all other Me At this point the Coordinator should have received the data from all the Members, and should combine them in the `.tendermint/config/genesis.json` file: ```json -{ - "genesis_time": "0001-01-01T00:00:00Z", - "chain_id": "test-chain-la6HSr", - "validators": [ - { - "pub_key": { - "type": "AC26791624DE60", - "value": "" +{ + "genesis_time":"0001-01-01T00:00:00Z", + "chain_id":"test-chain-la6HSr", + "consensus_params":{ + "block_size_params":{ + "max_bytes":"22020096", + "max_txs":"10000", + "max_gas":"-1" }, - "power": 10, - "name": "" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "" + "tx_size_params":{ + "max_bytes":"10240", + "max_gas":"-1" }, - "power": 10, - "name": "" - }, - { - "...": { }, - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "" - }, - "power": 10, - "name": "" - } - ], - "app_hash": "" + "block_gossip_params":{ + "block_part_size_bytes":"65536" + }, + "evidence_params":{ + "max_age":"100000" + } + }, + "validators":[ + { + "pub_key":{ + "type":"AC26791624DE60", + "value":"" + }, + "power":10, + "name":"" + }, + { + "pub_key":{ + "type":"AC26791624DE60", + "value":"" + }, + "power":10, + "name":"" + }, + { + "...":{ + + }, + + }, + { + "pub_key":{ + "type":"AC26791624DE60", + "value":"" + }, + "power":10, + "name":"" + } + ], + "app_hash":"" } ``` +**Note:** `consensus_params` in the `genesis.json` are default values for Tendermint consensus. + The new `genesis.json` file contains the data that describes the Network. The key `name` is the Member's moniker; it can be any valid string, but put something human-readable like `"Alice's Node Shop"`. At this point, the Coordinator must share the new `genesis.json` file with all Members. 
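Combining the Members' data into `genesis.json` by hand is easy to get wrong, so the Coordinator may prefer to script it. The sketch below is one possible approach and is not part of BigchainDB; the `members` list (the `pub_key.value` strings and monikers collected from the Members) and the file path are assumptions for illustration.

```python
# Hypothetical Coordinator helper: insert each Member's public key into the
# "validators" array of the genesis.json produced by `tendermint init`.
# The `members` list and the file path are assumptions, not part of BigchainDB.
import json

members = [
    {"pub_key_value": "<Member 1 pub_key.value>", "name": "Alice's Node Shop"},
    {"pub_key_value": "<Member 2 pub_key.value>", "name": "Bob's Node Shop"},
]

with open(".tendermint/config/genesis.json") as f:
    genesis = json.load(f)

# Same shape as the validators shown in the genesis.json example above.
genesis["validators"] = [
    {
        "pub_key": {"type": "AC26791624DE60", "value": m["pub_key_value"]},
        "power": 10,
        "name": m["name"],
    }
    for m in members
]

with open(".tendermint/config/genesis.json", "w") as f:
    json.dump(genesis, f, indent=2, sort_keys=True)
```

The Coordinator can then distribute the resulting file unchanged to every Member, so all Nodes start from an identical `genesis.json`.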
diff --git a/k8s/bigchaindb/bigchaindb-ss.yaml b/k8s/bigchaindb/bigchaindb-ss.yaml index 403369bb..96575962 100644 --- a/k8s/bigchaindb/bigchaindb-ss.yaml +++ b/k8s/bigchaindb/bigchaindb-ss.yaml @@ -154,7 +154,7 @@ spec: timeoutSeconds: 15 # BigchainDB container - name: bigchaindb - image: bigchaindb/bigchaindb:2.0.0-beta4 + image: bigchaindb/bigchaindb:2.0.0-beta5 imagePullPolicy: Always args: - start diff --git a/k8s/bigchaindb/tendermint_container/Dockerfile b/k8s/bigchaindb/tendermint_container/Dockerfile index 25a523c0..1c66e38d 100644 --- a/k8s/bigchaindb/tendermint_container/Dockerfile +++ b/k8s/bigchaindb/tendermint_container/Dockerfile @@ -1,4 +1,4 @@ -FROM tendermint/tendermint:0.22.3 +FROM tendermint/tendermint:0.22.8 LABEL maintainer "dev@bigchaindb.com" WORKDIR / USER root diff --git a/k8s/dev-setup/bigchaindb.yaml b/k8s/dev-setup/bigchaindb.yaml index 55a98307..481b7890 100644 --- a/k8s/dev-setup/bigchaindb.yaml +++ b/k8s/dev-setup/bigchaindb.yaml @@ -34,7 +34,7 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: bigchaindb - image: bigchaindb/bigchaindb:2.0.0-beta4 + image: bigchaindb/bigchaindb:2.0.0-beta5 imagePullPolicy: Always args: - start diff --git a/k8s/nginx-openresty/LICENSE.md b/k8s/nginx-openresty/LICENSE.md index 7451bedf..254565ca 100644 --- a/k8s/nginx-openresty/LICENSE.md +++ b/k8s/nginx-openresty/LICENSE.md @@ -15,13 +15,11 @@ The derived files (`nginx.conf.template` and `nginx.lua.template`), along with the other files in this directory, are _also_ licensed under an MIT License, the text of which can be found below. +## Documentation Licenses -# Documentation Licenses - -The documentation in this directory is licensed under a Creative Commons Attribution-ShareAlike +The documentation in this directory is licensed under a Creative Commons Attribution 4.0 International license, the full text of which can be found at -[http://creativecommons.org/licenses/by-sa/4.0/legalcode](http://creativecommons.org/licenses/by-sa/4.0/legalcode). - +[http://creativecommons.org/licenses/by/4.0/legalcode](http://creativecommons.org/licenses/by/4.0/legalcode).
@@ -47,7 +45,6 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -
The MIT License @@ -71,4 +68,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/pkg/configuration/roles/tendermint/files/Dockerfile b/pkg/configuration/roles/tendermint/files/Dockerfile index 68dab41c..8269a351 100644 --- a/pkg/configuration/roles/tendermint/files/Dockerfile +++ b/pkg/configuration/roles/tendermint/files/Dockerfile @@ -1,4 +1,4 @@ -ARG tm_version=0.22.3 +ARG tm_version=0.22.8 FROM tendermint/tendermint:${tm_version} LABEL maintainer "dev@bigchaindb.com" WORKDIR / diff --git a/pkg/scripts/all-in-one.bash b/pkg/scripts/all-in-one.bash new file mode 100755 index 00000000..e28444fa --- /dev/null +++ b/pkg/scripts/all-in-one.bash @@ -0,0 +1,14 @@ +#!/bin/bash + +# MongoDB configuration +[ "$(stat -c %U /data/db)" = mongodb ] || chown -R mongodb /data/db + +# BigchainDB configuration +bigchaindb-monit-config + +nohup mongod > "$HOME/.bigchaindb-monit/logs/mongodb_log_$(date +%Y%m%d_%H%M%S)" 2>&1 & + +# Tendermint configuration +tendermint init + +monit -d 5 -I -B diff --git a/pkg/scripts/bigchaindb-monit-config b/pkg/scripts/bigchaindb-monit-config index 0a3419aa..14caaf95 100644 --- a/pkg/scripts/bigchaindb-monit-config +++ b/pkg/scripts/bigchaindb-monit-config @@ -93,7 +93,7 @@ case \$1 in start_bigchaindb) pushd \$4 - nohup bigchaindb start >> \$3/bigchaindb.out.log 2>> \$3/bigchaindb.err.log & + nohup bigchaindb -l DEBUG start >> \$3/bigchaindb.out.log 2>> \$3/bigchaindb.err.log & echo \$! > \$2 popd diff --git a/pkg/scripts/stack.sh b/pkg/scripts/stack.sh index 25292065..0e24033d 100755 --- a/pkg/scripts/stack.sh +++ b/pkg/scripts/stack.sh @@ -11,7 +11,7 @@ stack_repo=${STACK_REPO:="bigchaindb/bigchaindb"} stack_size=${STACK_SIZE:=4} stack_type=${STACK_TYPE:="docker"} stack_type_provider=${STACK_TYPE_PROVIDER:=""} -tm_version=${TM_VERSION:="0.22.3"} +tm_version=${TM_VERSION:="0.22.8"} mongo_version=${MONGO_VERSION:="3.6"} stack_vm_memory=${STACK_VM_MEMORY:=2048} stack_vm_cpus=${STACK_VM_CPUS:=2} diff --git a/pkg/scripts/unstack.sh b/pkg/scripts/unstack.sh index 2c5359d9..9c67315c 100755 --- a/pkg/scripts/unstack.sh +++ b/pkg/scripts/unstack.sh @@ -11,7 +11,7 @@ stack_repo=${STACK_REPO:="bigchaindb/bigchaindb"} stack_size=${STACK_SIZE:=4} stack_type=${STACK_TYPE:="docker"} stack_type_provider=${STACK_TYPE_PROVIDER:=""} -tm_version=${TM_VERSION:="0.22.3"} +tm_version=${TM_VERSION:="0.22.8"} mongo_version=${MONGO_VERSION:="3.6"} stack_vm_memory=${STACK_VM_MEMORY:=2048} stack_vm_cpus=${STACK_VM_CPUS:=2} diff --git a/setup.py b/setup.py index 0fc31bec..c851e61f 100644 --- a/setup.py +++ b/setup.py @@ -56,7 +56,7 @@ tests_require = [ 'flake8-quotes==0.8.1', 'hypothesis~=3.18.5', 'hypothesis-regex', - 'pylint', + # Removed pylint because its GPL license isn't Apache2-compatible 'pytest>=3.0.0', 'pytest-cov>=2.2.1', 'pytest-mock', diff --git a/tests/assets/test_digital_assets.py b/tests/assets/test_digital_assets.py index 2a36892e..5111d164 100644 --- a/tests/assets/test_digital_assets.py +++ b/tests/assets/test_digital_assets.py @@ -12,7 +12,7 @@ def test_asset_transfer(b, signed_create_tx, user_pk, user_sk): signed_create_tx.id) tx_transfer_signed = tx_transfer.sign([user_sk]) - b.store_bulk_transactions([signed_create_tx, tx_transfer]) + b.store_bulk_transactions([signed_create_tx]) assert tx_transfer_signed.validate(b) == tx_transfer_signed assert 
tx_transfer_signed.asset['id'] == signed_create_tx.id @@ -27,7 +27,7 @@ def test_validate_transfer_asset_id_mismatch(b, signed_create_tx, user_pk, user_ tx_transfer.asset['id'] = 'a' * 64 tx_transfer_signed = tx_transfer.sign([user_sk]) - b.store_bulk_transactions([signed_create_tx, tx_transfer_signed]) + b.store_bulk_transactions([signed_create_tx]) with pytest.raises(AssetIdMismatch): tx_transfer_signed.validate(b) diff --git a/tests/assets/test_divisible_assets.py b/tests/assets/test_divisible_assets.py index 5fdf59c3..bb31efd8 100644 --- a/tests/assets/test_divisible_assets.py +++ b/tests/assets/test_divisible_assets.py @@ -1,6 +1,8 @@ import pytest import random +from bigchaindb.common.exceptions import DoubleSpend + pytestmark = pytest.mark.tendermint @@ -127,7 +129,7 @@ def test_single_in_single_own_single_out_single_own_transfer(alice, b, user_pk, asset_id=tx_create.id) tx_transfer_signed = tx_transfer.sign([user_sk]) - b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) + b.store_bulk_transactions([tx_create_signed]) assert tx_transfer_signed.validate(b) assert len(tx_transfer_signed.outputs) == 1 @@ -154,7 +156,7 @@ def test_single_in_single_own_multiple_out_single_own_transfer(alice, b, user_pk asset_id=tx_create.id) tx_transfer_signed = tx_transfer.sign([user_sk]) - b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) + b.store_bulk_transactions([tx_create_signed]) assert tx_transfer_signed.validate(b) == tx_transfer_signed assert len(tx_transfer_signed.outputs) == 2 @@ -182,7 +184,7 @@ def test_single_in_single_own_single_out_multiple_own_transfer(alice, b, user_pk asset_id=tx_create.id) tx_transfer_signed = tx_transfer.sign([user_sk]) - b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) + b.store_bulk_transactions([tx_create_signed]) assert tx_transfer_signed.validate(b) == tx_transfer_signed assert len(tx_transfer_signed.outputs) == 1 @@ -194,6 +196,10 @@ def test_single_in_single_own_single_out_multiple_own_transfer(alice, b, user_pk assert len(tx_transfer_signed.inputs) == 1 + b.store_bulk_transactions([tx_transfer_signed]) + with pytest.raises(DoubleSpend): + tx_transfer_signed.validate(b) + # TRANSFER divisible asset # Single input @@ -215,7 +221,7 @@ def test_single_in_single_own_multiple_out_mix_own_transfer(alice, b, user_pk, asset_id=tx_create.id) tx_transfer_signed = tx_transfer.sign([user_sk]) - b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) + b.store_bulk_transactions([tx_create_signed]) assert tx_transfer_signed.validate(b) == tx_transfer_signed assert len(tx_transfer_signed.outputs) == 2 @@ -228,6 +234,10 @@ def test_single_in_single_own_multiple_out_mix_own_transfer(alice, b, user_pk, assert len(tx_transfer_signed.inputs) == 1 + b.store_bulk_transactions([tx_transfer_signed]) + with pytest.raises(DoubleSpend): + tx_transfer_signed.validate(b) + # TRANSFER divisible asset # Single input @@ -249,7 +259,7 @@ def test_single_in_multiple_own_single_out_single_own_transfer(alice, b, user_pk asset_id=tx_create.id) tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk]) - b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) + b.store_bulk_transactions([tx_create_signed]) assert tx_transfer_signed.validate(b) == tx_transfer_signed assert len(tx_transfer_signed.outputs) == 1 @@ -260,6 +270,10 @@ def test_single_in_multiple_own_single_out_single_own_transfer(alice, b, user_pk assert 'subconditions' in ffill assert len(ffill['subconditions']) == 2 + 
b.store_bulk_transactions([tx_transfer_signed]) + with pytest.raises(DoubleSpend): + tx_transfer_signed.validate(b) + # TRANSFER divisible asset # Multiple inputs @@ -280,13 +294,17 @@ def test_multiple_in_single_own_single_out_single_own_transfer(alice, b, user_pk asset_id=tx_create.id) tx_transfer_signed = tx_transfer.sign([user_sk]) - b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) + b.store_bulk_transactions([tx_create_signed]) assert tx_transfer_signed.validate(b) assert len(tx_transfer_signed.outputs) == 1 assert tx_transfer_signed.outputs[0].amount == 100 assert len(tx_transfer_signed.inputs) == 2 + b.store_bulk_transactions([tx_transfer_signed]) + with pytest.raises(DoubleSpend): + tx_transfer_signed.validate(b) + # TRANSFER divisible asset # Multiple inputs @@ -309,9 +327,9 @@ def test_multiple_in_multiple_own_single_out_single_own_transfer(alice, b, user_ asset_id=tx_create.id) tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk]) - b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) + b.store_bulk_transactions([tx_create_signed]) - assert tx_transfer_signed.validate(b) + assert tx_transfer_signed.validate(b) == tx_transfer_signed assert len(tx_transfer_signed.outputs) == 1 assert tx_transfer_signed.outputs[0].amount == 100 assert len(tx_transfer_signed.inputs) == 2 @@ -323,6 +341,10 @@ def test_multiple_in_multiple_own_single_out_single_own_transfer(alice, b, user_ assert len(ffill_fid0['subconditions']) == 2 assert len(ffill_fid1['subconditions']) == 2 + b.store_bulk_transactions([tx_transfer_signed]) + with pytest.raises(DoubleSpend): + tx_transfer_signed.validate(b) + # TRANSFER divisible asset # Multiple inputs @@ -345,7 +367,7 @@ def test_muiltiple_in_mix_own_multiple_out_single_own_transfer(alice, b, user_pk asset_id=tx_create.id) tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk]) - b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) + b.store_bulk_transactions([tx_create_signed]) assert tx_transfer_signed.validate(b) == tx_transfer_signed assert len(tx_transfer_signed.outputs) == 1 @@ -358,6 +380,10 @@ def test_muiltiple_in_mix_own_multiple_out_single_own_transfer(alice, b, user_pk assert 'subconditions' in ffill_fid1 assert len(ffill_fid1['subconditions']) == 2 + b.store_bulk_transactions([tx_transfer_signed]) + with pytest.raises(DoubleSpend): + tx_transfer_signed.validate(b) + # TRANSFER divisible asset # Multiple inputs @@ -382,7 +408,7 @@ def test_muiltiple_in_mix_own_multiple_out_mix_own_transfer(alice, b, user_pk, asset_id=tx_create.id) tx_transfer_signed = tx_transfer.sign([alice.private_key, user_sk]) - b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) + b.store_bulk_transactions([tx_create_signed]) assert tx_transfer_signed.validate(b) == tx_transfer_signed assert len(tx_transfer_signed.outputs) == 2 @@ -402,6 +428,10 @@ def test_muiltiple_in_mix_own_multiple_out_mix_own_transfer(alice, b, user_pk, assert 'subconditions' in ffill_fid1 assert len(ffill_fid1['subconditions']) == 2 + b.store_bulk_transactions([tx_transfer_signed]) + with pytest.raises(DoubleSpend): + tx_transfer_signed.validate(b) + # TRANSFER divisible asset # Multiple inputs from different transactions @@ -436,7 +466,7 @@ def test_multiple_in_different_transactions(alice, b, user_pk, user_sk): asset_id=tx_create.id) tx_transfer2_signed = tx_transfer2.sign([user_sk]) - b.store_bulk_transactions([tx_create_signed, tx_transfer1_signed, tx_transfer2_signed]) + b.store_bulk_transactions([tx_create_signed, 
tx_transfer1_signed]) assert tx_transfer2_signed.validate(b) == tx_transfer2_signed assert len(tx_transfer2_signed.outputs) == 1 @@ -501,10 +531,14 @@ def test_threshold_same_public_key(alice, b, user_pk, user_sk): asset_id=tx_create.id) tx_transfer_signed = tx_transfer.sign([user_sk, user_sk]) - b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) + b.store_bulk_transactions([tx_create_signed]) assert tx_transfer_signed.validate(b) == tx_transfer_signed + b.store_bulk_transactions([tx_transfer_signed]) + with pytest.raises(DoubleSpend): + tx_transfer_signed.validate(b) + def test_sum_amount(alice, b, user_pk, user_sk): from bigchaindb.models import Transaction @@ -520,12 +554,16 @@ def test_sum_amount(alice, b, user_pk, user_sk): asset_id=tx_create.id) tx_transfer_signed = tx_transfer.sign([user_sk]) - b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) + b.store_bulk_transactions([tx_create_signed]) assert tx_transfer_signed.validate(b) == tx_transfer_signed assert len(tx_transfer_signed.outputs) == 1 assert tx_transfer_signed.outputs[0].amount == 3 + b.store_bulk_transactions([tx_transfer_signed]) + with pytest.raises(DoubleSpend): + tx_transfer_signed.validate(b) + def test_divide(alice, b, user_pk, user_sk): from bigchaindb.models import Transaction @@ -541,9 +579,13 @@ def test_divide(alice, b, user_pk, user_sk): asset_id=tx_create.id) tx_transfer_signed = tx_transfer.sign([user_sk]) - b.store_bulk_transactions([tx_create_signed, tx_transfer_signed]) + b.store_bulk_transactions([tx_create_signed]) assert tx_transfer_signed.validate(b) == tx_transfer_signed assert len(tx_transfer_signed.outputs) == 3 for output in tx_transfer_signed.outputs: assert output.amount == 1 + + b.store_bulk_transactions([tx_transfer_signed]) + with pytest.raises(DoubleSpend): + tx_transfer_signed.validate(b) diff --git a/tests/backend/localmongodb/test_queries.py b/tests/backend/localmongodb/test_queries.py index a9e64ac0..2d3b9be6 100644 --- a/tests/backend/localmongodb/test_queries.py +++ b/tests/backend/localmongodb/test_queries.py @@ -370,22 +370,23 @@ def test_get_pre_commit_state(db_context): assert resp == state._asdict() -def test_store_validator_update(): +def test_validator_update(): from bigchaindb.backend import connect, query - from bigchaindb.backend.query import VALIDATOR_UPDATE_ID - from bigchaindb.common.exceptions import MultipleValidatorOperationError conn = connect() - validator_update = {'validator': {'key': 'value'}, - 'update_id': VALIDATOR_UPDATE_ID} - query.store_validator_update(conn, deepcopy(validator_update)) + def gen_validator_update(height): + return {'data': 'somedata', 'height': height} - with pytest.raises(MultipleValidatorOperationError): - query.store_validator_update(conn, deepcopy(validator_update)) + for i in range(1, 100, 10): + value = gen_validator_update(i) + query.store_validator_set(conn, value) - resp = query.get_validator_update(conn, VALIDATOR_UPDATE_ID) + v1 = query.get_validator_set(conn, 8) + assert v1['height'] == 1 - assert resp == validator_update - assert query.delete_validator_update(conn, VALIDATOR_UPDATE_ID) - assert not query.get_validator_update(conn, VALIDATOR_UPDATE_ID) + v41 = query.get_validator_set(conn, 50) + assert v41['height'] == 41 + + v91 = query.get_validator_set(conn) + assert v91['height'] == 91 diff --git a/tests/backend/localmongodb/test_schema.py b/tests/backend/localmongodb/test_schema.py index 99d06b0b..54a20694 100644 --- a/tests/backend/localmongodb/test_schema.py +++ 
b/tests/backend/localmongodb/test_schema.py @@ -40,7 +40,7 @@ def test_init_creates_db_tables_and_indexes(): assert set(indexes) == {'_id_', 'pre_commit_id'} indexes = conn.conn[dbname]['validators'].index_information().keys() - assert set(indexes) == {'_id_', 'update_id'} + assert set(indexes) == {'_id_', 'height'} def test_init_database_fails_if_db_exists(): diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index 578f06b1..a7c6418b 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -341,6 +341,7 @@ class MockResponse(): return {'result': {'latest_block_height': self.height}} +@pytest.mark.skip @patch('bigchaindb.config_utils.autoconfigure') @patch('bigchaindb.backend.query.store_validator_update') @pytest.mark.tendermint diff --git a/tests/conftest.py b/tests/conftest.py index 47f8ce30..66d85d3a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -647,6 +647,17 @@ def node_key(node_keys): return key_pair_from_ed25519_key(key_from_base64(priv)) +@pytest.fixture +def ed25519_node_keys(node_keys): + (pub, priv) = list(node_keys.items())[0] + node_keys_dict = {} + for pub, priv in node_keys.items(): + key = key_pair_from_ed25519_key(key_from_base64(priv)) + node_keys_dict[key.public_key] = key + + return node_keys_dict + + @pytest.fixture(scope='session') def node_keys(): return {'zL/DasvKulXZzhSNFwx4cLRXKkSM9GPK7Y0nZ4FEylM=': diff --git a/tests/tendermint/conftest.py b/tests/tendermint/conftest.py index 2d63d7c4..a0f50e9e 100644 --- a/tests/tendermint/conftest.py +++ b/tests/tendermint/conftest.py @@ -1,4 +1,7 @@ import pytest +import codecs + +import abci.types_pb2 as types @pytest.fixture @@ -10,3 +13,13 @@ def b(): @pytest.fixture def validator_pub_key(): return 'B0E42D2589A455EAD339A035D6CE1C8C3E25863F268120AA0162AD7D003A4014' + + +@pytest.fixture +def init_chain_request(): + addr = codecs.decode(b'9FD479C869C7D7E7605BF99293457AA5D80C3033', 'hex') + pk = codecs.decode(b'VAgFZtYw8bNR5TMZHFOBDWk9cAmEu3/c6JgRBmddbbI=', 'base64') + val_a = types.Validator(address=addr, power=10, + pub_key=types.PubKey(type='ed25519', data=pk)) + + return types.RequestInitChain(validators=[val_a]) diff --git a/tests/tendermint/test_core.py b/tests/tendermint/test_core.py index 1a047e7d..f1a3d92d 100644 --- a/tests/tendermint/test_core.py +++ b/tests/tendermint/test_core.py @@ -50,7 +50,7 @@ def test_check_tx__unsigned_create_is_error(b): @pytest.mark.bdb -def test_deliver_tx__valid_create_updates_db(b): +def test_deliver_tx__valid_create_updates_db(b, init_chain_request): from bigchaindb import App from bigchaindb.models import Transaction from bigchaindb.common.crypto import generate_key_pair @@ -64,8 +64,9 @@ def test_deliver_tx__valid_create_updates_db(b): app = App(b) + app.init_chain(init_chain_request) + begin_block = RequestBeginBlock() - app.init_chain(['ignore']) app.begin_block(begin_block) result = app.deliver_tx(encode_tx_to_bytes(tx)) @@ -83,7 +84,7 @@ def test_deliver_tx__valid_create_updates_db(b): # next(unspent_outputs) -def test_deliver_tx__double_spend_fails(b): +def test_deliver_tx__double_spend_fails(b, init_chain_request): from bigchaindb import App from bigchaindb.models import Transaction from bigchaindb.common.crypto import generate_key_pair @@ -96,7 +97,7 @@ def test_deliver_tx__double_spend_fails(b): .sign([alice.private_key]) app = App(b) - app.init_chain(['ignore']) + app.init_chain(init_chain_request) begin_block = RequestBeginBlock() app.begin_block(begin_block) @@ -112,13 +113,13 @@ def 
test_deliver_tx__double_spend_fails(b): assert result.code == CodeTypeError -def test_deliver_transfer_tx__double_spend_fails(b): +def test_deliver_transfer_tx__double_spend_fails(b, init_chain_request): from bigchaindb import App from bigchaindb.models import Transaction from bigchaindb.common.crypto import generate_key_pair app = App(b) - app.init_chain(['ignore']) + app.init_chain(init_chain_request) begin_block = RequestBeginBlock() app.begin_block(begin_block) @@ -156,14 +157,16 @@ def test_deliver_transfer_tx__double_spend_fails(b): assert result.code == CodeTypeError -def test_end_block_return_validator_updates(b): +# The test below has to re-written one election conclusion logic has been implemented +@pytest.mark.skip +def test_end_block_return_validator_updates(b, init_chain_request): from bigchaindb import App from bigchaindb.backend import query from bigchaindb.core import encode_validator from bigchaindb.backend.query import VALIDATOR_UPDATE_ID app = App(b) - app.init_chain(['ignore']) + app.init_chain(init_chain_request) begin_block = RequestBeginBlock() app.begin_block(begin_block) @@ -182,7 +185,7 @@ def test_end_block_return_validator_updates(b): assert updates == [] -def test_store_pre_commit_state_in_end_block(b, alice): +def test_store_pre_commit_state_in_end_block(b, alice, init_chain_request): from bigchaindb import App from bigchaindb.backend import query from bigchaindb.models import Transaction @@ -194,7 +197,7 @@ def test_store_pre_commit_state_in_end_block(b, alice): .sign([alice.private_key]) app = App(b) - app.init_chain(['ignore']) + app.init_chain(init_chain_request) begin_block = RequestBeginBlock() app.begin_block(begin_block) diff --git a/tests/tendermint/test_integration.py b/tests/tendermint/test_integration.py index 4f6b530b..ee2af7c8 100644 --- a/tests/tendermint/test_integration.py +++ b/tests/tendermint/test_integration.py @@ -1,3 +1,5 @@ +import codecs + import abci.types_pb2 as types import json import pytest @@ -11,7 +13,7 @@ from io import BytesIO @pytest.mark.tendermint @pytest.mark.bdb -def test_app(tb): +def test_app(tb, init_chain_request): from bigchaindb import App from bigchaindb.tendermint_utils import calculate_hash from bigchaindb.common.crypto import generate_key_pair @@ -28,12 +30,17 @@ def test_app(tb): assert res.info.last_block_height == 0 assert not b.get_latest_block() - p.process('init_chain', types.Request(init_chain=types.RequestInitChain())) + p.process('init_chain', types.Request(init_chain=init_chain_request)) block0 = b.get_latest_block() assert block0 assert block0['height'] == 0 assert block0['app_hash'] == '' + pk = codecs.encode(init_chain_request.validators[0].pub_key.data, 'base64').decode().strip('\n') + [validator] = b.get_validators(height=1) + assert validator['pub_key']['data'] == pk + assert validator['voting_power'] == 10 + alice = generate_key_pair() bob = generate_key_pair() tx = Transaction.create([alice.public_key], @@ -98,6 +105,7 @@ def test_app(tb): assert block0['app_hash'] == new_block_hash +@pytest.mark.skip @pytest.mark.abci def test_upsert_validator(b, alice): from bigchaindb.backend.query import VALIDATOR_UPDATE_ID diff --git a/tests/tendermint/test_lib.py b/tests/tendermint/test_lib.py index e6aa8f07..d9d9e320 100644 --- a/tests/tendermint/test_lib.py +++ b/tests/tendermint/test_lib.py @@ -139,6 +139,7 @@ def test_post_transaction_invalid_mode(b): b.write_transaction(tx, 'nope') +@pytest.mark.skip @pytest.mark.bdb def test_validator_updates(b, validator_pub_key): from bigchaindb.backend import 
query @@ -382,8 +383,16 @@ def test_get_spent_transaction_critical_double_spend(b, alice, bob, carol): asset_id=tx.id)\ .sign([alice.private_key]) + same_input_double_spend = Transaction.transfer(tx.to_inputs() + tx.to_inputs(), + [([bob.public_key], 1)], + asset_id=tx.id)\ + .sign([alice.private_key]) + b.store_bulk_transactions([tx]) + with pytest.raises(DoubleSpend): + same_input_double_spend.validate(b) + assert b.get_spent(tx.id, tx_transfer.inputs[0].fulfills.output, [tx_transfer]) with pytest.raises(DoubleSpend): diff --git a/tests/upsert_validator/conftest.py b/tests/upsert_validator/conftest.py index 9f3d37b9..d388ab46 100644 --- a/tests/upsert_validator/conftest.py +++ b/tests/upsert_validator/conftest.py @@ -1,5 +1,7 @@ import pytest +from bigchaindb.upsert_validator import ValidatorElection + @pytest.fixture def b_mock(b, network_validators): @@ -30,3 +32,11 @@ def mock_get_validators(network_validators): return validators return validator_set + + +@pytest.fixture +def valid_election(b_mock, node_key, new_validator): + voters = ValidatorElection.recipients(b_mock) + return ValidatorElection.generate([node_key.public_key], + voters, + new_validator, None).sign([node_key.private_key]) diff --git a/tests/upsert_validator/test_validator_election_vote.py b/tests/upsert_validator/test_validator_election_vote.py new file mode 100644 index 00000000..78b2d528 --- /dev/null +++ b/tests/upsert_validator/test_validator_election_vote.py @@ -0,0 +1,80 @@ +import pytest + +from bigchaindb.upsert_validator import ValidatorElectionVote +from bigchaindb.common.exceptions import AmountError + + +pytestmark = [pytest.mark.tendermint, pytest.mark.bdb] + + +def test_upsert_validator_valid_election_vote(b_mock, valid_election, ed25519_node_keys): + b_mock.store_bulk_transactions([valid_election]) + + input0 = valid_election.to_inputs()[0] + votes = valid_election.outputs[0].amount + public_key0 = input0.owners_before[0] + key0 = ed25519_node_keys[public_key0] + + election_pub_key = ValidatorElectionVote.to_public_key(valid_election.id) + + vote = ValidatorElectionVote.generate([input0], + [([election_pub_key], votes)], + election_id=valid_election.id)\ + .sign([key0.private_key]) + assert vote.validate(b_mock) + + +def test_upsert_validator_delegate_election_vote(b_mock, valid_election, ed25519_node_keys): + from bigchaindb.common.crypto import generate_key_pair + + alice = generate_key_pair() + + b_mock.store_bulk_transactions([valid_election]) + + input0 = valid_election.to_inputs()[0] + votes = valid_election.outputs[0].amount + public_key0 = input0.owners_before[0] + key0 = ed25519_node_keys[public_key0] + + delegate_vote = ValidatorElectionVote.generate([input0], + [([alice.public_key], 3), ([key0.public_key], votes-3)], + election_id=valid_election.id)\ + .sign([key0.private_key]) + + assert delegate_vote.validate(b_mock) + + b_mock.store_bulk_transactions([delegate_vote]) + election_pub_key = ValidatorElectionVote.to_public_key(valid_election.id) + + alice_votes = delegate_vote.to_inputs()[0] + alice_casted_vote = ValidatorElectionVote.generate([alice_votes], + [([election_pub_key], 3)], + election_id=valid_election.id)\ + .sign([alice.private_key]) + assert alice_casted_vote.validate(b_mock) + + key0_votes = delegate_vote.to_inputs()[1] + key0_casted_vote = ValidatorElectionVote.generate([key0_votes], + [([election_pub_key], votes-3)], + election_id=valid_election.id)\ + .sign([key0.private_key]) + assert key0_casted_vote.validate(b_mock) + + +def 
test_upsert_validator_invalid_election_vote(b_mock, valid_election, ed25519_node_keys): + b_mock.store_bulk_transactions([valid_election]) + + input0 = valid_election.to_inputs()[0] + votes = valid_election.outputs[0].amount + public_key0 = input0.owners_before[0] + key0 = ed25519_node_keys[public_key0] + + election_pub_key = ValidatorElectionVote.to_public_key(valid_election.id) + + vote = ValidatorElectionVote.generate([input0], + [([election_pub_key], votes+1)], + election_id=valid_election.id)\ + .sign([key0.private_key]) + + with pytest.raises(AmountError): + assert vote.validate(b_mock) diff --git a/tests/web/test_validators.py b/tests/web/test_validators.py index 7a0e20ce..c453257f 100644 --- a/tests/web/test_validators.py +++ b/tests/web/test_validators.py @@ -1,49 +1,22 @@ import pytest -from requests.exceptions import RequestException - pytestmark = pytest.mark.tendermint VALIDATORS_ENDPOINT = '/api/v1/validators/' def test_get_validators_endpoint(b, client, monkeypatch): - - def mock_get(uri): - return MockResponse() - monkeypatch.setattr('requests.get', mock_get) + validator_set = [{'address': 'F5426F0980E36E03044F74DD414248D29ABCBDB2', + 'pub_key': {'data': '4E2685D9016126864733225BE00F005515200727FBAB1312FC78C8B76831255A', + 'type': 'ed25519'}, + 'voting_power': 10}] + b.store_validator_set(23, validator_set) res = client.get(VALIDATORS_ENDPOINT) - assert is_validator(res.json[0]) assert res.status_code == 200 -def test_get_validators_500_endpoint(b, client, monkeypatch): - - def mock_get(uri): - raise RequestException - monkeypatch.setattr('requests.get', mock_get) - - with pytest.raises(RequestException): - client.get(VALIDATORS_ENDPOINT) - - # Helper def is_validator(v): return ('pub_key' in v) and ('voting_power' in v) - - -class MockResponse(): - - def json(self): - return {'id': '', - 'jsonrpc': '2.0', - 'result': - {'block_height': 5, - 'validators': [ - {'accum': 0, - 'address': 'F5426F0980E36E03044F74DD414248D29ABCBDB2', - 'pub_key': {'data': '4E2685D9016126864733225BE00F005515200727FBAB1312FC78C8B76831255A', - 'type': 'ed25519'}, - 'voting_power': 10}]}}
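The height-based validator-set queries exercised in `tests/backend/localmongodb/test_queries.py` above follow a "latest set stored at or below the requested height" rule: with sets stored at heights 1, 11, ..., 91, querying at height 8 returns the set from height 1, querying at 50 returns the set from 41, and querying with no height returns the set from 91. The following is a minimal pure-Python sketch of that selection rule only, written independently of the MongoDB-backed implementation.

```python
# Illustration only: the "latest validator set at or below `height`" selection
# that the updated test_validator_update() expects from query.get_validator_set().
# Records are plain dicts keyed by height; no database is involved.
def latest_validator_set(records, height=None):
    candidates = [r for r in records if height is None or r["height"] <= height]
    return max(candidates, key=lambda r: r["height"]) if candidates else None

records = [{"height": h, "data": "somedata"} for h in range(1, 100, 10)]

assert latest_validator_set(records, 8)["height"] == 1
assert latest_validator_set(records, 50)["height"] == 41
assert latest_validator_set(records)["height"] == 91
```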