From 41a2687b9be14165b31dec1e8a0cb067bb202b8e Mon Sep 17 00:00:00 2001 From: vrde Date: Mon, 27 Aug 2018 15:00:02 +0200 Subject: [PATCH 01/22] Problem: websocket fails with ELECTION transaction (#2482) Solution: have a more general approach to process transaction types. If a transaction does *not* contain `asset.id`, then the `id` of the `asset` is the `id` of the transaction. --- bigchaindb/web/websocket_server.py | 24 ++++++++++++++---------- tests/web/test_websocket_server.py | 27 +++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 10 deletions(-) diff --git a/bigchaindb/web/websocket_server.py b/bigchaindb/web/websocket_server.py index 8aea0e20..f0a9f886 100644 --- a/bigchaindb/web/websocket_server.py +++ b/bigchaindb/web/websocket_server.py @@ -48,6 +48,17 @@ def _multiprocessing_to_asyncio(in_queue, out_queue, loop): loop.call_soon_threadsafe(out_queue.put_nowait, value) +def eventify_block(block): + for tx in block['transactions']: + try: + asset_id = tx['asset']['id'] + except KeyError: + asset_id = tx['id'] + yield {'height': block['height'], + 'asset_id': asset_id, + 'transaction_id': tx['id']} + + class Dispatcher: """Dispatch events to websockets. @@ -99,17 +110,10 @@ class Dispatcher: str_buffer.append(event) elif event.type == EventTypes.BLOCK_VALID: - block = event.data + str_buffer = map(json.dumps, eventify_block(event.data)) - for tx in block['transactions']: - asset_id = tx['id'] if tx['operation'] == 'CREATE' else tx['asset']['id'] - data = {'height': block['height'], - 'asset_id': asset_id, - 'transaction_id': tx['id']} - str_buffer.append(json.dumps(data)) - - for _, websocket in self.subscribers.items(): - for str_item in str_buffer: + for str_item in str_buffer: + for _, websocket in self.subscribers.items(): yield from websocket.send_str(str_item) diff --git a/tests/web/test_websocket_server.py b/tests/web/test_websocket_server.py index 1a2b55af..a3de508a 100644 --- a/tests/web/test_websocket_server.py +++ b/tests/web/test_websocket_server.py @@ -21,6 +21,33 @@ class MockWebSocket: self.received.append(s) +def test_eventify_block_works_with_any_transaction(): + from bigchaindb.web.websocket_server import eventify_block + + block = { + 'height': 1, + 'transactions': [{ + 'id': 1 + }, { + 'id': 2, + 'asset': {'id': 1} + }] + } + + expected_events = [{ + 'height': 1, + 'asset_id': 1, + 'transaction_id': 1 + }, { + 'height': 1, + 'asset_id': 1, + 'transaction_id': 2 + }] + + for event, expected in zip(eventify_block(block), expected_events): + assert event == expected + + @asyncio.coroutine def test_bridge_sync_async_queue(loop): from bigchaindb.web.websocket_server import _multiprocessing_to_asyncio From d78ff75225c4ace8f83ae3e92959463ad8320092 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Mon, 27 Aug 2018 15:01:55 +0200 Subject: [PATCH 02/22] Problem: HTTP API docs wrong regarding the mode parameter (#2481) Solution: Edit those docs to be accurate --- docs/server/source/http-client-server-api.rst | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/docs/server/source/http-client-server-api.rst b/docs/server/source/http-client-server-api.rst index 2026b6db..0f47acb5 100644 --- a/docs/server/source/http-client-server-api.rst +++ b/docs/server/source/http-client-server-api.rst @@ -135,13 +135,21 @@ Transactions :query string mode: (Optional) One of the three supported modes to send a transaction: ``async``, ``sync``, ``commit``. The default is ``async``. 
- The ``mode`` query parameter inhereted from the mode parameter in Tendermint's - `broadcast API - `_. - ``mode=async`` means the HTTP response will come back immediately, without - even checking to see if the transaction is valid. - ``mode=sync`` means the HTTP response will come back once the node has - checked the validity of the transaction. + Once the posted transaction arrives at a BigchainDB node, + that node will check to see if the transaction is valid. + If it's invalid, the node will return an HTTP 400 (error). + Otherwise, the node will send the transaction to Tendermint (in the same node) using the + `Tendermint broadcast API + `_. + + The meaning of the ``mode`` query parameter is inherited from the mode parameter in + `Tendermint's broadcast API + `_. + ``mode=async`` means the HTTP response will come back immediately, + before Tendermint asks BigchainDB Server to check the validity of the transaction (a second time). + ``mode=sync`` means the HTTP response will come back + after Tendermint gets a response from BigchainDB Server + regarding the validity of the transaction. ``mode=commit`` means the HTTP response will come back once the transaction is in a committed block. From dfadbff60f4737ca9c8f3a21281e1555d5134409 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Tue, 28 Aug 2018 11:13:59 +0200 Subject: [PATCH 03/22] Problem: All Tendermint docs moved (#2483) Solution: Update all links to Tendermint docs --- CHANGELOG.md | 2 +- .../run-node-as-processes.md | 2 +- docs/root/source/decentralized.md | 2 +- proposals/extend-post-txn.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index abf01d7a..b8f3201a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -189,7 +189,7 @@ Re-enabled multi-threading. [Pull request #2258](https://github.com/bigchaindb/b ### Known Issues -Tendermint changed how it responds to a request to store data (via the [Tendermint Broadcast API](http://tendermint.readthedocs.io/projects/tools/en/master/using-tendermint.html#broadcast-api)) between version 0.12 and 0.19.2. We started modifying the code of BigchainDB Server to account for those changes in responses (in [pull request #2239](https://github.com/bigchaindb/bigchaindb/pull/2239)), but we found that there's a difference between what the Tendermint documentation _says_ about those responses and how Tendermint actually responds. We need to determine Tendermint's intent before we can finalize that pull request. +Tendermint changed how it responds to a request to store data (via the [Tendermint Broadcast API](https://tendermint.com/docs/tendermint-core/using-tendermint.html#broadcast-api)) between version 0.12 and 0.19.2. We started modifying the code of BigchainDB Server to account for those changes in responses (in [pull request #2239](https://github.com/bigchaindb/bigchaindb/pull/2239)), but we found that there's a difference between what the Tendermint documentation _says_ about those responses and how Tendermint actually responds. We need to determine Tendermint's intent before we can finalize that pull request. 
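The three ``mode`` values line up with Tendermint's three broadcast endpoints, which helps when reading the responses discussed above. A minimal sketch of the presumed mapping (the endpoint names come from the Tendermint broadcast API; the helper itself is purely illustrative and not part of either codebase):

```python
# Presumed correspondence between the HTTP API's `mode` query parameter and
# the Tendermint RPC method the node forwards the transaction to.
MODE_TO_TENDERMINT_METHOD = {
    'async': 'broadcast_tx_async',    # return immediately, no validity check yet
    'sync': 'broadcast_tx_sync',      # return after CheckTx has run
    'commit': 'broadcast_tx_commit',  # return once the tx is in a committed block
}


def tendermint_method(mode='async'):
    """Resolve the user-facing mode name to a Tendermint broadcast method."""
    return MODE_TO_TENDERMINT_METHOD[mode]
```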
### Notes diff --git a/docs/contributing/source/dev-setup-coding-and-contribution-process/run-node-as-processes.md b/docs/contributing/source/dev-setup-coding-and-contribution-process/run-node-as-processes.md index a0a0af7e..7c1556a3 100644 --- a/docs/contributing/source/dev-setup-coding-and-contribution-process/run-node-as-processes.md +++ b/docs/contributing/source/dev-setup-coding-and-contribution-process/run-node-as-processes.md @@ -69,7 +69,7 @@ The commands are: export PATH=${PATH}:${GOPATH}/bin ``` -Follow [the Tendermint docs](https://tendermint.readthedocs.io/en/master/install.html#from-source) to install Tendermint from source. +Follow [the Tendermint docs](https://tendermint.com/docs/introduction/install.html#from-source) to install Tendermint from source. If the installation is successful then Tendermint is installed at `$GOPATH/bin`. To ensure Tendermint's installed fine execute the following command, diff --git a/docs/root/source/decentralized.md b/docs/root/source/decentralized.md index 4486839d..fc88cf99 100644 --- a/docs/root/source/decentralized.md +++ b/docs/root/source/decentralized.md @@ -20,6 +20,6 @@ A consortium can increase its decentralization (and its resilience) by increasin There’s no node that has a long-term special position in the BigchainDB network. All nodes run the same software and perform the same duties. -If someone has (or gets) admin access to a node, they can mess with that node (e.g. change or delete data stored on that node), but those changes should remain isolated to that node. The BigchainDB network can only be compromised if more than one third of the nodes get compromised. See the [Tendermint documentation](https://tendermint.readthedocs.io/projects/tools/en/master/introduction.html) for more details. +If someone has (or gets) admin access to a node, they can mess with that node (e.g. change or delete data stored on that node), but those changes should remain isolated to that node. The BigchainDB network can only be compromised if more than one third of the nodes get compromised. See the [Tendermint documentation](https://tendermint.com/docs/introduction/introduction.html) for more details. It’s worth noting that not even the admin or superuser of a node can transfer assets. The only way to create a valid transfer transaction is to fulfill the current crypto-conditions on the asset, and the admin/superuser can’t do that because the admin user doesn’t have the necessary information (e.g. private keys). diff --git a/proposals/extend-post-txn.md b/proposals/extend-post-txn.md index 04dbb04e..5259153a 100644 --- a/proposals/extend-post-txn.md +++ b/proposals/extend-post-txn.md @@ -12,7 +12,7 @@ Add new query parameter `mode` to the [post transaction api](https://docs.bigcha ## Problem Description When posting a transaction it is broadcast asynchronously to Tendermint which enables the client to return immediately. Furthermore, the transaction status API would allow the client to get the current status for a given transaction. The above workflow seems efficient when the client doesn't need to wait until a transaction gets committed. In case a client wishes to wait until a transaction gets committed it would need to poll the transaction status api. 
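One way to picture the polling workaround mentioned above, as a rough sketch only: the API root URL, the default port, the use of `requests`, and polling `GET /transactions/<tx_id>` (which returns 200 once the transaction is in a committed block) are all assumptions here, not part of the proposal.

```python
import time

import requests  # any HTTP client would do; requests is only for illustration

BDB_API_ROOT = 'http://localhost:9984/api/v1'  # assumed default root URL


def wait_until_committed(tx_id, timeout=30, interval=0.5):
    """Poll the node until the transaction shows up as committed or we give up."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        resp = requests.get('{}/transactions/{}'.format(BDB_API_ROOT, tx_id))
        if resp.status_code == 200:
            return resp.json()  # the transaction made it into a committed block
        time.sleep(interval)
    raise TimeoutError('transaction {} was not committed in time'.format(tx_id))
```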
-The Tendermint api allows to post a transaction in [three modes](http://tendermint.readthedocs.io/projects/tools/en/master/using-tendermint.html#broadcast-api), +The Tendermint api allows to post a transaction in [three modes](https://tendermint.com/docs/tendermint-core/using-tendermint.html#broadcast-api), - `/broadcast_tx_async` post transaction and return - `/broadcast_tx_sync` post transaction and return after `checkTx` is executed From 8e97c753eb8567cce27a92e2284b9b878c412bc3 Mon Sep 17 00:00:00 2001 From: Vanshdeep Singh Date: Tue, 28 Aug 2018 14:21:59 +0200 Subject: [PATCH 04/22] Problem: upsert-validator 'approve' command does not transfer vote to election public key (#2480) Solution: Transfer vote to the election public key; generalize implemenation to handle election txn type * Problem: Upsert valdiator 'new' doesn't accept public in base64 format Solution: Tendermint stores all keys in base64 format so it would suitable to abandon base58 encoding in favour of base64 encoding * Problem: Not test for invalid execution of upsert-validator 'new' Solution: Write tests to when invalid power or private key path has been supplied * Problem: Exceptions are not informational when executing upsert-validator Solution: generate error logs or print statement indicating success --- bigchaindb/commands/bigchaindb.py | 52 +++++---- .../upsert_validator/validator_election.py | 1 + .../source/server-reference/bigchaindb-cli.md | 41 ++++--- tests/commands/test_commands.py | 105 +++++++++++++++--- .../test_validator_election_vote.py | 28 ++++- 5 files changed, 175 insertions(+), 52 deletions(-) diff --git a/bigchaindb/commands/bigchaindb.py b/bigchaindb/commands/bigchaindb.py index 184b82de..1a970f88 100644 --- a/bigchaindb/commands/bigchaindb.py +++ b/bigchaindb/commands/bigchaindb.py @@ -16,7 +16,7 @@ import sys from bigchaindb.utils import load_node_key from bigchaindb.common.exceptions import (DatabaseAlreadyExists, DatabaseDoesNotExist, - OperationError, KeypairMismatchException) + ValidationError) import bigchaindb from bigchaindb import (backend, ValidatorElection, BigchainDB, ValidatorElectionVote) @@ -27,6 +27,8 @@ from bigchaindb.commands import utils from bigchaindb.commands.utils import (configure_bigchaindb, input_on_stderr) from bigchaindb.log import setup_logging +from bigchaindb.tendermint_utils import public_key_from_base64 + logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) @@ -119,29 +121,36 @@ def run_upsert_validator_new(args, bigchain): 'sk': the path to the private key of the node calling the election (str) } :param bigchain: an instance of BigchainDB - :return: election_id (tx_id) - :raises: OperationError if the write transaction fails for any reason + :return: election_id or `False` in case of failure """ new_validator = { - 'public_key': args.public_key, + 'public_key': public_key_from_base64(args.public_key), 'power': args.power, 'node_id': args.node_id } - key = load_node_key(args.sk) + try: + key = load_node_key(args.sk) + voters = ValidatorElection.recipients(bigchain) + election = ValidatorElection.generate([key.public_key], + voters, + new_validator, None).sign([key.private_key]) + election.validate(bigchain) + except ValidationError as e: + logger.error(e) + return False + except FileNotFoundError as fd_404: + logger.error(fd_404) + return False - voters = ValidatorElection.recipients(bigchain) - - election = ValidatorElection.generate([key.public_key], - voters, - new_validator, None).sign([key.private_key]) - election.validate(bigchain) 
resp = bigchain.write_transaction(election, 'broadcast_tx_commit') if resp == (202, ''): + logger.info('[SUCCESS] Submitted proposal with id: {}'.format(election.id)) return election.id else: - raise OperationError('Failed to commit election') + logger.error('Failed to commit election proposal') + return False def run_upsert_validator_approve(args, bigchain): @@ -153,8 +162,7 @@ def run_upsert_validator_approve(args, bigchain): 'sk': the path to the private key of the signer (str) } :param bigchain: an instance of BigchainDB - :return: a success message - :raises: OperationError if the write transaction fails for any reason + :return: success log message or `False` in case of error """ key = load_node_key(args.sk) @@ -163,22 +171,24 @@ def run_upsert_validator_approve(args, bigchain): if len(voting_powers) > 0: voting_power = voting_powers[0] else: - raise KeypairMismatchException( - 'The key you provided does not match any of the eligible voters in this election.' - ) + logger.error('The key you provided does not match any of the eligible voters in this election.') + return False inputs = [i for i in tx.to_inputs() if key.public_key in i.owners_before] - approval = ValidatorElectionVote.generate(inputs, [ - ([key.public_key], voting_power)], tx.id).sign([key.private_key]) + election_pub_key = ValidatorElection.to_public_key(tx.id) + approval = ValidatorElectionVote.generate(inputs, + [([election_pub_key], voting_power)], + tx.id).sign([key.private_key]) approval.validate(bigchain) resp = bigchain.write_transaction(approval, 'broadcast_tx_commit') if resp == (202, ''): - print('Your vote has been submitted.') + logger.info('[SUCCESS] Your vote has been submitted') return approval.id else: - raise OperationError('Failed to vote for election') + logger.error('Failed to commit vote') + return False def _run_init(): diff --git a/bigchaindb/upsert_validator/validator_election.py b/bigchaindb/upsert_validator/validator_election.py index 17ea298f..f007c38b 100644 --- a/bigchaindb/upsert_validator/validator_election.py +++ b/bigchaindb/upsert_validator/validator_election.py @@ -220,6 +220,7 @@ class ValidatorElection(Transaction): updated_validator_set = new_validator_set(curr_validator_set, new_height, validator_updates) + updated_validator_set = [v for v in updated_validator_set if v['voting_power'] > 0] bigchain.store_validator_set(new_height+1, updated_validator_set) return [encode_validator(election.asset['data'])] return [] diff --git a/docs/server/source/server-reference/bigchaindb-cli.md b/docs/server/source/server-reference/bigchaindb-cli.md index 9f5c87d3..80f1656f 100644 --- a/docs/server/source/server-reference/bigchaindb-cli.md +++ b/docs/server/source/server-reference/bigchaindb-cli.md @@ -83,44 +83,59 @@ configuration file as documented under ## bigchaindb upsert-validator -**This is an experimental feature. Users are advised not to use it in production.** - - -Manage elections to add, update, or remove a validator from the validators set of the local node. The upsert-validator subcommands implement [BEP-21](https://github.com/bigchaindb/BEPs/tree/master/21). Check it out if you need more details on how this is orchestrated. +Manage elections to add, update, or remove a validator from the validators set. The upsert-validator subcommands implement [BEP-21](https://github.com/bigchaindb/BEPs/tree/master/21), please refer it for more details. Election management is broken into several subcommands. 
Below is the command line syntax for each, #### upsert-validator new -Calls a new election, proposing a change to the validator set. +Create a new election which proposes a change to the validator set. An election can be used to add/update/remove a validator from the validator set. Below is the command line syntax and the return value, ```bash $ bigchaindb upsert-validator new E_PUBKEY E_POWER E_NODE_ID --private-key PATH_TO_YOUR_PRIVATE_KEY - +[SUCCESS] Submitted proposal with id: ``` -Here, `E_PUBKEY`, `E_POWER`, and `E_NODE_ID` are the public key, proposed power, and node id of the validator being voted on. `--private-key` should be the path to wherever the private key for your validator node is stored, (*not* the private key itself.). For example, to add a new validator, provide the public key and node id for some node not already in the validator set, along with whatever voting power you'd like them to have. To remove an existing validator, provide their public key and node id, and set `E_POWER` to `0`. +- `E_PUBKEY`: Public key of the node to be added/updated/removed. +- `E_POWER`: The new power for the `E_PUBKEY`. NOTE, if power is set to `0` then `E_PUBKEY` will be removed from the validator set when the election concludes. +- `E_NODE_ID`: Node id of `E_PUBKEY`. The node operator of `E_PUBKEY` can generate the node id via `tendermint show_node_id`. +- `--private-key`: The path to Tendermint's private key which can be generally found at `/home/user/.tendermint/config/priv_validator.json`. For example, to add a new validator, provide the public key and node id for some node not already in the validator set, along with whatever voting power you'd like them to have. To remove an existing validator, provide their public key and node id, and set `E_POWER` to `0`. Please note that the private key provided here is of the node which is generating this election i.e. + + +NOTE: A change to the validator set can only be proposed by one of the exisitng validators. Example usage, ```bash -$ bigchaindb upsert-validator new B0E42D2589A455EAD339A035D6CE1C8C3E25863F268120AA0162AD7D003A4014 1 12345 --private-key /home/user/.tendermint/config/priv_validator.json +$ bigchaindb upsert-validator new HHG0IQRybpT6nJMIWWFWhMczCLHt6xcm7eP52GnGuPY= 1 fb7140f03a4ffad899fabbbf655b97e0321add66 --private-key /home/user/.tendermint/config/priv_validator.json +[SUCCESS] Submitted proposal with id: 04a067582cf03eba2b53b82e4adb5ece424474cbd4f7183780855a93ac5e3caa ``` -If the command succeeds, it will create an election and return an `election_id`. Elections consist of one vote token per voting power, issued to the members of the validator set. Validators can cast their votes to approve the change to the validator set by spending their vote tokens. The status of the election can be monitored by providing the `election_id` to the `show` subcommand. +If the command succeeds, it will create an election and return an `election_id`. A successful execution of the above command **doesn't** imply that the validator set will be immediately updated but rather it means the proposal has been succcessfully accepted by the network. Once the `election_id` has been generated the node operator should share this `election_id` with other validators in the network and urge them to approve the proposal. Note that the node operator should themsleves also approve the proposal. + + +**NOTE**: The election proposal consists of vote tokens allocated to each current validator as per their voting power. 
Validators then cast their votes to approve the change to the validator set by spending their vote tokens.
+
#### upsert-validator approve
- Approve an election by voting for it.
- Below is the command line syntax and the return value,
+
+Approve an election by voting for it. The proposal generated by executing `bigchaindb upsert-validator new ...` can be approved by the validators using this command. The validator who is approving the proposal will spend all their votes, i.e. if the validator has a network power of `10` then they will cast `10` votes for the proposal.
+Below is the command line syntax and the return value,
+
```bash
$ bigchaindb upsert-validator approve <election_id> --private-key PATH_TO_YOUR_PRIVATE_KEY
+[SUCCESS] Your vote has been submitted
```
- Here, `<election_id>` is the transaction id of the election the approval should be given for. `--private-key` should be the path to Tendermint's private key which can be generally found at `/home/user/.tendermint/config/priv_validator.json`.
+
+- `election_id` is the transaction id of the election the approval should be given for.
+- `--private-key` should be the path to Tendermint's private key which can be generally found at `/home/user/.tendermint/config/priv_validator.json`.

Example usage,

```bash
$ bigchaindb upsert-validator approve 04a067582cf03eba2b53b82e4adb5ece424474cbd4f7183780855a93ac5e3caa --private-key /home/user/.tendermint/config/priv_validator.json
+[SUCCESS] Your vote has been submitted
```
- If the command succeeds, a message will be returned, that the vote was submitted successfully.
+
diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index 5e2bd31e..b3f8e84c 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -3,6 +3,7 @@ # Code is Apache-2.0 and docs are CC-BY-4.0 import json +import logging from unittest.mock import Mock, patch from argparse import Namespace @@ -10,7 +11,6 @@ from argparse import Namespace import pytest from bigchaindb import ValidatorElection -from bigchaindb.common.exceptions import KeypairMismatchException from tests.conftest import node_keys @@ -379,7 +379,7 @@ def test_upsert_validator_new_with_tendermint(b, priv_validator_path, user_sk, m @pytest.mark.tendermint @pytest.mark.bdb -def test_upsert_validator_new_without_tendermint(b, priv_validator_path, user_sk): +def test_upsert_validator_new_without_tendermint(caplog, b, priv_validator_path, user_sk): from bigchaindb.commands.bigchaindb import run_upsert_validator_new def mock_write(tx, mode): @@ -392,24 +392,67 @@ def test_upsert_validator_new_without_tendermint(b, priv_validator_path, user_sk args = Namespace(action='new', public_key='CJxdItf4lz2PwEf4SmYNAu/c/VpmX39JEgC5YpH7fxg=', power=1, - node_id='12345', + node_id='fb7140f03a4ffad899fabbbf655b97e0321add66', sk=priv_validator_path, config={}) - resp = run_upsert_validator_new(args, b) - assert b.get_transaction(resp) + with caplog.at_level(logging.INFO): + election_id = run_upsert_validator_new(args, b) + assert caplog.records[0].msg == '[SUCCESS] Submitted proposal with id: ' + election_id + assert b.get_transaction(election_id) + + +@pytest.mark.tendermint +@pytest.mark.bdb +def test_upsert_validator_new_invalid_election(caplog, b, priv_validator_path, user_sk): + from bigchaindb.commands.bigchaindb import run_upsert_validator_new + + args = Namespace(action='new', + public_key='CJxdItf4lz2PwEf4SmYNAu/c/VpmX39JEgC5YpH7fxg=', + power=10, + node_id='fb7140f03a4ffad899fabbbf655b97e0321add66', + sk='/tmp/invalid/path/key.json', + config={}) + + with caplog.at_level(logging.ERROR): + assert not run_upsert_validator_new(args, b) + assert caplog.records[0].msg.__class__ == FileNotFoundError + + +@pytest.mark.tendermint +@pytest.mark.bdb +def test_upsert_validator_new_election_invalid_power(caplog, b, priv_validator_path, user_sk): + from bigchaindb.commands.bigchaindb import run_upsert_validator_new + from bigchaindb.common.exceptions import InvalidPowerChange + + def mock_write(tx, mode): + b.store_bulk_transactions([tx]) + return (400, '') + + b.write_transaction = mock_write + b.get_validators = mock_get + args = Namespace(action='new', + public_key='CJxdItf4lz2PwEf4SmYNAu/c/VpmX39JEgC5YpH7fxg=', + power=10, + node_id='fb7140f03a4ffad899fabbbf655b97e0321add66', + sk=priv_validator_path, + config={}) + + with caplog.at_level(logging.ERROR): + assert not run_upsert_validator_new(args, b) + assert caplog.records[0].msg.__class__ == InvalidPowerChange @pytest.mark.abci def test_upsert_validator_approve_with_tendermint(b, priv_validator_path, user_sk, validators): - from bigchaindb.commands.bigchaindb import run_upsert_validator_new, \ - run_upsert_validator_approve + from bigchaindb.commands.bigchaindb import (run_upsert_validator_new, + run_upsert_validator_approve) - public_key = '8eJ8q9ZQpReWyQT5aFCiwtZ5wDZC4eDnCen88p3tQ6ie' + public_key = 'CJxdItf4lz2PwEf4SmYNAu/c/VpmX39JEgC5YpH7fxg=' new_args = Namespace(action='new', public_key=public_key, power=1, - node_id='12345', + node_id='fb7140f03a4ffad899fabbbf655b97e0321add66', sk=priv_validator_path, config={}) @@ -426,7 +469,7 @@ def 
test_upsert_validator_approve_with_tendermint(b, priv_validator_path, user_s @pytest.mark.bdb @pytest.mark.tendermint -def test_upsert_validator_approve_without_tendermint(b, priv_validator_path, new_validator, node_key): +def test_upsert_validator_approve_without_tendermint(caplog, b, priv_validator_path, new_validator, node_key): from bigchaindb.commands.bigchaindb import run_upsert_validator_approve from argparse import Namespace @@ -438,15 +481,41 @@ def test_upsert_validator_approve_without_tendermint(b, priv_validator_path, new sk=priv_validator_path, config={}) - approval_id = run_upsert_validator_approve(args, b) - # assert returned id is in the db - assert b.get_transaction(approval_id) + with caplog.at_level(logging.INFO): + approval_id = run_upsert_validator_approve(args, b) + assert caplog.records[0].msg == '[SUCCESS] Your vote has been submitted' + assert b.get_transaction(approval_id) -@pytest.mark.bdb @pytest.mark.tendermint -def test_upsert_validator_approve_called_with_bad_key(b, bad_validator_path, new_validator, node_key): +@pytest.mark.bdb +def test_upsert_validator_approve_failure(caplog, b, priv_validator_path, new_validator, node_key): + from bigchaindb.commands.bigchaindb import run_upsert_validator_approve + from argparse import Namespace + + b, election_id = call_election(b, new_validator, node_key) + + def mock_write(tx, mode): + b.store_bulk_transactions([tx]) + return (400, '') + + b.write_transaction = mock_write + + # call run_upsert_validator_approve with args that point to the election + args = Namespace(action='approve', + election_id=election_id, + sk=priv_validator_path, + config={}) + + with caplog.at_level(logging.ERROR): + assert not run_upsert_validator_approve(args, b) + assert caplog.records[0].msg == 'Failed to commit vote' + + +@pytest.mark.tendermint +@pytest.mark.bdb +def test_upsert_validator_approve_called_with_bad_key(caplog, b, bad_validator_path, new_validator, node_key): from bigchaindb.commands.bigchaindb import run_upsert_validator_approve from argparse import Namespace @@ -458,8 +527,10 @@ def test_upsert_validator_approve_called_with_bad_key(b, bad_validator_path, new sk=bad_validator_path, config={}) - with pytest.raises(KeypairMismatchException): - run_upsert_validator_approve(args, b) + with caplog.at_level(logging.ERROR): + assert not run_upsert_validator_approve(args, b) + assert caplog.records[0].msg == 'The key you provided does not match any of '\ + 'the eligible voters in this election.' 
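The tests above assert on log output via pytest's built-in `caplog` fixture instead of expecting exceptions, since the commands now log an error and return `False` on failure. A stripped-down version of that pattern, with a hypothetical `submit` standing in for the CLI helpers:

```python
import logging

logger = logging.getLogger(__name__)


def submit(ok=True):
    # Hypothetical stand-in for run_upsert_validator_new/approve: log, don't raise.
    if not ok:
        logger.error('Failed to commit vote')
        return False
    logger.info('[SUCCESS] Your vote has been submitted')
    return True


def test_submit_logs_an_error_on_failure(caplog):
    with caplog.at_level(logging.ERROR):
        assert not submit(ok=False)
        assert caplog.records[0].msg == 'Failed to commit vote'
```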
def mock_get(height): diff --git a/tests/upsert_validator/test_validator_election_vote.py b/tests/upsert_validator/test_validator_election_vote.py index d555b0d7..19eacbfa 100644 --- a/tests/upsert_validator/test_validator_election_vote.py +++ b/tests/upsert_validator/test_validator_election_vote.py @@ -310,11 +310,37 @@ def test_get_validator_update(b, node_keys, node_key, ed25519_node_keys): b.store_bulk_transactions([tx_vote0, tx_vote1]) update = ValidatorElection.get_validator_update(b, 4, [tx_vote2]) - print('update', update) update_public_key = codecs.encode(update[0].pub_key.data, 'base64').decode().rstrip('\n') assert len(update) == 1 assert update_public_key == public_key64 + # remove validator + power = 0 + new_validator = {'public_key': public_key, + 'node_id': 'some_node_id', + 'power': power} + voters = ValidatorElection.recipients(b) + election = ValidatorElection.generate([node_key.public_key], + voters, + new_validator).sign([node_key.private_key]) + # store election + b.store_bulk_transactions([election]) + + tx_vote0 = gen_vote(election, 0, ed25519_node_keys) + tx_vote1 = gen_vote(election, 1, ed25519_node_keys) + tx_vote2 = gen_vote(election, 2, ed25519_node_keys) + + b.store_bulk_transactions([tx_vote0, tx_vote1]) + + update = ValidatorElection.get_validator_update(b, 9, [tx_vote2]) + update_public_key = codecs.encode(update[0].pub_key.data, 'base64').decode().rstrip('\n') + assert len(update) == 1 + assert update_public_key == public_key64 + + # assert that the public key is not a part of the current validator set + for v in b.get_validators(10): + assert not v['pub_key']['data'] == public_key64 + # ============================================================================ # Helper functions From 407b771185276c0a462308cd5f516a719ad36ea7 Mon Sep 17 00:00:00 2001 From: vrde Date: Wed, 29 Aug 2018 14:13:22 +0200 Subject: [PATCH 05/22] Problem: validating create+transfer might crash the system (#2487) Solution: if a TRANSFER transaction is validated after a CREATE transaction, the system crashes with `AttributeError: 'NoneType' object has no attribute 'txid'`. This happens because querying `get_spent` checks the attributes `txid` and `output` of `input.fulfills` for every transaction in the current buffer (`current_transactions`). For a CREATE, `input.fulfills` is None, so the check would fail. The solution is to check if `input.fulfills` is defined. If not, then the current transaction cannot spend any output, so we can safely skip it. 
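Stripped of the surrounding code, the guard amounts to the following standalone sketch; the function name is illustrative, not the one used in `lib.py`.

```python
def spenders_in_buffer(current_transactions, txid, output):
    """Find transactions in the current buffer that spend (txid, output)."""
    spenders = []
    for ctxn in current_transactions:
        for ctxn_input in ctxn.inputs:
            # CREATE transactions have `fulfills == None`; they spend nothing,
            # so they can never be the spender we are looking for.
            if ctxn_input.fulfills and \
               ctxn_input.fulfills.txid == txid and \
               ctxn_input.fulfills.output == output:
                spenders.append(ctxn)
    return spenders
```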
--- bigchaindb/lib.py | 3 ++- tests/tendermint/test_lib.py | 22 ++++++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/bigchaindb/lib.py b/bigchaindb/lib.py index cbabf06a..e75153d5 100644 --- a/bigchaindb/lib.py +++ b/bigchaindb/lib.py @@ -285,7 +285,8 @@ class BigchainDB(object): current_spent_transactions = [] for ctxn in current_transactions: for ctxn_input in ctxn.inputs: - if ctxn_input.fulfills.txid == txid and\ + if ctxn_input.fulfills and\ + ctxn_input.fulfills.txid == txid and\ ctxn_input.fulfills.output == output: current_spent_transactions.append(ctxn) diff --git a/tests/tendermint/test_lib.py b/tests/tendermint/test_lib.py index d8223a25..4e8ff6b3 100644 --- a/tests/tendermint/test_lib.py +++ b/tests/tendermint/test_lib.py @@ -419,3 +419,25 @@ def test_get_spent_transaction_critical_double_spend(b, alice, bob, carol): with pytest.raises(CriticalDoubleSpend): b.get_spent(tx.id, tx_transfer.inputs[0].fulfills.output) + + +def test_validation_with_transaction_buffer(b): + from bigchaindb.common.crypto import generate_key_pair + from bigchaindb.models import Transaction + + priv_key, pub_key = generate_key_pair() + + create_tx = Transaction.create([pub_key], [([pub_key], 10)]).sign([priv_key]) + transfer_tx = Transaction.transfer(create_tx.to_inputs(), + [([pub_key], 10)], + asset_id=create_tx.id).sign([priv_key]) + double_spend = Transaction.transfer(create_tx.to_inputs(), + [([pub_key], 10)], + asset_id=create_tx.id).sign([priv_key]) + + assert b.is_valid_transaction(create_tx) + assert b.is_valid_transaction(transfer_tx, [create_tx]) + + assert not b.is_valid_transaction(create_tx, [create_tx]) + assert not b.is_valid_transaction(transfer_tx, [create_tx, transfer_tx]) + assert not b.is_valid_transaction(double_spend, [create_tx, transfer_tx]) From 3f7b521809682a74265ec38830b692b1bf05530e Mon Sep 17 00:00:00 2001 From: vrde Date: Wed, 29 Aug 2018 14:14:47 +0200 Subject: [PATCH 06/22] Problem: make unit-test-watch loops forever (#2486) Solution: BigchainDB by default writes logs in the same directory it is run. The `looponfail` feature provided by pytest waits for changes in the current directory, so it is continuously triggered. This patch tells pytest to only watch the `bigchaindb` and `tests` directories. 
--- pytest.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/pytest.ini b/pytest.ini index e604b0b9..006bc2bc 100644 --- a/pytest.ini +++ b/pytest.ini @@ -2,3 +2,4 @@ testpaths = tests/ norecursedirs = .* *.egg *.egg-info env* devenv* docs addopts = -m tendermint +looponfailroots = bigchaindb tests From 3cf368aab7d64eab35fd3414333ca614e86e14c3 Mon Sep 17 00:00:00 2001 From: Vanshdeep Singh Date: Wed, 29 Aug 2018 17:45:08 +0200 Subject: [PATCH 07/22] Problem: No unique indexes on transaction id and block height (#2492) Solution: Created unique indexes when setting up the collections --- bigchaindb/backend/localmongodb/schema.py | 3 ++- tests/backend/localmongodb/test_queries.py | 6 +++--- tests/backend/localmongodb/test_schema.py | 8 ++++++-- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/bigchaindb/backend/localmongodb/schema.py b/bigchaindb/backend/localmongodb/schema.py index 25ef7f4e..0607c883 100644 --- a/bigchaindb/backend/localmongodb/schema.py +++ b/bigchaindb/backend/localmongodb/schema.py @@ -59,6 +59,7 @@ def create_transactions_secondary_index(conn, dbname): # to query the transactions for a transaction id, this field is unique conn.conn[dbname]['transactions'].create_index('id', + unique=True, name='transaction_id') # secondary index for asset uuid, this field is unique @@ -93,7 +94,7 @@ def create_assets_secondary_index(conn, dbname): def create_blocks_secondary_index(conn, dbname): conn.conn[dbname]['blocks']\ - .create_index([('height', DESCENDING)], name='height') + .create_index([('height', DESCENDING)], name='height', unique=True) def create_metadata_secondary_index(conn, dbname): diff --git a/tests/backend/localmongodb/test_queries.py b/tests/backend/localmongodb/test_queries.py index 363d02a1..51e20265 100644 --- a/tests/backend/localmongodb/test_queries.py +++ b/tests/backend/localmongodb/test_queries.py @@ -218,9 +218,9 @@ def test_get_spending_transactions(user_pk, user_sk): tx1 = Transaction.create([user_pk], out * 3) tx1.sign([user_sk]) inputs = tx1.to_inputs() - tx2 = Transaction.transfer([inputs[0]], out, tx1.id) - tx3 = Transaction.transfer([inputs[1]], out, tx1.id) - tx4 = Transaction.transfer([inputs[2]], out, tx1.id) + tx2 = Transaction.transfer([inputs[0]], out, tx1.id).sign([user_sk]) + tx3 = Transaction.transfer([inputs[1]], out, tx1.id).sign([user_sk]) + tx4 = Transaction.transfer([inputs[2]], out, tx1.id).sign([user_sk]) txns = [tx.to_dict() for tx in [tx1, tx2, tx3, tx4]] conn.db.transactions.insert_many(txns) diff --git a/tests/backend/localmongodb/test_schema.py b/tests/backend/localmongodb/test_schema.py index b9b22639..136715ab 100644 --- a/tests/backend/localmongodb/test_schema.py +++ b/tests/backend/localmongodb/test_schema.py @@ -99,12 +99,16 @@ def test_create_secondary_indexes(): indexes = conn.conn[dbname]['assets'].index_information().keys() assert set(indexes) == {'_id_', 'asset_id', 'text'} - indexes = conn.conn[dbname]['transactions'].index_information().keys() + index_info = conn.conn[dbname]['transactions'].index_information() + indexes = index_info.keys() assert set(indexes) == { '_id_', 'transaction_id', 'asset_id', 'outputs', 'inputs'} + assert index_info['transaction_id']['unique'] - indexes = conn.conn[dbname]['blocks'].index_information().keys() + index_info = conn.conn[dbname]['blocks'].index_information() + indexes = index_info.keys() assert set(indexes) == {'_id_', 'height'} + assert index_info['height']['unique'] index_info = conn.conn[dbname]['utxos'].index_information() assert set(index_info.keys()) == 
{'_id_', 'utxo'} From 7a0b474d118fa7a1f57c630b5fe57d1ad9261a05 Mon Sep 17 00:00:00 2001 From: Zachary Bowen Date: Thu, 30 Aug 2018 10:47:37 +0200 Subject: [PATCH 08/22] Problem: Users want to know upsert-validator election status. Solution: Introduce the `upsert-validator show` command. Soon to be re-implemented via storing and querying identifiers of concluded elections. --- Dockerfile-dev | 2 +- bigchaindb/commands/bigchaindb.py | 38 +++++++++- .../upsert_validator/validator_election.py | 57 ++++++++++++++- .../upsert_validator/validator_utils.py | 2 +- .../source/server-reference/bigchaindb-cli.md | 16 +++++ tests/commands/test_commands.py | 42 ++++------- tests/commands/test_utils.py | 6 ++ tests/tendermint/test_core.py | 2 +- tests/upsert_validator/conftest.py | 50 +++++++++++++ .../test_validator_election.py | 71 +++++++++++++++++-- 10 files changed, 248 insertions(+), 38 deletions(-) diff --git a/Dockerfile-dev b/Dockerfile-dev index 4425e0d2..9f5a4f43 100644 --- a/Dockerfile-dev +++ b/Dockerfile-dev @@ -33,4 +33,4 @@ RUN mkdir -p /usr/src/app COPY . /usr/src/app/ WORKDIR /usr/src/app RUN pip install --no-cache-dir --process-dependency-links -e .[dev] -RUN bigchaindb -y configure +RUN bigchaindb -y configure \ No newline at end of file diff --git a/bigchaindb/commands/bigchaindb.py b/bigchaindb/commands/bigchaindb.py index 1a970f88..19cb0e2d 100644 --- a/bigchaindb/commands/bigchaindb.py +++ b/bigchaindb/commands/bigchaindb.py @@ -27,8 +27,7 @@ from bigchaindb.commands import utils from bigchaindb.commands.utils import (configure_bigchaindb, input_on_stderr) from bigchaindb.log import setup_logging -from bigchaindb.tendermint_utils import public_key_from_base64 - +from bigchaindb.tendermint_utils import public_key_from_base64, public_key_to_base64 logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) @@ -191,6 +190,35 @@ def run_upsert_validator_approve(args, bigchain): return False +def run_upsert_validator_show(args, bigchain): + """Retrieves information about an upsert-validator election + + :param args: dict + args = { + 'election_id': the transaction_id for an election (str) + } + :param bigchain: an instance of BigchainDB + """ + + election = bigchain.get_transaction(args.election_id) + if not election: + logger.error(f'No election found with election_id {args.election_id}') + return + + new_validator = election.asset['data'] + + public_key = public_key_to_base64(new_validator['public_key']) + power = new_validator['power'] + node_id = new_validator['node_id'] + status = election.get_status(bigchain) + + response = f'public_key={public_key}\npower={power}\nnode_id={node_id}\nstatus={status}' + + logger.info(response) + + return response + + def _run_init(): bdb = bigchaindb.BigchainDB() @@ -320,6 +348,12 @@ def create_parser(): dest='sk', help='Path to the private key of the election initiator.') + show_election_parser = validator_subparser.add_parser('show', + help='Provides information about an election.') + + show_election_parser.add_argument('election_id', + help='The transaction id of the election you wish to query.') + # parsers for showing/exporting config values subparsers.add_parser('show-config', help='Show the current configuration') diff --git a/bigchaindb/upsert_validator/validator_election.py b/bigchaindb/upsert_validator/validator_election.py index f007c38b..89e60335 100644 --- a/bigchaindb/upsert_validator/validator_election.py +++ b/bigchaindb/upsert_validator/validator_election.py @@ -5,6 +5,7 @@ import base58 from bigchaindb import 
backend +from bigchaindb.backend.localmongodb.query import get_asset_tokens_for_public_key from bigchaindb.common.exceptions import (InvalidSignature, MultipleInputsError, InvalidProposer, @@ -29,6 +30,11 @@ class ValidatorElection(Transaction): # by renaming CREATE to VALIDATOR_ELECTION CREATE = VALIDATOR_ELECTION ALLOWED_OPERATIONS = (VALIDATOR_ELECTION,) + # Election Statuses: + ONGOING = 'ongoing' + CONCLUDED = 'concluded' + INCONCLUSIVE = 'inconclusive' + ELECTION_THRESHOLD = 2 / 3 def __init__(self, operation, asset, inputs, outputs, metadata=None, version=None, hash_id=None): @@ -218,9 +224,58 @@ class ValidatorElection(Transaction): validator_updates = [election.asset['data']] curr_validator_set = bigchain.get_validators(new_height) updated_validator_set = new_validator_set(curr_validator_set, - new_height, validator_updates) + validator_updates) updated_validator_set = [v for v in updated_validator_set if v['voting_power'] > 0] bigchain.store_validator_set(new_height+1, updated_validator_set) return [encode_validator(election.asset['data'])] return [] + + def _vote_ratio(self, bigchain, height): + cast_votes = self._get_vote_ids(bigchain) + votes = [(tx['outputs'][0]['amount'], bigchain.get_block_containing_tx(tx['id'])[0]) for tx in cast_votes] + votes_cast = [int(vote[0]) for vote in votes if vote[1] <= height] + total_votes_cast = sum(votes_cast) + total_votes = sum([voter.amount for voter in self.outputs]) + vote_ratio = total_votes_cast/total_votes + return vote_ratio + + def _get_vote_ids(self, bigchain): + election_key = self.to_public_key(self.id) + votes = get_asset_tokens_for_public_key(bigchain.connection, self.id, election_key) + return votes + + def initial_height(self, bigchain): + heights = bigchain.get_block_containing_tx(self.id) + initial_height = 0 + if len(heights) != 0: + initial_height = min(bigchain.get_block_containing_tx(self.id)) + return initial_height + + def get_status(self, bigchain, height=None): + + initial_validators = self.get_validators(bigchain, height=self.initial_height(bigchain)) + + # get all heights where a vote was cast + vote_heights = set([bigchain.get_block_containing_tx(tx['id'])[0] for tx in self._get_vote_ids(bigchain)]) + + # find the least height where the vote succeeds + confirmation_height = None + confirmed_heights = [h for h in vote_heights if self._vote_ratio(bigchain, h) > self.ELECTION_THRESHOLD] + if height: + confirmed_heights = [h for h in confirmed_heights if h <= height] + if len(confirmed_heights) > 0: + confirmation_height = min(confirmed_heights) + + # get the validator set at the confirmation height/current height + if confirmation_height: + final_validators = self.get_validators(bigchain, height=confirmation_height) + else: + final_validators = self.get_validators(bigchain) + + if initial_validators != final_validators: + return self.INCONCLUSIVE + elif confirmation_height: + return self.CONCLUDED + else: + return self.ONGOING diff --git a/bigchaindb/upsert_validator/validator_utils.py b/bigchaindb/upsert_validator/validator_utils.py index 7cb924d8..75c7baf5 100644 --- a/bigchaindb/upsert_validator/validator_utils.py +++ b/bigchaindb/upsert_validator/validator_utils.py @@ -21,7 +21,7 @@ def decode_validator(v): 'voting_power': v.power} -def new_validator_set(validators, height, updates): +def new_validator_set(validators, updates): validators_dict = {} for v in validators: validators_dict[v['pub_key']['data']] = v diff --git a/docs/server/source/server-reference/bigchaindb-cli.md 
b/docs/server/source/server-reference/bigchaindb-cli.md index 80f1656f..80dbccdb 100644 --- a/docs/server/source/server-reference/bigchaindb-cli.md +++ b/docs/server/source/server-reference/bigchaindb-cli.md @@ -139,3 +139,19 @@ $ bigchaindb upsert-validator approve 04a067582cf03eba2b53b82e4adb5ece424474cbd4 ``` If the command succeeds a message will be returned stating that the vote was submitted successfully. Once a proposal has been approved by sufficent validators (more than `2/3` of the total voting power) then the proposed change is applied to the network. For example, consider a network wherein the total power is `90` then the proposed changed applied only after `60` (`2/3 * 90`) have been received. + +#### upsert-validator show + +Retrieves information about an election initiated by `upsert-validator new`. + +Below is the command line syntax and the return value, + +```bash +$ bigchaindb upsert-validator show ELECTION_ID +public_key= +power= +node_id= +status= +``` + +The `public_key`, `power`, and `node_id` are the same values used in the `upsert-validator new` command that originally triggered the election. `status` takes three possible values, `ongoing`, if the election has not yet reached a 2/3 majority, `concluded`, if the election reached the 2/3 majority needed to pass, or `inconclusive`, if the validator set changed while the election was in process, rendering it undecidable. \ No newline at end of file diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index b3f8e84c..7b2f8e28 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -347,34 +347,20 @@ class MockResponse(): return {'result': {'latest_block_height': self.height}} -# @pytest.mark.execute -# @patch('bigchaindb.lib.BigchainDB.get_validators') -# @pytest.mark.abci -@pytest.mark.skip -def test_upsert_validator_new_with_tendermint(b, priv_validator_path, user_sk, monkeypatch): - """WIP: Will be fixed and activated in the next PR - """ +@pytest.mark.abci +def test_upsert_validator_new_with_tendermint(b, priv_validator_path, user_sk, validators): from bigchaindb.commands.bigchaindb import run_upsert_validator_new - import time - time.sleep(3) + new_args = Namespace(action='new', + public_key='8eJ8q9ZQpReWyQT5aFCiwtZ5wDZC4eDnCen88p3tQ6ie', + power=1, + node_id='unique_node_id_for_test_upsert_validator_new_with_tendermint', + sk=priv_validator_path, + config={}) - # b.get_validators = mock_get - # mock_get_validators = mock_get - # monkeypatch.setattr('requests.get', mock_get) + election_id = run_upsert_validator_new(new_args, b) - proposer_key = b.get_validators()[0]['pub_key']['value'] - - args = Namespace(action='new', - public_key=proposer_key, - power=1, - node_id='12345', - sk=priv_validator_path, - config={}) - resp = run_upsert_validator_new(args, b) - time.sleep(3) - - assert b.get_transaction(resp) + assert b.get_transaction(election_id) @pytest.mark.tendermint @@ -386,7 +372,7 @@ def test_upsert_validator_new_without_tendermint(caplog, b, priv_validator_path, b.store_bulk_transactions([tx]) return (202, '') - b.get_validators = mock_get + b.get_validators = mock_get_validators b.write_transaction = mock_write args = Namespace(action='new', @@ -430,7 +416,7 @@ def test_upsert_validator_new_election_invalid_power(caplog, b, priv_validator_p return (400, '') b.write_transaction = mock_write - b.get_validators = mock_get + b.get_validators = mock_get_validators args = Namespace(action='new', public_key='CJxdItf4lz2PwEf4SmYNAu/c/VpmX39JEgC5YpH7fxg=', 
power=10, @@ -533,7 +519,7 @@ def test_upsert_validator_approve_called_with_bad_key(caplog, b, bad_validator_p 'the eligible voters in this election.' -def mock_get(height): +def mock_get_validators(height): keys = node_keys() pub_key = list(keys.keys())[0] return [ @@ -550,7 +536,7 @@ def call_election(b, new_validator, node_key): return (202, '') # patch the validator set. We now have one validator with power 10 - b.get_validators = mock_get + b.get_validators = mock_get_validators b.write_transaction = mock_write # our voters is a list of length 1, populated from our mocked validator diff --git a/tests/commands/test_utils.py b/tests/commands/test_utils.py index 6b00cd30..11ff41a8 100644 --- a/tests/commands/test_utils.py +++ b/tests/commands/test_utils.py @@ -72,6 +72,12 @@ def test_configure_bigchaindb_configures_bigchaindb(): logging.CRITICAL) ))) def test_configure_bigchaindb_logging(log_level): + # TODO: See following comment: + # This is a dirty test. If a test *preceding* this test makes use of the logger, and then another test *after* this + # test also makes use of the logger, somehow we get logger.disabled == True, and the later test fails. We need to + # either engineer this somehow to leave the test env in the same state as it finds it, or make an assessment + # whether or not we even need this test, and potentially just remove it. + from bigchaindb.commands.utils import configure_bigchaindb @configure_bigchaindb diff --git a/tests/tendermint/test_core.py b/tests/tendermint/test_core.py index e698d45a..6887264c 100644 --- a/tests/tendermint/test_core.py +++ b/tests/tendermint/test_core.py @@ -238,7 +238,7 @@ def test_new_validator_set(b): validators = [node1] updates = [node1_new_power, node2] b.store_validator_set(1, validators) - updated_validator_set = new_validator_set(b.get_validators(1), 1, updates) + updated_validator_set = new_validator_set(b.get_validators(1), updates) updated_validators = [] for u in updates: diff --git a/tests/upsert_validator/conftest.py b/tests/upsert_validator/conftest.py index 49b41b0f..4a1b6925 100644 --- a/tests/upsert_validator/conftest.py +++ b/tests/upsert_validator/conftest.py @@ -4,6 +4,9 @@ import pytest +from bigchaindb import ValidatorElectionVote +from bigchaindb.backend.localmongodb import query +from bigchaindb.lib import Block from bigchaindb.upsert_validator import ValidatorElection @@ -41,3 +44,50 @@ def valid_election_b(b, node_key, new_validator): return ValidatorElection.generate([node_key.public_key], voters, new_validator, None).sign([node_key.private_key]) + + +@pytest.fixture +def ongoing_election(b, valid_election, ed25519_node_keys): + b.store_bulk_transactions([valid_election]) + block_1 = Block(app_hash='hash_1', height=1, transactions=[valid_election.id]) + vote_0 = vote(valid_election, 0, ed25519_node_keys, b) + vote_1 = vote(valid_election, 1, ed25519_node_keys, b) + block_2 = Block(app_hash='hash_2', height=2, transactions=[vote_0.id, vote_1.id]) + b.store_block(block_1._asdict()) + b.store_block(block_2._asdict()) + return valid_election + + +@pytest.fixture +def concluded_election(b, ongoing_election, ed25519_node_keys): + vote_2 = vote(ongoing_election, 2, ed25519_node_keys, b) + block_4 = Block(app_hash='hash_4', height=4, transactions=[vote_2.id]) + b.store_block(block_4._asdict()) + return ongoing_election + + +@pytest.fixture +def inconclusive_election(b, concluded_election, new_validator): + validators = b.get_validators(height=1) + validators[0]['voting_power'] = 15 + validator_update = {'validators': 
validators, + 'height': 3} + + query.store_validator_set(b.connection, validator_update) + return concluded_election + + +def vote(election, voter, keys, b): + election_input = election.to_inputs()[voter] + votes = election.outputs[voter].amount + public_key = election_input.owners_before[0] + key = keys[public_key] + + election_pub_key = ValidatorElection.to_public_key(election.id) + + v = ValidatorElectionVote.generate([election_input], + [([election_pub_key], votes)], + election_id=election.id)\ + .sign([key.private_key]) + b.store_bulk_transactions([v]) + return v diff --git a/tests/upsert_validator/test_validator_election.py b/tests/upsert_validator/test_validator_election.py index d44d9f37..d09d6403 100644 --- a/tests/upsert_validator/test_validator_election.py +++ b/tests/upsert_validator/test_validator_election.py @@ -1,9 +1,11 @@ # Copyright BigchainDB GmbH and BigchainDB contributors # SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0) # Code is Apache-2.0 and docs are CC-BY-4.0 +from argparse import Namespace import pytest +from bigchaindb.tendermint_utils import public_key_to_base64 from bigchaindb.upsert_validator import ValidatorElection from bigchaindb.common.exceptions import (DuplicateTransaction, UnequalValidatorSet, @@ -57,11 +59,8 @@ def test_upsert_validator_invalid_inputs_election(b_mock, new_validator, node_ke election.validate(b_mock) -def test_upsert_validator_invalid_election(b_mock, new_validator, node_key): +def test_upsert_validator_invalid_election(b_mock, new_validator, node_key, valid_election): voters = ValidatorElection.recipients(b_mock) - valid_election = ValidatorElection.generate([node_key.public_key], - voters, - new_validator, None).sign([node_key.private_key]) duplicate_election = ValidatorElection.generate([node_key.public_key], voters, new_validator, None).sign([node_key.private_key]) @@ -95,3 +94,67 @@ def test_upsert_validator_invalid_election(b_mock, new_validator, node_key): with pytest.raises(UnequalValidatorSet): tx_election.validate(b_mock) + + +def test_get_status_ongoing(b, ongoing_election, new_validator): + status = ValidatorElection.ONGOING + resp = ongoing_election.get_status(b) + assert resp == status + + +def test_get_status_concluded(b, concluded_election, new_validator): + status = ValidatorElection.CONCLUDED + resp = concluded_election.get_status(b) + assert resp == status + + +def test_get_status_inconclusive(b, inconclusive_election, new_validator): + def custom_mock_get_validators(height): + if height >= 3: + return [{'pub_key': {'data': 'zL/DasvKulXZzhSNFwx4cLRXKkSM9GPK7Y0nZ4FEylM=', + 'type': 'AC26791624DE60'}, + 'voting_power': 15}, + {'pub_key': {'data': 'GIijU7GBcVyiVUcB0GwWZbxCxdk2xV6pxdvL24s/AqM=', + 'type': 'AC26791624DE60'}, + 'voting_power': 7}, + {'pub_key': {'data': 'JbfwrLvCVIwOPm8tj8936ki7IYbmGHjPiKb6nAZegRA=', + 'type': 'AC26791624DE60'}, + 'voting_power': 10}, + {'pub_key': {'data': 'PecJ58SaNRsWJZodDmqjpCWqG6btdwXFHLyE40RYlYM=', + 'type': 'AC26791624DE60'}, + 'voting_power': 8}] + else: + return [{'pub_key': {'data': 'zL/DasvKulXZzhSNFwx4cLRXKkSM9GPK7Y0nZ4FEylM=', + 'type': 'AC26791624DE60'}, + 'voting_power': 9}, + {'pub_key': {'data': 'GIijU7GBcVyiVUcB0GwWZbxCxdk2xV6pxdvL24s/AqM=', + 'type': 'AC26791624DE60'}, + 'voting_power': 7}, + {'pub_key': {'data': 'JbfwrLvCVIwOPm8tj8936ki7IYbmGHjPiKb6nAZegRA=', + 'type': 'AC26791624DE60'}, + 'voting_power': 10}, + {'pub_key': {'data': 'PecJ58SaNRsWJZodDmqjpCWqG6btdwXFHLyE40RYlYM=', + 'type': 'AC26791624DE60'}, + 'voting_power': 8}] + + b.get_validators = 
custom_mock_get_validators + status = ValidatorElection.INCONCLUSIVE + resp = inconclusive_election.get_status(b) + assert resp == status + + +def test_upsert_validator_show(caplog, ongoing_election, b): + from bigchaindb.commands.bigchaindb import run_upsert_validator_show + + election_id = ongoing_election.id + public_key = public_key_to_base64(ongoing_election.asset['data']['public_key']) + power = ongoing_election.asset['data']['power'] + node_id = ongoing_election.asset['data']['node_id'] + status = ValidatorElection.ONGOING + + show_args = Namespace(action='show', + election_id=election_id) + + msg = run_upsert_validator_show(show_args, b) + + assert msg == f'public_key={public_key}\npower={power}\nnode_id={node_id}\nstatus={status}' From cfc2c5900bfe8787dacbfeade64611bdfd79ef2a Mon Sep 17 00:00:00 2001 From: Zachary Bowen Date: Fri, 31 Aug 2018 09:47:47 +0200 Subject: [PATCH 09/22] Rework `upsert-validator show status` (#2496) * Problem: We need to store the `election_id` as part of the `validator_update` so we can efficiently check which election was resposible for the change Solution: Added the parameter to `store_validator_set` and aligned the tests * Problem: Logic for `upsert-validator show` is convoluted Solution: Rewrote the function to be much simpler * Problem: Need a uniqueness constraint for election_id wrt validator changes Solution: Added a new key to the db schema --- bigchaindb/backend/localmongodb/query.py | 12 +++ bigchaindb/backend/localmongodb/schema.py | 3 + bigchaindb/backend/query.py | 8 ++ bigchaindb/core.py | 2 +- bigchaindb/lib.py | 14 +++- .../upsert_validator/validator_election.py | 75 +++++++------------ tests/backend/localmongodb/test_queries.py | 2 +- tests/backend/localmongodb/test_schema.py | 2 +- tests/conftest.py | 10 ++- tests/tendermint/test_core.py | 2 +- tests/upsert_validator/conftest.py | 26 ++++--- .../test_validator_election_vote.py | 4 +- tests/web/test_validators.py | 2 +- 13 files changed, 94 insertions(+), 68 deletions(-) diff --git a/bigchaindb/backend/localmongodb/query.py b/bigchaindb/backend/localmongodb/query.py index c85e0854..29993b30 100644 --- a/bigchaindb/backend/localmongodb/query.py +++ b/bigchaindb/backend/localmongodb/query.py @@ -299,6 +299,18 @@ def get_validator_set(conn, height=None): return list(cursor)[0] +@register_query(LocalMongoDBConnection) +def get_validator_set_by_election_id(conn, election_id): + query = {'election_id': election_id} + + cursor = conn.run( + conn.collection('validators') + .find(query, projection={'_id': False}) + ) + + return next(cursor, None) + + @register_query(LocalMongoDBConnection) def get_asset_tokens_for_public_key(conn, asset_id, public_key): query = {'outputs.public_keys': [public_key], diff --git a/bigchaindb/backend/localmongodb/schema.py b/bigchaindb/backend/localmongodb/schema.py index 0607c883..1ad88928 100644 --- a/bigchaindb/backend/localmongodb/schema.py +++ b/bigchaindb/backend/localmongodb/schema.py @@ -133,3 +133,6 @@ def create_validators_secondary_index(conn, dbname): conn.conn[dbname]['validators'].create_index('height', name='height', unique=True,) + conn.conn[dbname]['validators'].create_index('election_id', + name='election_id', + unique=True,) diff --git a/bigchaindb/backend/query.py b/bigchaindb/backend/query.py index b2cef080..4d62c633 100644 --- a/bigchaindb/backend/query.py +++ b/bigchaindb/backend/query.py @@ -360,6 +360,14 @@ def get_validator_set(conn, height): raise NotImplementedError +@singledispatch +def get_validator_set_by_election_id(conn, 
election_id): + """Return a validator set change with the specified election_id + """ + + raise NotImplementedError + + @singledispatch def get_asset_tokens_for_public_key(connection, asset_id, public_key, operation): diff --git a/bigchaindb/core.py b/bigchaindb/core.py index 67b48df5..621960fa 100644 --- a/bigchaindb/core.py +++ b/bigchaindb/core.py @@ -54,7 +54,7 @@ class App(BaseApplication): validator_set = [vutils.decode_validator(v) for v in genesis.validators] block = Block(app_hash='', height=0, transactions=[]) self.bigchaindb.store_block(block._asdict()) - self.bigchaindb.store_validator_set(1, validator_set) + self.bigchaindb.store_validator_set(1, validator_set, None) return ResponseInitChain() def info(self, request): diff --git a/bigchaindb/lib.py b/bigchaindb/lib.py index e75153d5..05ca3e69 100644 --- a/bigchaindb/lib.py +++ b/bigchaindb/lib.py @@ -421,24 +421,32 @@ class BigchainDB(object): def fastquery(self): return fastquery.FastQuery(self.connection) + def get_validator_change(self, height=None): + return backend.query.get_validator_set(self.connection, height) + def get_validators(self, height=None): - result = backend.query.get_validator_set(self.connection, height) + result = self.get_validator_change(height) validators = result['validators'] return validators + def get_validators_by_election_id(self, election_id): + result = backend.query.get_validator_set_by_election_id(self.connection, election_id) + return result + def delete_validator_update(self): return backend.query.delete_validator_update(self.connection) def store_pre_commit_state(self, state): return backend.query.store_pre_commit_state(self.connection, state) - def store_validator_set(self, height, validators): + def store_validator_set(self, height, validators, election_id): """Store validator set at a given `height`. NOTE: If the validator set already exists at that `height` then an exception will be raised. 
""" return backend.query.store_validator_set(self.connection, {'height': height, - 'validators': validators}) + 'validators': validators, + 'election_id': election_id}) Block = namedtuple('Block', ('app_hash', 'height', 'transactions')) diff --git a/bigchaindb/upsert_validator/validator_election.py b/bigchaindb/upsert_validator/validator_election.py index 89e60335..7d9a6fbc 100644 --- a/bigchaindb/upsert_validator/validator_election.py +++ b/bigchaindb/upsert_validator/validator_election.py @@ -5,7 +5,6 @@ import base58 from bigchaindb import backend -from bigchaindb.backend.localmongodb.query import get_asset_tokens_for_public_key from bigchaindb.common.exceptions import (InvalidSignature, MultipleInputsError, InvalidProposer, @@ -42,6 +41,21 @@ class ValidatorElection(Transaction): # of `CREATE` and any validation on `CREATE` in the parent class should apply to it super().__init__(operation, asset, inputs, outputs, metadata, version, hash_id) + @classmethod + def get_validator_change(cls, bigchain, height=None): + """Return the latest change to the validator set + + :return: { + 'height': , + 'asset': { + 'height': , + 'validators': , + 'election_id': + } + } + """ + return bigchain.get_validator_change(height) + @classmethod def get_validators(cls, bigchain, height=None): """Return a dictionary of validators with key as `public_key` and @@ -227,55 +241,24 @@ class ValidatorElection(Transaction): validator_updates) updated_validator_set = [v for v in updated_validator_set if v['voting_power'] > 0] - bigchain.store_validator_set(new_height+1, updated_validator_set) + bigchain.store_validator_set(new_height+1, updated_validator_set, election.id) return [encode_validator(election.asset['data'])] return [] - def _vote_ratio(self, bigchain, height): - cast_votes = self._get_vote_ids(bigchain) - votes = [(tx['outputs'][0]['amount'], bigchain.get_block_containing_tx(tx['id'])[0]) for tx in cast_votes] - votes_cast = [int(vote[0]) for vote in votes if vote[1] <= height] - total_votes_cast = sum(votes_cast) - total_votes = sum([voter.amount for voter in self.outputs]) - vote_ratio = total_votes_cast/total_votes - return vote_ratio + def get_validator_update_by_election_id(self, election_id, bigchain): + result = bigchain.get_validators_by_election_id(election_id) + return result - def _get_vote_ids(self, bigchain): - election_key = self.to_public_key(self.id) - votes = get_asset_tokens_for_public_key(bigchain.connection, self.id, election_key) - return votes - - def initial_height(self, bigchain): - heights = bigchain.get_block_containing_tx(self.id) - initial_height = 0 - if len(heights) != 0: - initial_height = min(bigchain.get_block_containing_tx(self.id)) - return initial_height - - def get_status(self, bigchain, height=None): - - initial_validators = self.get_validators(bigchain, height=self.initial_height(bigchain)) - - # get all heights where a vote was cast - vote_heights = set([bigchain.get_block_containing_tx(tx['id'])[0] for tx in self._get_vote_ids(bigchain)]) - - # find the least height where the vote succeeds - confirmation_height = None - confirmed_heights = [h for h in vote_heights if self._vote_ratio(bigchain, h) > self.ELECTION_THRESHOLD] - if height: - confirmed_heights = [h for h in confirmed_heights if h <= height] - if len(confirmed_heights) > 0: - confirmation_height = min(confirmed_heights) - - # get the validator set at the confirmation height/current height - if confirmation_height: - final_validators = self.get_validators(bigchain, height=confirmation_height) - else: - 
final_validators = self.get_validators(bigchain) - - if initial_validators != final_validators: - return self.INCONCLUSIVE - elif confirmation_height: + def get_status(self, bigchain): + concluded = self.get_validator_update_by_election_id(self.id, bigchain) + if concluded: return self.CONCLUDED + + latest_change = self.get_validator_change(bigchain) + latest_change_height = latest_change['height'] + election_height = bigchain.get_block_containing_tx(self.id)[0] + + if latest_change_height >= election_height: + return self.INCONCLUSIVE else: return self.ONGOING diff --git a/tests/backend/localmongodb/test_queries.py b/tests/backend/localmongodb/test_queries.py index 51e20265..77bfddd0 100644 --- a/tests/backend/localmongodb/test_queries.py +++ b/tests/backend/localmongodb/test_queries.py @@ -380,7 +380,7 @@ def test_validator_update(): conn = connect() def gen_validator_update(height): - return {'data': 'somedata', 'height': height} + return {'data': 'somedata', 'height': height, 'election_id': f'election_id_at_height_{height}'} for i in range(1, 100, 10): value = gen_validator_update(i) diff --git a/tests/backend/localmongodb/test_schema.py b/tests/backend/localmongodb/test_schema.py index 136715ab..2481cd60 100644 --- a/tests/backend/localmongodb/test_schema.py +++ b/tests/backend/localmongodb/test_schema.py @@ -44,7 +44,7 @@ def test_init_creates_db_tables_and_indexes(): assert set(indexes) == {'_id_', 'pre_commit_id'} indexes = conn.conn[dbname]['validators'].index_information().keys() - assert set(indexes) == {'_id_', 'height'} + assert set(indexes) == {'_id_', 'height', 'election_id'} def test_init_database_fails_if_db_exists(): diff --git a/tests/conftest.py b/tests/conftest.py index 0fd4d671..7faa66c6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -631,6 +631,10 @@ def bad_validator_path(node_keys): @pytest.fixture def validators(b, node_keys): from bigchaindb.backend import query + import time + + def timestamp(): # we need this to force unique election_ids for setup and teardown of fixtures + return str(time.time()) height = get_block_height(b) @@ -645,7 +649,8 @@ def validators(b, node_keys): 'voting_power': 10}] validator_update = {'validators': validator_set, - 'height': height + 1} + 'height': height + 1, + 'election_id': f'setup_at_{timestamp()}'} query.store_validator_set(b.connection, validator_update) @@ -654,7 +659,8 @@ def validators(b, node_keys): height = get_block_height(b) validator_update = {'validators': original_validators, - 'height': height} + 'height': height, + 'election_id': f'teardown_at_{timestamp()}'} query.store_validator_set(b.connection, validator_update) diff --git a/tests/tendermint/test_core.py b/tests/tendermint/test_core.py index 6887264c..40958aa2 100644 --- a/tests/tendermint/test_core.py +++ b/tests/tendermint/test_core.py @@ -237,7 +237,7 @@ def test_new_validator_set(b): validators = [node1] updates = [node1_new_power, node2] - b.store_validator_set(1, validators) + b.store_validator_set(1, validators, 'election_id') updated_validator_set = new_validator_set(b.get_validators(1), updates) updated_validators = [] diff --git a/tests/upsert_validator/conftest.py b/tests/upsert_validator/conftest.py index 4a1b6925..20906ada 100644 --- a/tests/upsert_validator/conftest.py +++ b/tests/upsert_validator/conftest.py @@ -48,33 +48,39 @@ def valid_election_b(b, node_key, new_validator): @pytest.fixture def ongoing_election(b, valid_election, ed25519_node_keys): + validators = b.get_validators(height=1) + genesis_validators = {'validators': 
validators, + 'height': 0, + 'election_id': None} + query.store_validator_set(b.connection, genesis_validators) + b.store_bulk_transactions([valid_election]) block_1 = Block(app_hash='hash_1', height=1, transactions=[valid_election.id]) - vote_0 = vote(valid_election, 0, ed25519_node_keys, b) - vote_1 = vote(valid_election, 1, ed25519_node_keys, b) - block_2 = Block(app_hash='hash_2', height=2, transactions=[vote_0.id, vote_1.id]) b.store_block(block_1._asdict()) - b.store_block(block_2._asdict()) return valid_election @pytest.fixture def concluded_election(b, ongoing_election, ed25519_node_keys): - vote_2 = vote(ongoing_election, 2, ed25519_node_keys, b) - block_4 = Block(app_hash='hash_4', height=4, transactions=[vote_2.id]) - b.store_block(block_4._asdict()) + validators = b.get_validators(height=1) + validator_update = {'validators': validators, + 'height': 2, + 'election_id': ongoing_election.id} + + query.store_validator_set(b.connection, validator_update) return ongoing_election @pytest.fixture -def inconclusive_election(b, concluded_election, new_validator): +def inconclusive_election(b, ongoing_election, new_validator): validators = b.get_validators(height=1) validators[0]['voting_power'] = 15 validator_update = {'validators': validators, - 'height': 3} + 'height': 2, + 'election_id': 'some_other_election'} query.store_validator_set(b.connection, validator_update) - return concluded_election + return ongoing_election def vote(election, voter, keys, b): diff --git a/tests/upsert_validator/test_validator_election_vote.py b/tests/upsert_validator/test_validator_election_vote.py index 19eacbfa..6c92af99 100644 --- a/tests/upsert_validator/test_validator_election_vote.py +++ b/tests/upsert_validator/test_validator_election_vote.py @@ -234,7 +234,7 @@ def test_upsert_validator(b, node_key, node_keys, ed25519_node_keys): latest_block = b.get_latest_block() # reset the validator set - b.store_validator_set(latest_block['height'], validators) + b.store_validator_set(latest_block['height'], validators, 'previous_election_id') power = 1 public_key = '9B3119650DF82B9A5D8A12E38953EA47475C09F0C48A4E6A0ECE182944B24403' @@ -368,4 +368,4 @@ def reset_validator_set(b, node_keys, height): validators.append({'pub_key': {'type': 'ed25519', 'data': node_pub}, 'voting_power': 10}) - b.store_validator_set(height, validators) + b.store_validator_set(height, validators, 'election_id') diff --git a/tests/web/test_validators.py b/tests/web/test_validators.py index 714ceee4..52420828 100644 --- a/tests/web/test_validators.py +++ b/tests/web/test_validators.py @@ -14,7 +14,7 @@ def test_get_validators_endpoint(b, client): 'pub_key': {'data': '4E2685D9016126864733225BE00F005515200727FBAB1312FC78C8B76831255A', 'type': 'ed25519'}, 'voting_power': 10}] - b.store_validator_set(23, validator_set) + b.store_validator_set(23, validator_set, 'election_id') res = client.get(VALIDATORS_ENDPOINT) assert is_validator(res.json[0]) From cb418265b6d475a0b373fd7d1ef72c5b45762a70 Mon Sep 17 00:00:00 2001 From: Shahbaz Nazir Date: Fri, 31 Aug 2018 13:24:04 +0200 Subject: [PATCH 10/22] Problem: No documentation for hosted MongoDB (#2495) Solution: Add documentation. 
--- .../source/server-reference/configuration.md | 30 +++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/docs/server/source/server-reference/configuration.md b/docs/server/source/server-reference/configuration.md index 9dd892b5..663d99eb 100644 --- a/docs/server/source/server-reference/configuration.md +++ b/docs/server/source/server-reference/configuration.md @@ -40,8 +40,34 @@ The settings with names of the form `database.*` are for the backend database * `database.connection_timeout` is the maximum number of milliseconds that BigchainDB will wait before giving up on one attempt to connect to the backend database. * `database.max_tries` is the maximum number of times that BigchainDB will try to establish a connection with the backend database. If 0, then it will try forever. * `database.replicaset` is the name of the MongoDB replica set. The default value is `null` because in BighainDB 2.0+, each BigchainDB node has its own independent MongoDB database and no replica set is necessary. -* `database.login` and `database.password` are the login and password used to authenticate to the backend database, specified in plaintext. -* `database.ssl` determines if BigchainDB connects to MongoDB over TLS/SSL or not. It can be set to `true` or `false`. + +There are three ways for BigchainDB Server to authenticate itself with MongoDB (or a specific MongoDB database): no authentication, username/password, and x.509 certificate authentication. + +**No Authentication** + +If you use all the default BigchainDB configuration settings, then no authentication will be used. + +**Username/Password Authentication** + +To use username/password authentication, a MongoDB instance must already be running somewhere (maybe in another machine), it must already have a database for use by BigchainDB (usually named `bigchain`, which is the default `database.name`), and that database must already have a "readWrite" user with associated username and password. To create such a user, login to your MongoDB instance as Admin and run the following commands: + +```text +use +db.createUser({user: "", pwd: "", roles: [{role: "readWrite", db: ""}]}) +``` + +* `database.login` is the user's username. +* `database.password` is the user's password, given in plaintext. +* `database.ssl` must be `false` (the default value). +* `database.ca_cert`, `database.certfile`, `database.keyfile`, `database.crlfile`, and `database.keyfile_passphrase` are not used so they can have their default values. + +**x.509 Certificate Authentication** + +To use x.509 certificate authentication, a MongoDB instance must be running somewhere (maybe in another machine), it must already have a database for use by BigchainDB (usually named `bigchain`, which is the default `database.name`), and that database must be set up to use x.509 authentication. See the MongoDB docs about how to do that. + +* `database.login` is the user's username. +* `database.password` isn't used so the default value (`null`) is fine. +* `database.ssl` must be `true`. * `database.ca_cert`, `database.certfile`, `database.keyfile` and `database.crlfile` are the paths to the CA, signed certificate, private key and certificate revocation list files respectively. * `database.keyfile_passphrase` is the private key decryption passphrase, specified in plaintext. 
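As a rough illustration of the username/password settings documented above, the sketch below shows the equivalent pymongo 3.x calls, mirroring the connection code that appears elsewhere in this patch series. The host, port, database name and credentials are placeholder values, not defaults from this repository.

```python
# Minimal sketch (placeholder values) of the database.* settings in terms of
# pymongo 3.x, using username/password authentication.
import pymongo

client = pymongo.MongoClient('localhost', 27017,               # database.host, database.port
                             serverselectiontimeoutms=5000,    # database.connection_timeout (ms)
                             ssl=False)                        # database.ssl
db = client['bigchain']                                        # database.name
db.authenticate('bdb-user', 'bdb-password')                    # database.login, database.password
```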
From 2d1f670eecd9032e8a256229fd0d757b3e5e171d Mon Sep 17 00:00:00 2001 From: Shahbaz Nazir Date: Fri, 31 Aug 2018 13:54:32 +0200 Subject: [PATCH 11/22] Problem: BigchainDB has un-necessary code to initialize a replica set and check if MongoDB was started with replicaSet (#2491) Solution: Remove un-necessary code. Deployment of MongoDB with or without replicaSet should be the responsibility of MongoDB admin which can and cannot be a BigchainDB node operator. As far as BigchainDB is concerned replicaset, if provided in bigchaindb configs, should be used to establish connection with MongoDB. --- bigchaindb/backend/localmongodb/connection.py | 137 ------------------ .../source/server-reference/configuration.md | 2 +- tests/backend/localmongodb/test_connection.py | 85 ----------- 3 files changed, 1 insertion(+), 223 deletions(-) diff --git a/bigchaindb/backend/localmongodb/connection.py b/bigchaindb/backend/localmongodb/connection.py index 45d234ec..ffdb84b6 100644 --- a/bigchaindb/backend/localmongodb/connection.py +++ b/bigchaindb/backend/localmongodb/connection.py @@ -2,7 +2,6 @@ # SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0) # Code is Apache-2.0 and docs are CC-BY-4.0 -import time import logging from ssl import CERT_REQUIRED @@ -88,23 +87,6 @@ class LocalMongoDBConnection(Connection): """ try: - if self.replicaset: - # we should only return a connection if the replica set is - # initialized. initialize_replica_set will check if the - # replica set is initialized else it will initialize it. - initialize_replica_set(self.host, - self.port, - self.connection_timeout, - self.dbname, - self.ssl, - self.login, - self.password, - self.ca_cert, - self.certfile, - self.keyfile, - self.keyfile_passphrase, - self.crlfile) - # FYI: the connection process might raise a # `ServerSelectionTimeoutError`, that is a subclass of # `ConnectionFailure`. @@ -140,8 +122,6 @@ class LocalMongoDBConnection(Connection): return client - # `initialize_replica_set` might raise `ConnectionFailure`, - # `OperationFailure` or `ConfigurationError`. except (pymongo.errors.ConnectionFailure, pymongo.errors.OperationFailure) as exc: logger.info('Exception in _connect(): {}'.format(exc)) @@ -153,120 +133,3 @@ class LocalMongoDBConnection(Connection): MONGO_OPTS = { 'socketTimeoutMS': 20000, } - - -def initialize_replica_set(host, port, connection_timeout, dbname, ssl, login, - password, ca_cert, certfile, keyfile, - keyfile_passphrase, crlfile): - """Initialize a replica set. If already initialized skip.""" - - # Setup a MongoDB connection - # The reason we do this instead of `backend.connect` is that - # `backend.connect` will connect you to a replica set but this fails if - # you try to connect to a replica set that is not yet initialized - try: - # The presence of ca_cert, certfile, keyfile, crlfile implies the - # use of certificates for TLS connectivity. 
- if ca_cert is None or certfile is None or keyfile is None or \ - crlfile is None: - conn = pymongo.MongoClient(host, - port, - serverselectiontimeoutms=connection_timeout, - ssl=ssl, - **MONGO_OPTS) - if login is not None and password is not None: - conn[dbname].authenticate(login, password) - else: - logger.info('Connecting to MongoDB over TLS/SSL...') - conn = pymongo.MongoClient(host, - port, - serverselectiontimeoutms=connection_timeout, - ssl=ssl, - ssl_ca_certs=ca_cert, - ssl_certfile=certfile, - ssl_keyfile=keyfile, - ssl_pem_passphrase=keyfile_passphrase, - ssl_crlfile=crlfile, - ssl_cert_reqs=CERT_REQUIRED, - **MONGO_OPTS) - if login is not None: - logger.info('Authenticating to the database...') - conn[dbname].authenticate(login, mechanism='MONGODB-X509') - - except (pymongo.errors.ConnectionFailure, - pymongo.errors.OperationFailure) as exc: - logger.info('Exception in _connect(): {}'.format(exc)) - raise ConnectionError(str(exc)) from exc - except pymongo.errors.ConfigurationError as exc: - raise ConfigurationError from exc - - _check_replica_set(conn) - host = '{}:{}'.format(bigchaindb.config['database']['host'], - bigchaindb.config['database']['port']) - config = {'_id': bigchaindb.config['database']['replicaset'], - 'members': [{'_id': 0, 'host': host}]} - - try: - conn.admin.command('replSetInitiate', config) - except pymongo.errors.OperationFailure as exc_info: - if exc_info.details['codeName'] == 'AlreadyInitialized': - return - raise - else: - _wait_for_replica_set_initialization(conn) - logger.info('Initialized replica set') - finally: - if conn is not None: - logger.info('Closing initial connection to MongoDB') - conn.close() - - -def _check_replica_set(conn): - """Checks if the replSet option was enabled either through the command - line option or config file and if it matches the one provided by - bigchaindb configuration. - - Note: - The setting we are looking for will have a different name depending - if it was set by the config file (`replSetName`) or by command - line arguments (`replSet`). - - Raise: - :exc:`~ConfigurationError`: If mongod was not started with the - replSet option. - """ - options = conn.admin.command('getCmdLineOpts') - try: - repl_opts = options['parsed']['replication'] - repl_set_name = repl_opts.get('replSetName', repl_opts.get('replSet')) - except KeyError: - raise ConfigurationError('mongod was not started with' - ' the replSet option.') - - bdb_repl_set_name = bigchaindb.config['database']['replicaset'] - if repl_set_name != bdb_repl_set_name: - raise ConfigurationError('The replicaset configuration of ' - 'bigchaindb (`{}`) needs to match ' - 'the replica set name from MongoDB' - ' (`{}`)'.format(bdb_repl_set_name, - repl_set_name)) - - -def _wait_for_replica_set_initialization(conn): - """Wait for a replica set to finish initialization. - - If a replica set is being initialized for the first time it takes some - time. Nodes need to discover each other and an election needs to take - place. During this time the database is not writable so we need to wait - before continuing with the rest of the initialization - """ - - # I did not find a better way to do this for now. 
- # To check if the database is ready we will poll the mongodb logs until - # we find the line that says the database is ready - logger.info('Waiting for mongodb replica set initialization') - while True: - logs = conn.admin.command('getLog', 'rs')['log'] - if any('database writes are now permitted' in line for line in logs): - return - time.sleep(0.1) diff --git a/docs/server/source/server-reference/configuration.md b/docs/server/source/server-reference/configuration.md index 663d99eb..ef54229e 100644 --- a/docs/server/source/server-reference/configuration.md +++ b/docs/server/source/server-reference/configuration.md @@ -39,7 +39,7 @@ The settings with names of the form `database.*` are for the backend database * `database.name` is a user-chosen name for the database inside MongoDB, e.g. `bigchain`. * `database.connection_timeout` is the maximum number of milliseconds that BigchainDB will wait before giving up on one attempt to connect to the backend database. * `database.max_tries` is the maximum number of times that BigchainDB will try to establish a connection with the backend database. If 0, then it will try forever. -* `database.replicaset` is the name of the MongoDB replica set. The default value is `null` because in BighainDB 2.0+, each BigchainDB node has its own independent MongoDB database and no replica set is necessary. +* `database.replicaset` is the name of the MongoDB replica set. The default value is `null` because in BighainDB 2.0+, each BigchainDB node has its own independent MongoDB database and no replica set is necessary. Replica set must already exist if this option is configured, BigchainDB will not create it. There are three ways for BigchainDB Server to authenticate itself with MongoDB (or a specific MongoDB database): no authentication, username/password, and x.509 certificate authentication. 
diff --git a/tests/backend/localmongodb/test_connection.py b/tests/backend/localmongodb/test_connection.py index 77692abe..9bb3d9aa 100644 --- a/tests/backend/localmongodb/test_connection.py +++ b/tests/backend/localmongodb/test_connection.py @@ -7,7 +7,6 @@ from unittest import mock import pytest import pymongo from pymongo import MongoClient -from pymongo.database import Database pytestmark = [pytest.mark.bdb, pytest.mark.tendermint] @@ -109,87 +108,3 @@ def test_connection_with_credentials(mock_authenticate): password='secret') conn.connect() assert mock_authenticate.call_count == 1 - - -def test_check_replica_set_not_enabled(mongodb_connection): - from bigchaindb.backend.localmongodb.connection import _check_replica_set - from bigchaindb.common.exceptions import ConfigurationError - - # no replSet option set - cmd_line_opts = {'argv': ['mongod', '--dbpath=/data'], - 'ok': 1.0, - 'parsed': {'storage': {'dbPath': '/data'}}} - with mock.patch.object(Database, 'command', return_value=cmd_line_opts): - with pytest.raises(ConfigurationError): - _check_replica_set(mongodb_connection) - - -def test_check_replica_set_command_line(mongodb_connection, - mock_cmd_line_opts): - from bigchaindb.backend.localmongodb.connection import _check_replica_set - - # replSet option set through the command line - with mock.patch.object(Database, 'command', - return_value=mock_cmd_line_opts): - assert _check_replica_set(mongodb_connection) is None - - -def test_check_replica_set_config_file(mongodb_connection, mock_config_opts): - from bigchaindb.backend.localmongodb.connection import _check_replica_set - - # replSet option set through the config file - with mock.patch.object(Database, 'command', return_value=mock_config_opts): - assert _check_replica_set(mongodb_connection) is None - - -def test_check_replica_set_name_mismatch(mongodb_connection, - mock_cmd_line_opts): - from bigchaindb.backend.localmongodb.connection import _check_replica_set - from bigchaindb.common.exceptions import ConfigurationError - - # change the replica set name so it does not match the bigchaindb config - mock_cmd_line_opts['parsed']['replication']['replSet'] = 'rs0' - - with mock.patch.object(Database, 'command', - return_value=mock_cmd_line_opts): - with pytest.raises(ConfigurationError): - _check_replica_set(mongodb_connection) - - -def test_wait_for_replica_set_initialization(mongodb_connection): - from bigchaindb.backend.localmongodb.connection import _wait_for_replica_set_initialization # noqa - - with mock.patch.object(Database, 'command') as mock_command: - mock_command.side_effect = [ - {'log': ['a line']}, - {'log': ['database writes are now permitted']}, - ] - - # check that it returns - assert _wait_for_replica_set_initialization(mongodb_connection) is None - - -def test_initialize_replica_set(mock_cmd_line_opts): - from bigchaindb.backend.localmongodb.connection import initialize_replica_set - - with mock.patch.object(Database, 'command') as mock_command: - mock_command.side_effect = [ - mock_cmd_line_opts, - None, - {'log': ['database writes are now permitted']}, - ] - - # check that it returns - assert initialize_replica_set('host', 1337, 1000, 'dbname', False, None, None, - None, None, None, None, None) is None - - # test it raises OperationError if anything wrong - with mock.patch.object(Database, 'command') as mock_command: - mock_command.side_effect = [ - mock_cmd_line_opts, - pymongo.errors.OperationFailure(None, details={'codeName': ''}) - ] - - with pytest.raises(pymongo.errors.OperationFailure): - 
initialize_replica_set('host', 1337, 1000, 'dbname', False, None, - None, None, None, None, None, None) is None From 96932793b1c05ed809e186a1c1e17185fb1742cb Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Fri, 31 Aug 2018 15:27:35 +0200 Subject: [PATCH 12/22] Problem: The docs about database.ssl are wrong (#2500) Solution: Rewrite the docs about the database.ssl config setting --- docs/server/source/server-reference/configuration.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/server/source/server-reference/configuration.md b/docs/server/source/server-reference/configuration.md index ef54229e..47d06de8 100644 --- a/docs/server/source/server-reference/configuration.md +++ b/docs/server/source/server-reference/configuration.md @@ -39,7 +39,8 @@ The settings with names of the form `database.*` are for the backend database * `database.name` is a user-chosen name for the database inside MongoDB, e.g. `bigchain`. * `database.connection_timeout` is the maximum number of milliseconds that BigchainDB will wait before giving up on one attempt to connect to the backend database. * `database.max_tries` is the maximum number of times that BigchainDB will try to establish a connection with the backend database. If 0, then it will try forever. -* `database.replicaset` is the name of the MongoDB replica set. The default value is `null` because in BighainDB 2.0+, each BigchainDB node has its own independent MongoDB database and no replica set is necessary. Replica set must already exist if this option is configured, BigchainDB will not create it. +* `database.replicaset` is the name of the MongoDB replica set. The default value is `null` because in BigchainDB 2.0+, each BigchainDB node has its own independent MongoDB database and no replica set is necessary. Replica set must already exist if this option is configured, BigchainDB will not create it. +* `database.ssl` must be `true` or `false`. It tells BigchainDB Server whether it should connect to MongoDB using TLS/SSL or not. The default value is `false`. There are three ways for BigchainDB Server to authenticate itself with MongoDB (or a specific MongoDB database): no authentication, username/password, and x.509 certificate authentication. @@ -58,7 +59,6 @@ db.createUser({user: "", pwd: "", roles: [{ro * `database.login` is the user's username. * `database.password` is the user's password, given in plaintext. -* `database.ssl` must be `false` (the default value). * `database.ca_cert`, `database.certfile`, `database.keyfile`, `database.crlfile`, and `database.keyfile_passphrase` are not used so they can have their default values. **x.509 Certificate Authentication** @@ -67,7 +67,6 @@ To use x.509 certificate authentication, a MongoDB instance must be running some * `database.login` is the user's username. * `database.password` isn't used so the default value (`null`) is fine. -* `database.ssl` must be `true`. * `database.ca_cert`, `database.certfile`, `database.keyfile` and `database.crlfile` are the paths to the CA, signed certificate, private key and certificate revocation list files respectively. * `database.keyfile_passphrase` is the private key decryption passphrase, specified in plaintext. 
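The x.509 settings follow the same pattern; the sketch below mirrors the TLS options used by the MongoDB connection code elsewhere in this patch series. The file paths, passphrase and certificate subject are placeholders only.

```python
# Minimal sketch (placeholder paths and subject) of a pymongo 3.x connection
# using the TLS/x.509 settings described above.
from ssl import CERT_REQUIRED

import pymongo

client = pymongo.MongoClient('localhost', 27017,
                             ssl=True,                              # database.ssl
                             ssl_ca_certs='/etc/bdb/ca.pem',        # database.ca_cert
                             ssl_certfile='/etc/bdb/client.pem',    # database.certfile
                             ssl_keyfile='/etc/bdb/client.key',     # database.keyfile
                             ssl_pem_passphrase='secret',           # database.keyfile_passphrase
                             ssl_crlfile='/etc/bdb/crl.pem',        # database.crlfile
                             ssl_cert_reqs=CERT_REQUIRED)
# With MONGODB-X509, the "login" is the subject of the client certificate.
client['bigchain'].authenticate('CN=bdb-node,O=Example',
                                mechanism='MONGODB-X509')           # database.login
```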
From cf1f253019dd11452e1897e7a221ceace20c7b88 Mon Sep 17 00:00:00 2001 From: Muawia Khan Date: Fri, 31 Aug 2018 16:56:34 +0200 Subject: [PATCH 13/22] Update bigchaindb-driver version -> 0.5.2 in acceptance test (#2503) --- acceptance/python/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/acceptance/python/Dockerfile b/acceptance/python/Dockerfile index a473a56a..af29629a 100644 --- a/acceptance/python/Dockerfile +++ b/acceptance/python/Dockerfile @@ -5,5 +5,5 @@ RUN pip install --upgrade \ pycco \ websocket-client~=0.47.0 \ pytest~=3.0 \ - bigchaindb-driver==0.5.1 \ + bigchaindb-driver==0.5.2 \ blns From 905b1a514161343a642dae1c10af7867b90d1939 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Fri, 31 Aug 2018 20:43:08 +0200 Subject: [PATCH 14/22] Problem: Root docs page about decentralization has two incorrect sentences (#2505) Solution: Delete them --- docs/root/source/decentralized.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/root/source/decentralized.md b/docs/root/source/decentralized.md index fc88cf99..a5258143 100644 --- a/docs/root/source/decentralized.md +++ b/docs/root/source/decentralized.md @@ -12,10 +12,6 @@ Ideally, each node in a BigchainDB network is owned and controlled by a differen We use the phrase "BigchainDB consortium" (or just "consortium") to refer to the set of people and/or organizations who run the nodes of a BigchainDB network. A consortium requires some form of governance to make decisions such as membership and policies. The exact details of the governance process are determined by each consortium, but it can be very decentralized. -If sharding is turned on (i.e. if the number of shards is larger than one), then the actual data is decentralized in that no one node stores all the data. - -Every node has its own locally-stored list of the public keys of other consortium members: the so-called keyring. There's no centrally-stored or centrally-shared keyring. - A consortium can increase its decentralization (and its resilience) by increasing its jurisdictional diversity, geographic diversity, and other kinds of diversity. This idea is expanded upon in [the section on node diversity](diversity.html). There’s no node that has a long-term special position in the BigchainDB network. All nodes run the same software and perform the same duties. From 80b688179734d02f1b846cb3d669f53397ec8eed Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Sun, 2 Sep 2018 00:23:17 +0200 Subject: [PATCH 15/22] Problem: Docs page 'BigchainDB and Smart Contracts' is stale (#2504) Solution: Update that page to reflect our current thinking on how BigchainDB relates to smart contracts --- docs/root/source/permissions.rst | 2 ++ docs/root/source/smart-contracts.rst | 14 +++----------- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/docs/root/source/permissions.rst b/docs/root/source/permissions.rst index 009929fb..33871016 100644 --- a/docs/root/source/permissions.rst +++ b/docs/root/source/permissions.rst @@ -3,6 +3,8 @@ SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0) Code is Apache-2.0 and docs are CC-BY-4.0 +.. _permissions-in-bigchaindb: + Permissions in BigchainDB ------------------------- diff --git a/docs/root/source/smart-contracts.rst b/docs/root/source/smart-contracts.rst index a9cd59f7..86a6a033 100644 --- a/docs/root/source/smart-contracts.rst +++ b/docs/root/source/smart-contracts.rst @@ -8,16 +8,8 @@ BigchainDB and Smart Contracts One can store the source code of any smart contract (i.e. 
a computer program) in BigchainDB, but BigchainDB won't run arbitrary smart contracts. -BigchainDB will run the subset of smart contracts expressible using `Crypto-Conditions `_. +BigchainDB can be used to enforce who has permission to transfer assets, both fungible assets and non-fungible assets. It will prevent double-spending. In other words, a BigchainDB network could be used instead of an ERC-20 (fungible token) or ERC-721 (non-fungible token) smart contract. -The owners of an asset can impose conditions on it that must be met for the asset to be transferred to new owners. Examples of possible conditions (crypto-conditions) include: +Asset transfer permissions can also be interpreted as write permissions, so they can be used to control who can write to a log, journal or audit trail. There is more about that idea in :ref:`the page about permissions in BigchainDB `. -- The current owner must sign the transfer transaction (one which transfers ownership to new owners). -- Three out of five current owners must sign the transfer transaction. -- (Shannon and Kelly) or Morgan must sign the transfer transaction. - -Crypto-conditions can be quite complex. They can't include loops or recursion and therefore will always run/check in finite time. - -.. note:: - - We used the word "owners" somewhat loosely above. A more accurate word might be fulfillers, signers, controllers, or transfer-enablers. See the section titled **A Note about Owners** in the relevant `BigchainDB Transactions Spec `_. \ No newline at end of file +A BigchainDB network can be connected to other blockchain networks, via oracles or inter-chain communications protocols. That means BigchainDB can be used as part of a solution that uses *other* blockchains to run arbitrary smart contracts. 
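The "permission to transfer" model described above can be made concrete with the server-side `Transaction` class exercised by the tests in this series. The sketch below is illustrative only: the keys and asset data are made up, and it simply shows that the output created for Alice can only be moved by a TRANSFER signed with Alice's private key.

```python
# Illustrative sketch (made-up keys and asset) of transfer permissions:
# Alice issues an asset to herself, then transfers it to Bob.
from bigchaindb.common.crypto import generate_key_pair
from bigchaindb.models import Transaction

alice, bob = generate_key_pair(), generate_key_pair()

create_tx = Transaction.create(
    [alice.public_key],                        # issuer, who must sign the CREATE
    [([alice.public_key], 1)],                 # output owned by Alice
    asset={'data': {'serial_number': 'NFT-0001'}},
).sign([alice.private_key])

transfer_tx = Transaction.transfer(
    create_tx.to_inputs(),                     # spend the output of the CREATE
    [([bob.public_key], 1)],                   # new owner: Bob
    asset_id=create_tx.id,
).sign([alice.private_key])                    # only the current owner's signature is valid
```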
From 8e55b11da2bec4b576e85bd9ee56c30b04812c28 Mon Sep 17 00:00:00 2001 From: Muawia Khan Date: Mon, 3 Sep 2018 13:25:58 +0200 Subject: [PATCH 16/22] Problem: cyrptoconditions dependency updated because of vulnerability (#2494) * Problem: cyrptoconditions dependency updated because of vulnerability CVE-2018-10903 * update cc to ~=0.7.2 * Fix test using b58encode * Fixing some more tests failing because of base58 update --- bigchaindb/common/transaction.py | 4 ++-- bigchaindb/upsert_validator/validator_election.py | 2 +- setup.py | 2 +- tests/common/test_transaction.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bigchaindb/common/transaction.py b/bigchaindb/common/transaction.py index d5b3eef8..8baf8196 100644 --- a/bigchaindb/common/transaction.py +++ b/bigchaindb/common/transaction.py @@ -94,7 +94,7 @@ class Input(object): """ try: fulfillment = self.fulfillment.serialize_uri() - except (TypeError, AttributeError, ASN1EncodeError): + except (TypeError, AttributeError, ASN1EncodeError, ASN1DecodeError): fulfillment = _fulfillment_to_details(self.fulfillment) try: @@ -161,7 +161,7 @@ def _fulfillment_to_details(fulfillment): if fulfillment.type_name == 'ed25519-sha-256': return { 'type': 'ed25519-sha-256', - 'public_key': base58.b58encode(fulfillment.public_key), + 'public_key': base58.b58encode(fulfillment.public_key).decode(), } if fulfillment.type_name == 'threshold-sha-256': diff --git a/bigchaindb/upsert_validator/validator_election.py b/bigchaindb/upsert_validator/validator_election.py index 7d9a6fbc..95162442 100644 --- a/bigchaindb/upsert_validator/validator_election.py +++ b/bigchaindb/upsert_validator/validator_election.py @@ -172,7 +172,7 @@ class ValidatorElection(Transaction): @classmethod def to_public_key(cls, election_id): - return base58.b58encode(bytes.fromhex(election_id)) + return base58.b58encode(bytes.fromhex(election_id)).decode() @classmethod def count_votes(cls, election_pk, transactions, getter=getattr): diff --git a/setup.py b/setup.py index dcecca51..071ddd45 100644 --- a/setup.py +++ b/setup.py @@ -79,7 +79,7 @@ install_requires = [ # TODO Consider not installing the db drivers, or putting them in extras. 'pymongo~=3.6', 'pysha3~=1.0.2', - 'cryptoconditions~=0.6.0.dev', + 'cryptoconditions~=0.7.2', 'python-rapidjson~=0.6.0', 'logstats~=0.2.1', 'flask>=0.10.1', diff --git a/tests/common/test_transaction.py b/tests/common/test_transaction.py index 7b8dde99..4a1586ca 100644 --- a/tests/common/test_transaction.py +++ b/tests/common/test_transaction.py @@ -93,7 +93,7 @@ def test_output_serialization(user_Ed25519, user_pub): 'uri': user_Ed25519.condition_uri, 'details': { 'type': 'ed25519-sha-256', - 'public_key': b58encode(user_Ed25519.public_key), + 'public_key': b58encode(user_Ed25519.public_key).decode(), }, }, 'public_keys': [user_pub], @@ -114,7 +114,7 @@ def test_output_deserialization(user_Ed25519, user_pub): 'uri': user_Ed25519.condition_uri, 'details': { 'type': 'ed25519-sha-256', - 'public_key': b58encode(user_Ed25519.public_key), + 'public_key': b58encode(user_Ed25519.public_key).decode(), }, }, 'public_keys': [user_pub], From fe0a4c494bee3170ec54eb4deab45bbe07c08a9e Mon Sep 17 00:00:00 2001 From: Lev Berman Date: Mon, 3 Sep 2018 15:09:16 +0200 Subject: [PATCH 17/22] Problem: No MongoDB support for election ID index. (#2507) Solution: Election ID has to be unique but not every validator set record has it. MongoDB does not support partial indexes, does not even allow for multiple Nones. 
This is a temporary fix since we are introducing an `election` collection to store election IDs in #2498. --- bigchaindb/backend/localmongodb/schema.py | 3 --- tests/backend/localmongodb/test_schema.py | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/bigchaindb/backend/localmongodb/schema.py b/bigchaindb/backend/localmongodb/schema.py index 1ad88928..0607c883 100644 --- a/bigchaindb/backend/localmongodb/schema.py +++ b/bigchaindb/backend/localmongodb/schema.py @@ -133,6 +133,3 @@ def create_validators_secondary_index(conn, dbname): conn.conn[dbname]['validators'].create_index('height', name='height', unique=True,) - conn.conn[dbname]['validators'].create_index('election_id', - name='election_id', - unique=True,) diff --git a/tests/backend/localmongodb/test_schema.py b/tests/backend/localmongodb/test_schema.py index 2481cd60..136715ab 100644 --- a/tests/backend/localmongodb/test_schema.py +++ b/tests/backend/localmongodb/test_schema.py @@ -44,7 +44,7 @@ def test_init_creates_db_tables_and_indexes(): assert set(indexes) == {'_id_', 'pre_commit_id'} indexes = conn.conn[dbname]['validators'].index_information().keys() - assert set(indexes) == {'_id_', 'height', 'election_id'} + assert set(indexes) == {'_id_', 'height'} def test_init_database_fails_if_db_exists(): From 230a5b2d692d62b5a3c64588a9e6517abad623a8 Mon Sep 17 00:00:00 2001 From: Lev Berman Date: Mon, 3 Sep 2018 15:41:49 +0200 Subject: [PATCH 18/22] ABCI chain migration conclusion (#2488) * Problem: No good way to check for val set absence. Solution: Make get_validator_set/get_validators return None/[] when there are no validators yet. * Problem: Incompatible ABCI chain upgrades. Solution: Record known chains and sync through InitChain. Triggering the migration and adjusting other ABCI endpoints will follow. 
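Two pieces of bookkeeping in this patch are easy to lose track of, so here is a small standalone restatement (the helper function below is illustrative, not part of the code base): first, the chain-ID naming rule applied by `migrate_abci_chain` in `bigchaindb/lib.py`; second, the height shift applied in `bigchaindb/core.py`, where Tendermint restarts counting from 1 after a migration while BigchainDB keeps its own block heights.

```python
# Illustrative restatement of the migration bookkeeping in this patch.

def migrated_chain_id(current_chain_id, latest_block_height):
    # chain-X                      -> chain-X-migrated-at-height-5
    # chain-X-migrated-at-height-5 -> chain-X-migrated-at-height-21
    suffix = '-migrated-at-height-'
    base = current_chain_id.split(suffix)[0]
    return base + suffix + str(latest_block_height)

assert migrated_chain_id('chain-X', 5) == 'chain-X-migrated-at-height-5'
assert migrated_chain_id('chain-X-migrated-at-height-5', 21) == 'chain-X-migrated-at-height-21'

# Height shift: with the latest ABCI chain recorded at height 2, Tendermint's
# RequestEndBlock(height=1) is stored as BigchainDB block height 3, and info()
# reports that block back to Tendermint as height 1.
chain_shift = 2
request_end_block_height = 1                                   # what Tendermint sends
stored_block_height = request_end_block_height + chain_shift   # what BigchainDB stores
reported_back = stored_block_height - chain_shift              # what info() returns
assert (stored_block_height, reported_back) == (3, 1)
```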
--- bigchaindb/backend/localmongodb/query.py | 22 +- bigchaindb/backend/localmongodb/schema.py | 14 ++ bigchaindb/backend/query.py | 20 ++ bigchaindb/backend/schema.py | 2 +- bigchaindb/core.py | 83 +++++++- bigchaindb/lib.py | 34 ++- tests/backend/localmongodb/test_queries.py | 51 +++++ tests/backend/localmongodb/test_schema.py | 8 +- tests/tendermint/conftest.py | 3 +- tests/tendermint/test_core.py | 234 ++++++++++++++++++++- tests/tendermint/test_lib.py | 33 +++ tests/tendermint/test_utils.py | 1 + tests/utils.py | 10 +- 13 files changed, 492 insertions(+), 23 deletions(-) diff --git a/bigchaindb/backend/localmongodb/query.py b/bigchaindb/backend/localmongodb/query.py index 29993b30..6587d494 100644 --- a/bigchaindb/backend/localmongodb/query.py +++ b/bigchaindb/backend/localmongodb/query.py @@ -296,7 +296,7 @@ def get_validator_set(conn, height=None): .limit(1) ) - return list(cursor)[0] + return next(cursor, None) @register_query(LocalMongoDBConnection) @@ -322,3 +322,23 @@ def get_asset_tokens_for_public_key(conn, asset_id, public_key): {'$project': {'_id': False}} ])) return cursor + + +@register_query(LocalMongoDBConnection) +def store_abci_chain(conn, height, chain_id, is_synced=True): + return conn.run( + conn.collection('abci_chains').replace_one( + {'height': height}, + {'height': height, 'chain_id': chain_id, + 'is_synced': is_synced}, + upsert=True, + ) + ) + + +@register_query(LocalMongoDBConnection) +def get_latest_abci_chain(conn): + return conn.run( + conn.collection('abci_chains') + .find_one(projection={'_id': False}, sort=[('height', DESCENDING)]) + ) diff --git a/bigchaindb/backend/localmongodb/schema.py b/bigchaindb/backend/localmongodb/schema.py index 0607c883..e58f1def 100644 --- a/bigchaindb/backend/localmongodb/schema.py +++ b/bigchaindb/backend/localmongodb/schema.py @@ -47,6 +47,7 @@ def create_indexes(conn, dbname): create_utxos_secondary_index(conn, dbname) create_pre_commit_secondary_index(conn, dbname) create_validators_secondary_index(conn, dbname) + create_abci_chains_indexes(conn, dbname) @register_schema(LocalMongoDBConnection) @@ -133,3 +134,16 @@ def create_validators_secondary_index(conn, dbname): conn.conn[dbname]['validators'].create_index('height', name='height', unique=True,) + + +def create_abci_chains_indexes(conn, dbname): + logger.info('Create `abci_chains.height` secondary index.') + + conn.conn[dbname]['abci_chains'].create_index('height', + name='height', + unique=True,) + + logger.info('Create `abci_chains.chain_id` secondary index.') + conn.conn[dbname]['abci_chains'].create_index('chain_id', + name='chain_id', + unique=True) diff --git a/bigchaindb/backend/query.py b/bigchaindb/backend/query.py index 4d62c633..7965d5ee 100644 --- a/bigchaindb/backend/query.py +++ b/bigchaindb/backend/query.py @@ -380,3 +380,23 @@ def get_asset_tokens_for_public_key(connection, asset_id, Iterator of transaction that list given owner in conditions. """ raise NotImplementedError + + +@singledispatch +def store_abci_chain(conn, height, chain_id, is_synced=True): + """Create or update an ABCI chain at the given height. + Usually invoked in the beginning of the ABCI communications (height=0) + or when ABCI client (like Tendermint) is migrated (any height). + + Args: + is_synced: True if the chain is known by both ABCI client and server + """ + raise NotImplementedError + + +@singledispatch +def get_latest_abci_chain(conn): + """Returns the ABCI chain stored at the biggest height, if any, + None otherwise. 
+ """ + raise NotImplementedError diff --git a/bigchaindb/backend/schema.py b/bigchaindb/backend/schema.py index 04c1bdd0..108bd39b 100644 --- a/bigchaindb/backend/schema.py +++ b/bigchaindb/backend/schema.py @@ -16,7 +16,7 @@ logger = logging.getLogger(__name__) # Tables/collections that every backend database must create TABLES = ('transactions', 'blocks', 'assets', 'metadata', - 'validators', 'pre_commit', 'utxos') + 'validators', 'pre_commit', 'utxos', 'abci_chains') VALID_LANGUAGES = ('danish', 'dutch', 'english', 'finnish', 'french', 'german', 'hungarian', 'italian', 'norwegian', 'portuguese', 'romanian', diff --git a/bigchaindb/core.py b/bigchaindb/core.py index 621960fa..a0983472 100644 --- a/bigchaindb/core.py +++ b/bigchaindb/core.py @@ -6,6 +6,7 @@ with Tendermint. """ import logging +import sys from abci.application import BaseApplication from abci.types_pb2 import ( @@ -47,22 +48,76 @@ class App(BaseApplication): self.block_transactions = [] self.validators = None self.new_height = None + self.chain = self.bigchaindb.get_latest_abci_chain() + + def log_abci_migration_error(self, chain_id, validators): + logger.error(f'An ABCI chain migration is in process. ' + + 'Download the new ABCI client and configure it with ' + + 'chain_id={chain_id} and validators={validators}.') + + def abort_if_abci_chain_is_not_synced(self): + if self.chain is None or self.chain['is_synced']: + return + + validators = self.bigchaindb.get_validators() + self.log_abci_migration_error(self.chain['chain_id'], validators) + sys.exit(1) def init_chain(self, genesis): - """Initialize chain with block of height 0""" + """Initialize chain upon genesis or a migration""" - validator_set = [vutils.decode_validator(v) for v in genesis.validators] - block = Block(app_hash='', height=0, transactions=[]) + app_hash = '' + height = 0 + + known_chain = self.bigchaindb.get_latest_abci_chain() + if known_chain is not None: + chain_id = known_chain['chain_id'] + + if known_chain['is_synced']: + msg = f'Got invalid InitChain ABCI request ({genesis}) - ' + \ + 'the chain {chain_id} is already synced.' 
+ logger.error(msg) + sys.exit(1) + + if chain_id != genesis.chain_id: + validators = self.bigchaindb.get_validators() + self.log_abci_migration_error(chain_id, validators) + sys.exit(1) + + # set migration values for app hash and height + block = self.bigchaindb.get_latest_block() + app_hash = '' if block is None else block['app_hash'] + height = 0 if block is None else block['height'] + 1 + + known_validators = self.bigchaindb.get_validators() + validator_set = [vutils.decode_validator(v) + for v in genesis.validators] + + if known_validators and known_validators != validator_set: + self.log_abci_migration_error(known_chain['chain_id'], + known_validators) + sys.exit(1) + + block = Block(app_hash=app_hash, height=height, transactions=[]) self.bigchaindb.store_block(block._asdict()) - self.bigchaindb.store_validator_set(1, validator_set, None) + self.bigchaindb.store_validator_set(height + 1, validator_set, None) + abci_chain_height = 0 if known_chain is None else known_chain['height'] + self.bigchaindb.store_abci_chain(abci_chain_height, + genesis.chain_id, True) + self.chain = {'height': abci_chain_height, 'is_synced': True, + 'chain_id': genesis.chain_id} return ResponseInitChain() def info(self, request): """Return height of the latest committed block.""" + + self.abort_if_abci_chain_is_not_synced() + r = ResponseInfo() block = self.bigchaindb.get_latest_block() if block: - r.last_block_height = block['height'] + chain_shift = 0 if self.chain is None else self.chain['height'] + r.last_block_height = block['height'] - chain_shift r.last_block_app_hash = block['app_hash'].encode('utf-8') else: r.last_block_height = 0 @@ -77,6 +132,8 @@ class App(BaseApplication): raw_tx: a raw string (in bytes) transaction. """ + self.abort_if_abci_chain_is_not_synced() + logger.benchmark('CHECK_TX_INIT') logger.debug('check_tx: %s', raw_transaction) transaction = decode_transaction(raw_transaction) @@ -95,8 +152,11 @@ class App(BaseApplication): req_begin_block: block object which contains block header and block hash. """ + self.abort_if_abci_chain_is_not_synced() + + chain_shift = 0 if self.chain is None else self.chain['height'] logger.benchmark('BEGIN BLOCK, height:%s, num_txs:%s', - req_begin_block.header.height, + req_begin_block.header.height + chain_shift, req_begin_block.header.num_txs) self.block_txn_ids = [] @@ -109,6 +169,9 @@ class App(BaseApplication): Args: raw_tx: a raw string (in bytes) transaction. """ + + self.abort_if_abci_chain_is_not_synced() + logger.debug('deliver_tx: %s', raw_transaction) transaction = self.bigchaindb.is_valid_transaction( decode_transaction(raw_transaction), self.block_transactions) @@ -130,7 +193,11 @@ class App(BaseApplication): height (int): new height of the chain. 
""" - height = request_end_block.height + self.abort_if_abci_chain_is_not_synced() + + chain_shift = 0 if self.chain is None else self.chain['height'] + + height = request_end_block.height + chain_shift self.new_height = height block_txn_hash = calculate_hash(self.block_txn_ids) block = self.bigchaindb.get_latest_block() @@ -158,6 +225,8 @@ class App(BaseApplication): def commit(self): """Store the new height and along with block hash.""" + self.abort_if_abci_chain_is_not_synced() + data = self.block_txn_hash.encode('utf-8') # register a new block only when new transactions are received diff --git a/bigchaindb/lib.py b/bigchaindb/lib.py index 05ca3e69..fa72a506 100644 --- a/bigchaindb/lib.py +++ b/bigchaindb/lib.py @@ -426,8 +426,7 @@ class BigchainDB(object): def get_validators(self, height=None): result = self.get_validator_change(height) - validators = result['validators'] - return validators + return [] if result is None else result['validators'] def get_validators_by_election_id(self, election_id): result = backend.query.get_validator_set_by_election_id(self.connection, election_id) @@ -448,6 +447,37 @@ class BigchainDB(object): 'validators': validators, 'election_id': election_id}) + def store_abci_chain(self, height, chain_id, is_synced=True): + return backend.query.store_abci_chain(self.connection, height, + chain_id, is_synced) + + def get_latest_abci_chain(self): + return backend.query.get_latest_abci_chain(self.connection) + + def migrate_abci_chain(self): + """Generate and record a new ABCI chain ID. New blocks are not + accepted until we receive an InitChain ABCI request with + the matching chain ID and validator set. + + Chain ID is generated based on the current chain and height. + `chain-X` => `chain-X-migrated-at-height-5`. + `chain-X-migrated-at-height-5` => `chain-X-migrated-at-height-21`. + + If there is no known chain (we are at genesis), the function returns. 
+ """ + latest_chain = self.get_latest_abci_chain() + if latest_chain is None: + return + + block = self.get_latest_block() + + suffix = '-migrated-at-height-' + chain_id = latest_chain['chain_id'] + block_height_str = str(block['height']) + new_chain_id = chain_id.split(suffix)[0] + suffix + block_height_str + + self.store_abci_chain(block['height'] + 1, new_chain_id, False) + Block = namedtuple('Block', ('app_hash', 'height', 'transactions')) diff --git a/tests/backend/localmongodb/test_queries.py b/tests/backend/localmongodb/test_queries.py index 77bfddd0..9c38d822 100644 --- a/tests/backend/localmongodb/test_queries.py +++ b/tests/backend/localmongodb/test_queries.py @@ -7,6 +7,9 @@ from copy import deepcopy import pytest import pymongo +from bigchaindb.backend import connect, query + + pytestmark = [pytest.mark.tendermint, pytest.mark.bdb] @@ -394,3 +397,51 @@ def test_validator_update(): v91 = query.get_validator_set(conn) assert v91['height'] == 91 + + +@pytest.mark.parametrize('description,stores,expected', [ + ( + 'Query empty database.', + [], + None, + ), + ( + 'Store one chain with the default value for `is_synced`.', + [ + {'height': 0, 'chain_id': 'some-id'}, + ], + {'height': 0, 'chain_id': 'some-id', 'is_synced': True}, + ), + ( + 'Store one chain with a custom value for `is_synced`.', + [ + {'height': 0, 'chain_id': 'some-id', 'is_synced': False}, + ], + {'height': 0, 'chain_id': 'some-id', 'is_synced': False}, + ), + ( + 'Store one chain, then update it.', + [ + {'height': 0, 'chain_id': 'some-id', 'is_synced': True}, + {'height': 0, 'chain_id': 'new-id', 'is_synced': False}, + ], + {'height': 0, 'chain_id': 'new-id', 'is_synced': False}, + ), + ( + 'Store a chain, update it, store another chain.', + [ + {'height': 0, 'chain_id': 'some-id', 'is_synced': True}, + {'height': 0, 'chain_id': 'some-id', 'is_synced': False}, + {'height': 10, 'chain_id': 'another-id', 'is_synced': True}, + ], + {'height': 10, 'chain_id': 'another-id', 'is_synced': True}, + ), +]) +def test_store_abci_chain(description, stores, expected): + conn = connect() + + for store in stores: + query.store_abci_chain(conn, **store) + + actual = query.get_latest_abci_chain(conn) + assert expected == actual, description diff --git a/tests/backend/localmongodb/test_schema.py b/tests/backend/localmongodb/test_schema.py index 136715ab..aeadbe05 100644 --- a/tests/backend/localmongodb/test_schema.py +++ b/tests/backend/localmongodb/test_schema.py @@ -24,7 +24,7 @@ def test_init_creates_db_tables_and_indexes(): collection_names = conn.conn[dbname].collection_names() assert set(collection_names) == { 'transactions', 'assets', 'metadata', 'blocks', 'utxos', 'pre_commit', - 'validators' + 'validators', 'abci_chains', } indexes = conn.conn[dbname]['assets'].index_information().keys() @@ -46,6 +46,9 @@ def test_init_creates_db_tables_and_indexes(): indexes = conn.conn[dbname]['validators'].index_information().keys() assert set(indexes) == {'_id_', 'height'} + indexes = conn.conn[dbname]['abci_chains'].index_information().keys() + assert set(indexes) == {'_id_', 'height', 'chain_id'} + def test_init_database_fails_if_db_exists(): import bigchaindb @@ -79,7 +82,8 @@ def test_create_tables(): collection_names = conn.conn[dbname].collection_names() assert set(collection_names) == { 'transactions', 'assets', 'metadata', 'blocks', 'utxos', 'validators', - 'pre_commit'} + 'pre_commit', 'abci_chains', + } def test_create_secondary_indexes(): diff --git a/tests/tendermint/conftest.py b/tests/tendermint/conftest.py index 
8efa8e76..fccd2ade 100644 --- a/tests/tendermint/conftest.py +++ b/tests/tendermint/conftest.py @@ -16,7 +16,8 @@ def validator_pub_key(): @pytest.fixture def init_chain_request(): addr = codecs.decode(b'9FD479C869C7D7E7605BF99293457AA5D80C3033', 'hex') - pk = codecs.decode(b'VAgFZtYw8bNR5TMZHFOBDWk9cAmEu3/c6JgRBmddbbI=', 'base64') + pk = codecs.decode(b'VAgFZtYw8bNR5TMZHFOBDWk9cAmEu3/c6JgRBmddbbI=', + 'base64') val_a = types.Validator(address=addr, power=10, pub_key=types.PubKey(type='ed25519', data=pk)) diff --git a/tests/tendermint/test_core.py b/tests/tendermint/test_core.py index 40958aa2..64ee2887 100644 --- a/tests/tendermint/test_core.py +++ b/tests/tendermint/test_core.py @@ -2,17 +2,28 @@ # SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0) # Code is Apache-2.0 and docs are CC-BY-4.0 +import codecs import json import pytest +import random from abci.types_pb2 import ( + PubKey, + ResponseInitChain, + RequestInitChain, + RequestInfo, RequestBeginBlock, - RequestEndBlock + RequestEndBlock, + Validator, ) +from bigchaindb import App +from bigchaindb.backend.localmongodb import query +from bigchaindb.common.crypto import generate_key_pair from bigchaindb.core import (CodeTypeOk, CodeTypeError, ) +from bigchaindb.lib import Block from bigchaindb.upsert_validator.validator_utils import new_validator_set from bigchaindb.tendermint_utils import public_key_to_base64 @@ -24,6 +35,173 @@ def encode_tx_to_bytes(transaction): return json.dumps(transaction.to_dict()).encode('utf8') +def generate_address(): + return ''.join(random.choices('1,2,3,4,5,6,7,8,9,A,B,C,D,E,F'.split(','), + k=40)).encode() + + +def generate_validator(): + addr = codecs.decode(generate_address(), 'hex') + pk, _ = generate_key_pair() + pub_key = PubKey(type='ed25519', data=pk.encode()) + val = Validator(address=addr, power=10, pub_key=pub_key) + return val + + +def generate_init_chain_request(chain_id, vals=None): + vals = vals if vals is not None else [generate_validator()] + return RequestInitChain(validators=vals, chain_id=chain_id) + + +def test_init_chain_successfully_registers_chain(b): + request = generate_init_chain_request('chain-XYZ') + res = App(b).init_chain(request) + assert res == ResponseInitChain() + chain = query.get_latest_abci_chain(b.connection) + assert chain == {'height': 0, 'chain_id': 'chain-XYZ', 'is_synced': True} + assert query.get_latest_block(b.connection) == { + 'height': 0, + 'app_hash': '', + 'transactions': [], + } + + +def test_init_chain_ignores_invalid_init_chain_requests(b): + validators = [generate_validator()] + request = generate_init_chain_request('chain-XYZ', validators) + res = App(b).init_chain(request) + assert res == ResponseInitChain() + + validator_set = query.get_validator_set(b.connection) + + invalid_requests = [ + request, # the same request again + # different validator set + generate_init_chain_request('chain-XYZ'), + # different chain ID + generate_init_chain_request('chain-ABC', validators), + ] + for r in invalid_requests: + with pytest.raises(SystemExit): + App(b).init_chain(r) + # assert nothing changed - neither validator set, nor chain ID + new_validator_set = query.get_validator_set(b.connection) + assert new_validator_set == validator_set + new_chain_id = query.get_latest_abci_chain(b.connection)['chain_id'] + assert new_chain_id == 'chain-XYZ' + assert query.get_latest_block(b.connection) == { + 'height': 0, + 'app_hash': '', + 'transactions': [], + } + + +def test_init_chain_recognizes_new_chain_after_migration(b): + validators = [generate_validator()] + 
request = generate_init_chain_request('chain-XYZ', validators) + res = App(b).init_chain(request) + assert res == ResponseInitChain() + + validator_set = query.get_validator_set(b.connection)['validators'] + + # simulate a migration + query.store_block(b.connection, Block(app_hash='', height=1, + transactions=[])._asdict()) + b.migrate_abci_chain() + + # the same or other mismatching requests are ignored + invalid_requests = [ + request, + generate_init_chain_request('unknown', validators), + generate_init_chain_request('chain-XYZ'), + generate_init_chain_request('chain-XYZ-migrated-at-height-1'), + ] + for r in invalid_requests: + with pytest.raises(SystemExit): + App(b).init_chain(r) + assert query.get_latest_abci_chain(b.connection) == { + 'chain_id': 'chain-XYZ-migrated-at-height-1', + 'is_synced': False, + 'height': 2, + } + new_validator_set = query.get_validator_set(b.connection)['validators'] + assert new_validator_set == validator_set + + # a request with the matching chain ID and matching validator set + # completes the migration + request = generate_init_chain_request('chain-XYZ-migrated-at-height-1', + validators) + res = App(b).init_chain(request) + assert res == ResponseInitChain() + assert query.get_latest_abci_chain(b.connection) == { + 'chain_id': 'chain-XYZ-migrated-at-height-1', + 'is_synced': True, + 'height': 2, + } + assert query.get_latest_block(b.connection) == { + 'height': 2, + 'app_hash': '', + 'transactions': [], + } + + # requests with old chain ID and other requests are ignored + invalid_requests = [ + request, + generate_init_chain_request('chain-XYZ', validators), + generate_init_chain_request('chain-XYZ-migrated-at-height-1'), + ] + for r in invalid_requests: + with pytest.raises(SystemExit): + App(b).init_chain(r) + assert query.get_latest_abci_chain(b.connection) == { + 'chain_id': 'chain-XYZ-migrated-at-height-1', + 'is_synced': True, + 'height': 2, + } + new_validator_set = query.get_validator_set(b.connection)['validators'] + assert new_validator_set == validator_set + assert query.get_latest_block(b.connection) == { + 'height': 2, + 'app_hash': '', + 'transactions': [], + } + + +def test_info(b): + r = RequestInfo() + app = App(b) + + res = app.info(r) + assert res.last_block_height == 0 + assert res.last_block_app_hash == b'' + + b.store_block(Block(app_hash='1', height=1, transactions=[])._asdict()) + res = app.info(r) + assert res.last_block_height == 1 + assert res.last_block_app_hash == b'1' + + # simulate a migration and assert the height is shifted + b.store_abci_chain(2, 'chain-XYZ') + app = App(b) + b.store_block(Block(app_hash='2', height=2, transactions=[])._asdict()) + res = app.info(r) + assert res.last_block_height == 0 + assert res.last_block_app_hash == b'2' + + b.store_block(Block(app_hash='3', height=3, transactions=[])._asdict()) + res = app.info(r) + assert res.last_block_height == 1 + assert res.last_block_app_hash == b'3' + + # it's always the latest migration that is taken into account + b.store_abci_chain(4, 'chain-XYZ-new') + app = App(b) + b.store_block(Block(app_hash='4', height=4, transactions=[])._asdict()) + res = app.info(r) + assert res.last_block_height == 0 + assert res.last_block_app_hash == b'4' + + def test_check_tx__signed_create_is_ok(b): from bigchaindb import App from bigchaindb.models import Transaction @@ -57,7 +235,6 @@ def test_check_tx__unsigned_create_is_error(b): assert result.code == CodeTypeError -@pytest.mark.bdb def test_deliver_tx__valid_create_updates_db(b, init_chain_request): from bigchaindb 
import App from bigchaindb.models import Transaction @@ -225,6 +402,17 @@ def test_store_pre_commit_state_in_end_block(b, alice, init_chain_request): assert resp['height'] == 100 assert resp['transactions'] == [tx.id] + # simulate a chain migration and assert the height is shifted + b.store_abci_chain(100, 'new-chain') + app = App(b) + app.begin_block(begin_block) + app.deliver_tx(encode_tx_to_bytes(tx)) + app.end_block(RequestEndBlock(height=1)) + resp = query.get_pre_commit_state(b.connection, PRE_COMMIT_ID) + assert resp['commit_id'] == PRE_COMMIT_ID + assert resp['height'] == 101 + assert resp['transactions'] == [tx.id] + def test_new_validator_set(b): node1 = {'pub_key': {'type': 'ed25519', @@ -247,3 +435,45 @@ def test_new_validator_set(b): 'voting_power': u['power']}) assert updated_validator_set == updated_validators + + +def test_info_aborts_if_chain_is_not_synced(b): + b.store_abci_chain(0, 'chain-XYZ', False) + + with pytest.raises(SystemExit): + App(b).info(RequestInfo()) + + +def test_check_tx_aborts_if_chain_is_not_synced(b): + b.store_abci_chain(0, 'chain-XYZ', False) + + with pytest.raises(SystemExit): + App(b).check_tx('some bytes') + + +def test_begin_aborts_if_chain_is_not_synced(b): + b.store_abci_chain(0, 'chain-XYZ', False) + + with pytest.raises(SystemExit): + App(b).info(RequestBeginBlock()) + + +def test_deliver_tx_aborts_if_chain_is_not_synced(b): + b.store_abci_chain(0, 'chain-XYZ', False) + + with pytest.raises(SystemExit): + App(b).deliver_tx('some bytes') + + +def test_end_block_aborts_if_chain_is_not_synced(b): + b.store_abci_chain(0, 'chain-XYZ', False) + + with pytest.raises(SystemExit): + App(b).info(RequestEndBlock()) + + +def test_commit_aborts_if_chain_is_not_synced(b): + b.store_abci_chain(0, 'chain-XYZ', False) + + with pytest.raises(SystemExit): + App(b).commit() diff --git a/tests/tendermint/test_lib.py b/tests/tendermint/test_lib.py index 4e8ff6b3..3ea91329 100644 --- a/tests/tendermint/test_lib.py +++ b/tests/tendermint/test_lib.py @@ -15,6 +15,7 @@ import pytest from pymongo import MongoClient from bigchaindb import backend +from bigchaindb.lib import Block pytestmark = pytest.mark.tendermint @@ -441,3 +442,35 @@ def test_validation_with_transaction_buffer(b): assert not b.is_valid_transaction(create_tx, [create_tx]) assert not b.is_valid_transaction(transfer_tx, [create_tx, transfer_tx]) assert not b.is_valid_transaction(double_spend, [create_tx, transfer_tx]) + + +@pytest.mark.bdb +def test_migrate_abci_chain_yields_on_genesis(b): + b.migrate_abci_chain() + latest_chain = b.get_latest_abci_chain() + assert latest_chain is None + + +@pytest.mark.bdb +@pytest.mark.parametrize('chain,block_height,expected', [ + ( + (1, 'chain-XYZ', True), + 4, + {'height': 5, 'chain_id': 'chain-XYZ-migrated-at-height-4', + 'is_synced': False}, + ), + ( + (5, 'chain-XYZ-migrated-at-height-4', True), + 13, + {'height': 14, 'chain_id': 'chain-XYZ-migrated-at-height-13', + 'is_synced': False}, + ), +]) +def test_migrate_abci_chain_generates_new_chains(b, chain, block_height, + expected): + b.store_abci_chain(*chain) + b.store_block(Block(app_hash='', height=block_height, + transactions=[])._asdict()) + b.migrate_abci_chain() + latest_chain = b.get_latest_abci_chain() + assert latest_chain == expected diff --git a/tests/tendermint/test_utils.py b/tests/tendermint/test_utils.py index ae34104f..46b36525 100644 --- a/tests/tendermint/test_utils.py +++ b/tests/tendermint/test_utils.py @@ -12,6 +12,7 @@ except ImportError: import pytest + pytestmark = pytest.mark.tendermint 
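For reference, the migration tests above pin down the chain-ID naming rule implemented by `migrate_abci_chain` in PATCH 18. The standalone sketch below restates that rule outside the codebase; `SUFFIX` and `derive_migrated_chain_id` are illustrative names only, and the real method additionally stores the new chain record at block height + 1 with `is_synced=False`.

    # Standalone sketch of the chain-ID naming rule exercised by the tests above.
    # Names are illustrative, not BigchainDB API.
    SUFFIX = '-migrated-at-height-'


    def derive_migrated_chain_id(chain_id, block_height):
        """Strip any earlier migration suffix, then append the current height."""
        return chain_id.split(SUFFIX)[0] + SUFFIX + str(block_height)


    assert derive_migrated_chain_id('chain-XYZ', 4) == \
        'chain-XYZ-migrated-at-height-4'
    assert derive_migrated_chain_id('chain-XYZ-migrated-at-height-4', 13) == \
        'chain-XYZ-migrated-at-height-13'

The asserts mirror the parametrized expectations in test_migrate_abci_chain_generates_new_chains above: repeated migrations replace the previous suffix rather than stacking new ones.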
diff --git a/tests/utils.py b/tests/utils.py index 349f7e5a..87c07a79 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -5,6 +5,7 @@ from functools import singledispatch from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection +from bigchaindb.backend.schema import TABLES @singledispatch @@ -14,13 +15,8 @@ def flush_db(connection, dbname): @flush_db.register(LocalMongoDBConnection) def flush_localmongo_db(connection, dbname): - connection.conn[dbname].bigchain.delete_many({}) - connection.conn[dbname].blocks.delete_many({}) - connection.conn[dbname].transactions.delete_many({}) - connection.conn[dbname].assets.delete_many({}) - connection.conn[dbname].metadata.delete_many({}) - connection.conn[dbname].utxos.delete_many({}) - connection.conn[dbname].validators.delete_many({}) + for t in TABLES: + getattr(connection.conn[dbname], t).delete_many({}) def generate_block(bigchain): From f8bb29535ae4e1b875360e44ebaae27488e3c433 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Tue, 4 Sep 2018 12:17:27 +0200 Subject: [PATCH 19/22] Problem: Code Reference docs hidden in appendices (#2473) Solution: Move the Code Reference docs into their own section --- docs/server/source/appendices/index.rst | 3 --- .../backend.rst | 0 .../commands.rst | 0 docs/server/source/code-reference/index.rst | 24 +++++++++++++++++++ .../the-bigchaindb-class.rst | 0 docs/server/source/index.rst | 1 + 6 files changed, 25 insertions(+), 3 deletions(-) rename docs/server/source/{appendices => code-reference}/backend.rst (100%) rename docs/server/source/{appendices => code-reference}/commands.rst (100%) create mode 100644 docs/server/source/code-reference/index.rst rename docs/server/source/{appendices => code-reference}/the-bigchaindb-class.rst (100%) diff --git a/docs/server/source/appendices/index.rst b/docs/server/source/appendices/index.rst index ea02aed6..24276f2d 100755 --- a/docs/server/source/appendices/index.rst +++ b/docs/server/source/appendices/index.rst @@ -11,9 +11,6 @@ Appendices json-serialization cryptography - the-bigchaindb-class - backend - commands aws-setup generate-key-pair-for-ssh firewall-notes diff --git a/docs/server/source/appendices/backend.rst b/docs/server/source/code-reference/backend.rst similarity index 100% rename from docs/server/source/appendices/backend.rst rename to docs/server/source/code-reference/backend.rst diff --git a/docs/server/source/appendices/commands.rst b/docs/server/source/code-reference/commands.rst similarity index 100% rename from docs/server/source/appendices/commands.rst rename to docs/server/source/code-reference/commands.rst diff --git a/docs/server/source/code-reference/index.rst b/docs/server/source/code-reference/index.rst new file mode 100644 index 00000000..11d42b66 --- /dev/null +++ b/docs/server/source/code-reference/index.rst @@ -0,0 +1,24 @@ + +.. Copyright BigchainDB GmbH and BigchainDB contributors + SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0) + Code is Apache-2.0 and docs are CC-BY-4.0 + +Code Reference +============== + +This section contains auto-generated documentation of various functions, classes and methods +in the BigchainDB Server code, based on Python docstrings in the code itself. + +.. warning:: + + While we try to keep docstrings accurate, + if you want to know *for sure* what the code does, + then you have to read the code itself. + +.. 
toctree:: + :maxdepth: 1 + + the-bigchaindb-class + backend + commands + \ No newline at end of file diff --git a/docs/server/source/appendices/the-bigchaindb-class.rst b/docs/server/source/code-reference/the-bigchaindb-class.rst similarity index 100% rename from docs/server/source/appendices/the-bigchaindb-class.rst rename to docs/server/source/code-reference/the-bigchaindb-class.rst diff --git a/docs/server/source/index.rst b/docs/server/source/index.rst index ced2a433..534cc6fd 100644 --- a/docs/server/source/index.rst +++ b/docs/server/source/index.rst @@ -23,4 +23,5 @@ BigchainDB Server Documentation data-models/index k8s-deployment-template/index release-notes + code-reference/index appendices/index From c72c7a46263607b8e15912be889e2b48f7fd8599 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Tue, 4 Sep 2018 13:31:00 +0200 Subject: [PATCH 20/22] Problem: tests/README.md either wrong or redundant (#2479) The information on that page was either wrong or redundant, so I either deleted it or linked to where to find the same information in the docs about contributing to BigchainDB. --- tests/README.md | 104 ++++-------------------------------------------- 1 file changed, 7 insertions(+), 97 deletions(-) diff --git a/tests/README.md b/tests/README.md index 6ae9be24..fe63b32d 100644 --- a/tests/README.md +++ b/tests/README.md @@ -4,103 +4,13 @@ SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0) Code is Apache-2.0 and docs are CC-BY-4.0 ---> -# BigchainDB Server Tests +# BigchainDB Server Unit Tests -## The tests/ Folder +Most of the tests in the `tests/` folder are unit tests. For info about how to write and run tests, see [the docs about contributing to BigchainDB](http://docs.bigchaindb.com/projects/contributing/en/latest/index.html), especially: -The `tests/` folder is where all the tests for BigchainDB Server live. Most of them are unit tests. Integration tests are in the [`tests/integration/` folder](./integration/). +- [Write Code - Remember to Write Tests](http://docs.bigchaindb.com/projects/contributing/en/latest/dev-setup-coding-and-contribution-process/write-code.html#remember-to-write-tests) +- [Notes on Running a Local Dev Node with Docker Compose](http://docs.bigchaindb.com/projects/contributing/en/latest/dev-setup-coding-and-contribution-process/run-node-with-docker-compose.html), especially `make test` +- [ +Notes on Running a Local Dev Node as Processes (and Running All Tests)](http://docs.bigchaindb.com/projects/contributing/en/latest/dev-setup-coding-and-contribution-process/run-node-as-processes.html) -A few notes: - -- [`tests/common/`](./common/) contains self-contained tests only testing - [`bigchaindb/common/`](../bigchaindb/common/) -- [`tests/backend/`](./backend/) contains tests requiring - the database backend (MongoDB) - - -## Writing Tests - -We write unit and integration tests for our Python code using the [pytest](http://pytest.org/latest/) framework. You can use the tests in the `tests/` folder as templates or examples. - - -## Running Tests - -### Running Tests Directly - -If you installed BigchainDB Server using `pip install bigchaindb`, then you -didn't install the tests. Before you can run all the tests, you must install -BigchainDB from source. The [`CONTRIBUTING.md` file](../CONTRIBUTING.md) has -instructions for how to do that. - -Next, make sure you have Local MongoDB running in the background. You -can run MongoDB using `mongod`. - -The `pytest` command has many options. 
If you want to learn about all the -things you can do with pytest, see [the pytest -documentation](http://pytest.org/latest/). We've also added a customization to -pytest: - -`--database-backend`: Defines the backend to use for the tests. It defaults to -`localmongodb`. - -Now you can run all tests using: -```text -pytest -v -``` - -or, if that doesn't work, try: -```text -python -m pytest -v -``` - -or: -```text -python setup.py test -``` - -How does `python setup.py test` work? The documentation for [pytest-runner](https://pypi.python.org/pypi/pytest-runner) explains. - -The `pytest` command has many options. If you want to learn about all the things you can do with pytest, see [the pytest documentation](http://pytest.org/latest/). We've also added a customization to pytest: - - -### Running Tests with Docker Compose - -You can also use [Docker Compose](https://docs.docker.com/compose/) to run all the tests. - -First, bring up all the services BigchainDB, MongoDB, Tendermint in the background: - -```text -$ docker-compose up -d bdb -``` - -then run the tests using: - -```text -$ docker-compose run --rm --no-deps bigchaindb pytest -v -``` - -## Automated Testing of All Pull Requests - -We use [Travis CI](https://travis-ci.com/), so that whenever someone creates a new BigchainDB pull request on GitHub, Travis CI gets the new code and does _a bunch of stuff_. We use the same `docker-compose.yml` for tests. It tells Travis CI how to install BigchainDB, how to run all the tests, and what to do "after success" (e.g. run `codecov`). (We use [Codecov](https://codecov.io/) to get a rough estimate of our test coverage.) - - -### Tox - -We use [tox](https://tox.readthedocs.io/en/latest/) to run multiple suites of tests against multiple environments during automated testing. Generally you don't need to run this yourself, but it might be useful when troubleshooting a failing Travis CI build. - -To run all the tox tests, use: -```text -tox -``` - -or: -```text -python -m tox -``` - -To run only a few environments, use the `-e` flag: -```text -tox -e {ENVLIST} -``` - -where `{ENVLIST}` is one or more of the environments specified in the [tox.ini file](../tox.ini). +Note: There are acceptance tests in the `acceptance/` folder (at the same level in the hierarchy as the `tests/` folder). 
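The deleted text above documented a custom pytest CLI option, `--database-backend` (defaulting to `localmongodb`). As a general, hedged illustration of how such an option is wired up through pytest's `pytest_addoption` hook — a sketch only, not a copy of the project's actual conftest.py:

    # Hedged sketch: registering a custom pytest option like --database-backend.
    import pytest


    def pytest_addoption(parser):
        parser.addoption('--database-backend', action='store',
                         default='localmongodb',
                         help='Database backend to run the tests against.')


    @pytest.fixture
    def database_backend(request):
        # fixtures can read the option back from the pytest config object
        return request.config.getoption('--database-backend')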
From cb22557771c2b75fa24607f58a1526e802fbe96f Mon Sep 17 00:00:00 2001 From: Vanshdeep Singh Date: Tue, 4 Sep 2018 15:30:52 +0200 Subject: [PATCH 21/22] Problem: Validation code not optimized (#2490) Solution: memoize data --- bigchaindb/backend/localmongodb/query.py | 6 +- bigchaindb/common/memoize.py | 58 ++++++++++++ bigchaindb/common/transaction.py | 27 +++++- bigchaindb/lib.py | 15 ++- bigchaindb/models.py | 2 +- .../upsert_validator/validator_election.py | 6 -- tests/backend/localmongodb/test_queries.py | 4 +- tests/common/test_memoize.py | 92 +++++++++++++++++++ tests/common/test_transaction.py | 4 +- tests/conftest.py | 6 ++ 10 files changed, 195 insertions(+), 25 deletions(-) create mode 100644 bigchaindb/common/memoize.py create mode 100644 tests/common/test_memoize.py diff --git a/bigchaindb/backend/localmongodb/query.py b/bigchaindb/backend/localmongodb/query.py index 6587d494..6309c86d 100644 --- a/bigchaindb/backend/localmongodb/query.py +++ b/bigchaindb/backend/localmongodb/query.py @@ -91,10 +91,8 @@ def get_assets(conn, asset_ids): @register_query(LocalMongoDBConnection) def get_spent(conn, transaction_id, output): - query = {'inputs.fulfills': { - 'transaction_id': transaction_id, - 'output_index': output}} - + query = {'inputs.fulfills': {'transaction_id': transaction_id, + 'output_index': output}} return conn.run( conn.collection('transactions') .find(query, {'_id': 0})) diff --git a/bigchaindb/common/memoize.py b/bigchaindb/common/memoize.py new file mode 100644 index 00000000..b814e512 --- /dev/null +++ b/bigchaindb/common/memoize.py @@ -0,0 +1,58 @@ +import functools +import codecs +from functools import lru_cache + + +class HDict(dict): + def __hash__(self): + return hash(codecs.decode(self['id'], 'hex')) + + +@lru_cache(maxsize=16384) +def from_dict(func, *args, **kwargs): + return func(*args, **kwargs) + + +def memoize_from_dict(func): + + @functools.wraps(func) + def memoized_func(*args, **kwargs): + + if args[1].get('id', None): + args = list(args) + args[1] = HDict(args[1]) + new_args = tuple(args) + return from_dict(func, *new_args, **kwargs) + else: + return func(*args, **kwargs) + + return memoized_func + + +class ToDictWrapper(): + def __init__(self, tx): + self.tx = tx + + def __eq__(self, other): + return self.tx.id == other.tx.id + + def __hash__(self): + return hash(self.tx.id) + + +@lru_cache(maxsize=16384) +def to_dict(func, tx_wrapped): + return func(tx_wrapped.tx) + + +def memoize_to_dict(func): + + @functools.wraps(func) + def memoized_func(*args, **kwargs): + + if args[0].id: + return to_dict(func, ToDictWrapper(args[0])) + else: + return func(*args, **kwargs) + + return memoized_func diff --git a/bigchaindb/common/transaction.py b/bigchaindb/common/transaction.py index 8baf8196..40cc68e7 100644 --- a/bigchaindb/common/transaction.py +++ b/bigchaindb/common/transaction.py @@ -12,7 +12,8 @@ Attributes: """ from collections import namedtuple from copy import deepcopy -from functools import reduce +from functools import reduce, lru_cache +import rapidjson import base58 from cryptoconditions import Fulfillment, ThresholdSha256, Ed25519Sha256 @@ -27,6 +28,7 @@ from bigchaindb.common.exceptions import (KeypairMismatchException, AmountError, AssetIdMismatch, ThresholdTooDeep) from bigchaindb.common.utils import serialize +from .memoize import memoize_from_dict, memoize_to_dict UnspentOutput = namedtuple( @@ -82,6 +84,11 @@ class Input(object): # TODO: If `other !== Fulfillment` return `False` return self.to_dict() == other.to_dict() + # NOTE: This 
function is used to provide a unique key for a given + # Input to suppliment memoization + def __hash__(self): + return hash((self.fulfillment, self.fulfills)) + def to_dict(self): """Transforms the object to a Python dictionary. @@ -500,7 +507,7 @@ class Transaction(object): VERSION = '2.0' def __init__(self, operation, asset, inputs=None, outputs=None, - metadata=None, version=None, hash_id=None): + metadata=None, version=None, hash_id=None, tx_dict=None): """The constructor allows to create a customizable Transaction. Note: @@ -553,6 +560,7 @@ class Transaction(object): self.outputs = outputs or [] self.metadata = metadata self._id = hash_id + self.tx_dict = tx_dict @property def unspent_outputs(self): @@ -990,7 +998,7 @@ class Transaction(object): raise ValueError('Inputs and ' 'output_condition_uris must have the same count') - tx_dict = self.to_dict() + tx_dict = self.tx_dict if self.tx_dict else self.to_dict() tx_dict = Transaction._remove_signatures(tx_dict) tx_dict['id'] = None tx_serialized = Transaction._to_str(tx_dict) @@ -1003,6 +1011,7 @@ class Transaction(object): return all(validate(i, cond) for i, cond in enumerate(output_condition_uris)) + @lru_cache(maxsize=16384) def _input_valid(self, input_, operation, message, output_condition_uri=None): """Validates a single Input against a single Output. @@ -1048,6 +1057,11 @@ class Transaction(object): ffill_valid = parsed_ffill.validate(message=message.digest()) return output_valid and ffill_valid + # This function is required by `lru_cache` to create a key for memoization + def __hash__(self): + return hash(self.id) + + @memoize_to_dict def to_dict(self): """Transforms the object to a Python dictionary. @@ -1150,7 +1164,9 @@ class Transaction(object): tx_body (dict): The Transaction to be transformed. """ # NOTE: Remove reference to avoid side effects - tx_body = deepcopy(tx_body) + # tx_body = deepcopy(tx_body) + tx_body = rapidjson.loads(rapidjson.dumps(tx_body)) + try: proposed_tx_id = tx_body['id'] except KeyError: @@ -1167,6 +1183,7 @@ class Transaction(object): raise InvalidHash(err_msg.format(proposed_tx_id)) @classmethod + @memoize_from_dict def from_dict(cls, tx, skip_schema_validation=True): """Transforms a Python dictionary to a Transaction object. @@ -1184,7 +1201,7 @@ class Transaction(object): inputs = [Input.from_dict(input_) for input_ in tx['inputs']] outputs = [Output.from_dict(output) for output in tx['outputs']] return cls(tx['operation'], tx['asset'], inputs, outputs, - tx['metadata'], tx['version'], hash_id=tx['id']) + tx['metadata'], tx['version'], hash_id=tx['id'], tx_dict=tx) @classmethod def from_db(cls, bigchain, tx_dict_list): diff --git a/bigchaindb/lib.py b/bigchaindb/lib.py index fa72a506..017151db 100644 --- a/bigchaindb/lib.py +++ b/bigchaindb/lib.py @@ -9,6 +9,7 @@ MongoDB. import logging from collections import namedtuple from uuid import uuid4 +import rapidjson try: from hashlib import sha3_256 @@ -77,10 +78,11 @@ class BigchainDB(object): raise ValidationError('Mode must be one of the following {}.' .format(', '.join(self.mode_list))) + tx_dict = transaction.tx_dict if transaction.tx_dict else transaction.to_dict() payload = { 'method': mode, 'jsonrpc': '2.0', - 'params': [encode_transaction(transaction.to_dict())], + 'params': [encode_transaction(tx_dict)], 'id': str(uuid4()) } # TODO: handle connection errors! 
@@ -122,10 +124,9 @@ class BigchainDB(object): txns = [] assets = [] txn_metadatas = [] - for transaction_obj in transactions: - # self.update_utxoset(transaction) - transaction = transaction_obj.to_dict() - if transaction['operation'] == transaction_obj.CREATE: + for t in transactions: + transaction = t.tx_dict if t.tx_dict else rapidjson.loads(rapidjson.dumps(t.to_dict())) + if transaction['operation'] == t.CREATE: asset = transaction.pop('asset') asset['id'] = transaction['id'] assets.append(asset) @@ -224,6 +225,10 @@ class BigchainDB(object): return backend.query.delete_unspent_outputs( self.connection, *unspent_outputs) + def is_committed(self, transaction_id): + transaction = backend.query.get_transaction(self.connection, transaction_id) + return bool(transaction) + def get_transaction(self, transaction_id): transaction = backend.query.get_transaction(self.connection, transaction_id) diff --git a/bigchaindb/models.py b/bigchaindb/models.py index 894f510d..9df4dbd4 100644 --- a/bigchaindb/models.py +++ b/bigchaindb/models.py @@ -27,7 +27,7 @@ class Transaction(Transaction): if self.operation == Transaction.CREATE: duplicates = any(txn for txn in current_transactions if txn.id == self.id) - if bigchain.get_transaction(self.to_dict()['id']) or duplicates: + if bigchain.is_committed(self.id) or duplicates: raise DuplicateTransaction('transaction `{}` already exists' .format(self.id)) diff --git a/bigchaindb/upsert_validator/validator_election.py b/bigchaindb/upsert_validator/validator_election.py index 95162442..d7e63cf2 100644 --- a/bigchaindb/upsert_validator/validator_election.py +++ b/bigchaindb/upsert_validator/validator_election.py @@ -35,12 +35,6 @@ class ValidatorElection(Transaction): INCONCLUSIVE = 'inconclusive' ELECTION_THRESHOLD = 2 / 3 - def __init__(self, operation, asset, inputs, outputs, - metadata=None, version=None, hash_id=None): - # operation `CREATE` is being passed as argument as `VALIDATOR_ELECTION` is an extension - # of `CREATE` and any validation on `CREATE` in the parent class should apply to it - super().__init__(operation, asset, inputs, outputs, metadata, version, hash_id) - @classmethod def get_validator_change(cls, bigchain, height=None): """Return the latest change to the validator set diff --git a/tests/backend/localmongodb/test_queries.py b/tests/backend/localmongodb/test_queries.py index 9c38d822..2262d723 100644 --- a/tests/backend/localmongodb/test_queries.py +++ b/tests/backend/localmongodb/test_queries.py @@ -205,7 +205,7 @@ def test_get_owned_ids(signed_create_tx, user_pk): conn = connect() # insert a transaction - conn.db.transactions.insert_one(signed_create_tx.to_dict()) + conn.db.transactions.insert_one(deepcopy(signed_create_tx.to_dict())) txns = list(query.get_owned_ids(conn, user_pk)) @@ -224,7 +224,7 @@ def test_get_spending_transactions(user_pk, user_sk): tx2 = Transaction.transfer([inputs[0]], out, tx1.id).sign([user_sk]) tx3 = Transaction.transfer([inputs[1]], out, tx1.id).sign([user_sk]) tx4 = Transaction.transfer([inputs[2]], out, tx1.id).sign([user_sk]) - txns = [tx.to_dict() for tx in [tx1, tx2, tx3, tx4]] + txns = [deepcopy(tx.to_dict()) for tx in [tx1, tx2, tx3, tx4]] conn.db.transactions.insert_many(txns) links = [inputs[0].fulfills.to_dict(), inputs[2].fulfills.to_dict()] diff --git a/tests/common/test_memoize.py b/tests/common/test_memoize.py new file mode 100644 index 00000000..3cc7f62b --- /dev/null +++ b/tests/common/test_memoize.py @@ -0,0 +1,92 @@ +# Copyright BigchainDB GmbH and BigchainDB contributors +# 
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0) +# Code is Apache-2.0 and docs are CC-BY-4.0 + +import pytest +from copy import deepcopy + +from bigchaindb.models import Transaction +from bigchaindb.common.crypto import generate_key_pair +from bigchaindb.common.memoize import to_dict, from_dict + + +pytestmark = [pytest.mark.tendermint, pytest.mark.bdb] + + +def test_memoize_to_dict(b): + alice = generate_key_pair() + asset = { + 'data': {'id': 'test_id'}, + } + + assert to_dict.cache_info().hits == 0 + assert to_dict.cache_info().misses == 0 + + tx = Transaction.create([alice.public_key], + [([alice.public_key], 1)], + asset=asset,)\ + .sign([alice.private_key]) + + tx.to_dict() + + assert to_dict.cache_info().hits == 0 + assert to_dict.cache_info().misses == 1 + + tx.to_dict() + tx.to_dict() + + assert to_dict.cache_info().hits == 2 + assert to_dict.cache_info().misses == 1 + + +def test_memoize_from_dict(b): + alice = generate_key_pair() + asset = { + 'data': {'id': 'test_id'}, + } + + assert from_dict.cache_info().hits == 0 + assert from_dict.cache_info().misses == 0 + + tx = Transaction.create([alice.public_key], + [([alice.public_key], 1)], + asset=asset,)\ + .sign([alice.private_key]) + tx_dict = deepcopy(tx.to_dict()) + + Transaction.from_dict(tx_dict) + + assert from_dict.cache_info().hits == 0 + assert from_dict.cache_info().misses == 1 + + Transaction.from_dict(tx_dict) + Transaction.from_dict(tx_dict) + + assert from_dict.cache_info().hits == 2 + assert from_dict.cache_info().misses == 1 + + +def test_memoize_input_valid(b): + alice = generate_key_pair() + asset = { + 'data': {'id': 'test_id'}, + } + + assert Transaction._input_valid.cache_info().hits == 0 + assert Transaction._input_valid.cache_info().misses == 0 + + tx = Transaction.create([alice.public_key], + [([alice.public_key], 1)], + asset=asset,)\ + .sign([alice.private_key]) + + tx.inputs_valid() + + assert Transaction._input_valid.cache_info().hits == 0 + assert Transaction._input_valid.cache_info().misses == 1 + + tx.inputs_valid() + tx.inputs_valid() + + assert Transaction._input_valid.cache_info().hits == 2 + assert Transaction._input_valid.cache_info().misses == 1 diff --git a/tests/common/test_transaction.py b/tests/common/test_transaction.py index 4a1586ca..fc6444dd 100644 --- a/tests/common/test_transaction.py +++ b/tests/common/test_transaction.py @@ -13,7 +13,7 @@ from cryptoconditions import Ed25519Sha256 from pytest import mark, raises from sha3 import sha3_256 -pytestmark = mark.tendermint +pytestmark = [mark.tendermint, mark.bdb] def test_input_serialization(ffill_uri, user_pub): @@ -533,7 +533,7 @@ def test_validate_input_with_invalid_parameters(utx): input_conditions = [out.fulfillment.condition_uri for out in utx.outputs] tx_dict = utx.to_dict() tx_serialized = Transaction._to_str(tx_dict) - valid = utx._input_valid(utx.inputs[0], tx_serialized, input_conditions) + valid = utx._input_valid(utx.inputs[0], tx_serialized, input_conditions[0]) assert not valid diff --git a/tests/conftest.py b/tests/conftest.py index 7faa66c6..12cd65e3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -144,11 +144,17 @@ def _bdb(_setup_database, _configure_bigchaindb): from bigchaindb import config from bigchaindb.backend import connect from .utils import flush_db + from bigchaindb.common.memoize import to_dict, from_dict + from bigchaindb.models import Transaction conn = connect() yield dbname = config['database']['name'] flush_db(conn, dbname) + to_dict.cache_clear() + from_dict.cache_clear() + 
    Transaction._input_valid.cache_clear()
+

 # We need this function to avoid loading an existing
 # conf file located in the home of the user running

From af2b5424c05bf781ac356338568a6471e5e2e818 Mon Sep 17 00:00:00 2001
From: Vanshdeep Singh
Date: Tue, 4 Sep 2018 15:45:48 +0200
Subject: [PATCH 22/22] Problem: Cryptoconditions not pinned, which is causing
 build failures (#2512)

Solution: Pin cryptoconditions so that docs build properly
---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 071ddd45..db7f1bd0 100644
--- a/setup.py
+++ b/setup.py
@@ -79,7 +79,7 @@ install_requires = [
     # TODO Consider not installing the db drivers, or putting them in extras.
     'pymongo~=3.6',
     'pysha3~=1.0.2',
-    'cryptoconditions~=0.7.2',
+    'cryptoconditions==0.7.2',
     'python-rapidjson~=0.6.0',
     'logstats~=0.2.1',
     'flask>=0.10.1',
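Returning to PATCH 21: the new bigchaindb/common/memoize.py works around the fact that dicts are unhashable by wrapping them in a hashable key object (keyed on the transaction id) before passing them through functools.lru_cache. Below is a self-contained sketch of that pattern with illustrative names; the real module hashes the hex-decoded id and also memoizes to_dict through a ToDictWrapper keyed on the transaction object's id.

    # Minimal sketch of the dict-memoization pattern from PATCH 21; names are
    # illustrative and the "expensive" step stands in for transaction parsing.
    from functools import lru_cache


    class HashableById(dict):
        """Make a payload dict hashable by keying it on its 'id' field."""
        def __hash__(self):
            return hash(self['id'])


    @lru_cache(maxsize=16384)
    def _cached_parse(payload):
        # expensive work (validation, object construction) would happen here
        return dict(payload)


    def parse(payload):
        # only payloads that already carry an id are safe to memoize
        if payload.get('id'):
            return _cached_parse(HashableById(payload))
        return dict(payload)


    parse({'id': 'abc', 'value': 1})
    parse({'id': 'abc', 'value': 1})
    assert _cached_parse.cache_info().hits == 1  # second call served from cache

Clearing these caches between tests, as the updated tests/conftest.py does with cache_clear(), keeps the memoized state from leaking across test runs.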