Mirror of https://github.com/bigchaindb/bigchaindb.git

Commit 024c8583b5: Merge remote-tracking branch 'origin/isolate-db-testcalls'
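The theme of the diff: test code stops issuing raw RethinkDB queries (`r.table(...).run(get_conn())` or `b.connection.run(...)`) and instead goes through the backend layer (`b.backend.*`) or the public `Bigchain` API (`b.get_block`, `b.get_transaction`, `b.write_vote`, `b.write_transaction`). A minimal sketch of the kind of backend object the tests now talk to; only the method names are taken from the diff, the class layout and connection handling are assumptions:

    # Sketch of the backend boundary the tests are moved behind. Method
    # names come from the diff; everything else here is an assumption.
    import rethinkdb as r

    class RethinkDBBackend:
        def __init__(self, connection):
            # a live rethinkdb connection, e.g. r.connect(db='bigchain')
            self.connection = connection

        def get_block(self, block_id):
            # fetch a single block document by id
            return r.table('bigchain').get(block_id).run(self.connection)

        def count_blocks(self):
            # how many blocks the chain currently holds
            return r.table('bigchain').count().run(self.connection)

        def count_backlog(self):
            # how many transactions are waiting in the backlog
            return r.table('backlog').count().run(self.connection)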
@@ -303,44 +303,24 @@ class TestBigchainApi(object):
     @pytest.mark.usefixtures('inputs')
     def test_genesis_block(self, b):
-        import rethinkdb as r
-        from bigchaindb.util import is_genesis_block
-        from bigchaindb.db.utils import get_conn
-
-        response = list(r.table('bigchain')
-                        .filter(is_genesis_block)
-                        .run(get_conn()))
-
-        assert len(response) == 1
-        block = response[0]
+        block = b.backend.get_genesis_block()
+
         assert len(block['block']['transactions']) == 1
         assert block['block']['transactions'][0]['transaction']['operation'] == 'GENESIS'
         assert block['block']['transactions'][0]['transaction']['fulfillments'][0]['input'] is None

     def test_create_genesis_block_fails_if_table_not_empty(self, b):
-        import rethinkdb as r
         from bigchaindb.common.exceptions import GenesisBlockAlreadyExistsError
-        from bigchaindb.util import is_genesis_block
-        from bigchaindb.db.utils import get_conn

         b.create_genesis_block()

         with pytest.raises(GenesisBlockAlreadyExistsError):
             b.create_genesis_block()

-        genesis_blocks = list(r.table('bigchain')
-                              .filter(is_genesis_block)
-                              .run(get_conn()))
-
-        assert len(genesis_blocks) == 1
-
     @pytest.mark.skipif(reason='This test may not make sense after changing the chainification mode')
     def test_get_last_block(self, b):
-        import rethinkdb as r
-        from bigchaindb.db.utils import get_conn
-
         # get the number of blocks
-        num_blocks = r.table('bigchain').count().run(get_conn())
+        num_blocks = b.backend.count_blocks()

         # get the last block
         last_block = b.get_last_block()
@@ -392,15 +372,10 @@ class TestBigchainApi(object):
         assert status == b.BLOCK_UNDECIDED

     def test_get_last_voted_block_returns_genesis_if_no_votes_has_been_casted(self, b):
-        import rethinkdb as r
-        from bigchaindb import util
         from bigchaindb.models import Block
-        from bigchaindb.db.utils import get_conn

         b.create_genesis_block()
-        genesis = list(r.table('bigchain')
-                       .filter(util.is_genesis_block)
-                       .run(get_conn()))[0]
+        genesis = b.backend.get_genesis_block()
         genesis = Block.from_dict(genesis)
         gb = b.get_last_voted_block()
         assert gb == genesis
@@ -463,29 +438,25 @@ class TestBigchainApi(object):
         assert b.get_last_voted_block().id == block_3.id

     def test_no_vote_written_if_block_already_has_vote(self, b):
-        import rethinkdb as r
         from bigchaindb.models import Block
-        from bigchaindb.db.utils import get_conn

         genesis = b.create_genesis_block()
         block_1 = dummy_block()
         b.write_block(block_1, durability='hard')

         b.write_vote(b.vote(block_1.id, genesis.id, True))
-        retrieved_block_1 = r.table('bigchain').get(block_1.id).run(get_conn())
+        retrieved_block_1 = b.get_block(block_1.id)
         retrieved_block_1 = Block.from_dict(retrieved_block_1)

         # try to vote again on the retrieved block, should do nothing
         b.write_vote(b.vote(retrieved_block_1.id, genesis.id, True))
-        retrieved_block_2 = r.table('bigchain').get(block_1.id).run(get_conn())
+        retrieved_block_2 = b.get_block(block_1.id)
         retrieved_block_2 = Block.from_dict(retrieved_block_2)

         assert retrieved_block_1 == retrieved_block_2

     def test_more_votes_than_voters(self, b):
-        import rethinkdb as r
         from bigchaindb.common.exceptions import MultipleVotesError
-        from bigchaindb.db.utils import get_conn

         b.create_genesis_block()
         block_1 = dummy_block()
@@ -494,8 +465,8 @@ class TestBigchainApi(object):
         vote_1 = b.vote(block_1.id, b.get_last_voted_block().id, True)
         vote_2 = b.vote(block_1.id, b.get_last_voted_block().id, True)
         vote_2['node_pubkey'] = 'aaaaaaa'
-        r.table('votes').insert(vote_1).run(get_conn())
-        r.table('votes').insert(vote_2).run(get_conn())
+        b.write_vote(vote_1)
+        b.write_vote(vote_2)

         with pytest.raises(MultipleVotesError) as excinfo:
             b.block_election_status(block_1.id, block_1.voters)
@@ -503,16 +474,14 @@ class TestBigchainApi(object):
                 .format(block_id=block_1.id, n_votes=str(2), n_voters=str(1))

     def test_multiple_votes_single_node(self, b):
-        import rethinkdb as r
         from bigchaindb.common.exceptions import MultipleVotesError
-        from bigchaindb.db.utils import get_conn

         genesis = b.create_genesis_block()
         block_1 = dummy_block()
         b.write_block(block_1, durability='hard')
         # insert duplicate votes
         for i in range(2):
-            r.table('votes').insert(b.vote(block_1.id, genesis.id, True)).run(get_conn())
+            b.write_vote(b.vote(block_1.id, genesis.id, True))

         with pytest.raises(MultipleVotesError) as excinfo:
             b.block_election_status(block_1.id, block_1.voters)
@@ -525,9 +494,7 @@ class TestBigchainApi(object):
                 .format(block_id=block_1.id, n_votes=str(2), me=b.me)

     def test_improper_vote_error(selfs, b):
-        import rethinkdb as r
         from bigchaindb.common.exceptions import ImproperVoteError
-        from bigchaindb.db.utils import get_conn

         b.create_genesis_block()
         block_1 = dummy_block()
@@ -535,7 +502,7 @@ class TestBigchainApi(object):
         vote_1 = b.vote(block_1.id, b.get_last_voted_block().id, True)
         # mangle the signature
         vote_1['signature'] = 'a' * 87
-        r.table('votes').insert(vote_1).run(get_conn())
+        b.write_vote(vote_1)
         with pytest.raises(ImproperVoteError) as excinfo:
             b.has_previous_vote(block_1.id, block_1.id)
         assert excinfo.value.args[0] == 'Block {block_id} already has an incorrectly signed ' \
@@ -543,9 +510,7 @@ class TestBigchainApi(object):

     @pytest.mark.usefixtures('inputs')
     def test_assign_transaction_one_node(self, b, user_pk, user_sk):
-        import rethinkdb as r
         from bigchaindb.models import Transaction
-        from bigchaindb.db.utils import get_conn

         input_tx = b.get_owned_ids(user_pk).pop()
         input_tx = b.get_transaction(input_tx.txid)
@@ -555,17 +520,15 @@ class TestBigchainApi(object):
         b.write_transaction(tx)

         # retrieve the transaction
-        response = r.table('backlog').get(tx.id).run(get_conn())
+        response = list(b.backend.get_stale_transactions(0))[0]

         # check if the assignee is the current node
         assert response['assignee'] == b.me

     @pytest.mark.usefixtures('inputs')
     def test_assign_transaction_multiple_nodes(self, b, user_pk, user_sk):
-        import rethinkdb as r
         from bigchaindb.common.crypto import generate_key_pair
         from bigchaindb.models import Transaction
-        from bigchaindb.db.utils import get_conn

         # create 5 federation nodes
         for _ in range(5):
@@ -580,11 +543,12 @@ class TestBigchainApi(object):
         tx = tx.sign([user_sk])
         b.write_transaction(tx)

         # retrieve the transaction
-        response = r.table('backlog').get(tx.id).run(get_conn())
+        response = b.backend.get_stale_transactions(0)

         # check if the assignee is one of the _other_ federation nodes
-        assert response['assignee'] in b.nodes_except_me
+        for tx in response:
+            assert tx['assignee'] in b.nodes_except_me


     @pytest.mark.usefixtures('inputs')
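The assignment tests above stop reading the backlog table directly and instead call `b.backend.get_stale_transactions(0)`; with a delay of 0 every backlog entry counts as stale, so the call doubles as "give me the whole backlog". A sketch of such a query, assuming the `assignment_timestamp` field the old test code set by hand; the signature is inferred from the calls in the diff, the ReQL filter is an illustrative assumption:

    import time
    import rethinkdb as r

    def get_stale_transactions(connection, reassign_delay):
        # transactions assigned more than `reassign_delay` seconds ago;
        # passing 0 returns the whole backlog
        return (r.table('backlog')
                 .filter(r.row['assignment_timestamp'] < time.time() - reassign_delay)
                 .run(connection))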
@@ -1,8 +1,6 @@
 import time
 from unittest.mock import patch

-import rethinkdb as r
-
 from multipipes import Pipe

@@ -69,7 +67,7 @@ def test_write_block(b, user_pk):

     block_doc = b.create_block(txs)
     block_maker.write(block_doc)
-    expected = b.connection.run(r.table('bigchain').get(block_doc.id))
+    expected = b.backend.get_block(block_doc.id)
     expected = Block.from_dict(expected)

     assert expected == block_doc
@@ -90,18 +88,19 @@ def test_duplicate_transaction(b, user_pk):
     block_maker.write(block_doc)

     # block is in bigchain
-    assert b.connection.run(r.table('bigchain').get(block_doc.id)) == block_doc.to_dict()
+    assert b.backend.get_block(block_doc.id) == block_doc.to_dict()

     b.write_transaction(txs[0])

     # verify tx is in the backlog
-    assert b.connection.run(r.table('backlog').get(txs[0].id)) is not None
+    assert b.get_transaction(txs[0].id) is not None

     # try to validate a transaction that's already in the chain; should not work
     assert block_maker.validate_tx(txs[0].to_dict()) is None

     # duplicate tx should be removed from backlog
-    assert b.connection.run(r.table('backlog').get(txs[0].id)) is None
+    response, status = b.get_transaction(txs[0].id, include_status=True)
+    assert status != b.TX_IN_BACKLOG


 def test_delete_tx(b, user_pk):
@@ -119,9 +118,7 @@ def test_delete_tx(b, user_pk):
     block_doc = block_maker.create(None, timeout=True)

     for tx in block_doc.to_dict()['block']['transactions']:
-        returned_tx = b.connection.run(r.table('backlog').get(tx['id']))
-        returned_tx.pop('assignee')
-        returned_tx.pop('assignment_timestamp')
+        returned_tx = b.get_transaction(tx['id']).to_dict()
         assert returned_tx == tx

     returned_block = block_maker.delete_tx(block_doc)
@@ -129,7 +126,8 @@ def test_delete_tx(b, user_pk):
     assert returned_block == block_doc

     for tx in block_doc.to_dict()['block']['transactions']:
-        assert b.connection.run(r.table('backlog').get(tx['id'])) is None
+        returned_tx, status = b.get_transaction(tx['id'], include_status=True)
+        assert status != b.TX_IN_BACKLOG


 def test_prefeed(b, user_pk):
@@ -165,20 +163,16 @@ def test_full_pipeline(b, user_pk):
     from bigchaindb.pipelines.block import create_pipeline, get_changefeed

     outpipe = Pipe()

-    count_assigned_to_me = 0
     # include myself here, so that some tx are actually assigned to me
     b.nodes_except_me = [b.me, 'aaa', 'bbb', 'ccc']
     for i in range(100):
         tx = Transaction.create([b.me], [([user_pk], 1)],
                                 {'msg': random.random()})
-        tx = tx.sign([b.me_private]).to_dict()
-        assignee = random.choice([b.me, 'aaa', 'bbb', 'ccc'])
-        tx['assignee'] = assignee
-        tx['assignment_timestamp'] = time.time()
-        if assignee == b.me:
-            count_assigned_to_me += 1
-        b.connection.run(r.table('backlog').insert(tx, durability='hard'))
+        tx = tx.sign([b.me_private])
+        b.write_transaction(tx)

-    assert b.connection.run(r.table('backlog').count()) == 100
+    assert b.backend.count_backlog() == 100

     pipeline = create_pipeline()
     pipeline.setup(indata=get_changefeed(), outdata=outpipe)
@@ -188,9 +182,9 @@ def test_full_pipeline(b, user_pk):
     pipeline.terminate()

     block_doc = outpipe.get()
-    chained_block = b.connection.run(r.table('bigchain').get(block_doc.id))
+    chained_block = b.backend.get_block(block_doc.id)
     chained_block = Block.from_dict(chained_block)

-    assert len(block_doc.transactions) == count_assigned_to_me
+    block_len = len(block_doc.transactions)
     assert chained_block == block_doc
-    assert b.connection.run(r.table('backlog').count()) == 100 - count_assigned_to_me
+    assert b.backend.count_backlog() == 100 - block_len
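Several assertions above switch from peeking at the backlog table to `b.get_transaction(..., include_status=True)`, which returns the transaction together with where it currently lives. A small helper in the spirit of those asserts; `b` (a Bigchain instance) and `tx_id` are assumed to come from the usual test fixtures:

    def assert_not_in_backlog(b, tx_id):
        # get_transaction with include_status=True returns a
        # (transaction, status) pair; TX_IN_BACKLOG is the constant the
        # tests above compare against
        tx, status = b.get_transaction(tx_id, include_status=True)
        assert status != b.TX_IN_BACKLOG
        return tx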
@@ -2,7 +2,6 @@ import time
 from unittest.mock import patch

 from bigchaindb.common import crypto
-import rethinkdb as r
 from multipipes import Pipe, Pipeline

 from bigchaindb import Bigchain
@@ -33,7 +32,8 @@ def test_check_for_quorum_invalid(b, user_pk):
              [member.vote(test_block.id, 'abc', False) for member in test_federation[2:]]

     # cast votes
-    b.connection.run(r.table('votes').insert(votes, durability='hard'))
+    for vote in votes:
+        b.write_vote(vote)

     # since this block is now invalid, should pass to the next process
     assert e.check_for_quorum(votes[-1]) == test_block
@@ -62,7 +62,8 @@ def test_check_for_quorum_invalid_prev_node(b, user_pk):
              [member.vote(test_block.id, 'def', True) for member in test_federation[2:]]

     # cast votes
-    b.connection.run(r.table('votes').insert(votes, durability='hard'))
+    for vote in votes:
+        b.write_vote(vote)

     # since nodes cannot agree on prev block, the block is invalid
     assert e.check_for_quorum(votes[-1]) == test_block
@@ -91,7 +92,8 @@ def test_check_for_quorum_valid(b, user_pk):
     votes = [member.vote(test_block.id, 'abc', True)
              for member in test_federation]
     # cast votes
-    b.connection.run(r.table('votes').insert(votes, durability='hard'))
+    for vote in votes:
+        b.write_vote(vote)

     # since this block is valid, should go nowhere
     assert e.check_for_quorum(votes[-1]) is None
||||
@ -107,10 +109,12 @@ def test_check_requeue_transaction(b, user_pk):
|
||||
test_block = b.create_block([tx1])
|
||||
|
||||
e.requeue_transactions(test_block)
|
||||
backlog_tx = b.connection.run(r.table('backlog').get(tx1.id))
|
||||
backlog_tx.pop('assignee')
|
||||
backlog_tx.pop('assignment_timestamp')
|
||||
assert backlog_tx == tx1.to_dict()
|
||||
|
||||
backlog_tx, status = b.get_transaction(tx1.id, include_status=True)
|
||||
#backlog_tx = b.connection.run(r.table('backlog').get(tx1.id))
|
||||
assert status == b.TX_IN_BACKLOG
|
||||
assert backlog_tx == tx1
|
||||
|
||||
|
||||
|
||||
@patch.object(Pipeline, 'start')
|
||||
@ -157,16 +161,16 @@ def test_full_pipeline(b, user_pk):
|
||||
vote_valid = b.vote(valid_block.id, 'abc', True)
|
||||
vote_invalid = b.vote(invalid_block.id, 'abc', False)
|
||||
|
||||
b.connection.run(r.table('votes').insert(vote_valid, durability='hard'))
|
||||
b.connection.run(r.table('votes').insert(vote_invalid, durability='hard'))
|
||||
b.write_vote(vote_valid)
|
||||
b.write_vote(vote_invalid)
|
||||
|
||||
outpipe.get()
|
||||
pipeline.terminate()
|
||||
|
||||
# only transactions from the invalid block should be returned to
|
||||
# the backlog
|
||||
assert b.connection.run(r.table('backlog').count()) == 100
|
||||
assert b.backend.count_backlog() == 100
|
||||
# NOTE: I'm still, I'm still tx from the block.
|
||||
tx_from_block = set([tx.id for tx in invalid_block.transactions])
|
||||
tx_from_backlog = set([tx['id'] for tx in list(b.connection.run(r.table('backlog')))])
|
||||
tx_from_backlog = set([tx['id'] for tx in list(b.backend.get_stale_transactions(0))])
|
||||
assert tx_from_block == tx_from_backlog
|
||||
|
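The election tests above replace one bulk `r.table('votes').insert(votes)` with a loop over `b.write_vote(vote)`, so votes flow through the same code path the node itself uses. The equivalent helper, assuming `b` is a Bigchain instance and `votes` a list of vote dicts as built in the tests:

    def cast_votes(b, votes):
        # write each vote through the public API instead of a bulk
        # table insert; matches the loops introduced in this diff
        for vote in votes:
            b.write_vote(vote)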
@@ -1,10 +1,8 @@
-import rethinkdb as r
 from bigchaindb import Bigchain
 from bigchaindb.pipelines import stale
 from multipipes import Pipe, Pipeline
 from unittest.mock import patch
 from bigchaindb import config_utils
 import time
 import os
-

@@ -43,23 +41,23 @@ def test_reassign_transactions(b, user_pk):
     stm = stale.StaleTransactionMonitor(timeout=0.001,
                                         backlog_reassign_delay=0.001)
     stm.bigchain.nodes_except_me = ['aaa', 'bbb', 'ccc']
-    tx = list(b.connection.run(r.table('backlog')))[0]
+    tx = list(b.backend.get_stale_transactions(0))[0]
     stm.reassign_transactions(tx)

-    reassigned_tx = b.connection.run(r.table('backlog').get(tx['id']))
+    reassigned_tx = list(b.backend.get_stale_transactions(0))[0]
     assert reassigned_tx['assignment_timestamp'] > tx['assignment_timestamp']
     assert reassigned_tx['assignee'] != tx['assignee']

     # test with node not in federation
     tx = Transaction.create([b.me], [([user_pk], 1)])
-    tx = tx.sign([b.me_private]).to_dict()
-    tx.update({'assignee': 'lol'})
-    tx.update({'assignment_timestamp': time.time()})
-    b.connection.run(r.table('backlog').insert(tx, durability='hard'))
+    tx = tx.sign([b.me_private])
+    stm.bigchain.nodes_except_me = ['lol']
+    b.write_transaction(tx, durability='hard')
+    stm.bigchain.nodes_except_me = None

-    tx = list(b.connection.run(r.table('backlog')))[0]
+    tx = list(b.backend.get_stale_transactions(0))[0]
     stm.reassign_transactions(tx)
-    assert b.connection.run(r.table('backlog').get(tx['id']))['assignee'] != 'lol'
+    assert tx['assignee'] != 'lol'


 def test_full_pipeline(monkeypatch, user_pk):
@@ -89,9 +87,10 @@ def test_full_pipeline(monkeypatch, user_pk):
         original_txc.append(tx.to_dict())

         b.write_transaction(tx)
-        original_txs[tx.id] = b.connection.run(r.table('backlog').get(tx.id))
+    original_txs = list(b.backend.get_stale_transactions(0))
+    original_txs = {tx['id']: tx for tx in original_txs}

-    assert b.connection.run(r.table('backlog').count()) == 100
+    assert len(original_txs) == 100

     monkeypatch.undo()

@@ -112,8 +111,8 @@ def test_full_pipeline(monkeypatch, user_pk):

     pipeline.terminate()

-    assert b.connection.run(r.table('backlog').count()) == 100
-    reassigned_txs = list(b.connection.run(r.table('backlog')))
+    assert len(list(b.backend.get_stale_transactions(0))) == 100
+    reassigned_txs = list(b.backend.get_stale_transactions(0))

     # check that every assignment timestamp has increased, and every tx has a new assignee
     for reassigned_tx in reassigned_txs:
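The reassignment checks above share one pattern: read a stale transaction, reassign it, read it back, and compare assignee and timestamp. Collected into one helper for clarity; `backend` and `stm` are assumed to be wired up as in the test fixtures:

    def assert_reassigned(backend, stm):
        # pattern used by test_reassign_transactions above
        tx = list(backend.get_stale_transactions(0))[0]
        stm.reassign_transactions(tx)

        reassigned = list(backend.get_stale_transactions(0))[0]
        assert reassigned['assignment_timestamp'] > tx['assignment_timestamp']
        assert reassigned['assignee'] != tx['assignee']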
@@ -2,7 +2,6 @@ import time

 from unittest.mock import patch

-import rethinkdb as r
 from multipipes import Pipe, Pipeline

@@ -167,7 +166,7 @@ def test_valid_block_voting_sequential(b, monkeypatch):
     last_vote = vote_obj.vote(*vote_obj.validate_tx(tx, block_id, num_tx))

     vote_obj.write_vote(last_vote)
-    vote_rs = b.connection.run(r.table('votes').get_all([block.id, b.me], index='block_and_voter'))
+    vote_rs = b.backend.get_votes_by_block_id_and_voter(block_id, b.me)
     vote_doc = vote_rs.next()

     assert vote_doc['vote'] == {'voting_for_block': block.id,
@@ -201,7 +200,7 @@ def test_valid_block_voting_multiprocessing(b, monkeypatch):
     vote_out = outpipe.get()
     vote_pipeline.terminate()

-    vote_rs = b.connection.run(r.table('votes').get_all([block.id, b.me], index='block_and_voter'))
+    vote_rs = b.backend.get_votes_by_block_id_and_voter(block.id, b.me)
     vote_doc = vote_rs.next()
     assert vote_out['vote'] == vote_doc['vote']
     assert vote_doc['vote'] == {'voting_for_block': block.id,
@@ -242,7 +241,7 @@ def test_valid_block_voting_with_create_transaction(b, monkeypatch):
     vote_out = outpipe.get()
     vote_pipeline.terminate()

-    vote_rs = b.connection.run(r.table('votes').get_all([block.id, b.me], index='block_and_voter'))
+    vote_rs = b.backend.get_votes_by_block_id_and_voter(block.id, b.me)
     vote_doc = vote_rs.next()
     assert vote_out['vote'] == vote_doc['vote']
     assert vote_doc['vote'] == {'voting_for_block': block.id,
@@ -297,7 +296,7 @@ def test_valid_block_voting_with_transfer_transactions(monkeypatch, b):
     vote2_out = outpipe.get()
     vote_pipeline.terminate()

-    vote_rs = b.connection.run(r.table('votes').get_all([block.id, b.me], index='block_and_voter'))
+    vote_rs = b.backend.get_votes_by_block_id_and_voter(block.id, b.me)
     vote_doc = vote_rs.next()
     assert vote_out['vote'] == vote_doc['vote']
     assert vote_doc['vote'] == {'voting_for_block': block.id,
@@ -311,7 +310,7 @@
     assert crypto.PublicKey(b.me).verify(serialized_vote,
                                          vote_doc['signature']) is True

-    vote2_rs = b.connection.run(r.table('votes').get_all([block2.id, b.me], index='block_and_voter'))
+    vote2_rs = b.backend.get_votes_by_block_id_and_voter(block2.id, b.me)
     vote2_doc = vote2_rs.next()
     assert vote2_out['vote'] == vote2_doc['vote']
     assert vote2_doc['vote'] == {'voting_for_block': block2.id,
@@ -348,7 +347,7 @@ def test_unsigned_tx_in_block_voting(monkeypatch, b, user_pk):
     vote_out = outpipe.get()
     vote_pipeline.terminate()

-    vote_rs = b.connection.run(r.table('votes').get_all([block.id, b.me], index='block_and_voter'))
+    vote_rs = b.backend.get_votes_by_block_id_and_voter(block.id, b.me)
     vote_doc = vote_rs.next()
     assert vote_out['vote'] == vote_doc['vote']
     assert vote_doc['vote'] == {'voting_for_block': block.id,
@@ -387,7 +386,7 @@ def test_invalid_id_tx_in_block_voting(monkeypatch, b, user_pk):
     vote_out = outpipe.get()
     vote_pipeline.terminate()

-    vote_rs = b.connection.run(r.table('votes').get_all([block['id'], b.me], index='block_and_voter'))
+    vote_rs = b.backend.get_votes_by_block_id_and_voter(block['id'], b.me)
     vote_doc = vote_rs.next()
     assert vote_out['vote'] == vote_doc['vote']
     assert vote_doc['vote'] == {'voting_for_block': block['id'],
@@ -426,7 +425,7 @@ def test_invalid_content_in_tx_in_block_voting(monkeypatch, b, user_pk):
     vote_out = outpipe.get()
     vote_pipeline.terminate()

-    vote_rs = b.connection.run(r.table('votes').get_all([block['id'], b.me], index='block_and_voter'))
+    vote_rs = b.backend.get_votes_by_block_id_and_voter(block['id'], b.me)
     vote_doc = vote_rs.next()
     assert vote_out['vote'] == vote_doc['vote']
     assert vote_doc['vote'] == {'voting_for_block': block['id'],
@@ -461,7 +460,7 @@ def test_invalid_block_voting(monkeypatch, b, user_pk):
     vote_out = outpipe.get()
     vote_pipeline.terminate()

-    vote_rs = b.connection.run(r.table('votes').get_all([block['id'], b.me], index='block_and_voter'))
+    vote_rs = b.backend.get_votes_by_block_id_and_voter(block['id'], b.me)
     vote_doc = vote_rs.next()
     assert vote_out['vote'] == vote_doc['vote']
     assert vote_doc['vote'] == {'voting_for_block': block['id'],
@@ -484,13 +483,16 @@ def test_voter_considers_unvoted_blocks_when_single_node(monkeypatch, b):
     monkeypatch.setattr('time.time', lambda: 1)
     b.create_genesis_block()

+    block_ids = []
     # insert blocks in the database while the voter process is not listening
     # (these blocks won't appear in the changefeed)
     monkeypatch.setattr('time.time', lambda: 2)
     block_1 = dummy_block(b)
+    block_ids.append(block_1.id)
     b.write_block(block_1, durability='hard')
     monkeypatch.setattr('time.time', lambda: 3)
     block_2 = dummy_block(b)
+    block_ids.append(block_2.id)
     b.write_block(block_2, durability='hard')

     vote_pipeline = vote.create_pipeline()
@@ -505,6 +507,7 @@ def test_voter_considers_unvoted_blocks_when_single_node(monkeypatch, b):
     # create a new block that will appear in the changefeed
     monkeypatch.setattr('time.time', lambda: 4)
     block_3 = dummy_block(b)
+    block_ids.append(block_3.id)
     b.write_block(block_3, durability='hard')

     # Same as before with the two `get`s
@@ -512,19 +515,9 @@ def test_voter_considers_unvoted_blocks_when_single_node(monkeypatch, b):

-    # retrieve blocks from bigchain
-    blocks = list(b.connection.run(
-        r.table('bigchain')
-        .order_by(r.asc((r.row['block']['timestamp'])))))
-
-    # FIXME: remove genesis block, we don't vote on it
-    # (might change in the future)
-    blocks.pop(0)
     vote_pipeline.terminate()

     # retrieve vote
-    votes = b.connection.run(r.table('votes'))
-    votes = list(votes)
+    votes = [list(b.backend.get_votes_by_block_id(_id))[0]
+             for _id in block_ids]

     assert all(vote['node_pubkey'] == b.me for vote in votes)

@@ -537,12 +530,15 @@ def test_voter_chains_blocks_with_the_previous_ones(monkeypatch, b):
     monkeypatch.setattr('time.time', lambda: 1)
     b.create_genesis_block()

+    block_ids = []
     monkeypatch.setattr('time.time', lambda: 2)
     block_1 = dummy_block(b)
+    block_ids.append(block_1.id)
     b.write_block(block_1, durability='hard')

     monkeypatch.setattr('time.time', lambda: 3)
     block_2 = dummy_block(b)
+    block_ids.append(block_2.id)
     b.write_block(block_2, durability='hard')

     vote_pipeline = vote.create_pipeline()
@@ -556,15 +552,13 @@ def test_voter_chains_blocks_with_the_previous_ones(monkeypatch, b):
     vote_pipeline.terminate()

-    # retrive blocks from bigchain
-    blocks = list(b.connection.run(
-        r.table('bigchain')
-        .order_by(r.asc((r.row['block']['timestamp'])))))
-
+    blocks = [b.get_block(_id) for _id in block_ids]
     # retrieve votes
-    votes = list(b.connection.run(r.table('votes')))
+    votes = [list(b.backend.get_votes_by_block_id(_id))[0]
+             for _id in block_ids]

-    assert votes[0]['vote']['voting_for_block'] in (blocks[1]['id'], blocks[2]['id'])
-    assert votes[1]['vote']['voting_for_block'] in (blocks[1]['id'], blocks[2]['id'])
+    assert votes[0]['vote']['voting_for_block'] in (blocks[0]['id'], blocks[1]['id'])
+    assert votes[1]['vote']['voting_for_block'] in (blocks[0]['id'], blocks[1]['id'])


 def test_voter_checks_for_previous_vote(monkeypatch, b):
@@ -579,8 +573,7 @@ def test_voter_checks_for_previous_vote(monkeypatch, b):
     monkeypatch.setattr('time.time', lambda: 2)
     block_1 = dummy_block(b)
     inpipe.put(block_1.to_dict())

-    assert b.connection.run(r.table('votes').count()) == 0
+    assert len(list(b.backend.get_votes_by_block_id(block_1.id))) == 0

     vote_pipeline = vote.create_pipeline()
     vote_pipeline.setup(indata=inpipe, outdata=outpipe)
@@ -595,14 +588,16 @@ def test_voter_checks_for_previous_vote(monkeypatch, b):

     # queue another block
     monkeypatch.setattr('time.time', lambda: 4)
-    inpipe.put(dummy_block(b).to_dict())
+    block_2 = dummy_block(b)
+    inpipe.put(block_2.to_dict())

     # wait for the result of the new block
     outpipe.get()

     vote_pipeline.terminate()

-    assert b.connection.run(r.table('votes').count()) == 2
+    assert len(list(b.backend.get_votes_by_block_id(block_1.id))) == 1
+    assert len(list(b.backend.get_votes_by_block_id(block_2.id))) == 1


 @patch.object(Pipeline, 'start')
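Every vote test above swaps the same query: `r.table('votes').get_all([block_id, me], index='block_and_voter')` becomes `b.backend.get_votes_by_block_id_and_voter(block_id, me)`. A sketch of that backend method; the index name comes from the deleted test lines, the rest is an assumption:

    import rethinkdb as r

    def get_votes_by_block_id_and_voter(connection, block_id, node_pubkey):
        # votes by a specific voter on a specific block, via the
        # 'block_and_voter' secondary index seen in the old test code
        return (r.table('votes')
                 .get_all([block_id, node_pubkey], index='block_and_voter')
                 .run(connection))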