Merge pull request #1389 from bigchaindb/vote-order-bug
Resume voting on blocks in order
commit 479bb78d9b
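In short: instead of priming the vote pipeline with an unordered prefeed of unvoted blocks, the pipeline now resumes an ordered feed of blocks starting from the last block this node voted on. A condensed sketch of the consumer-side flow, with names taken from the hunks below; `vote_on` is a hypothetical stand-in for the real vote pipeline, and a configured BigchainDB backend is assumed:

    from bigchaindb import Bigchain, backend

    def vote_on(block):
        # Hypothetical stand-in for the real multipipes vote pipeline.
        print('voting on block', block['id'])

    b = Bigchain()
    # Resume from the last block this node voted on...
    start_block_id = b.get_last_voted_block().id

    # ...and consume every block written after it, in write order.
    for block in backend.query.get_new_blocks_feed(b.connection, start_block_id):
        vote_on(block)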
@@ -15,55 +15,26 @@ register_changefeed = module_dispatch_registrar(backend.changefeed)
 
 
 class MongoDBChangeFeed(ChangeFeed):
-    """This class implements a MongoDB changefeed.
+    """This class implements a MongoDB changefeed as a multipipes Node.
 
     We emulate the behaviour of the RethinkDB changefeed by using a tailable
     cursor that listens for events on the oplog.
     """
 
     def run_forever(self):
         for element in self.prefeed:
             self.outqueue.put(element)
 
-        while True:
-            try:
-                # XXX: hack to force reconnection. Why? Because the cursor
-                # in `run_changefeed` does not run in the context of a
-                # Connection object, so if the connection is lost we need
-                # to manually reset the connection to None.
-                # See #1154
-                self.connection.connection = None
-                self.run_changefeed()
-                break
-            except (BackendError, pymongo.errors.ConnectionFailure):
-                logger.exception('Error connecting to the database, retrying')
-                time.sleep(1)
-
-    def run_changefeed(self):
-        dbname = self.connection.dbname
         table = self.table
-        namespace = '{}.{}'.format(dbname, table)
+        dbname = self.connection.dbname
 
         # last timestamp in the oplog. We only care for operations happening
         # in the future.
         last_ts = self.connection.run(
             self.connection.query().local.oplog.rs.find()
             .sort('$natural', pymongo.DESCENDING).limit(1)
             .next()['ts'])
-        # tailable cursor. A tailable cursor will remain open even after the
-        # last result was returned. ``TAILABLE_AWAIT`` will block for some
-        # timeout after the last result was returned. If no result is received
-        # in the meantime it will raise a StopIteration excetiption.
-        cursor = self.connection.run(
-            self.connection.query().local.oplog.rs.find(
-                {'ns': namespace, 'ts': {'$gt': last_ts}},
-                cursor_type=pymongo.CursorType.TAILABLE_AWAIT
-            ))
 
-        while cursor.alive:
-            try:
-                record = cursor.next()
-            except StopIteration:
-                continue
-
+        for record in run_changefeed(self.connection, table, last_ts):
             is_insert = record['op'] == 'i'
             is_delete = record['op'] == 'd'
@@ -104,3 +75,37 @@ def get_changefeed(connection, table, operation, *, prefeed=None):
 
     return MongoDBChangeFeed(table, operation, prefeed=prefeed,
                              connection=connection)
+
+
+_FEED_STOP = False
+"""If it's True then the changefeed will return when there are no more items.
+"""
+
+
+def run_changefeed(conn, table, last_ts):
+    """Encapsulate operational logic of tailing changefeed from MongoDB
+    """
+    while True:
+        try:
+            # XXX: hack to force reconnection, in case the connection
+            # is lost while waiting on the cursor. See #1154.
+            conn._conn = None
+            namespace = conn.dbname + '.' + table
+            query = conn.query().local.oplog.rs.find(
+                {'ns': namespace, 'ts': {'$gt': last_ts}},
+                {'o._id': False},
+                cursor_type=pymongo.CursorType.TAILABLE_AWAIT
+            )
+            cursor = conn.run(query)
+            logging.debug('Tailing oplog at %s/%s', namespace, last_ts)
+            while cursor.alive:
+                try:
+                    record = cursor.next()
+                    yield record
+                    last_ts = record['ts']
+                except StopIteration:
+                    if _FEED_STOP:
+                        return
+        except (BackendError, pymongo.errors.ConnectionFailure):
+            logger.exception('Lost connection while tailing oplog, retrying')
+            time.sleep(1)
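A note on the generator added above: reconnection is handled inside `run_changefeed` itself, so callers simply iterate it, and the module-level `_FEED_STOP` flag exists so tests can make the otherwise endless tail return. A minimal sketch of driving it directly; the `connect()` call and a running MongoDB replica set are assumptions here, not part of the diff:

    from bigchaindb.backend import connect
    from bigchaindb.backend.mongodb.changefeed import run_changefeed

    conn = connect()  # assumes a configured MongoDB backend

    # In BSON's type ordering, numbers sort below timestamps, so a last_ts
    # of -1 tails every oplog entry from now on. On BackendError or
    # ConnectionFailure the generator reconnects and resumes from the last
    # timestamp it yielded.
    for record in run_changefeed(conn, 'bigchain', -1):
        if record['op'] == 'i':   # insert events carry the document in 'o'
            print(record['o'])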
@@ -5,6 +5,7 @@ from time import time
 from pymongo import ReturnDocument
 
 from bigchaindb import backend
+from bigchaindb.backend.mongodb.changefeed import run_changefeed
 from bigchaindb.common.exceptions import CyclicBlockchainError
 from bigchaindb.common.transaction import Transaction
 from bigchaindb.backend.exceptions import DuplicateKeyError, OperationError
@@ -335,24 +336,14 @@ def get_last_voted_block_id(conn, node_pubkey):
 
 
 @register_query(MongoDBConnection)
-def get_unvoted_blocks(conn, node_pubkey):
-    return conn.run(
-        conn.collection('bigchain')
-        .aggregate([
-            {'$lookup': {
-                'from': 'votes',
-                'localField': 'id',
-                'foreignField': 'vote.voting_for_block',
-                'as': 'votes'
-            }},
-            {'$match': {
-                'votes.node_pubkey': {'$ne': node_pubkey},
-                'block.transactions.operation': {'$ne': 'GENESIS'}
-            }},
-            {'$project': {
-                'votes': False, '_id': False
-            }}
-        ]))
+def get_new_blocks_feed(conn, start_block_id):
+    namespace = conn.dbname + '.bigchain'
+    match = {'o.id': start_block_id, 'op': 'i', 'ns': namespace}
+    # Neccesary to find in descending order since tests may write same block id several times
+    query = conn.query().local.oplog.rs.find(match).sort('$natural', -1).next()['ts']
+    last_ts = conn.run(query)
+    feed = run_changefeed(conn, 'bigchain', last_ts)
+    return (evt['o'] for evt in feed if evt['op'] == 'i')
 
 
 @register_query(MongoDBConnection)
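It is worth spelling out why this fixes the ordering bug: the oplog is a strictly ordered log of writes, so seeking to the insert of `start_block_id` and tailing from its timestamp yields later blocks exactly in the order they were written — a guarantee the old `$lookup` aggregation never gave. A usage sketch, with the setup assumed and `b1_id` a placeholder:

    from bigchaindb.backend import connect, query

    conn = connect()  # assumes the MongoDB backend is configured
    b1_id = '<id of a block already in the bigchain collection>'  # placeholder

    feed = query.get_new_blocks_feed(conn, b1_id)
    next_block = next(feed)  # the first block written *after* b1, never b1 itself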
@@ -330,20 +330,6 @@ def get_last_voted_block_id(connection, node_pubkey):
     raise NotImplementedError
 
 
-@singledispatch
-def get_unvoted_blocks(connection, node_pubkey):
-    """Return all the blocks that have not been voted by the specified node.
-
-    Args:
-        node_pubkey (str): base58 encoded public key
-
-    Returns:
-        :obj:`list` of :obj:`dict`: a list of unvoted blocks
-    """
-
-    raise NotImplementedError
-
-
 @singledispatch
 def get_txids_filtered(connection, asset_id, operation=None):
     """
@@ -357,6 +343,21 @@ def get_txids_filtered(connection, asset_id, operation=None):
     raise NotImplementedError
 
 
+@singledispatch
+def get_new_blocks_feed(connection, start_block_id):
+    """
+    Return a generator that yields change events of the blocks feed
+
+    Args:
+        start_block_id (str): ID of block to resume from
+
+    Returns:
+        Generator of change events
+    """
+
+    raise NotImplementedError
+
+
 @singledispatch
 def text_search(conn, search, *, language='english', case_sensitive=False,
                 diacritic_sensitive=False, text_score=False, limit=0):
@@ -14,22 +14,13 @@ register_changefeed = module_dispatch_registrar(backend.changefeed)
 
 
 class RethinkDBChangeFeed(ChangeFeed):
-    """This class wraps a RethinkDB changefeed."""
+    """This class wraps a RethinkDB changefeed as a multipipes Node."""
 
     def run_forever(self):
         for element in self.prefeed:
             self.outqueue.put(element)
 
-        while True:
-            try:
-                self.run_changefeed()
-                break
-            except (BackendError, r.ReqlDriverError) as exc:
-                logger.exception('Error connecting to the database, retrying')
-                time.sleep(1)
-
-    def run_changefeed(self):
-        for change in self.connection.run(r.table(self.table).changes()):
+        for change in run_changefeed(self.connection, self.table):
             is_insert = change['old_val'] is None
             is_delete = change['new_val'] is None
             is_update = not is_insert and not is_delete
@@ -42,6 +33,19 @@ class RethinkDBChangeFeed(ChangeFeed):
                 self.outqueue.put(change['new_val'])
 
 
+def run_changefeed(connection, table):
+    """Encapsulate operational logic of tailing changefeed from RethinkDB
+    """
+    while True:
+        try:
+            for change in connection.run(r.table(table).changes()):
+                yield change
+            break
+        except (BackendError, r.ReqlDriverError) as exc:
+            logger.exception('Error connecting to the database, retrying')
+            time.sleep(1)
+
+
 @register_changefeed(RethinkDBConnection)
 def get_changefeed(connection, table, operation, *, prefeed=None):
     """Return a RethinkDB changefeed.
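Unlike its MongoDB counterpart, this generator takes no resume position: RethinkDB changefeeds start from "now", which is exactly why the RethinkDB `get_new_blocks_feed` below can only log a warning that it cannot resume. A sketch of consuming it, with the connection setup assumed:

    from bigchaindb.backend import connect
    from bigchaindb.backend.rethinkdb.changefeed import run_changefeed

    connection = connect()  # assumes the RethinkDB backend is configured
    for change in run_changefeed(connection, 'bigchain'):
        new_val = change['new_val']  # None for deletes, the new row otherwise
        print(new_val)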
@@ -1,9 +1,11 @@
 from itertools import chain
+import logging as logger
 from time import time
 
 import rethinkdb as r
 
 from bigchaindb import backend, utils
+from bigchaindb.backend.rethinkdb import changefeed
 from bigchaindb.common import exceptions
 from bigchaindb.common.transaction import Transaction
 from bigchaindb.common.utils import serialize
@@ -11,6 +13,9 @@ from bigchaindb.backend.utils import module_dispatch_registrar
 from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
 
 
+logger = logger.getLogger(__name__)
+
+
 READ_MODE = 'majority'
 WRITE_DURABILITY = 'hard'
 
@@ -255,18 +260,13 @@ def get_last_voted_block_id(connection, node_pubkey):
 
 
 @register_query(RethinkDBConnection)
-def get_unvoted_blocks(connection, node_pubkey):
-    unvoted = connection.run(
-        r.table('bigchain', read_mode=READ_MODE)
-        .filter(lambda block: r.table('votes', read_mode=READ_MODE)
-                .get_all([block['id'], node_pubkey], index='block_and_voter')
-                .is_empty())
-        .order_by(r.asc(r.row['block']['timestamp'])))
-
-    # FIXME: I (@vrde) don't like this solution. Filtering should be done at a
-    # database level. Solving issue #444 can help untangling the situation
-    unvoted_blocks = filter(lambda block: not utils.is_genesis_block(block), unvoted)
-    return unvoted_blocks
+def get_new_blocks_feed(connection, start_block_id):  # pragma: no cover
+    logger.warning('RethinkDB changefeed unable to resume from given block: %s',
+                   start_block_id)
+    # In order to get blocks in the correct order, it may be acceptable to
+    # look in the votes table to see what order other nodes have used.
+    for change in changefeed.run_changefeed(connection, 'bigchain'):
+        yield change['new_val']
 
 
 @register_query(RethinkDBConnection)
@@ -574,16 +574,6 @@ class Bigchain(object):
                                                            self.me)
         return Block.from_dict(self.get_block(last_block_id))
 
-    def get_unvoted_blocks(self):
-        """Return all the blocks that have not been voted on by this node.
-
-        Returns:
-            :obj:`list` of :obj:`dict`: a list of unvoted blocks
-        """
-
-        # XXX: should this return instaces of Block?
-        return backend.query.get_unvoted_blocks(self.connection, self.me)
-
     def block_election(self, block):
         if type(block) != dict:
             block = block.to_dict()
@@ -10,10 +10,7 @@ from collections import Counter
 
 from multipipes import Pipeline, Node
 
-import bigchaindb
-from bigchaindb import Bigchain
-from bigchaindb import backend
-from bigchaindb.backend.changefeed import ChangeFeed
+from bigchaindb import Bigchain, backend
 from bigchaindb.models import Transaction, Block
 from bigchaindb.common import exceptions
 
@@ -151,20 +148,13 @@ class Vote:
         return vote
 
 
-def initial():
-    """Return unvoted blocks."""
-    b = Bigchain()
-    rs = b.get_unvoted_blocks()
-    return rs
-
-
 def create_pipeline():
     """Create and return the pipeline of operations to be distributed
     on different processes."""
 
     voter = Vote()
 
-    vote_pipeline = Pipeline([
+    return Pipeline([
         Node(voter.validate_block),
         Node(voter.ungroup),
         Node(voter.validate_tx, fraction_of_cores=1),
|
|||||||
Node(voter.write_vote)
|
Node(voter.write_vote)
|
||||||
])
|
])
|
||||||
|
|
||||||
return vote_pipeline
|
|
||||||
|
|
||||||
|
|
||||||
def get_changefeed():
|
def get_changefeed():
|
||||||
connection = backend.connect(**bigchaindb.config['database'])
|
"""Create and return ordered changefeed of blocks starting from
|
||||||
return backend.get_changefeed(connection, 'bigchain', ChangeFeed.INSERT,
|
last voted block"""
|
||||||
prefeed=initial())
|
b = Bigchain()
|
||||||
|
last_block_id = b.get_last_voted_block().id
|
||||||
|
feed = backend.query.get_new_blocks_feed(b.connection, last_block_id)
|
||||||
|
return Node(feed.__next__, name='changefeed')
|
||||||
|
|
||||||
|
|
||||||
def start():
|
def start():
|
||||||
|
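The trick in the new `get_changefeed` deserves a note: a multipipes `Node` only needs a callable that produces the next item, so handing it the bound `__next__` of a generator turns any ordered feed into a pipeline source without subclassing `ChangeFeed`. A toy illustration of the same pattern; everything here is assumed except the `Node(feed.__next__, ...)` idiom, which comes straight from the diff:

    from multipipes import Node

    def counter():
        n = 0
        while True:
            yield n
            n += 1

    feed = counter()
    source = Node(feed.__next__, name='changefeed')
    # Each time the pipeline asks this node for an item it calls
    # feed.__next__(), so items flow out in exactly the generator's order.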
@@ -10,34 +10,32 @@ def mock_changefeed_data():
     return [
         {
             'op': 'i',
-            'o': {'_id': '', 'msg': 'seems like we have an insert here'}
+            'o': {'_id': '', 'msg': 'seems like we have an insert here'},
+            'ts': 1,
         },
         {
             'op': 'd',
-            'o': {'msg': 'seems like we have a delete here'}
+            'o': {'msg': 'seems like we have a delete here'},
+            'ts': 2,
         },
         {
             'op': 'u',
             'o': {'msg': 'seems like we have an update here'},
-            'o2': {'_id': 'some-id'}
+            'o2': {'_id': 'some-id'},
+            'ts': 3,
         },
     ]
 
 
 @pytest.mark.bdb
-@mock.patch('pymongo.cursor.Cursor.alive', new_callable=mock.PropertyMock)
+@mock.patch('bigchaindb.backend.mongodb.changefeed._FEED_STOP', True)
 @mock.patch('pymongo.cursor.Cursor.next')
-def test_changefeed_insert(mock_cursor_next, mock_cursor_alive,
-                           mock_changefeed_data):
+def test_changefeed_insert(mock_cursor_next, mock_changefeed_data):
     from bigchaindb.backend import get_changefeed, connect
     from bigchaindb.backend.changefeed import ChangeFeed
 
     # setup connection and mocks
     conn = connect()
-    # changefeed.run_forever only returns when the cursor is closed
-    # so we mock `alive` to be False it finishes reading the mocked data
-    mock_cursor_alive.side_effect = [mock.DEFAULT, mock.DEFAULT,
-                                     mock.DEFAULT, mock.DEFAULT, False]
    # mock the `next` method of the cursor to return the mocked data
    mock_cursor_next.side_effect = [mock.DEFAULT] + mock_changefeed_data
 
@@ -51,16 +49,13 @@ def test_changefeed_insert(mock_cursor_next, mock_cursor_alive,
 
 
 @pytest.mark.bdb
-@mock.patch('pymongo.cursor.Cursor.alive', new_callable=mock.PropertyMock)
+@mock.patch('bigchaindb.backend.mongodb.changefeed._FEED_STOP', True)
 @mock.patch('pymongo.cursor.Cursor.next')
-def test_changefeed_delete(mock_cursor_next, mock_cursor_alive,
-                           mock_changefeed_data):
+def test_changefeed_delete(mock_cursor_next, mock_changefeed_data):
     from bigchaindb.backend import get_changefeed, connect
     from bigchaindb.backend.changefeed import ChangeFeed
 
     conn = connect()
-    mock_cursor_alive.side_effect = [mock.DEFAULT, mock.DEFAULT,
-                                     mock.DEFAULT, mock.DEFAULT, False]
     mock_cursor_next.side_effect = [mock.DEFAULT] + mock_changefeed_data
 
     outpipe = Pipe()
@@ -73,17 +68,15 @@ def test_changefeed_delete(mock_cursor_next, mock_cursor_alive,
 
 
 @pytest.mark.bdb
+@mock.patch('bigchaindb.backend.mongodb.changefeed._FEED_STOP', True)
 @mock.patch('pymongo.collection.Collection.find_one')
-@mock.patch('pymongo.cursor.Cursor.alive', new_callable=mock.PropertyMock)
 @mock.patch('pymongo.cursor.Cursor.next')
-def test_changefeed_update(mock_cursor_next, mock_cursor_alive,
-                           mock_cursor_find_one, mock_changefeed_data):
+def test_changefeed_update(mock_cursor_next, mock_cursor_find_one,
+                           mock_changefeed_data):
     from bigchaindb.backend import get_changefeed, connect
     from bigchaindb.backend.changefeed import ChangeFeed
 
     conn = connect()
-    mock_cursor_alive.side_effect = [mock.DEFAULT, mock.DEFAULT,
-                                     mock.DEFAULT, mock.DEFAULT, False]
     mock_cursor_next.side_effect = [mock.DEFAULT] + mock_changefeed_data
     mock_cursor_find_one.return_value = mock_changefeed_data[2]['o']
 
@@ -101,18 +94,15 @@ def test_changefeed_update(mock_cursor_next, mock_cursor_alive,
 
 
 @pytest.mark.bdb
+@mock.patch('bigchaindb.backend.mongodb.changefeed._FEED_STOP', True)
 @mock.patch('pymongo.collection.Collection.find_one')
-@mock.patch('pymongo.cursor.Cursor.alive', new_callable=mock.PropertyMock)
 @mock.patch('pymongo.cursor.Cursor.next')
-def test_changefeed_multiple_operations(mock_cursor_next, mock_cursor_alive,
-                                        mock_cursor_find_one,
+def test_changefeed_multiple_operations(mock_cursor_next, mock_cursor_find_one,
                                         mock_changefeed_data):
     from bigchaindb.backend import get_changefeed, connect
     from bigchaindb.backend.changefeed import ChangeFeed
 
     conn = connect()
-    mock_cursor_alive.side_effect = [mock.DEFAULT, mock.DEFAULT,
-                                     mock.DEFAULT, mock.DEFAULT, False]
     mock_cursor_next.side_effect = [mock.DEFAULT] + mock_changefeed_data
     mock_cursor_find_one.return_value = mock_changefeed_data[2]['o']
 
@@ -128,16 +118,13 @@ def test_changefeed_multiple_operations(mock_cursor_next, mock_cursor_alive,
 
 
 @pytest.mark.bdb
-@mock.patch('pymongo.cursor.Cursor.alive', new_callable=mock.PropertyMock)
+@mock.patch('bigchaindb.backend.mongodb.changefeed._FEED_STOP', True)
 @mock.patch('pymongo.cursor.Cursor.next')
-def test_changefeed_prefeed(mock_cursor_next, mock_cursor_alive,
-                            mock_changefeed_data):
+def test_changefeed_prefeed(mock_cursor_next, mock_changefeed_data):
     from bigchaindb.backend import get_changefeed, connect
     from bigchaindb.backend.changefeed import ChangeFeed
 
     conn = connect()
-    mock_cursor_alive.side_effect = [mock.DEFAULT, mock.DEFAULT,
-                                     mock.DEFAULT, mock.DEFAULT, False]
     mock_cursor_next.side_effect = [mock.DEFAULT] + mock_changefeed_data
 
     outpipe = Pipe()
@@ -150,19 +137,13 @@ def test_changefeed_prefeed(mock_cursor_next, mock_cursor_alive,
 
 
 @pytest.mark.bdb
-@mock.patch('bigchaindb.backend.mongodb.changefeed.MongoDBChangeFeed.run_changefeed')  # noqa
-def test_connection_failure(mock_run_changefeed):
-    from bigchaindb.backend import get_changefeed, connect
+def test_connection_failure():
     from bigchaindb.backend.exceptions import ConnectionError
-    from bigchaindb.backend.changefeed import ChangeFeed
+    from bigchaindb.backend.mongodb.changefeed import run_changefeed
 
-    conn = connect()
-    mock_run_changefeed.side_effect = [ConnectionError(),
-                                       mock.DEFAULT]
-
-    changefeed = get_changefeed(conn, 'backlog', ChangeFeed.INSERT)
-    changefeed.run_forever()
-
-    # run_changefeed raises an exception the first time its called and then
-    # it's called again
-    assert mock_run_changefeed.call_count == 2
+    conn = mock.MagicMock()
+    conn.run.side_effect = [ConnectionError(), RuntimeError()]
+    changefeed = run_changefeed(conn, 'backlog', -1)
+    with pytest.raises(RuntimeError):
+        for record in changefeed:
+            assert False, 'Shouldn\'t get here'
@@ -1,6 +1,7 @@
 from copy import deepcopy
 
 import pytest
+from unittest import mock
 import pymongo
 
 pytestmark = pytest.mark.bdb
@@ -381,21 +382,6 @@ def test_get_last_voted_block_id(genesis_block, signed_create_tx, b):
     query.get_last_voted_block_id(conn, b.me)
 
 
-def test_get_unvoted_blocks(signed_create_tx):
-    from bigchaindb.backend import connect, query
-    from bigchaindb.models import Block
-    conn = connect()
-
-    # create and insert a block
-    block = Block(transactions=[signed_create_tx], node_pubkey='aaa')
-    conn.db.bigchain.insert_one(block.to_dict())
-
-    unvoted_blocks = list(query.get_unvoted_blocks(conn, 'aaa'))
-
-    assert len(unvoted_blocks) == 1
-    assert unvoted_blocks[0] == block.to_dict()
-
-
 def test_get_txids_filtered(signed_create_tx, signed_transfer_tx):
     from bigchaindb.backend import connect, query
     from bigchaindb.models import Block, Transaction
@@ -423,6 +409,31 @@ def test_get_txids_filtered(signed_create_tx, signed_transfer_tx):
     assert txids == {signed_transfer_tx.id}
 
 
+@mock.patch('bigchaindb.backend.mongodb.changefeed._FEED_STOP', True)
+def test_get_new_blocks_feed(b, create_tx):
+    from bigchaindb.backend import query
+    from bigchaindb.models import Block
+    import random
+
+    def create_block():
+        ts = str(random.random())
+        block = Block(transactions=[create_tx], timestamp=ts)
+        b.write_block(block)
+        return block.decouple_assets()[1]
+
+    create_block()
+    b1 = create_block()
+    b2 = create_block()
+
+    feed = query.get_new_blocks_feed(b.connection, b1['id'])
+
+    assert feed.__next__() == b2
+
+    b3 = create_block()
+
+    assert list(feed) == [b3]
+
+
 def test_get_spending_transactions(user_pk):
     from bigchaindb.backend import connect, query
     from bigchaindb.models import Block, Transaction
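One detail makes this test terminate: patching `_FEED_STOP` to True lets the generator return on the first `StopIteration` from the exhausted tailable cursor instead of blocking forever, which is what allows `list(feed)` above to finish. The same trick can be used ad hoc; in this sketch the connection setup is assumed and `start_block_id` is a placeholder:

    from unittest import mock
    from bigchaindb.backend import connect, query

    conn = connect()  # assumes the MongoDB backend, set up as in the test above
    start_block_id = '<a known block id>'  # placeholder

    with mock.patch('bigchaindb.backend.mongodb.changefeed._FEED_STOP', True):
        # Once the tailable cursor runs dry, run_changefeed returns instead
        # of waiting for more oplog entries, so list() terminates.
        blocks_so_far = list(query.get_new_blocks_feed(conn, start_block_id))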
@@ -31,11 +31,11 @@ def test_schema(schema_func_name, args_qty):
     ('get_block', 1),
     ('write_vote', 1),
     ('get_last_voted_block_id', 1),
-    ('get_unvoted_blocks', 1),
     ('get_spent', 2),
     ('get_votes_by_block_id_and_voter', 2),
     ('update_transaction', 2),
     ('get_transaction_from_block', 2),
+    ('get_new_blocks_feed', 1),
     ('get_votes_for_blocks_by_voter', 2),
     ('get_spending_transactions', 1),
     ('write_assets', 1),
@@ -40,7 +40,6 @@ from contextlib import contextmanager
 from unittest.mock import patch
 
 import bigchaindb.core
-from bigchaindb.backend.changefeed import ChangeFeed
 import bigchaindb.pipelines.block
 import bigchaindb.pipelines.stale
 import bigchaindb.pipelines.vote
@@ -58,7 +57,7 @@ class MultipipesStepper:
         name = '%s_%s' % (prefix, node.name)
         next_name = '%s_%s' % (prefix, next.name)
 
-        if isinstance(node, ChangeFeed):
+        if node.name == 'changefeed':
             self.processes.append(node)
 
         def f(*args, **kwargs):
@@ -521,95 +521,6 @@ def test_invalid_block_voting(monkeypatch, b, user_pk, genesis_block):
                            vote_doc['signature']) is True
 
 
-@pytest.mark.genesis
-def test_voter_considers_unvoted_blocks_when_single_node(monkeypatch, b):
-    from bigchaindb.backend import query
-    from bigchaindb.pipelines import vote
-
-    outpipe = Pipe()
-
-    monkeypatch.setattr('time.time', lambda: 1000000000)
-
-    block_ids = []
-    # insert blocks in the database while the voter process is not listening
-    # (these blocks won't appear in the changefeed)
-    monkeypatch.setattr('time.time', lambda: 1000000020)
-    block_1 = dummy_block(b)
-    block_ids.append(block_1.id)
-    monkeypatch.setattr('time.time', lambda: 1000000030)
-    b.write_block(block_1)
-    block_2 = dummy_block(b)
-    block_ids.append(block_2.id)
-    b.write_block(block_2)
-
-    vote_pipeline = vote.create_pipeline()
-    vote_pipeline.setup(indata=vote.get_changefeed(), outdata=outpipe)
-    vote_pipeline.start()
-
-    # We expects two votes, so instead of waiting an arbitrary amount
-    # of time, we can do two blocking calls to `get`
-    outpipe.get()
-    outpipe.get()
-
-    # create a new block that will appear in the changefeed
-    monkeypatch.setattr('time.time', lambda: 1000000040)
-    block_3 = dummy_block(b)
-    block_ids.append(block_3.id)
-    b.write_block(block_3)
-
-    # Same as before with the two `get`s
-    outpipe.get()
-
-    vote_pipeline.terminate()
-
-    # retrieve vote
-    votes = [list(query.get_votes_by_block_id(b.connection, _id))[0]
-             for _id in block_ids]
-
-    assert all(vote['node_pubkey'] == b.me for vote in votes)
-
-
-@pytest.mark.genesis
-def test_voter_chains_blocks_with_the_previous_ones(monkeypatch, b):
-    from bigchaindb.backend import query
-    from bigchaindb.pipelines import vote
-
-    outpipe = Pipe()
-
-    monkeypatch.setattr('time.time', lambda: 1000000000)
-
-    block_ids = []
-    monkeypatch.setattr('time.time', lambda: 1000000020)
-    block_1 = dummy_block(b)
-    block_ids.append(block_1.id)
-    b.write_block(block_1)
-
-    monkeypatch.setattr('time.time', lambda: 1000000030)
-    block_2 = dummy_block(b)
-    block_ids.append(block_2.id)
-    b.write_block(block_2)
-
-    vote_pipeline = vote.create_pipeline()
-    vote_pipeline.setup(indata=vote.get_changefeed(), outdata=outpipe)
-    vote_pipeline.start()
-
-    # We expects two votes, so instead of waiting an arbitrary amount
-    # of time, we can do two blocking calls to `get`
-    outpipe.get()
-    outpipe.get()
-    vote_pipeline.terminate()
-
-    # retrive blocks from bigchain
-    blocks = [b.get_block(_id) for _id in block_ids]
-
-    # retrieve votes
-    votes = [list(query.get_votes_by_block_id(b.connection, _id))[0]
-             for _id in block_ids]
-
-    assert ({v['vote']['voting_for_block'] for v in votes} ==
-            {block['id'] for block in blocks})
-
-
 @pytest.mark.genesis
 def test_voter_checks_for_previous_vote(monkeypatch, b):
     from bigchaindb.backend import query