Mirror of https://github.com/bigchaindb/bigchaindb.git, synced 2024-10-13 13:34:05 +00:00
Replace cherrypicked class-based architecture with singledispatch
This commit is contained in:
parent 1d0a7d2153
commit dabb81ac98
The old db package exports (factory, Query, Schema, Connection) are replaced with a single import:

@@ -1,4 +1 @@
-from bigchaindb.db.factory import get_backend_factory
-from bigchaindb.db.query import Query
-from bigchaindb.db.schema import Schema
-from bigchaindb.db.connection import Connection
+from bigchaindb.backend.connection import Connection  # noqa
A new changefeed interface module is added:

@@ -0,0 +1 @@
+"""Changefeed interfaces for backend databases"""
@@ -1,254 +1,287 @@
The abstract Query class ("Interface to query the database", with every method raising NotImplementedError) is removed; its methods, carrying the same docstrings, become generic functions that dispatch on the connection type:

"""Query interfaces for backend databases"""

from functools import singledispatch


@singledispatch
def write_transaction(connection, signed_transaction):
    """Write a transaction to the backlog table.

    Args:
        signed_transaction (dict): a signed transaction.

    Returns:
        The result of the operation.
    """
    raise NotImplementedError()


@singledispatch
def update_transaction(connection, transaction_id, doc):
    """Update a transaction in the backlog table.

    Args:
        transaction_id (str): the id of the transaction.
        doc (dict): the values to update.

    Returns:
        The result of the operation.
    """
    raise NotImplementedError()


@singledispatch
def delete_transaction(connection, *transaction_id):
    """Delete a transaction from the backlog.

    Args:
        *transaction_id (str): the transaction(s) to delete.

    Returns:
        The database response.
    """
    raise NotImplementedError()


@singledispatch
def get_stale_transactions(connection, reassign_delay):
    """Get a cursor of stale transactions.

    Transactions are considered stale if they have been assigned a node
    but are still in the backlog after some amount of time specified in
    the configuration.

    Args:
        reassign_delay (int): threshold (in seconds) to mark a transaction stale.

    Returns:
        A cursor of transactions.
    """
    raise NotImplementedError()


@singledispatch
def get_transaction_from_block(connection, transaction_id, block_id):
    """Get a transaction from a specific block.

    Args:
        transaction_id (str): the id of the transaction.
        block_id (str): the id of the block.

    Returns:
        The matching transaction.
    """
    raise NotImplementedError()


@singledispatch
def get_transaction_from_backlog(connection, transaction_id):
    """Get a transaction from the backlog.

    Args:
        transaction_id (str): the id of the transaction.

    Returns:
        The matching transaction.
    """
    raise NotImplementedError()


@singledispatch
def get_blocks_status_from_transaction(connection, transaction_id):
    """Retrieve block election information for the blocks containing
    a given transaction.

    Args:
        transaction_id (str): the id of the transaction.

    Returns:
        :obj:`list` of :obj:`dict`: a list of blocks with only election
        information.
    """
    raise NotImplementedError()


@singledispatch
def get_transactions_by_metadata_id(connection, metadata_id):
    """Retrieve transactions related to a metadata.

    When creating a transaction one of the optional arguments is the
    `metadata`. The metadata is a generic dict that contains extra
    information that can be appended to the transaction.

    To make it easy to query the bigchain for that particular metadata,
    a UUID is created for the metadata and stored with the transaction.

    Args:
        metadata_id (str): the id for this particular metadata.

    Returns:
        A list of transactions containing that metadata. If no transaction
        exists with that metadata it returns an empty list `[]`.
    """
    raise NotImplementedError()


@singledispatch
def get_transactions_by_asset_id(connection, asset_id):
    """Retrieve transactions related to a particular asset.

    A digital asset in bigchaindb is identified by a UUID. This allows us
    to query all the transactions related to a particular digital asset,
    knowing the id.

    Args:
        asset_id (str): the id of the asset.

    Returns:
        A list of transactions related to the asset. If no transaction
        exists for that asset it returns an empty list `[]`.
    """
    raise NotImplementedError()


@singledispatch
def get_spent(connection, transaction_id, condition_id):
    """Check if a `txid` was already used as an input.

    A transaction can be used as an input for another transaction. Bigchain
    needs to make sure that a given `txid` is only used once.

    Args:
        transaction_id (str): the id of the transaction.
        condition_id (int): the index of the condition in the respective
            transaction.

    Returns:
        The transaction that used the `txid` as an input, else `None`.
    """
    raise NotImplementedError()


@singledispatch
def get_owned_ids(connection, owner):
    """Retrieve a list of `txids` that can be used as inputs.

    Args:
        owner (str): base58 encoded public key.

    Returns:
        A cursor for the matching transactions.
    """
    raise NotImplementedError()


@singledispatch
def get_votes_by_block_id(connection, block_id):
    """Get all the votes cast for a specific block.

    Args:
        block_id (str): the block id to use.

    Returns:
        A cursor for the matching votes.
    """
    raise NotImplementedError()


@singledispatch
def get_votes_by_block_id_and_voter(connection, block_id, node_pubkey):
    """Get all the votes cast for a specific block by a specific voter.

    Args:
        block_id (str): the block id to use.
        node_pubkey (str): base58 encoded public key.

    Returns:
        A cursor for the matching votes.
    """
    raise NotImplementedError()


@singledispatch
def write_block(connection, block, durability='soft'):
    """Write a block to the bigchain table.

    Args:
        block (dict): the block to write.

    Returns:
        The database response.
    """
    raise NotImplementedError()


@singledispatch
def has_transaction(connection, transaction_id):
    """Check if a transaction exists in the bigchain table.

    Args:
        transaction_id (str): the id of the transaction to check.

    Returns:
        ``True`` if the transaction exists, ``False`` otherwise.
    """
    raise NotImplementedError()


@singledispatch
def count_blocks(connection):
    """Count the number of blocks in the bigchain table.

    Returns:
        The number of blocks.
    """
    raise NotImplementedError()


@singledispatch
def write_vote(connection, vote):
    """Write a vote to the votes table.

    Args:
        vote (dict): the vote to write.

    Returns:
        The database response.
    """
    raise NotImplementedError()


@singledispatch
def get_last_voted_block(connection, node_pubkey):
    """Get the last voted block for a specific node.

    Args:
        node_pubkey (str): base58 encoded public key.

    Returns:
        The last block the node has voted on. If the node didn't cast
        any vote then the genesis block is returned.
    """
    raise NotImplementedError()


@singledispatch
def get_unvoted_blocks(connection, node_pubkey):
    """Return all the blocks that have not been voted by the specified node.

    Args:
        node_pubkey (str): base58 encoded public key.

    Returns:
        :obj:`list` of :obj:`dict`: a list of unvoted blocks.
    """
    raise NotImplementedError()
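For readers unfamiliar with the pattern: functools.singledispatch selects an implementation based on the type of the first argument, so each backend can register concrete query functions against its own connection class while callers keep using the generic names above. A minimal self-contained sketch; DummyConnection is illustrative, not from the diff:

from functools import singledispatch


@singledispatch
def count_blocks(connection):
    # generic fallback: no backend registered for this connection type
    raise NotImplementedError()


class DummyConnection:
    """Illustrative stand-in for a backend connection class."""
    def __init__(self):
        self.blocks = ['genesis']


@count_blocks.register(DummyConnection)
def _count_blocks_dummy(connection):
    # chosen whenever the first argument is a DummyConnection instance
    return len(connection.blocks)


print(count_blocks(DummyConnection()))  # -> 1
try:
    count_blocks(object())              # unregistered type hits the fallback
except NotImplementedError:
    print('no backend registered')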
The RethinkDB connection drops its unused logger, falls back to the configured defaults for host, port and database name, and makes connect() public:

@@ -1,11 +1,9 @@
 import time
-import logging
 
 import rethinkdb as r
 
 from bigchaindb.backend.connection import Connection
+import bigchaindb
-
-logger = logging.getLogger(__name__)
 
 
 class RethinkDBConnection(Connection):

@@ -16,19 +14,19 @@ class RethinkDBConnection(Connection):
     more times to run the query or open a connection.
     """
 
-    def __init__(self, host, port, dbname, max_tries=3):
+    def __init__(self, host=None, port=None, db=None, max_tries=3):
         """Create a new Connection instance.
 
         Args:
             host (str, optional): the host to connect to.
             port (int, optional): the port to connect to.
-            dbname (str, optional): the name of the database to use.
+            db (str, optional): the database to use.
             max_tries (int, optional): how many tries before giving up.
         """
 
-        self.host = host
-        self.port = port
-        self.dbname = dbname
+        self.host = host or bigchaindb.config['database']['host']
+        self.port = port or bigchaindb.config['database']['port']
+        self.db = db or bigchaindb.config['database']['name']
         self.max_tries = max_tries
         self.conn = None

@@ -40,7 +38,7 @@ class RethinkDBConnection(Connection):
         """
 
         if self.conn is None:
-            self._connect()
+            self.connect()
 
         for i in range(self.max_tries):
             try:

@@ -49,12 +47,13 @@ class RethinkDBConnection(Connection):
                 if i + 1 == self.max_tries:
                     raise
                 else:
-                    self._connect()
+                    self.connect()
 
-    def _connect(self):
+    def connect(self):
         for i in range(self.max_tries):
             try:
-                self.conn = r.connect(host=self.host, port=self.port, db=self.dbname)
+                self.conn = r.connect(host=self.host, port=self.port,
+                                      db=self.db)
             except r.ReqlDriverError as exc:
                 if i + 1 == self.max_tries:
                     raise
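With the new defaults, a connection can now be created without arguments; a brief usage sketch with example values, not from the diff:

# falls back to bigchaindb.config['database'] for host, port and name
conn = RethinkDBConnection()

# or override explicitly
conn = RethinkDBConnection(host='localhost', port=28015, db='bigchain', max_tries=3)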
@@ -1,439 +1,445 @@
The RethinkDBBackend(Query) class ("Backend implementation for RethinkDB") is dissolved: the Query and Connection-wrapper imports go away, the instance attributes read_mode/durability become the module constants READ_MODE/WRITE_DURABILITY, and every method becomes a function that takes the connection as its first argument (self.connection.run(...) becomes connection.run(...)). The new module follows, with docstrings trimmed to their summary lines; the full text matches the interface module above:

"""Query implementation for RethinkDB"""

from time import time

import rethinkdb as r

from bigchaindb import util
from bigchaindb.common import exceptions

READ_MODE = 'majority'
WRITE_DURABILITY = 'hard'


def write_transaction(connection, signed_transaction):
    """Write a transaction to the backlog table."""
    return connection.run(
        r.table('backlog')
        .insert(signed_transaction, durability=WRITE_DURABILITY))


def update_transaction(connection, transaction_id, doc):
    """Update a transaction in the backlog table."""
    return connection.run(
        r.table('backlog')
        .get(transaction_id)
        .update(doc))


def delete_transaction(connection, *transaction_id):
    """Delete a transaction from the backlog."""
    return connection.run(
        r.table('backlog')
        .get_all(*transaction_id)
        .delete(durability=WRITE_DURABILITY))


def get_stale_transactions(connection, reassign_delay):
    """Get a cursor of stale transactions."""
    return connection.run(
        r.table('backlog')
        .filter(lambda tx: time() - tx['assignment_timestamp'] > reassign_delay))


def get_transaction_from_block(connection, transaction_id, block_id):
    """Get a transaction from a specific block."""
    return connection.run(
        r.table('bigchain', read_mode=READ_MODE)
        .get(block_id)
        .get_field('block')
        .get_field('transactions')
        .filter(lambda tx: tx['id'] == transaction_id))[0]


def get_transaction_from_backlog(connection, transaction_id):
    """Get a transaction from the backlog."""
    return connection.run(
        r.table('backlog')
        .get(transaction_id)
        .without('assignee', 'assignment_timestamp')
        .default(None))


def get_blocks_status_from_transaction(connection, transaction_id):
    """Retrieve block election information for the blocks containing a transaction."""
    return connection.run(
        r.table('bigchain', read_mode=READ_MODE)
        .get_all(transaction_id, index='transaction_id')
        .pluck('votes', 'id', {'block': ['voters']}))


def get_txids_by_metadata_id(connection, metadata_id):
    """Retrieve transaction ids related to a particular metadata."""
    return connection.run(
        r.table('bigchain', read_mode=READ_MODE)
        .get_all(metadata_id, index='metadata_id')
        .concat_map(lambda block: block['block']['transactions'])
        .filter(lambda transaction:
                transaction['transaction']['metadata']['id'] ==
                metadata_id)
        .get_field('id'))


def get_txids_by_asset_id(connection, asset_id):
    """Retrieve transaction ids related to a particular asset."""
    # here we only want to return the transaction ids since later on when
    # we are going to retrieve the transaction with status validation
    return connection.run(
        r.table('bigchain')
        .get_all(asset_id, index='asset_id')
        .concat_map(lambda block: block['block']['transactions'])
        .filter(lambda transaction: transaction['transaction']['asset']['id'] == asset_id)
        .get_field('id'))


def get_asset_by_id(connection, asset_id):
    """Return the asset associated with an asset_id, as a rethinkdb cursor."""
    return connection.run(
        r.table('bigchain', read_mode=READ_MODE)
        .get_all(asset_id, index='asset_id')
        .concat_map(lambda block: block['block']['transactions'])
        .filter(lambda transaction:
                transaction['transaction']['asset']['id'] == asset_id)
        .filter(lambda transaction:
                transaction['transaction']['operation'] == 'CREATE')
        .pluck({'transaction': 'asset'}))


def get_spent(connection, transaction_id, condition_id):
    """Check if a `txid` was already used as an input."""
    # TODO: use index!
    return connection.run(
        r.table('bigchain', read_mode=READ_MODE)
        .concat_map(lambda doc: doc['block']['transactions'])
        .filter(lambda transaction: transaction['transaction']['fulfillments'].contains(
            lambda fulfillment: fulfillment['input'] == {'txid': transaction_id, 'cid': condition_id})))


def get_owned_ids(connection, owner):
    """Retrieve a list of `txids` that can be used as inputs."""
    # TODO: use index!
    return connection.run(
        r.table('bigchain', read_mode=READ_MODE)
        .concat_map(lambda doc: doc['block']['transactions'])
        .filter(lambda tx: tx['transaction']['conditions'].contains(
            lambda c: c['owners_after'].contains(owner))))


def get_votes_by_block_id(connection, block_id):
    """Get all the votes cast for a specific block."""
    return connection.run(
        r.table('votes', read_mode=READ_MODE)
        .between([block_id, r.minval], [block_id, r.maxval], index='block_and_voter'))


def get_votes_by_block_id_and_voter(connection, block_id, node_pubkey):
    """Get all the votes cast for a specific block by a specific voter."""
    return connection.run(
        r.table('votes')
        .get_all([block_id, node_pubkey], index='block_and_voter'))


def write_block(connection, block):
    """Write a block to the bigchain table."""
    return connection.run(
        r.table('bigchain')
        .insert(r.json(block), durability=WRITE_DURABILITY))


def get_block(connection, block_id):
    """Get a block from the bigchain table; returns the block or `None`."""
    return connection.run(r.table('bigchain').get(block_id))


def has_transaction(connection, transaction_id):
    """Check if a transaction exists in the bigchain table."""
    return bool(connection.run(
        r.table('bigchain', read_mode=READ_MODE)
        .get_all(transaction_id, index='transaction_id').count()))


def count_blocks(connection):
    """Count the number of blocks in the bigchain table."""
    return connection.run(
        r.table('bigchain', read_mode=READ_MODE)
        .count())


def count_backlog(connection):
    """Count the number of transactions in the backlog table."""
    return connection.run(
        r.table('backlog', read_mode=READ_MODE)
        .count())


def write_vote(connection, vote):
    """Write a vote to the votes table."""
    return connection.run(
        r.table('votes')
        .insert(vote))


def get_genesis_block(connection):
    """Get the genesis block."""
    return connection.run(
        r.table('bigchain', read_mode=READ_MODE)
        .filter(util.is_genesis_block)
        .nth(0))


def get_last_voted_block(connection, node_pubkey):
    """Get the last voted block for a specific node."""
    try:
        # get the latest value for the vote timestamp (over all votes)
        max_timestamp = connection.run(
            r.table('votes', read_mode=READ_MODE)
            .filter(r.row['node_pubkey'] == node_pubkey)
            .max(r.row['vote']['timestamp']))['vote']['timestamp']

        last_voted = list(connection.run(
            r.table('votes', read_mode=READ_MODE)
            .filter(r.row['vote']['timestamp'] == max_timestamp)
            .filter(r.row['node_pubkey'] == node_pubkey)))

    except r.ReqlNonExistenceError:
        # return the last vote if it exists, else return the genesis block
        return get_genesis_block(connection)

    # Now the fun starts. Since the resolution of timestamp is a second,
    # we might have more than one vote per timestamp. If this is the case
    # then we need to rebuild the chain for the blocks that have been
    # retrieved to get the last one.

    # Given a block_id, mapping returns the id of the block pointing at it.
    mapping = {v['vote']['previous_block']: v['vote']['voting_for_block']
               for v in last_voted}

    # Since we follow the chain backwards, we can start from a random
    # point of the chain and "move up" from it.
    last_block_id = list(mapping.values())[0]

    # We must be sure to break the infinite loop. This happens when:
    # - the block we are currently iterating is the one we are looking for.
    #   This will trigger a KeyError, breaking the loop.
    # - we are visiting again a node we already explored, hence there is
    #   a loop. This might happen if a vote points both `previous_block`
    #   and `voting_for_block` to the same `block_id`.
    explored = set()

    while True:
        try:
            if last_block_id in explored:
                raise exceptions.CyclicBlockchainError()
            explored.add(last_block_id)
            last_block_id = mapping[last_block_id]
        except KeyError:
            break

    return connection.run(
        r.table('bigchain', read_mode=READ_MODE)
        .get(last_block_id))


def get_unvoted_blocks(connection, node_pubkey):
    """Return all the blocks that have not been voted by the specified node."""
    unvoted = connection.run(
        r.table('bigchain', read_mode=READ_MODE)
        .filter(lambda block: r.table('votes', read_mode=READ_MODE)
                .get_all([block['id'], node_pubkey], index='block_and_voter')
                .is_empty())
        .order_by(r.asc(r.row['block']['timestamp'])))

    # FIXME: I (@vrde) don't like this solution. Filtering should be done at a
    # database level. Solving issue #444 can help untangling the situation
    unvoted_blocks = filter(lambda block: not util.is_genesis_block(block), unvoted)
    return unvoted_blocks
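This hunk never shows where the RethinkDB functions get attached to the generic interface functions. With plain functools.singledispatch, the wiring would presumably look like the sketch below; the module paths and the registration site are assumptions, not shown in this commit:

# Hypothetical wiring, not part of this diff.
from bigchaindb.backend import query                                # assumed path
from bigchaindb.backend.rethinkdb import query as rethinkdb_query   # assumed path
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection

# Register each RethinkDB implementation for the RethinkDBConnection type, so
# e.g. query.count_blocks(conn) routes here whenever conn is a RethinkDBConnection.
for name in ('write_transaction', 'update_transaction', 'delete_transaction',
             'get_stale_transactions', 'count_blocks', 'write_block', 'write_vote'):
    getattr(query, name).register(RethinkDBConnection,
                                  getattr(rethinkdb_query, name))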
The RethinkDB schema module drops the Schema base-class import:

@@ -2,7 +2,6 @@
 
 import logging
 
-from bigchaindb.db import Schema
 from bigchaindb.common import exceptions
 import rethinkdb as r
@@ -10,94 +9,94 @@
The RethinkDBSchema class becomes a set of module-level functions taking the connection and the database name explicitly:

logger = logging.getLogger(__name__)


def create_database(connection, name):
    if connection.run(r.db_list().contains(name)):
        raise exceptions.DatabaseAlreadyExists('Database `{}` already exists'.format(name))

    logger.info('Create database `%s`.', name)
    connection.run(r.db_create(name))


def create_tables(connection, name):
    for table_name in ['bigchain', 'backlog', 'votes']:
        logger.info('Create `%s` table.', table_name)
        connection.run(r.db(name).table_create(table_name))


def create_indexes(connection, name):
    create_bigchain_secondary_index(connection, name)


def drop_database(connection, name):
    try:
        logger.info('Drop database `%s`', name)
        connection.run(r.db_drop(name))
        logger.info('Done.')
    except r.ReqlOpFailedError:
        raise exceptions.DatabaseDoesNotExist('Database `{}` does not exist'.format(name))


def create_bigchain_secondary_index(connection, name):
    logger.info('Create `bigchain` secondary index.')

    # to order blocks by timestamp
    connection.run(
        r.db(name)
        .table('bigchain')
        .index_create('block_timestamp', r.row['block']['timestamp']))

    # to query the bigchain for a transaction id
    connection.run(
        r.db(name)
        .table('bigchain')
        .index_create('transaction_id', r.row['block']['transactions']['id'], multi=True))

    # secondary index for payload data by UUID
    connection.run(
        r.db(name)
        .table('bigchain')
        .index_create('metadata_id', r.row['block']['transactions']['transaction']['metadata']['id'], multi=True))

    # secondary index for asset uuid
    connection.run(
        r.db(name)
        .table('bigchain')
        .index_create('asset_id', r.row['block']['transactions']['transaction']['asset']['id'], multi=True))

    # wait for rethinkdb to finish creating secondary indexes
    connection.run(
        r.db(name)
        .table('bigchain')
        .index_wait())


def create_backlog_secondary_index(connection, name):
    logger.info('Create `backlog` secondary index.')

    # compound index to read transactions from the backlog per assignee
    connection.run(
        r.db(name)
        .table('backlog')
        .index_create('assignee__transaction_timestamp', [r.row['assignee'], r.row['assignment_timestamp']]))

    # wait for rethinkdb to finish creating secondary indexes
    connection.run(
        r.db(name)
        .table('backlog')
        .index_wait())


def create_votes_secondary_index(connection, name):
    logger.info('Create `votes` secondary index.')

    # compound index to order votes by block id and node
    connection.run(
        r.db(name)
        .table('votes')
        .index_create('block_and_voter', [r.row['vote']['voting_for_block'], r.row['node_pubkey']]))

    # wait for rethinkdb to finish creating secondary indexes
    connection.run(
        r.db(name)
        .table('votes')
        .index_wait())
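A sketch of how a node could bootstrap a fresh database with the new schema functions; the database name is an example, and the calls assume a RethinkDBConnection as above:

conn = RethinkDBConnection()   # defaults come from bigchaindb.config
dbname = 'bigchain'            # example name

create_database(conn, dbname)
create_tables(conn, dbname)
create_bigchain_secondary_index(conn, dbname)
create_backlog_secondary_index(conn, dbname)
create_votes_secondary_index(conn, dbname)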
@@ -1,13 +1,23 @@
The Schema base class, whose four methods all raised NotImplementedError, is replaced by generic schema functions:

"""Schema-providing interfaces for backend databases"""

from functools import singledispatch


@singledispatch
def create_database(connection, name):
    raise NotImplementedError()


@singledispatch
def create_tables(connection, name):
    raise NotImplementedError()


@singledispatch
def create_indexes(connection, name):
    raise NotImplementedError()


@singledispatch
def drop_database(connection, name):
    raise NotImplementedError()