Mirror of https://github.com/bigchaindb/bigchaindb.git (synced 2024-10-13 13:34:05 +00:00)

Commit fcca712853: Merge remote-tracking branch 'upstream/master' into bug/419/config-syntax-error-pretty-message
@@ -38,9 +38,9 @@ Familiarize yourself with how we do coding and documentation in the BigchainDB p
### Step 2 - Install some Dependencies

* [Install RethinkDB Server](https://rethinkdb.com/docs/install/)
* Make sure you have Python 3.4+ (maybe in a virtualenv)
* [Install BigchainDB Server's OS-level dependencies](http://bigchaindb.readthedocs.io/en/latest/nodes/setup-run-node.html#install-bigchaindb-server)
* [Make sure you have the latest version of pip](http://bigchaindb.readthedocs.io/en/latest/nodes/setup-run-node.html#how-to-install-bigchaindb-with-pip)
* Make sure you have Python 3.4+ (preferably in a virtualenv)
* [Install BigchainDB Server's OS-level dependencies](http://bigchaindb.readthedocs.io/en/latest/appendices/install-os-level-deps.html)
* [Make sure you have the latest Python 3 version of pip and setuptools](http://bigchaindb.readthedocs.io/en/latest/appendices/install-latest-pip.html)

### Step 3 - Fork bigchaindb on GitHub

@@ -16,8 +16,8 @@ We're hiring! [Learn more](https://github.com/bigchaindb/org/blob/master/engjob.
## Get Started

### [Quickstart](http://bigchaindb.readthedocs.io/en/latest/quickstart.html)
### [Set Up and Run a BigchainDB Node](http://bigchaindb.readthedocs.io/en/latest/nodes/setup-run-node.html)
### [Run BigchainDB with Docker](http://bigchaindb.readthedocs.io/en/latest/nodes/run-with-docker.html)
### [Set Up & Run a Dev/Test Node](http://bigchaindb.readthedocs.io/en/latest/dev-and-test/setup-run-node.html)
### [Run BigchainDB with Docker](http://bigchaindb.readthedocs.io/en/latest/appendices/run-with-docker.html)

## Links for Everyone
* [BigchainDB.com](https://www.bigchaindb.com/) - the main BigchainDB website, including newsletter signup

@@ -1,52 +0,0 @@
import logging
import multiprocessing as mp
import queue

import rethinkdb as r

import bigchaindb
from bigchaindb import Bigchain
from bigchaindb.monitor import Monitor
from bigchaindb.util import ProcessGroup


logger = logging.getLogger(__name__)


class BlockDeleteRevert(object):

    def __init__(self, q_delete_to_revert):
        self.q_delete_to_revert = q_delete_to_revert

    def write_blocks(self):
        """
        Write blocks to the bigchain
        """

        # create bigchain instance
        b = Bigchain()

        # Write blocks
        while True:
            block = self.q_delete_to_revert.get()

            # poison pill
            if block == 'stop':
                return

            b.write_block(block)

    def kill(self):
        for i in range(mp.cpu_count()):
            self.q_delete_to_revert.put('stop')

    def start(self):
        """
        Initialize, spawn, and start the processes
        """

        # initialize the processes
        p_write = ProcessGroup(name='write_blocks', target=self.write_blocks)

        # start the processes
        p_write.start()

@@ -24,7 +24,7 @@ from bigchaindb.exceptions import (StartupError,
                                   DatabaseAlreadyExists,
                                   KeypairNotFoundException)
from bigchaindb.commands import utils
from bigchaindb.processes import Processes
from bigchaindb import processes
from bigchaindb import crypto

@@ -169,7 +169,6 @@ def run_start(args):
        sys.exit("Can't start BigchainDB, no keypair found. "
                 'Did you run `bigchaindb configure`?')

    processes = Processes()
    logger.info('Starting BigchainDB main process')
    processes.start()

@@ -565,13 +565,9 @@ class Bigchain(object):

        return vote_signed

    def write_vote(self, block, vote):
    def write_vote(self, vote):
        """Write the vote to the database."""

        # First, make sure this block doesn't contain a vote from this node
        if self.has_previous_vote(block):
            return None

        r.table('votes') \
            .insert(vote) \
            .run(self.conn)
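For orientation, a minimal sketch of the new call pattern, with illustrative placeholder values that are not part of this changeset; the check against a previous vote now happens in the vote pipeline before `write_vote` is reached.

```python
from bigchaindb import Bigchain

b = Bigchain()
last_block = b.get_last_voted_block()

# Build a vote the same way as before; only write_vote's signature changed.
# Re-voting on the last-voted block is meaningless and shown only for illustration.
vote = b.vote(last_block['id'], last_block['id'], True)
b.write_vote(vote)   # previously: b.write_vote(block, vote)
```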
@@ -152,6 +152,7 @@ def create_pipeline():

def start():
    """Create, start, and return the block pipeline."""

    pipeline = create_pipeline()
    pipeline.setup(indata=get_changefeed())
    pipeline.start()
bigchaindb/pipelines/vote.py (new file, 173 lines)
@@ -0,0 +1,173 @@
"""This module takes care of all the logic related to block voting.

The logic is encapsulated in the ``Vote`` class, while the sequence
of actions to do on transactions is specified in the ``create_pipeline``
function.
"""

from collections import Counter

from multipipes import Pipeline, Node

from bigchaindb import config_utils, exceptions
from bigchaindb.pipelines.utils import ChangeFeed
from bigchaindb import Bigchain


def create_invalid_tx():
    """Create and return an invalid transaction.

    The transaction is invalid because it's missing the signature."""

    b = Bigchain()
    tx = b.create_transaction(b.me, b.me, None, 'CREATE')
    return tx


class Vote:
    """This class encapsulates the logic to vote on blocks.

    Note:
        Methods of this class will be executed in different processes.
    """

    def __init__(self):
        """Initialize the Block voter."""

        # Since cannot share a connection to RethinkDB using multiprocessing,
        # we need to create a temporary instance of BigchainDB that we use
        # only to query RethinkDB
        last_voted = Bigchain().get_last_voted_block()
        self.consensus = config_utils.load_consensus_plugin()

        # This is the Bigchain instance that will be "shared" (aka: copied)
        # by all the subprocesses
        self.bigchain = Bigchain()
        self.last_voted_id = last_voted['id']

        self.counters = Counter()
        self.validity = {}

        self.invalid_dummy_tx = create_invalid_tx()

    def validate_block(self, block):
        if not self.bigchain.has_previous_vote(block):
            try:
                self.consensus.validate_block(self.bigchain, block)
                valid = True
            except (exceptions.InvalidHash,
                    exceptions.OperationError,
                    exceptions.InvalidSignature) as e:
                valid = False
            return block, valid

    def ungroup(self, block, valid):
        """Given a block, ungroup the transactions in it.

        Args:
            block (dict): the block to process

        Returns:
            ``None`` if the block has been already voted, an iterator that
            yields a transaction, block id, and the total number of
            transactions contained in the block otherwise.
        """

        # XXX: if a block is invalid we should skip the `validate_tx` step,
        # but since we are in a pipeline we cannot just jump to another
        # function. Hackish solution: generate an invalid transaction
        # and propagate it to the next steps of the pipeline
        if valid:
            num_tx = len(block['block']['transactions'])
            for tx in block['block']['transactions']:
                yield tx, block['id'], num_tx
        else:
            yield self.invalid_dummy_tx, block['id'], 1

    def validate_tx(self, tx, block_id, num_tx):
        """Validate a transaction.

        Args:
            tx (dict): the transaction to validate
            block_id (str): the id of block containing the transaction
            num_tx (int): the total number of transactions to process

        Returns:
            Three values are returned, the validity of the transaction,
            ``block_id``, ``num_tx``.
        """
        return bool(self.bigchain.is_valid_transaction(tx)), block_id, num_tx

    def vote(self, tx_validity, block_id, num_tx):
        """Collect the validity of transactions and cast a vote when ready.

        Args:
            tx_validity (bool): the validity of the transaction
            block_id (str): the id of block containing the transaction
            num_tx (int): the total number of transactions to process

        Returns:
            None, or a vote if a decision has been reached.
        """

        self.counters[block_id] += 1
        self.validity[block_id] = tx_validity and self.validity.get(block_id,
                                                                     True)

        if self.counters[block_id] == num_tx:
            vote = self.bigchain.vote(block_id,
                                      self.last_voted_id,
                                      self.validity[block_id])
            self.last_voted_id = block_id
            del self.counters[block_id]
            del self.validity[block_id]
            return vote

    def write_vote(self, vote):
        """Write vote to the database.

        Args:
            vote: the vote to write.
        """

        self.bigchain.write_vote(vote)
        return vote


def initial():
    """Return unvoted blocks."""
    b = Bigchain()
    rs = b.get_unvoted_blocks()
    return rs


def get_changefeed():
    """Create and return the changefeed for the bigchain table."""

    return ChangeFeed('bigchain', 'insert', prefeed=initial())


def create_pipeline():
    """Create and return the pipeline of operations to be distributed
    on different processes."""

    voter = Vote()

    vote_pipeline = Pipeline([
        Node(voter.validate_block),
        Node(voter.ungroup),
        Node(voter.validate_tx, fraction_of_cores=1),
        Node(voter.vote),
        Node(voter.write_vote)
    ])

    return vote_pipeline


def start():
    """Create, start, and return the block pipeline."""

    pipeline = create_pipeline()
    pipeline.setup(indata=get_changefeed())
    pipeline.start()
    return pipeline
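For orientation, a minimal sketch of how this new module is driven; it mirrors the call now made from `bigchaindb/processes.py` and assumes a configured node with RethinkDB running.

```python
from bigchaindb.pipelines import vote

# Builds the Vote pipeline, attaches it to the bigchain-table changefeed
# (pre-seeded with any still-unvoted blocks), and starts the worker processes.
pipeline = vote.start()
```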
@@ -1,13 +1,8 @@
import logging
import multiprocessing as mp

import rethinkdb as r

import bigchaindb
from bigchaindb.pipelines import block, election
from bigchaindb import Bigchain
from bigchaindb.voter import Voter
from bigchaindb.block import BlockDeleteRevert
from bigchaindb.pipelines import vote, block, election
from bigchaindb.web import server


@@ -26,57 +21,23 @@ BANNER = """
"""


class Processes(object):
def start():
    logger.info('Initializing BigchainDB...')

    def __init__(self):
        # initialize the class
        self.q_new_block = mp.Queue()
        self.q_revert_delete = mp.Queue()
    # start the processes
    logger.info('Starting block')
    block.start()

    def map_bigchain(self):
        # listen to changes on the bigchain and redirect the changes
        # to the correct queues
    logger.info('Starting voter')
    vote.start()

        # create a bigchain instance
        b = Bigchain()
    logger.info('Starting election')
    election.start()

        for change in r.table('bigchain').changes().run(b.conn):
    # start the web api
    app_server = server.create_server(bigchaindb.config['server'])
    p_webapi = mp.Process(name='webapi', target=app_server.run)
    p_webapi.start()

            # insert
            if change['old_val'] is None:
                self.q_new_block.put(change['new_val'])

            # delete
            elif change['new_val'] is None:
                # this should never happen in regular operation
                self.q_revert_delete.put(change['old_val'])

    def start(self):
        logger.info('Initializing BigchainDB...')

        delete_reverter = BlockDeleteRevert(self.q_revert_delete)

        # start the web api
        app_server = server.create_server(bigchaindb.config['server'])
        p_webapi = mp.Process(name='webapi', target=app_server.run)
        p_webapi.start()

        # initialize the processes
        p_map_bigchain = mp.Process(name='bigchain_mapper', target=self.map_bigchain)
        p_block_delete_revert = mp.Process(name='block_delete_revert', target=delete_reverter.start)
        p_voter = Voter(self.q_new_block)
        # start the processes
        logger.info('starting bigchain mapper')
        p_map_bigchain.start()
        logger.info('starting block')
        block.start()
        p_block_delete_revert.start()

        logger.info('starting voter')
        p_voter.start()
        election.start()
        logger.info('starting election')

        # start message
        p_voter.initialized.wait()
        logger.info(BANNER.format(bigchaindb.config['server']['bind']))
    # start message
    logger.info(BANNER.format(bigchaindb.config['server']['bind']))
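After this refactor, `processes` is a flat module rather than a `Processes` class. A minimal sketch of the new entry point (this is what `bigchaindb start` ends up calling; it assumes a configured node with RethinkDB running):

```python
from bigchaindb import processes

# Starts the block, vote and election pipelines plus the web API process.
processes.start()
```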
@@ -1,199 +0,0 @@
import logging
import multiprocessing as mp
import ctypes

from bigchaindb import Bigchain
from bigchaindb.monitor import Monitor


logger = logging.getLogger(__name__)


class BlockStream(object):
    """
    Combine the stream of new blocks coming from the changefeed with the list of unvoted blocks.

    This is a utility class that abstracts the source of data for the `Voter`.
    """

    def __init__(self, new_blocks):
        """
        Create a new BlockStream instance.

        Args:
            new_block (queue): a queue of new blocks
        """

        b = Bigchain()
        self.new_blocks = new_blocks
        # TODO: there might be duplicate blocks since we *first* get the changefeed and only *then* we query the
        # database to get the old blocks.

        # TODO how about a one liner, something like:
        # self.unvoted_blocks = b.get_unvoted_blocks() if not b.nodes_except_me else []
        self.unvoted_blocks = []
        if not b.nodes_except_me:
            self.unvoted_blocks = b.get_unvoted_blocks()

    def get(self):
        """
        Return the next block to be processed.
        """
        try:
            # FIXME: apparently RethinkDB returns a list instead of a cursor when using `order_by`.
            # We might change the `pop` in the future, when the driver will return a cursor.
            # We have a test for this, so if the driver implementation changes we will get a failure:
            # - tests/test_voter.py::TestBlockStream::test_if_old_blocks_get_should_return_old_block_first
            return self.unvoted_blocks.pop(0)
        except IndexError:
            return self.new_blocks.get()


class Voter(object):

    def __init__(self, q_new_block):
        """
        Initialize the class with the needed queues.

        Initialize with a queue where new blocks added to the bigchain will be put
        """

        self.monitor = Monitor()

        self.q_new_block = q_new_block
        self.q_blocks_to_validate = mp.Queue()
        self.q_validated_block = mp.Queue()
        self.q_voted_block = mp.Queue()
        self.v_previous_block_id = mp.Value(ctypes.c_char_p)
        self.initialized = mp.Event()

    def feed_blocks(self):
        """
        Prepare the queue with blocks to validate
        """

        block_stream = BlockStream(self.q_new_block)
        while True:
            # poison pill
            block = block_stream.get()
            if block == 'stop':
                self.q_blocks_to_validate.put('stop')
                return

            self.q_blocks_to_validate.put(block)

    def validate(self):
        """
        Checks if incoming blocks are valid or not
        """

        # create a bigchain instance. All processes should create their own bigchain instance so that they all
        # have their own connection to the database
        b = Bigchain()

        logger.info('voter waiting for new blocks')
        # signal initialization complete
        self.initialized.set()

        while True:
            new_block = self.q_blocks_to_validate.get()

            # poison pill
            if new_block == 'stop':
                self.q_validated_block.put('stop')
                return

            logger.info('new_block arrived to voter')

            with self.monitor.timer('validate_block'):
                # FIXME: the following check is done also in `is_valid_block`,
                # but validity can be true even if the block has already
                # a vote.
                if b.has_previous_vote(new_block):
                    continue
                validity = b.is_valid_block(new_block)

            self.q_validated_block.put((new_block,
                                        self.v_previous_block_id.value.decode(),
                                        validity))

            self.v_previous_block_id.value = new_block['id'].encode()

    def vote(self):
        """
        Votes on the block based on the decision of the validation
        """

        # create a bigchain instance
        b = Bigchain()

        while True:
            elem = self.q_validated_block.get()

            # poison pill
            if elem == 'stop':
                self.q_voted_block.put('stop')
                return

            validated_block, previous_block_id, decision = elem
            vote = b.vote(validated_block['id'], previous_block_id, decision)
            self.q_voted_block.put((validated_block, vote))

    def update_block(self):
        """
        Appends the vote in the bigchain table
        """

        # create a bigchain instance
        b = Bigchain()

        while True:
            elem = self.q_voted_block.get()

            # poison pill
            if elem == 'stop':
                logger.info('clean exit')
                return

            block, vote = elem
            pretty_vote = 'valid' if vote['vote']['is_block_valid'] else 'invalid'
            logger.info('voting %s for block %s', pretty_vote, block['id'])
            b.write_vote(block, vote)

    def bootstrap(self):
        """
        Before starting handling the new blocks received by the changefeed we need to handle unvoted blocks
        added to the bigchain while the process was down

        We also need to set the previous_block_id.
        """

        b = Bigchain()
        last_voted = b.get_last_voted_block()

        self.v_previous_block_id.value = last_voted['id'].encode()

    def kill(self):
        """
        Terminate processes
        """
        self.q_new_block.put('stop')

    def start(self):
        """
        Initialize, spawn, and start the processes
        """

        self.bootstrap()

        # initialize the processes
        p_feed_blocks = mp.Process(name='block_feeder', target=self.feed_blocks)
        p_validate = mp.Process(name='block_validator', target=self.validate)
        p_vote = mp.Process(name='block_voter', target=self.vote)
        p_update = mp.Process(name='block_updater', target=self.update_block)

        # start the processes
        p_feed_blocks.start()
        p_validate.start()
        p_vote.start()
        p_update.start()

@@ -1,6 +1,6 @@
# AWS Setup
# Basic AWS Setup

Before you can deploy a BigchainDB node or cluster on AWS, you must do a few things.
Before you can deploy anything on AWS, you must do a few things.


## Get an AWS Account
@@ -36,37 +36,3 @@ Default output format [None]: [Press Enter]
```

This writes two files: `~/.aws/credentials` and `~/.aws/config`. AWS tools and packages look for those files.


## Get Enough Amazon Elastic IP Addresses

You can skip this if you're deploying a single node.

Our AWS cluster deployment scripts use elastic IP addresses (although that may change in the future). By default, AWS accounts get five elastic IP addresses. If you want to deploy a cluster with more than five nodes, then you will need more than five elastic IP addresses; you may have to apply for those; see [the AWS documentation on elastic IP addresses](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html).


## Create an Amazon EC2 Key Pair

Go to the AWS EC2 Console and select "Key Pairs" in the left sidebar. Click the "Create Key Pair" button. Give it the name `bigchaindb`. You should be prompted to save a file named `bigchaindb.pem`. That file contains the RSA private key. (You can get the public key from the private key, so there's no need to send it separately.)

If you're deploying a cluster, save the file in `bigchaindb/deploy-cluster-aws/pem/bigchaindb.pem`.

If you're deploying a single node, save the file in `bigchaindb/deploy-node-aws/pem/bigchaindb.pem`.

**You should not share your private key.**


## Create an Amazon EC2 Security Group

Go to the AWS EC2 Console and select "Security Groups" in the left sidebar. Click the "Create Security Group" button. If you're deploying a cluster, give it the name `bigchaindb`, otherwise you can name it whatever you like. The description probably doesn't matter but we also put `bigchaindb` for that.

If you're deploying a test cluster, then add these rules for Inbound traffic:

* Type = All TCP, Protocol = TCP, Port Range = 0-65535, Source = 0.0.0.0/0
* Type = SSH, Protocol = SSH, Port Range = 22, Source = 0.0.0.0/0
* Type = All UDP, Protocol = UDP, Port Range = 0-65535, Source = 0.0.0.0/0
* Type = All ICMP, Protocol = ICMP, Port Range = 0-65535, Source = 0.0.0.0/0

**Note: These rules are extremely lax! They're meant to make testing easy.** For example, Source = 0.0.0.0/0 is [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) for "allow this traffic to come from _any_ IP address."

If you're deploying a single node, then see [the BigchainDB Notes for Firewall Setup](firewall-notes.html) and [the AWS documentation about security groups](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html).

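The console steps above (key pair and security group) can also be scripted. Below is a hedged sketch using the boto3 library, which is not part of the BigchainDB tooling; the names and the wide-open CIDR range mirror the test setup described above.

```python
import boto3

ec2 = boto3.client('ec2')

# Create the key pair and save the private key locally (do not share it).
key = ec2.create_key_pair(KeyName='bigchaindb')
with open('pem/bigchaindb.pem', 'w') as f:
    f.write(key['KeyMaterial'])

# Create the (extremely lax, test-only) security group and open all traffic.
ec2.create_security_group(GroupName='bigchaindb',
                          Description='bigchaindb')
ec2.authorize_security_group_ingress(
    GroupName='bigchaindb',
    IpPermissions=[{'IpProtocol': '-1',                      # all protocols and ports
                    'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}])
```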
@@ -2,7 +2,7 @@

## Example Amazon EC2 Setups

We have some scripts for [deploying a _test_ BigchainDB cluster on AWS](../clusters-feds/deploy-on-aws.html). Those scripts include command sequences to set up storage for RethinkDB.
We have some scripts for [deploying a _test_ BigchainDB cluster on AWS](../clusters-feds/aws-testing-cluster.html). Those scripts include command sequences to set up storage for RethinkDB.
In particular, look in the file [/deploy-cluster-aws/fabfile.py](https://github.com/bigchaindb/bigchaindb/blob/master/deploy-cluster-aws/fabfile.py), under `def prep_rethinkdb_storage(USING_EBS)`. Note that there are two cases:

1. **Using EBS ([Amazon Elastic Block Store](https://aws.amazon.com/ebs/)).** This is always an option, and for some instance types ("EBS-only"), it's the only option.

@@ -47,7 +47,7 @@ Port 8080 is the default port used by RethinkDB for its administrative web (HTTP)

Port 9984 is the default port for the BigchainDB client-server HTTP API (TCP), which is served by Gunicorn HTTP Server. It's _possible_ to allow port 9984 to accept inbound traffic from anyone, but we recommend against doing that. Instead, set up a reverse proxy server (e.g. using Nginx) and only allow traffic from there. Information about how to do that can be found [in the Gunicorn documentation](http://docs.gunicorn.org/en/stable/deploy.html). (They call it a proxy.)

If Gunicorn and the reverse proxy are running on the same server, then you'll have to tell Gunicorn to listen on some port other than 9984 (so that the reverse proxy can listen on port 9984). You can do that by setting `server.bind` to 'localhost:PORT' in the [BigchainDB Configuration Settings](../nodes/configuration.html), where PORT is whatever port you chose (e.g. 9983).
If Gunicorn and the reverse proxy are running on the same server, then you'll have to tell Gunicorn to listen on some port other than 9984 (so that the reverse proxy can listen on port 9984). You can do that by setting `server.bind` to 'localhost:PORT' in the [BigchainDB Configuration Settings](../server-reference/configuration.html), where PORT is whatever port you chose (e.g. 9983).

You may want to have Gunicorn and the reverse proxy running on different servers, so that both can listen on port 9984. That would also help isolate the effects of a denial-of-service attack.

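For illustration, the `server.bind` setting mentioned above can also be inspected and overridden from Python (a sketch; the usual place to change it is the local config file, e.g. `$HOME/.bigchaindb`):

```python
import bigchaindb

# The web server reads its socket address from the 'server' section of the config.
print(bigchaindb.config['server']['bind'])              # default: 'localhost:9984'
bigchaindb.config['server']['bind'] = 'localhost:9983'  # leave 9984 free for the reverse proxy
```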
@@ -7,6 +7,9 @@ Appendices
.. toctree::
   :maxdepth: 1

   install-os-level-deps
   install-latest-pip
   run-with-docker
   json-serialization
   cryptography
   the-Bigchain-class
@@ -15,4 +18,5 @@ Appendices
   firewall-notes
   ntp-notes
   example-rethinkdb-storage-setups
   licenses
   licenses
   install-with-lxd

docs/source/appendices/install-latest-pip.md (new file, 20 lines)
@@ -0,0 +1,20 @@
# How to Install the Latest pip and setuptools

You can check the version of `pip` you're using (in your current virtualenv) by doing:
```text
pip -V
```

If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.4, then you must install a `pip` version associated with Python 3.4+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).

On Ubuntu 14.04, we found that this works:
```text
sudo apt-get install python3-pip
```

That should install a Python 3 version of `pip` named `pip3`. If that didn't work, then another way to get `pip3` is to do `sudo apt-get install python3-setuptools` followed by `sudo easy_install3 pip`.

You can upgrade `pip` (`pip3`) and `setuptools` to the latest versions using:
```text
pip3 install --upgrade pip setuptools
```

docs/source/appendices/install-os-level-deps.md (new file, 17 lines)
@@ -0,0 +1,17 @@
# How to Install OS-Level Dependencies

BigchainDB Server has some OS-level dependencies that must be installed.

On Ubuntu 14.04 and 16.04, we found that the following was enough:
```text
sudo apt-get update
sudo apt-get install g++ python3-dev
```

On Fedora 23 and 24, we found that the following was enough:
```text
sudo dnf update
sudo dnf install gcc-c++ redhat-rpm-config python3-devel
```

(If you're using a version of Fedora before version 22, you may have to use `yum` instead of `dnf`.)

docs/source/appendices/install-with-lxd.md (new file, 43 lines)
@@ -0,0 +1,43 @@
# Installing BigchainDB on LXC containers using LXD

You can visit this link to install LXD (instructions here): [LXD Install](https://linuxcontainers.org/lxd/getting-started-cli/)

(The assumption is that you are using Ubuntu 14.04 for both the host and the container.)

Let us create an LXC container (via LXD) with the following command:

`lxc launch ubuntu:14.04 bigchaindb`

(ubuntu:14.04 - this is the remote server the command fetches the image from)
(bigchaindb - is the name of the container)

Below is the `install.sh` script you will need to install BigchainDB within your container.

Here is my `install.sh`:

```
#!/bin/bash
set -ex
export DEBIAN_FRONTEND=noninteractive
apt-get install -y wget
source /etc/lsb-release && echo "deb http://download.rethinkdb.com/apt $DISTRIB_CODENAME main" | sudo tee /etc/apt/sources.list.d/rethinkdb.list
wget -qO- https://download.rethinkdb.com/apt/pubkey.gpg | sudo apt-key add -
apt-get update
apt-get install -y rethinkdb python3-pip
pip3 install --upgrade pip wheel setuptools
pip install ptpython bigchaindb
```

Copy/Paste the above `install.sh` into the directory/path you are going to execute your LXD commands from (i.e. the host).

Make sure your container is running by typing:

`lxc list`

Now, from the host (and the correct directory) where you saved `install.sh`, run this command:

`cat install.sh | lxc exec bigchaindb /bin/bash`

If you followed the commands correctly, you will have successfully created an LXC container (using LXD) that can get you up and running with BigchainDB in <5 minutes (depending on how long it takes to download all the packages).

From this point onwards, you can follow the [Python Example](https://bigchaindb.readthedocs.io/en/latest/drivers-clients/python-server-api-examples.html).

@@ -96,7 +96,7 @@ docker run --rm -v "$HOME/bigchaindb_docker:/data" -ti \

Note the `--link` option to link to the first container (named `bigchaindb`).

Aside: The `bigchaindb load` command has several options (e.g. `-m`). You can read more about it in [the documentation about the BigchainDB command line interface](bigchaindb-cli.html).
Aside: The `bigchaindb load` command has several options (e.g. `-m`). You can read more about it in [the documentation about the BigchainDB command line interface](../server-reference/bigchaindb-cli.html).

If you look at the RethinkDB dashboard (in your web browser), you should see the effects of the load test. You can also see some effects in the Docker logs using:
```text
@@ -1,12 +1,16 @@
# Deploy a Cluster on AWS
# Deploy a Testing Cluster on AWS

This section explains a way to deploy a cluster of BigchainDB nodes on Amazon Web Services (AWS). We use some Bash and Python scripts to launch several instances (virtual servers) on Amazon Elastic Compute Cloud (EC2). Then we use Fabric to install RethinkDB and BigchainDB on all those instances.
This section explains a way to deploy a cluster of BigchainDB nodes on Amazon Web Services (AWS) for testing purposes.

## Why?

You might ask why one would want to deploy a centrally-controlled BigchainDB cluster. Isn't BigchainDB supposed to be decentralized, where each node is controlled by a different person or organization?
Why would anyone want to deploy a centrally-controlled BigchainDB cluster? Isn't BigchainDB supposed to be decentralized, where each node is controlled by a different person or organization?

Yes! These scripts are for deploying _test_ clusters, not production clusters.
Yes! These scripts are for deploying a testing cluster, not a production cluster.

## How?

We use some Bash and Python scripts to launch several instances (virtual servers) on Amazon Elastic Compute Cloud (EC2). Then we use Fabric to install RethinkDB and BigchainDB on all those instances.

## Python Setup

@@ -28,9 +32,37 @@ What did you just install?
* [The aws-cli package](https://pypi.python.org/pypi/awscli), which is an AWS Command Line Interface (CLI).


## AWS Setup
## Basic AWS Setup

See the page about [AWS Setup](../appendices/aws-setup.html) in the Appendices.
See the page about [basic AWS Setup](../appendices/aws-setup.html) in the Appendices.


## Get Enough Amazon Elastic IP Addresses

The AWS cluster deployment scripts use elastic IP addresses (although that may change in the future). By default, AWS accounts get five elastic IP addresses. If you want to deploy a cluster with more than five nodes, then you will need more than five elastic IP addresses; you may have to apply for those; see [the AWS documentation on elastic IP addresses](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html).


## Create an Amazon EC2 Key Pair

Go to the AWS EC2 Console and select "Key Pairs" in the left sidebar. Click the "Create Key Pair" button. Give it the name `bigchaindb`. You should be prompted to save a file named `bigchaindb.pem`. That file contains the RSA private key. (You can get the public key from the private key, so there's no need to send it separately.)

Save the file in `bigchaindb/deploy-cluster-aws/pem/bigchaindb.pem`.

**You should not share your private key.**


## Create an Amazon EC2 Security Group

Go to the AWS EC2 Console and select "Security Groups" in the left sidebar. Click the "Create Security Group" button. Name it `bigchaindb`. The description probably doesn't matter; you can also put `bigchaindb` for that.

Add these rules for Inbound traffic:

* Type = All TCP, Protocol = TCP, Port Range = 0-65535, Source = 0.0.0.0/0
* Type = SSH, Protocol = SSH, Port Range = 22, Source = 0.0.0.0/0
* Type = All UDP, Protocol = UDP, Port Range = 0-65535, Source = 0.0.0.0/0
* Type = All ICMP, Protocol = ICMP, Port Range = 0-65535, Source = 0.0.0.0/0

**Note: These rules are extremely lax! They're meant to make testing easy.** For example, Source = 0.0.0.0/0 is [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) for "allow this traffic to come from _any_ IP address."


## Deploy a BigchainDB Monitor

@@ -74,7 +106,7 @@ fab --fabfile=fabfile-monitor.py --hosts=<EC2 hostname> run_monitor

For more information about monitoring (e.g. how to view the Grafana dashboard in your web browser), see the [Monitoring](monitoring.html) section of this documentation.

To configure a BigchainDB node to send monitoring data to the monitoring server, change the statsd host in the configuration of the BigchainDB node. The section on [Configuring a BigchainDB Node](../nodes/configuration.html) explains how you can do that. (For example, you can change the statsd host in `$HOME/.bigchaindb`.)
To configure a BigchainDB node to send monitoring data to the monitoring server, change the statsd host in the configuration of the BigchainDB node. The section on [Configuring a BigchainDB Node](../server-reference/configuration.html) explains how you can do that. (For example, you can change the statsd host in `$HOME/.bigchaindb`.)

## Deploy a BigchainDB Cluster

@@ -22,7 +22,7 @@ That's just one possible way of setting up the file system so as to provide extr

Another way to get similar reliability would be to mount the RethinkDB data directory on an [Amazon EBS](https://aws.amazon.com/ebs/) volume. Each Amazon EBS volume is, "automatically replicated within its Availability Zone to protect you from component failure, offering high availability and durability."

See [the section on setting up storage for RethinkDB](../nodes/setup-run-node.html#set-up-storage-for-rethinkdb-data) for more details.
See [the section on setting up storage for RethinkDB](../dev-and-test/setup-run-node.html#set-up-storage-for-rethinkdb-data) for more details.

As with shard replication, live file-system replication protects against many failure modes, but it doesn't protect against them all. You should still consider having normal, "cold" backups.

@@ -39,7 +39,7 @@ rethinkdb dump -e bigchain.bigchain -e bigchain.votes

```

That should write a file named `rethinkdb_dump_<date>_<time>.tar.gz`. The `-e` option is used to specify which tables should be exported. You probably don't need to export the backlog table, but you definitely need to export the bigchain and votes tables.
`bigchain.votes` means the `votes` table in the RethinkDB database named `bigchain`. It's possible that your database has a different name: [the database name is a BigchainDB configuration setting](../nodes/configuration.html#database-host-database-port-database-name). The default name is `bigchain`. (Tip: you can see the values of all configuration settings using the `bigchaindb show-config` command.)
`bigchain.votes` means the `votes` table in the RethinkDB database named `bigchain`. It's possible that your database has a different name: [the database name is a BigchainDB configuration setting](../server-reference/configuration.html#database-host-database-port-database-name). The default name is `bigchain`. (Tip: you can see the values of all configuration settings using the `bigchaindb show-config` command.)

There's [more information about the `rethinkdb dump` command in the RethinkDB documentation](https://www.rethinkdb.com/docs/backup/). It also explains how to restore data to a cluster from an archive file.

@@ -108,7 +108,7 @@ Considerations for BigchainDB:

Although it's not advertised as such, RethinkDB's built-in replication feature is similar to continuous backup, except the "backup" (i.e. the set of replica shards) is spread across all the nodes. One could take that idea a bit farther by creating a set of backup-only servers with one full backup:

* Give all the original BigchainDB nodes (RethinkDB nodes) the server tag `original`. This is the default if you used the RethinkDB config file suggested in the section titled [Configure RethinkDB Server](../nodes/setup-run-node.html#configure-rethinkdb-server).
* Give all the original BigchainDB nodes (RethinkDB nodes) the server tag `original`. This is the default if you used the RethinkDB config file suggested in the section titled [Configure RethinkDB Server](../dev-and-test/setup-run-node.html#configure-rethinkdb-server).
* Set up a group of servers running RethinkDB only, and give them the server tag `backup`. The `backup` servers could be geographically separated from all the `original` nodes (or not; it's up to the federation).
* Clients shouldn't be able to read from or write to servers in the `backup` set.
* Send a RethinkDB reconfigure command to the RethinkDB cluster to make it so that the `original` set has the same number of replicas as before (or maybe one less), and the `backup` set has one replica. Also, make sure the `primary_replica_tag='original'` so that all primary shards live on the `original` nodes. (See the sketch after this list.)

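A sketch of that reconfigure step using the RethinkDB Python driver; the table name, shard count, and replica counts below are illustrative and depend on your cluster.

```python
import rethinkdb as r

conn = r.connect('localhost', 28015, db='bigchain')

r.table('bigchain').reconfigure(
    shards=1,                                # keep whatever shard count you already use
    replicas={'original': 3, 'backup': 1},   # replicas per server tag
    primary_replica_tag='original',          # keep all primary shards on the original nodes
).run(conn)
```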
@@ -7,10 +7,9 @@ BigchainDB Clusters & Federations
.. toctree::
   :maxdepth: 1

   node-cluster-fed
   set-up-a-federation
   backup
   deploy-on-aws
   aws-testing-cluster
   monitoring
   future-docs

@@ -28,7 +28,7 @@ You can view the Grafana dashboard in your web browser at:

(You may want to replace `localhost` with another hostname in that URL, e.g. the hostname of a remote monitoring server.)

The login and password are `admin` by default. If BigchainDB is running and processing transactions, you should see analytics—if not, [start BigchainDB](../nodes/setup-run-node.html#run-bigchaindb) and load some test transactions:
The login and password are `admin` by default. If BigchainDB is running and processing transactions, you should see analytics—if not, [start BigchainDB](../dev-and-test/setup-run-node.html#run-bigchaindb) and load some test transactions:
```text
$ bigchaindb load
```

@@ -1,6 +1,6 @@
# Set Up a Federation

This section is about how to set up a BigchainDB _federation_, where each node is operated by a different operator. If you want to set up and run a BigchainDB cluster on AWS (where all nodes are operated by you), then see [the section about that](deploy-on-aws.html).
This section is about how to set up a BigchainDB _federation_, where each node is operated by a different operator. If you want to set up and run a testing cluster on AWS (where all nodes are operated by you), then see [the section about that](aws-testing-cluster.html).


## Initial Checklist

@@ -19,8 +19,9 @@ The federation must decide some things before setting up the initial cluster (in
2. What will the replication factor be? (It must be 3 or more for [RethinkDB failover](https://rethinkdb.com/docs/failover/) to work.)
3. Which node will be responsible for sending the commands to configure the RethinkDB database?

Once those things have been decided, each node operator can begin setting up their BigchainDB node.
The steps to set up a cluster node are outlined in the section titled [Set Up and Run a Node](../nodes/setup-run-node.html). Each node operator will eventually need two pieces of information from all other nodes in the federation:
Once those things have been decided, each node operator can begin [setting up their BigchainDB (production) node](../prod-node-setup-mgmt/index.html).

Each node operator will eventually need two pieces of information from all other nodes in the federation:

1. Their RethinkDB hostname, e.g. `rdb.farm2.organization.org`
2. Their BigchainDB public key, e.g. `Eky3nkbxDTMgkmiJC8i5hKyVFiAQNmPP4a2G4JdDxJCK`

docs/source/dev-and-test/index.rst (new file, 11 lines)
@@ -0,0 +1,11 @@
.. You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Develop & Test BigchainDB
=========================

.. toctree::
   :maxdepth: 1

   setup-run-node
   running-unit-tests

docs/source/dev-and-test/setup-run-node.md (new file, 30 lines)
@@ -0,0 +1,30 @@
# Set Up & Run a Dev/Test Node

This page explains how to set up a minimal local BigchainDB node for development and testing purposes.

The BigchainDB core dev team develops BigchainDB on recent Ubuntu and Fedora distributions, so we recommend you use one of those. BigchainDB Server doesn't work on Windows and Mac OS X (unless you use a VM or containers).

First, read through the BigchainDB [CONTRIBUTING.md file](https://github.com/bigchaindb/bigchaindb/blob/master/CONTRIBUTING.md). It outlines the steps to set up a machine for developing and testing BigchainDB.

Next, create a default BigchainDB config file (in `$HOME/.bigchaindb`):
```text
bigchaindb -y configure
```

Note: [The BigchainDB CLI](../server-reference/bigchaindb-cli.html) and the [BigchainDB Configuration Settings](../server-reference/configuration.html) are documented elsewhere. (Click the links.)

Start RethinkDB using:
```text
rethinkdb
```

You can verify that RethinkDB is running by opening the RethinkDB web interface in your web browser. It should be at [http://localhost:8080/](http://localhost:8080/).

To run BigchainDB Server, do:
```text
bigchaindb start
```

You can [run all the unit tests](running-unit-tests.html) to test your installation.

The BigchainDB [CONTRIBUTING.md file](https://github.com/bigchaindb/bigchaindb/blob/master/CONTRIBUTING.md) has more details about how to contribute.

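Once the node is running, a quick smoke test from a Python shell could look like the sketch below; it only uses helpers that appear elsewhere in this changeset.

```python
from bigchaindb import Bigchain

b = Bigchain()                                         # reads $HOME/.bigchaindb
tx = b.create_transaction(b.me, b.me, None, 'CREATE')  # same helper the vote pipeline uses
print(b.is_valid_transaction(tx))                      # False here: the transaction is unsigned
```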
@@ -7,9 +7,9 @@ When you start BigchainDB using `bigchaindb start`, an HTTP API is exposed at th

`http://localhost:9984/api/v1/ <http://localhost:9984/api/v1/>`_

but that address can be changed by changing the "API endpoint" configuration setting (e.g. in a local config file). There's more information about setting the API endpoint in :doc:`the section about Configuring a BigchainDB Node <../nodes/configuration>`.
but that address can be changed by changing the "API endpoint" configuration setting (e.g. in a local config file). There's more information about setting the API endpoint in :doc:`the section about BigchainDB Configuration Settings <../server-reference/configuration>`.

There are other configuration settings related to the web server (serving the HTTP API). In particular, the default is for the web server socket to bind to `localhost:9984` but that can be changed (e.g. to `0.0.0.0:9984`). For more details, see the "server" settings ("bind", "workers" and "threads") in :doc:`the section about Configuring a BigchainDB Node <../nodes/configuration>`.
There are other configuration settings related to the web server (serving the HTTP API). In particular, the default is for the web server socket to bind to `localhost:9984` but that can be changed (e.g. to `0.0.0.0:9984`). For more details, see the "server" settings ("bind", "workers" and "threads") in :doc:`the section about BigchainDB Configuration Settings <../server-reference/configuration>`.

The HTTP API currently exposes two endpoints, one to get information about a specific transaction, and one to push a new transaction to the BigchainDB cluster.

@@ -9,7 +9,7 @@ One can also interact with a BigchainDB node via other APIs, including the HTTP

## Getting Started

First, make sure you have RethinkDB and BigchainDB _installed and running_, i.e. you [installed them](setup-run-node.html) and you ran:
First, make sure you have RethinkDB and BigchainDB _installed and running_, i.e. you [installed them](../dev-and-test/setup-run-node.html) and you ran:
```text
$ rethinkdb
$ bigchaindb configure

@@ -9,9 +9,13 @@ Table of Contents

   introduction
   quickstart
   node-cluster-fed
   nodes/index
   clusters-feds/index
   dev-and-test/index
   prod-node-setup-mgmt/index
   server-reference/index
   drivers-clients/index
   clusters-feds/index
   topic-guides/index
   release-notes
   appendices/index

@@ -8,9 +8,9 @@ You can read about the motivations, goals and high-level architecture in the [Bi
## Setup Instructions for Various Cases

* [Set up a stand-alone BigchainDB node for learning and experimenting: Quickstart](quickstart.html)
* [Set up and run a dev/test node](dev-and-test/setup-run-node.html)
* [Deploy a testing cluster on AWS](clusters-feds/aws-testing-cluster.html)
* [Set up and run a federation](clusters-feds/set-up-a-federation.html) (i.e. an organization with a BigchainDB cluster)
* To set up a stand-alone node so you can help contribute to the development of BigchainDB Server, see [the CONTRIBUTING.md file](https://github.com/bigchaindb/bigchaindb/blob/master/CONTRIBUTING.md)
* [Deploy a cluster on AWS](clusters-feds/deploy-on-aws.html)

(Instructions for setting up a client will be provided once there's a public testnet.)

@@ -1,13 +1,13 @@
# Nodes, Clusters & Federations

A **BigchainDB node** is a server or set of closely-linked servers running RethinkDB Server, BigchainDB Server, and other BigchainDB-related software. Each node is controlled by one person or organization.
A **BigchainDB node** is a machine or set of closely-linked machines running RethinkDB Server, BigchainDB Server, and related software. (A "machine" might be a bare-metal server, a virtual machine or a container.) Each node is controlled by one person or organization.

A set of BigchainDB nodes can connect to each other to form a **cluster**. Each node in the cluster runs the same software. A cluster contains one logical RethinkDB datastore. A cluster may have additional servers to do things such as cluster monitoring.
A set of BigchainDB nodes can connect to each other to form a **cluster**. Each node in the cluster runs the same software. A cluster contains one logical RethinkDB datastore. A cluster may have additional machines to do things such as cluster monitoring.

The people and organizations that run the nodes in a cluster belong to a **federation** (i.e. another organization). A federation must have some sort of governance structure to make decisions. If a cluster is run by a single company, then the federation is just that company.

**What's the Difference Between a Cluster and a Federation?**

A cluster is just a bunch of connected nodes (computers). A cluster might be operated by just one person. A federation is an organization which has a cluster, and where each node in the cluster has a different operator.
A cluster is just a bunch of connected nodes. A federation is an organization which has a cluster, and where each node in the cluster has a different operator.

Confusingly, we sometimes call a federation's cluster its "federation." You can probably tell what we mean from context.

@@ -7,9 +7,5 @@ BigchainDB Nodes
.. toctree::
   :maxdepth: 1

   node-components
   node-requirements
   setup-run-node
   run-with-docker
   running-unit-tests
   configuration
   bigchaindb-cli

docs/source/nodes/node-components.md (new file, 17 lines)
@@ -0,0 +1,17 @@
# Node Components

A BigchainDB node must include, at least:

* BigchainDB Server and
* RethinkDB Server.

When doing development and testing, it's common to install both on the same machine, but in a production environment, it may make more sense to install them on separate machines.

In a production environment, a BigchainDB node can have several other components, including:

* nginx or similar, as a reverse proxy and/or load balancer for the web server
* An NTP daemon running on all machines running BigchainDB code, and possibly other machines
* A RethinkDB proxy server
* Scalable storage for RethinkDB (e.g. using RAID)
* Monitoring software, to monitor all the machines in the node
* Maybe a configuration management (CM) server and CM agents on all machines

@@ -1,21 +1,17 @@
# Node Requirements (OS, Memory, Storage, etc.)

For now, we will assume that a BigchainDB node is just one server. In the future, a node may consist of several closely-coupled servers run by one node operator (federation member).


## OS Requirements

* RethinkDB Server [will run on any modern OS](https://www.rethinkdb.com/docs/install/). Note that the Fedora package isn't officially supported. Also, official support for Windows is fairly recent ([April 2016](https://rethinkdb.com/blog/2.3-release/)).
* Python 3.4+ [will run on any modern OS](https://docs.python.org/3.4/using/index.html).
* [Some functionality in the `multiprocessing` package doesn't work on OS X](https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.qsize). You can still use Mac OS X if you use Docker or a virtual machine.
* ZeroMQ [will run on any modern OS](http://zeromq.org/area:download).
* BigchainDB Server requires Python 3.4+ and Python 3.4+ [will run on any modern OS](https://docs.python.org/3.4/using/index.html).
* BigchainDB Server uses the Python `multiprocessing` package and [some functionality in the `multiprocessing` package doesn't work on OS X](https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.qsize). You can still use Mac OS X if you use Docker or a virtual machine. (See the sketch after this list.)
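
A tiny illustration of that OS X limitation, not taken from the BigchainDB codebase (`qsize()` is the unimplemented call):

```python
import multiprocessing as mp

q = mp.Queue()
q.put('tx')
try:
    print(q.qsize())           # works on Linux
except NotImplementedError:    # raised on Mac OS X, where sem_getvalue() is missing
    print('multiprocessing.Queue.qsize() is unavailable on this platform')
```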

The BigchainDB core dev team uses Ubuntu 14.04 or Fedora 23.
The BigchainDB core dev team uses Ubuntu 14.04, Ubuntu 16.04, Fedora 23, and Fedora 24.

We don't test BigchainDB on Windows or Mac OS X, but you can try.

* If you run into problems on Windows, then you may want to try using Vagrant. One of our community members ([@Mec-Is](https://github.com/Mec-iS)) wrote [a page about how to install BigchainDB on a VM with Vagrant](https://gist.github.com/Mec-iS/b84758397f1b21f21700).
* If you have Mac OS X and want to experiment with BigchainDB, then you could do that [using Docker](run-with-docker.html).
* If you have Mac OS X and want to experiment with BigchainDB, then you could do that [using Docker](../appendices/run-with-docker.html).


## Storage Requirements

@ -1,208 +0,0 @@
|
||||
# Set Up and Run a Cluster Node
|
||||
|
||||
If you want to set up a BigchainDB node that's intended to be one of the nodes in a BigchainDB cluster (i.e. where each node is operated by a different member of a federation), then this page is for you, otherwise see [elsewhere](../introduction.html).
|
||||
|
||||
|
||||
## Get a Server
|
||||
|
||||
The first step is to get a server (or equivalent) which meets [the requirements for a BigchainDB node](node-requirements.html).
|
||||
|
||||
|
||||
## Secure Your Server
|
||||
|
||||
The steps that you must take to secure your server depend on your server OS and where your server is physically located. There are many articles and books about how to secure a server. Here we just cover special considerations when securing a BigchainDB node.
|
||||
|
||||
There are some [notes on BigchainDB-specific firewall setup](../appendices/firewall-notes.html) in the Appendices.
|
||||
|
||||
|
||||
## Sync Your System Clock
|
||||
|
||||
A BigchainDB node uses its system clock to generate timestamps for blocks and votes, so that clock should be kept in sync with some standard clock(s). The standard way to do that is to run an NTP daemon (Network Time Protocol daemon) on the node. (You could also use tlsdate, which uses TLS timestamps rather than NTP, but don't: it's not very accurate and it will break with TLS 1.3, which removes the timestamp.)
|
||||
|
||||
NTP is a standard protocol. There are many NTP daemons implementing it. We don't recommend a particular one. On the contrary, we recommend that different nodes in a federation run different NTP daemons, so that a problem with one daemon won't affect all nodes.
|
||||
|
||||
Please see the [notes on NTP daemon setup](../appendices/ntp-notes.html) in the Appendices.
|
||||
|
||||
|
||||
## Set Up Storage for RethinkDB Data
|
||||
|
||||
Below are some things to consider when setting up storage for the RethinkDB data. The Appendices have a [section with concrete examples](../appendices/example-rethinkdb-storage-setups.html).
|
||||
|
||||
We suggest you set up a separate storage "device" (partition, RAID array, or logical volume) to store the RethinkDB data. Here are some questions to ask:
|
||||
|
||||
* How easy will it be to add storage in the future? Will I have to shut down my server?
|
||||
* How big can the storage get? (Remember that [RAID](https://en.wikipedia.org/wiki/RAID) can be used to make several physical drives look like one.)
|
||||
* How fast can it read & write data? How many input/output operations per second (IOPS)?
|
||||
* How does IOPS scale as more physical hard drives are added?
|
||||
* What's the latency?
|
||||
* What's the reliability? Is there replication?
|
||||
* What's in the Service Level Agreement (SLA), if applicable?
|
||||
* What's the cost?
|
||||
|
||||
There are many options and tradeoffs. Don't forget to look into Amazon Elastic Block Store (EBS) and Amazon Elastic File System (EFS), or their equivalents from other providers.
|
||||
|
||||
**Storage Notes Specific to RethinkDB**
|
||||
|
||||
* The RethinkDB storage engine has a number of SSD optimizations, so you _can_ benefit from using SSDs. ([source](https://www.rethinkdb.com/docs/architecture/))
|
||||
|
||||
* If you want a RethinkDB cluster to store an amount of data D, with a replication factor of R (on every table), and the cluster has N nodes, then each node will need to be able to store R×D/N data.
|
||||
|
||||
* RethinkDB tables can have [at most 64 shards](https://rethinkdb.com/limitations/). For example, if you have only one table and more than 64 nodes, some nodes won't have the primary of any shard, i.e. they will have replicas only. In other words, once you pass 64 nodes, adding more nodes won't provide more storage space for new data. If the biggest single-node storage available is d, then the most you can store in a RethinkDB cluster is < 64×d: accomplished by putting one primary shard in each of 64 nodes, with all replica shards on other nodes. (This is assuming one table. If there are T tables, then the most you can store is < 64×d×T.)
|
||||
|
||||
* When you set up storage for your RethinkDB data, you may have to select a filesystem. (Sometimes, the filesystem is already decided by the choice of storage.) We recommend using a filesystem that supports direct I/O (Input/Output). Many compressed or encrypted file systems don't support direct I/O. The ext4 filesystem supports direct I/O (but be careful: if you enable the data=journal mode, then direct I/O support will be disabled; the default is data=ordered). If your chosen filesystem supports direct I/O and you're using Linux, then you don't need to do anything to request or enable direct I/O. RethinkDB does that.
|
||||
|
||||
<p style="background-color: lightgrey;">What is direct I/O? It allows RethinkDB to write directly to the storage device (or use its own in-memory caching mechanisms), rather than relying on the operating system's file read and write caching mechanisms. (If you're using Linux, a write-to-file normally writes to the in-memory Page Cache first; only later does that Page Cache get flushed to disk. The Page Cache is also used when reading files.)</p>
|
||||
|
||||
* RethinkDB stores its data in a specific directory. You can tell RethinkDB _which_ directory using the RethinkDB config file, as explained below. In this documentation, we assume the directory is `/data`. If you set up a separate device (partition, RAID array, or logical volume) to store the RethinkDB data, then mount that device on `/data`.
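For example, here's a hedged sketch of formatting and mounting a hypothetical extra block device (the device name `/dev/xvdf` is a placeholder) as ext4 on `/data`:
```text
sudo mkfs -t ext4 /dev/xvdf
sudo mkdir -p /data
sudo mount /dev/xvdf /data
```
To have the mount survive reboots, you'd also add a corresponding line to `/etc/fstab`.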
|
||||
|
||||
|
||||
## Install RethinkDB Server
|
||||
|
||||
If you don't already have RethinkDB Server installed, you must install it. The RethinkDB documentation has instructions for [how to install RethinkDB Server on a variety of operating systems](http://rethinkdb.com/docs/install/).
|
||||
|
||||
|
||||
## Configure RethinkDB Server
|
||||
|
||||
Create a RethinkDB configuration file (text file) named `instance1.conf` with the following contents (explained below):
|
||||
```text
|
||||
directory=/data
|
||||
bind=all
|
||||
direct-io
|
||||
# Replace node?_hostname with actual node hostnames below, e.g. rdb.examples.com
|
||||
join=node0_hostname:29015
|
||||
join=node1_hostname:29015
|
||||
join=node2_hostname:29015
|
||||
# continue until there's a join= line for each node in the federation
|
||||
```
|
||||
|
||||
* `directory=/data` tells the RethinkDB node to store its share of the database data in `/data`.
|
||||
* `bind=all` binds RethinkDB to all local network interfaces (e.g. loopback, Ethernet, wireless, whatever is available), so it can communicate with the outside world. (The default is to bind only to the loopback interface, i.e. localhost.)
|
||||
* `direct-io` tells RethinkDB to use direct I/O (explained earlier). Only include this line if your file system supports direct I/O.
|
||||
* `join=hostname:29015` lines: A cluster node needs to find out the hostnames of all the other nodes somehow. You _could_ designate one node to be the one that every other node asks, and put that node's hostname in the config file, but that wouldn't be very decentralized. Instead, we include _every_ node in the list of nodes-to-ask.
|
||||
|
||||
If you're curious about the RethinkDB config file, there's [a RethinkDB documentation page about it](https://www.rethinkdb.com/docs/config-file/). The [explanations of the RethinkDB command-line options](https://rethinkdb.com/docs/cli-options/) are another useful reference.
|
||||
|
||||
TODO: Explain how to configure the RethinkDB cluster to be more secure. For now, see the [RethinkDB documentation on securing your cluster](https://rethinkdb.com/docs/security/).
|
||||
|
||||
|
||||
## Install Python 3.4+
|
||||
|
||||
If you don't already have it, then you should [install Python 3.4+](https://www.python.org/downloads/).
|
||||
|
||||
If you're testing or developing BigchainDB on a stand-alone node, then you should probably create a Python 3.4+ virtual environment and activate it (e.g. using virtualenv or conda). Later we will install several Python packages and you probably only want those installed in the virtual environment.
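For example, here's one way (a sketch, not the only way) to create and activate a virtual environment using `virtualenv`; the environment name `bigchaindb-venv` is just a placeholder, and installing `virtualenv` itself needs a working `pip3` (covered in the next section):
```text
pip3 install virtualenv
virtualenv -p python3 bigchaindb-venv
source bigchaindb-venv/bin/activate
```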
|
||||
|
||||
|
||||
## Install BigchainDB Server
|
||||
|
||||
BigchainDB Server has some OS-level dependencies that must be installed.
|
||||
|
||||
On Ubuntu 14.04, we found that the following was enough:
|
||||
```text
|
||||
sudo apt-get update
|
||||
sudo apt-get install g++ python3-dev
|
||||
```
|
||||
|
||||
On Fedora 23, we found that the following was enough (tested in February 2016):
|
||||
```text
|
||||
sudo dnf update
|
||||
sudo dnf install gcc-c++ redhat-rpm-config python3-devel
|
||||
```
|
||||
|
||||
(If you're using a version of Fedora before version 22, you may have to use `yum` instead of `dnf`.)
|
||||
|
||||
With OS-level dependencies installed, you can install BigchainDB Server with `pip` or from source.
|
||||
|
||||
|
||||
### How to Install BigchainDB with pip
|
||||
|
||||
BigchainDB (i.e. both the Server and the officially-supported drivers) is distributed as a Python package on PyPI so you can install it using `pip`. First, make sure you have an up-to-date Python 3.4+ version of `pip` installed:
|
||||
```text
|
||||
pip -V
|
||||
```
|
||||
|
||||
If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.4, then you must install a `pip` version associated with Python 3.4+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).
|
||||
|
||||
On Ubuntu 14.04, we found that this works:
|
||||
```text
|
||||
sudo apt-get install python3-pip
|
||||
```
|
||||
|
||||
That should install a Python 3 version of `pip` named `pip3`. If that didn't work, then another way to get `pip3` is to do `sudo apt-get install python3-setuptools` followed by `sudo easy_install3 pip`.
|
||||
|
||||
You can upgrade `pip` (`pip3`) and `setuptools` to the latest versions using:
|
||||
```text
|
||||
pip3 install --upgrade pip setuptools
|
||||
pip3 -V
|
||||
```
|
||||
|
||||
Now you can install BigchainDB Server (and officially-supported BigchainDB drivers) using:
|
||||
```text
|
||||
pip3 install bigchaindb
|
||||
```
|
||||
|
||||
(If you're not in a virtualenv and you want to install bigchaindb system-wide, then put `sudo` in front.)
|
||||
|
||||
Note: You can use `pip3` to upgrade the `bigchaindb` package to the latest version using `pip3 install --upgrade bigchaindb`.
|
||||
|
||||
|
||||
### How to Install BigchainDB from Source
|
||||
|
||||
If you want to install BigchainDB from source, e.g. because you want to use the very latest bleeding-edge code, clone the public repository:
|
||||
```text
|
||||
git clone git@github.com:bigchaindb/bigchaindb.git
cd bigchaindb
python setup.py install
|
||||
```
|
||||
|
||||
|
||||
## Configure BigchainDB Server
|
||||
|
||||
Start by creating a default BigchainDB config file:
|
||||
```text
|
||||
bigchaindb -y configure
|
||||
```
|
||||
|
||||
(Documentation for the `bigchaindb` command is in the section on [the BigchainDB Command Line Interface (CLI)](bigchaindb-cli.html).)
|
||||
|
||||
Edit the created config file:
|
||||
|
||||
* Open `$HOME/.bigchaindb` (the created config file) in your text editor.
|
||||
* Change `"server": {"bind": "localhost:9984", ... }` to `"server": {"bind": "0.0.0.0:9984", ... }`. This makes it so traffic can come from any IP address to port 9984 (the HTTP Client-Server API port).
|
||||
* Change `"api_endpoint": "http://localhost:9984/api/v1"` to `"api_endpoint": "http://your_api_hostname:9984/api/v1"`
|
||||
* Change `"keyring": []` to `"keyring": ["public_key_of_other_node_A", "public_key_of_other_node_B", "..."]` i.e. a list of the public keys of all the other nodes in the federation. The keyring should _not_ include your node's public key.
|
||||
|
||||
For more information about the BigchainDB config file, see [Configuring a BigchainDB Node](configuration.html).
|
||||
|
||||
|
||||
## Run RethinkDB Server
|
||||
|
||||
Start RethinkDB using:
|
||||
```text
|
||||
rethinkdb --config-file path/to/instance1.conf
|
||||
```
|
||||
|
||||
replacing `path/to/instance1.conf` with the actual path to your `instance1.conf` file.
|
||||
|
||||
Note: It's possible to [make RethinkDB start at system startup](https://www.rethinkdb.com/docs/start-on-startup/).
|
||||
|
||||
You can verify that RethinkDB is running by opening the RethinkDB web interface in your web browser. It should be at `http://rethinkdb-hostname:8080/`. If you're running RethinkDB on localhost, that would be [http://localhost:8080/](http://localhost:8080/).
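If your node is a headless server with no local web browser, one quick check (a hedged alternative, assuming `curl` is installed) is to request the web interface from the server itself:
```text
curl --head http://localhost:8080/
```
An `HTTP/1.1 200 OK` response indicates that the web interface, and hence RethinkDB, is up.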
|
||||
|
||||
|
||||
## Run BigchainDB Server
|
||||
|
||||
After all node operators have started RethinkDB, but before they start BigchainDB, one designated node operator must configure the RethinkDB database by running the following commands:
|
||||
```text
|
||||
bigchaindb init
|
||||
bigchaindb set-shards numshards
|
||||
bigchaindb set-replicas numreplicas
|
||||
```
|
||||
|
||||
where:
|
||||
|
||||
* `bigchaindb init` creates the database within RethinkDB, the tables, the indexes, and the genesis block.
|
||||
* `numshards` should be set to the number of nodes in the initial cluster.
|
||||
* `numreplicas` should be set to the database replication factor decided by the federation. It must be 3 or more for [RethinkDB failover](https://rethinkdb.com/docs/failover/) to work.
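For example, in a hypothetical federation of five nodes that agreed on a replication factor of 3, the designated node operator would run:
```text
bigchaindb init
bigchaindb set-shards 5
bigchaindb set-replicas 3
```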
|
||||
|
||||
Once the RethinkDB database is configured, every node operator can start BigchainDB using:
|
||||
```text
|
||||
bigchaindb start
|
||||
```
|
8
docs/source/prod-node-setup-mgmt/index.rst
Normal file
@ -0,0 +1,8 @@
|
||||
Production Node Setup & Management
|
||||
==================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
overview
|
||||
|
12
docs/source/prod-node-setup-mgmt/overview.md
Normal file
@ -0,0 +1,12 @@
|
||||
# Overview
|
||||
|
||||
Deploying and managing a production BigchainDB node is much more involved than working with a dev/test node:
|
||||
|
||||
* There are more components in a production node; see [the page about node components](../nodes/node-components.html)
|
||||
* Production nodes need more security
|
||||
* Production nodes need monitoring
|
||||
* Production nodes need maintenance, e.g. software upgrades, scaling
|
||||
|
||||
Thankfully, there are tools to help!
|
||||
|
||||
This section explains how to use various tools to deploy and manage a production node.
|
@ -1,6 +1,6 @@
|
||||
# Quickstart
|
||||
|
||||
This page has instructions to set up a single stand-alone BigchainDB node for learning or experimenting. Instructions for other cases are [elsewhere](introduction.html). We will assume you're using Ubuntu 14.04 or similar. If you're not using Linux, then you might try [running BigchainDB with Docker](nodes/run-with-docker.html).
|
||||
This page has instructions to set up a single stand-alone BigchainDB node for learning or experimenting. Instructions for other cases are [elsewhere](introduction.html). We will assume you're using Ubuntu 14.04 or similar. If you're not using Linux, then you might try [running BigchainDB with Docker](appendices/run-with-docker.html).
|
||||
|
||||
A. [Install RethinkDB Server](https://rethinkdb.com/docs/install/ubuntu/)
|
||||
|
||||
|
@ -1,4 +1,7 @@
|
||||
# The BigchainDB Command Line Interface (CLI)
|
||||
# BigchainDB Command Line Interface (CLI)
|
||||
|
||||
**Note: At the time of writing, BigchainDB Server and our BigchainDB client are combined, so the BigchainDB CLI includes some server-specific commands and some client-specific commands (e.g. `bigchaindb load`). Soon, BigchainDB Server will be separate from all BigchainDB clients, and they'll all have different CLIs.**
|
||||
|
||||
|
||||
The command-line command to interact with BigchainDB is `bigchaindb`.
|
||||
|
@ -1,4 +1,6 @@
|
||||
# Node Configuration Settings
|
||||
# BigchainDB Configuration Settings
|
||||
|
||||
**Note: At the time of writing, BigchainDB Server code and BigchainDB Python driver code are mixed together, so the following settings are the settings used by BigchainDB Server and also by clients written using the Python driver code. Soon, the code will be separated into server, driver and shared modules, so that BigchainDB Server and BigchainDB clients will have different configuration settings.**
|
||||
|
||||
The value of each configuration setting is determined according to the following rules:
|
||||
|
11
docs/source/server-reference/index.rst
Normal file
@ -0,0 +1,11 @@
|
||||
.. You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
BigchainDB Settings and CLI
|
||||
===========================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
configuration
|
||||
bigchaindb-cli
|
@ -1,6 +1,4 @@
|
||||
import copy
|
||||
import multiprocessing as mp
|
||||
import random
|
||||
import time
|
||||
|
||||
import pytest
|
||||
@ -9,8 +7,6 @@ import cryptoconditions as cc
|
||||
|
||||
import bigchaindb
|
||||
from bigchaindb import crypto, exceptions, util
|
||||
from bigchaindb.voter import Voter
|
||||
from bigchaindb.block import BlockDeleteRevert
|
||||
|
||||
|
||||
@pytest.mark.skipif(reason='Some tests throw a ResourceWarning that might result in some weird '
|
||||
@ -140,7 +136,7 @@ class TestBigchainApi(object):
|
||||
|
||||
# vote the block invalid
|
||||
vote = b.vote(block['id'], b.get_last_voted_block()['id'], False)
|
||||
b.write_vote(block, vote)
|
||||
b.write_vote(vote)
|
||||
response = b.get_transaction(tx_signed["id"])
|
||||
|
||||
# should be None, because invalid blocks are ignored
|
||||
@ -280,13 +276,13 @@ class TestBigchainApi(object):
|
||||
# make sure all the blocks are written at the same time
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '1')
|
||||
|
||||
b.write_vote(block_1, b.vote(block_1['id'], b.get_last_voted_block()['id'], True))
|
||||
b.write_vote(b.vote(block_1['id'], b.get_last_voted_block()['id'], True))
|
||||
assert b.get_last_voted_block()['id'] == block_1['id']
|
||||
|
||||
b.write_vote(block_2, b.vote(block_2['id'], b.get_last_voted_block()['id'], True))
|
||||
b.write_vote(b.vote(block_2['id'], b.get_last_voted_block()['id'], True))
|
||||
assert b.get_last_voted_block()['id'] == block_2['id']
|
||||
|
||||
b.write_vote(block_3, b.vote(block_3['id'], b.get_last_voted_block()['id'], True))
|
||||
b.write_vote(b.vote(block_3['id'], b.get_last_voted_block()['id'], True))
|
||||
assert b.get_last_voted_block()['id'] == block_3['id']
|
||||
|
||||
|
||||
@ -305,15 +301,15 @@ class TestBigchainApi(object):
|
||||
|
||||
# make sure all the blocks are written at different timestamps
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '1')
|
||||
b.write_vote(block_1, b.vote(block_1['id'], b.get_last_voted_block()['id'], True))
|
||||
b.write_vote(b.vote(block_1['id'], b.get_last_voted_block()['id'], True))
|
||||
assert b.get_last_voted_block()['id'] == block_1['id']
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '2')
|
||||
b.write_vote(block_2, b.vote(block_2['id'], b.get_last_voted_block()['id'], True))
|
||||
b.write_vote(b.vote(block_2['id'], b.get_last_voted_block()['id'], True))
|
||||
assert b.get_last_voted_block()['id'] == block_2['id']
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '3')
|
||||
b.write_vote(block_3, b.vote(block_3['id'], b.get_last_voted_block()['id'], True))
|
||||
b.write_vote(b.vote(block_3['id'], b.get_last_voted_block()['id'], True))
|
||||
assert b.get_last_voted_block()['id'] == block_3['id']
|
||||
|
||||
def test_no_vote_written_if_block_already_has_vote(self, b):
|
||||
@ -323,11 +319,11 @@ class TestBigchainApi(object):
|
||||
|
||||
b.write_block(block_1, durability='hard')
|
||||
|
||||
b.write_vote(block_1, b.vote(block_1['id'], genesis['id'], True))
|
||||
b.write_vote(b.vote(block_1['id'], genesis['id'], True))
|
||||
retrieved_block_1 = r.table('bigchain').get(block_1['id']).run(b.conn)
|
||||
|
||||
# try to vote again on the retrieved block, should do nothing
|
||||
b.write_vote(retrieved_block_1, b.vote(retrieved_block_1['id'], genesis['id'], True))
|
||||
b.write_vote(b.vote(retrieved_block_1['id'], genesis['id'], True))
|
||||
retrieved_block_2 = r.table('bigchain').get(block_1['id']).run(b.conn)
|
||||
|
||||
assert retrieved_block_1 == retrieved_block_2
|
||||
@ -609,45 +605,6 @@ class TestBlockValidation(object):
|
||||
b.validate_block(block)
|
||||
|
||||
|
||||
class TestBigchainBlock(object):
|
||||
|
||||
def test_revert_delete_block(self, b):
|
||||
b.create_genesis_block()
|
||||
|
||||
block_1 = dummy_block()
|
||||
block_2 = dummy_block()
|
||||
block_3 = dummy_block()
|
||||
|
||||
b.write_block(block_1, durability='hard')
|
||||
b.write_block(block_2, durability='hard')
|
||||
b.write_block(block_3, durability='hard')
|
||||
|
||||
b.write_vote(block_1, b.vote(block_1['id'], b.get_last_voted_block()['id'], True))
|
||||
b.write_vote(block_2, b.vote(block_2['id'], b.get_last_voted_block()['id'], True))
|
||||
b.write_vote(block_3, b.vote(block_3['id'], b.get_last_voted_block()['id'], True))
|
||||
|
||||
q_revert_delete = mp.Queue()
|
||||
|
||||
reverter = BlockDeleteRevert(q_revert_delete)
|
||||
|
||||
# simulate changefeed
|
||||
r.table('bigchain').get(block_2['id']).delete().run(b.conn)
|
||||
q_revert_delete.put(block_2)
|
||||
|
||||
assert r.table('bigchain').get(block_2['id']).run(b.conn) is None
|
||||
|
||||
reverter.start()
|
||||
time.sleep(1)
|
||||
reverter.kill()
|
||||
|
||||
reverted_block_2 = r.table('bigchain').get(block_2['id']).run(b.conn)
|
||||
|
||||
assert reverted_block_2 == block_2
|
||||
|
||||
def test_duplicated_transactions(self):
|
||||
pytest.skip('We may have duplicates in the initial_results and changefeed')
|
||||
|
||||
|
||||
class TestMultipleInputs(object):
|
||||
def test_transfer_single_owners_single_input(self, b, user_sk, user_vk, inputs):
|
||||
# create a new user
|
||||
@ -922,7 +879,7 @@ class TestMultipleInputs(object):
|
||||
|
||||
# vote the block VALID
|
||||
vote = b.vote(block['id'], genesis['id'], True)
|
||||
b.write_vote(block, vote)
|
||||
b.write_vote(vote)
|
||||
|
||||
# get input
|
||||
owned_inputs_user1 = b.get_owned_ids(user_vk)
|
||||
@ -938,7 +895,7 @@ class TestMultipleInputs(object):
|
||||
|
||||
# vote the block invalid
|
||||
vote = b.vote(block['id'], b.get_last_voted_block()['id'], False)
|
||||
b.write_vote(block, vote)
|
||||
b.write_vote(vote)
|
||||
|
||||
owned_inputs_user1 = b.get_owned_ids(user_vk)
|
||||
owned_inputs_user2 = b.get_owned_ids(user2_vk)
|
||||
@ -1052,7 +1009,7 @@ class TestMultipleInputs(object):
|
||||
|
||||
# vote the block VALID
|
||||
vote = b.vote(block['id'], genesis['id'], True)
|
||||
b.write_vote(block, vote)
|
||||
b.write_vote(vote)
|
||||
|
||||
# get input
|
||||
owned_inputs_user1 = b.get_owned_ids(user_vk)
|
||||
@ -1069,7 +1026,7 @@ class TestMultipleInputs(object):
|
||||
|
||||
# vote the block invalid
|
||||
vote = b.vote(block['id'], b.get_last_voted_block()['id'], False)
|
||||
b.write_vote(block, vote)
|
||||
b.write_vote(vote)
|
||||
response = b.get_transaction(tx_signed["id"])
|
||||
spent_inputs_user1 = b.get_spent(owned_inputs_user1[0])
|
||||
|
||||
|
@ -1,508 +0,0 @@
|
||||
import pytest
|
||||
import time
|
||||
import rethinkdb as r
|
||||
import multiprocessing as mp
|
||||
|
||||
from bigchaindb import util
|
||||
|
||||
from bigchaindb.voter import Voter, BlockStream
|
||||
from bigchaindb import crypto, Bigchain
|
||||
|
||||
|
||||
# Some util functions
|
||||
def dummy_tx():
|
||||
b = Bigchain()
|
||||
tx = b.create_transaction(b.me, b.me, None, 'CREATE')
|
||||
tx_signed = b.sign_transaction(tx, b.me_private)
|
||||
return tx_signed
|
||||
|
||||
|
||||
def dummy_block():
|
||||
b = Bigchain()
|
||||
block = b.create_block([dummy_tx()])
|
||||
return block
|
||||
|
||||
|
||||
class TestBigchainVoter(object):
|
||||
|
||||
def test_valid_block_voting(self, b):
|
||||
q_new_block = mp.Queue()
|
||||
|
||||
genesis = b.create_genesis_block()
|
||||
|
||||
# create valid block
|
||||
# sleep so that `block` as a higher timestamp then genesis
|
||||
time.sleep(1)
|
||||
block = dummy_block()
|
||||
# assert block is valid
|
||||
assert b.is_valid_block(block)
|
||||
b.write_block(block, durability='hard')
|
||||
|
||||
# create queue and voter
|
||||
voter = Voter(q_new_block)
|
||||
|
||||
# vote
|
||||
voter.start()
|
||||
# wait for vote to be written
|
||||
time.sleep(1)
|
||||
voter.kill()
|
||||
|
||||
# retrive block from bigchain
|
||||
blocks = list(r.table('bigchain')
|
||||
.order_by(r.asc((r.row['block']['timestamp'])))
|
||||
.run(b.conn))
|
||||
|
||||
# retrieve vote
|
||||
vote = r.table('votes').get_all([block['id'], b.me], index='block_and_voter').run(b.conn)
|
||||
vote = vote.next()
|
||||
|
||||
# validate vote
|
||||
assert vote is not None
|
||||
|
||||
assert vote['vote']['voting_for_block'] == block['id']
|
||||
assert vote['vote']['previous_block'] == genesis['id']
|
||||
assert vote['vote']['is_block_valid'] is True
|
||||
assert vote['vote']['invalid_reason'] is None
|
||||
assert vote['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote['vote']), vote['signature']) is True
|
||||
|
||||
def test_valid_block_voting_with_create_transaction(self, b):
|
||||
q_new_block = mp.Queue()
|
||||
|
||||
genesis = b.create_genesis_block()
|
||||
|
||||
# create a `CREATE` transaction
|
||||
test_user_priv, test_user_pub = crypto.generate_key_pair()
|
||||
tx = b.create_transaction(b.me, test_user_pub, None, 'CREATE')
|
||||
tx_signed = b.sign_transaction(tx, b.me_private)
|
||||
assert b.is_valid_transaction(tx_signed)
|
||||
|
||||
# create valid block
|
||||
# sleep so that block as a higher timestamp then genesis
|
||||
time.sleep(1)
|
||||
block = b.create_block([tx_signed])
|
||||
# assert block is valid
|
||||
assert b.is_valid_block(block)
|
||||
b.write_block(block, durability='hard')
|
||||
|
||||
# create queue and voter
|
||||
voter = Voter(q_new_block)
|
||||
|
||||
# vote
|
||||
voter.start()
|
||||
# wait for vote to be written
|
||||
time.sleep(1)
|
||||
voter.kill()
|
||||
|
||||
# retrive block from bigchain
|
||||
blocks = list(r.table('bigchain')
|
||||
.order_by(r.asc((r.row['block']['timestamp'])))
|
||||
.run(b.conn))
|
||||
# retrieve vote
|
||||
vote = r.table('votes').get_all([block['id'], b.me], index='block_and_voter').run(b.conn)
|
||||
vote = vote.next()
|
||||
|
||||
# validate vote
|
||||
assert vote is not None
|
||||
|
||||
assert vote['vote']['voting_for_block'] == block['id']
|
||||
assert vote['vote']['previous_block'] == genesis['id']
|
||||
assert vote['vote']['is_block_valid'] is True
|
||||
assert vote['vote']['invalid_reason'] is None
|
||||
assert vote['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote['vote']), vote['signature']) is True
|
||||
|
||||
def test_valid_block_voting_with_transfer_transactions(self, b):
|
||||
q_new_block = mp.Queue()
|
||||
|
||||
b.create_genesis_block()
|
||||
|
||||
# create a `CREATE` transaction
|
||||
test_user_priv, test_user_pub = crypto.generate_key_pair()
|
||||
tx = b.create_transaction(b.me, test_user_pub, None, 'CREATE')
|
||||
tx_signed = b.sign_transaction(tx, b.me_private)
|
||||
assert b.is_valid_transaction(tx_signed)
|
||||
|
||||
# create valid block
|
||||
block = b.create_block([tx_signed])
|
||||
# assert block is valid
|
||||
assert b.is_valid_block(block)
|
||||
b.write_block(block, durability='hard')
|
||||
|
||||
# create queue and voter
|
||||
voter = Voter(q_new_block)
|
||||
|
||||
# vote
|
||||
voter.start()
|
||||
# wait for vote to be written
|
||||
time.sleep(1)
|
||||
voter.kill()
|
||||
|
||||
# retrive block from bigchain
|
||||
blocks = list(r.table('bigchain')
|
||||
.order_by(r.asc((r.row['block']['timestamp'])))
|
||||
.run(b.conn))
|
||||
|
||||
# retrieve vote
|
||||
vote = r.table('votes').get_all([block['id'], b.me], index='block_and_voter').run(b.conn)
|
||||
vote = vote.next()
|
||||
|
||||
# validate vote
|
||||
assert vote is not None
|
||||
|
||||
# create a `TRANSFER` transaction
|
||||
test_user2_priv, test_user2_pub = crypto.generate_key_pair()
|
||||
tx2 = b.create_transaction(test_user_pub, test_user2_pub, {'txid': tx['id'], 'cid': 0}, 'TRANSFER')
|
||||
tx2_signed = b.sign_transaction(tx2, test_user_priv)
|
||||
assert b.is_valid_transaction(tx2_signed)
|
||||
|
||||
# create valid block
|
||||
block = b.create_block([tx2_signed])
|
||||
# assert block is valid
|
||||
assert b.is_valid_block(block)
|
||||
b.write_block(block, durability='hard')
|
||||
|
||||
# create queue and voter
|
||||
voter = Voter(q_new_block)
|
||||
|
||||
# vote
|
||||
voter.start()
|
||||
# wait for vote to be written
|
||||
time.sleep(1)
|
||||
voter.kill()
|
||||
|
||||
# retrive block from bigchain
|
||||
blocks = list(r.table('bigchain')
|
||||
.order_by(r.asc((r.row['block']['timestamp'])))
|
||||
.run(b.conn))
|
||||
|
||||
# retrieve vote
|
||||
vote = r.table('votes').get_all([blocks[2]['id'], b.me], index='block_and_voter').run(b.conn)
|
||||
vote = vote.next()
|
||||
|
||||
# validate vote
|
||||
assert vote is not None
|
||||
|
||||
assert vote['vote']['voting_for_block'] == block['id']
|
||||
assert vote['vote']['is_block_valid'] is True
|
||||
assert vote['vote']['invalid_reason'] is None
|
||||
assert vote['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote['vote']), vote['signature']) is True
|
||||
|
||||
def test_invalid_block_voting(self, b, user_vk):
|
||||
# create queue and voter
|
||||
q_new_block = mp.Queue()
|
||||
voter = Voter(q_new_block)
|
||||
|
||||
# create transaction
|
||||
transaction = b.create_transaction(b.me, user_vk, None, 'CREATE')
|
||||
transaction_signed = b.sign_transaction(transaction, b.me_private)
|
||||
|
||||
genesis = b.create_genesis_block()
|
||||
|
||||
# create invalid block
|
||||
# sleep so that `block` as a higher timestamp then `genesis`
|
||||
time.sleep(1)
|
||||
block = b.create_block([transaction_signed])
|
||||
# change transaction id to make it invalid
|
||||
block['block']['transactions'][0]['id'] = 'abc'
|
||||
assert not b.is_valid_block(block)
|
||||
b.write_block(block, durability='hard')
|
||||
|
||||
# vote
|
||||
voter.start()
|
||||
time.sleep(1)
|
||||
voter.kill()
|
||||
|
||||
# retrive block from bigchain
|
||||
blocks = list(r.table('bigchain')
|
||||
.order_by(r.asc((r.row['block']['timestamp'])))
|
||||
.run(b.conn))
|
||||
|
||||
# retrieve vote
|
||||
vote = r.table('votes').get_all([block['id'], b.me], index='block_and_voter').run(b.conn)
|
||||
vote = vote.next()
|
||||
|
||||
# validate vote
|
||||
assert vote is not None
|
||||
|
||||
assert vote['vote']['voting_for_block'] == block['id']
|
||||
assert vote['vote']['previous_block'] == genesis['id']
|
||||
assert vote['vote']['is_block_valid'] is False
|
||||
assert vote['vote']['invalid_reason'] is None
|
||||
assert vote['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote['vote']), vote['signature']) is True
|
||||
|
||||
def test_vote_creation_valid(self, b):
|
||||
# create valid block
|
||||
block = dummy_block()
|
||||
# retrieve vote
|
||||
vote = b.vote(block['id'], 'abc', True)
|
||||
|
||||
# assert vote is correct
|
||||
assert vote['vote']['voting_for_block'] == block['id']
|
||||
assert vote['vote']['previous_block'] == 'abc'
|
||||
assert vote['vote']['is_block_valid'] is True
|
||||
assert vote['vote']['invalid_reason'] is None
|
||||
assert vote['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote['vote']), vote['signature']) is True
|
||||
|
||||
def test_vote_creation_invalid(self, b):
|
||||
# create valid block
|
||||
block = dummy_block()
|
||||
# retrieve vote
|
||||
vote = b.vote(block['id'], 'abc', False)
|
||||
|
||||
# assert vote is correct
|
||||
assert vote['vote']['voting_for_block'] == block['id']
|
||||
assert vote['vote']['previous_block'] == 'abc'
|
||||
assert vote['vote']['is_block_valid'] is False
|
||||
assert vote['vote']['invalid_reason'] is None
|
||||
assert vote['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote['vote']), vote['signature']) is True
|
||||
|
||||
def test_voter_considers_unvoted_blocks_when_single_node(self, b):
|
||||
# simulate a voter going donw in a single node environment
|
||||
b.create_genesis_block()
|
||||
|
||||
# insert blocks in the database while the voter process is not listening
|
||||
# (these blocks won't appear in the changefeed)
|
||||
block_1 = dummy_block()
|
||||
b.write_block(block_1, durability='hard')
|
||||
block_2 = dummy_block()
|
||||
b.write_block(block_2, durability='hard')
|
||||
|
||||
# voter is back online, we simulate that by creating a queue and a Voter instance
|
||||
q_new_block = mp.Queue()
|
||||
voter = Voter(q_new_block)
|
||||
|
||||
# vote
|
||||
voter.start()
|
||||
time.sleep(1)
|
||||
|
||||
# create a new block that will appear in the changefeed
|
||||
block_3 = dummy_block()
|
||||
b.write_block(block_3, durability='hard')
|
||||
|
||||
time.sleep(1)
|
||||
voter.kill()
|
||||
|
||||
# retrive blocks from bigchain
|
||||
blocks = list(r.table('bigchain')
|
||||
.order_by(r.asc((r.row['block']['timestamp'])))
|
||||
.run(b.conn))
|
||||
|
||||
# FIXME: remove genesis block, we don't vote on it (might change in the future)
|
||||
blocks.pop(0)
|
||||
|
||||
# retrieve vote
|
||||
votes = r.table('votes').run(b.conn)
|
||||
votes = list(votes)
|
||||
|
||||
assert all(vote['node_pubkey'] == b.me for vote in votes)
|
||||
|
||||
def test_voter_chains_blocks_with_the_previous_ones(self, b):
|
||||
b.create_genesis_block()
|
||||
# sleep so that `block_*` as a higher timestamp then `genesis`
|
||||
time.sleep(1)
|
||||
block_1 = dummy_block()
|
||||
b.write_block(block_1, durability='hard')
|
||||
time.sleep(1)
|
||||
block_2 = dummy_block()
|
||||
b.write_block(block_2, durability='hard')
|
||||
|
||||
q_new_block = mp.Queue()
|
||||
|
||||
voter = Voter(q_new_block)
|
||||
voter.start()
|
||||
time.sleep(1)
|
||||
voter.kill()
|
||||
|
||||
# retrive blocks from bigchain
|
||||
blocks = list(r.table('bigchain')
|
||||
.order_by(r.asc((r.row['block']['timestamp'])))
|
||||
.run(b.conn))
|
||||
|
||||
# retrieve votes
|
||||
votes = list(r.table('votes').run(b.conn))
|
||||
|
||||
assert votes[0]['vote']['voting_for_block'] in (blocks[1]['id'], blocks[2]['id'])
|
||||
assert votes[1]['vote']['voting_for_block'] in (blocks[1]['id'], blocks[2]['id'])
|
||||
|
||||
def test_voter_checks_for_previous_vote(self, b):
|
||||
b.create_genesis_block()
|
||||
block_1 = dummy_block()
|
||||
b.write_block(block_1, durability='hard')
|
||||
|
||||
q_new_block = mp.Queue()
|
||||
|
||||
voter = Voter(q_new_block)
|
||||
voter.start()
|
||||
|
||||
time.sleep(1)
|
||||
retrieved_block = r.table('bigchain').get(block_1['id']).run(b.conn)
|
||||
|
||||
# queue block for voting AGAIN
|
||||
q_new_block.put(retrieved_block)
|
||||
time.sleep(1)
|
||||
voter.kill()
|
||||
|
||||
re_retrieved_block = r.table('bigchain').get(block_1['id']).run(b.conn)
|
||||
|
||||
# block should be unchanged
|
||||
assert retrieved_block == re_retrieved_block
|
||||
|
||||
@pytest.mark.skipif(reason='Updating the block_number must be atomic')
|
||||
def test_updating_block_number_must_be_atomic(self):
|
||||
pass
|
||||
|
||||
|
||||
class TestBlockElection(object):
|
||||
|
||||
def test_quorum(self, b):
|
||||
# create a new block
|
||||
test_block = dummy_block()
|
||||
|
||||
# simulate a federation with four voters
|
||||
key_pairs = [crypto.generate_key_pair() for _ in range(4)]
|
||||
test_federation = [Bigchain(public_key=key_pair[1], private_key=key_pair[0])
|
||||
for key_pair in key_pairs]
|
||||
|
||||
# dummy block with test federation public keys as voters
|
||||
test_block['block']['voters'] = [key_pair[1] for key_pair in key_pairs]
|
||||
|
||||
# fake "yes" votes
|
||||
valid_vote = [member.vote(test_block['id'], 'abc', True)
|
||||
for member in test_federation]
|
||||
|
||||
# fake "no" votes
|
||||
invalid_vote = [member.vote(test_block['id'], 'abc', False)
|
||||
for member in test_federation]
|
||||
|
||||
# fake "yes" votes with incorrect signatures
|
||||
improperly_signed_valid_vote = [member.vote(test_block['id'], 'abc', True) for
|
||||
member in test_federation]
|
||||
[vote['vote'].update(this_should_ruin_things='lol')
|
||||
for vote in improperly_signed_valid_vote]
|
||||
|
||||
# test unanimously valid block
|
||||
r.table('votes').insert(valid_vote, durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_VALID
|
||||
r.table('votes').delete().run(b.conn)
|
||||
|
||||
# test partial quorum situations
|
||||
r.table('votes').insert(valid_vote[:2], durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_UNDECIDED
|
||||
r.table('votes').delete().run(b.conn)
|
||||
#
|
||||
r.table('votes').insert(valid_vote[:3], durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_VALID
|
||||
r.table('votes').delete().run(b.conn)
|
||||
#
|
||||
r.table('votes').insert(invalid_vote[:2], durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_INVALID
|
||||
r.table('votes').delete().run(b.conn)
|
||||
|
||||
# test unanimously valid block with one improperly signed vote -- should still succeed
|
||||
r.table('votes').insert(valid_vote[:3] + improperly_signed_valid_vote[3:], durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_VALID
|
||||
r.table('votes').delete().run(b.conn)
|
||||
|
||||
# test unanimously valid block with two improperly signed votes -- should fail
|
||||
r.table('votes').insert(valid_vote[:2] + improperly_signed_valid_vote[2:], durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_INVALID
|
||||
r.table('votes').delete().run(b.conn)
|
||||
|
||||
# test block with minority invalid vote
|
||||
r.table('votes').insert(invalid_vote[:1] + valid_vote[1:], durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_VALID
|
||||
r.table('votes').delete().run(b.conn)
|
||||
|
||||
# test split vote
|
||||
r.table('votes').insert(invalid_vote[:2] + valid_vote[2:], durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_INVALID
|
||||
r.table('votes').delete().run(b.conn)
|
||||
|
||||
# test undecided
|
||||
r.table('votes').insert(valid_vote[:2], durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_UNDECIDED
|
||||
r.table('votes').delete().run(b.conn)
|
||||
|
||||
# change signatures in block, should fail
|
||||
test_block['block']['voters'][0] = 'abc'
|
||||
test_block['block']['voters'][1] = 'abc'
|
||||
r.table('votes').insert(valid_vote, durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_INVALID
|
||||
|
||||
def test_quorum_odd(self, b):
|
||||
# test partial quorum situations for odd numbers of voters
|
||||
# create a new block
|
||||
test_block = dummy_block()
|
||||
|
||||
# simulate a federation with four voters
|
||||
key_pairs = [crypto.generate_key_pair() for _ in range(5)]
|
||||
test_federation = [Bigchain(public_key=key_pair[1], private_key=key_pair[0])
|
||||
for key_pair in key_pairs]
|
||||
|
||||
# dummy block with test federation public keys as voters
|
||||
test_block['block']['voters'] = [key_pair[1] for key_pair in key_pairs]
|
||||
|
||||
# fake "yes" votes
|
||||
valid_vote = [member.vote(test_block['id'], 'abc', True)
|
||||
for member in test_federation]
|
||||
|
||||
# fake "no" votes
|
||||
invalid_vote = [member.vote(test_block['id'], 'abc', False)
|
||||
for member in test_federation]
|
||||
|
||||
r.table('votes').insert(valid_vote[:2], durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_UNDECIDED
|
||||
r.table('votes').delete().run(b.conn)
|
||||
|
||||
r.table('votes').insert(invalid_vote[:2], durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_UNDECIDED
|
||||
r.table('votes').delete().run(b.conn)
|
||||
|
||||
r.table('votes').insert(valid_vote[:3], durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_VALID
|
||||
r.table('votes').delete().run(b.conn)
|
||||
|
||||
r.table('votes').insert(invalid_vote[:3], durability='hard').run(b.conn)
|
||||
assert b.block_election_status(test_block) == Bigchain.BLOCK_INVALID
|
||||
r.table('votes').delete().run(b.conn)
|
||||
|
||||
|
||||
class TestBlockStream(object):
|
||||
|
||||
def test_if_federation_size_is_greater_than_one_ignore_past_blocks(self, b):
|
||||
for _ in range(5):
|
||||
b.nodes_except_me.append(crypto.generate_key_pair()[1])
|
||||
new_blocks = mp.Queue()
|
||||
bs = BlockStream(new_blocks)
|
||||
block_1 = dummy_block()
|
||||
new_blocks.put(block_1)
|
||||
assert block_1 == bs.get()
|
||||
|
||||
def test_if_no_old_blocks_get_should_return_new_blocks(self, b):
|
||||
new_blocks = mp.Queue()
|
||||
bs = BlockStream(new_blocks)
|
||||
|
||||
# create two blocks
|
||||
block_1 = dummy_block()
|
||||
block_2 = dummy_block()
|
||||
|
||||
# write the blocks
|
||||
b.write_block(block_1, durability='hard')
|
||||
b.write_block(block_2, durability='hard')
|
||||
|
||||
# simulate a changefeed
|
||||
new_blocks.put(block_1)
|
||||
new_blocks.put(block_2)
|
||||
|
||||
# and check if we get exactly these two blocks
|
||||
assert bs.get() == block_1
|
||||
assert bs.get() == block_2
|
||||
|
||||
@pytest.mark.skipif(reason='We may have duplicated blocks when retrieving the BlockStream')
|
||||
def test_ignore_duplicated_blocks_when_retrieving_the_blockstream(self):
|
||||
pass
|
479
tests/pipelines/test_vote.py
Normal file
@ -0,0 +1,479 @@
|
||||
from unittest.mock import patch
|
||||
import rethinkdb as r
|
||||
from multipipes import Pipe, Pipeline
|
||||
|
||||
from bigchaindb import util
|
||||
from bigchaindb import crypto
|
||||
|
||||
|
||||
def dummy_tx(b):
|
||||
tx = b.create_transaction(b.me, b.me, None, 'CREATE')
|
||||
tx_signed = b.sign_transaction(tx, b.me_private)
|
||||
return tx_signed
|
||||
|
||||
|
||||
def dummy_block(b):
|
||||
block = b.create_block([dummy_tx(b) for _ in range(10)])
|
||||
return block
|
||||
|
||||
|
||||
def test_vote_creation_valid(b):
|
||||
# create valid block
|
||||
block = dummy_block(b)
|
||||
# retrieve vote
|
||||
vote = b.vote(block['id'], 'abc', True)
|
||||
|
||||
# assert vote is correct
|
||||
assert vote['vote']['voting_for_block'] == block['id']
|
||||
assert vote['vote']['previous_block'] == 'abc'
|
||||
assert vote['vote']['is_block_valid'] is True
|
||||
assert vote['vote']['invalid_reason'] is None
|
||||
assert vote['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote['vote']),
|
||||
vote['signature']) is True
|
||||
|
||||
|
||||
def test_vote_creation_invalid(b):
|
||||
# create valid block
|
||||
block = dummy_block(b)
|
||||
# retrieve vote
|
||||
vote = b.vote(block['id'], 'abc', False)
|
||||
|
||||
# assert vote is correct
|
||||
assert vote['vote']['voting_for_block'] == block['id']
|
||||
assert vote['vote']['previous_block'] == 'abc'
|
||||
assert vote['vote']['is_block_valid'] is False
|
||||
assert vote['vote']['invalid_reason'] is None
|
||||
assert vote['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote['vote']),
|
||||
vote['signature']) is True
|
||||
|
||||
|
||||
def test_vote_ungroup_returns_a_set_of_results(b):
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
b.create_genesis_block()
|
||||
block = dummy_block(b)
|
||||
vote_obj = vote.Vote()
|
||||
txs = list(vote_obj.ungroup(block, True))
|
||||
|
||||
assert len(txs) == 10
|
||||
|
||||
|
||||
def test_vote_validate_block(b):
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
b.create_genesis_block()
|
||||
tx = dummy_tx(b)
|
||||
block = b.create_block([tx])
|
||||
|
||||
vote_obj = vote.Vote()
|
||||
validation = vote_obj.validate_block(block)
|
||||
assert validation == (block, True)
|
||||
|
||||
block = b.create_block([tx])
|
||||
block['block']['id'] = 'this-is-not-a-valid-hash'
|
||||
|
||||
vote_obj = vote.Vote()
|
||||
validation = vote_obj.validate_block(block)
|
||||
assert validation == (block, False)
|
||||
|
||||
|
||||
def test_vote_validate_transaction(b):
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
b.create_genesis_block()
|
||||
tx = dummy_tx(b)
|
||||
vote_obj = vote.Vote()
|
||||
validation = vote_obj.validate_tx(tx, 123, 1)
|
||||
assert validation == (True, 123, 1)
|
||||
|
||||
tx['id'] = 'a' * 64
|
||||
validation = vote_obj.validate_tx(tx, 456, 10)
|
||||
assert validation == (False, 456, 10)
|
||||
|
||||
|
||||
def test_vote_accumulates_transactions(b):
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
b.create_genesis_block()
|
||||
vote_obj = vote.Vote()
|
||||
|
||||
for _ in range(10):
|
||||
tx = dummy_tx(b)
|
||||
|
||||
validation = vote_obj.validate_tx(tx, 123, 1)
|
||||
assert validation == (True, 123, 1)
|
||||
|
||||
tx['id'] = 'a' * 64
|
||||
validation = vote_obj.validate_tx(tx, 456, 10)
|
||||
assert validation == (False, 456, 10)
|
||||
|
||||
|
||||
def test_valid_block_voting_sequential(b, monkeypatch):
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '1')
|
||||
genesis = b.create_genesis_block()
|
||||
vote_obj = vote.Vote()
|
||||
block = dummy_block(b)
|
||||
|
||||
for tx, block_id, num_tx in vote_obj.ungroup(block, True):
|
||||
last_vote = vote_obj.vote(*vote_obj.validate_tx(tx, block_id, num_tx))
|
||||
|
||||
vote_obj.write_vote(last_vote)
|
||||
vote_rs = r.table('votes').get_all([block['id'], b.me],
|
||||
index='block_and_voter').run(b.conn)
|
||||
vote_doc = vote_rs.next()
|
||||
|
||||
assert vote_doc['vote'] == {'voting_for_block': block['id'],
|
||||
'previous_block': genesis['id'],
|
||||
'is_block_valid': True,
|
||||
'invalid_reason': None,
|
||||
'timestamp': '1'}
|
||||
|
||||
assert vote_doc['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote_doc['vote']),
|
||||
vote_doc['signature']) is True
|
||||
|
||||
|
||||
def test_valid_block_voting_multiprocessing(b, monkeypatch):
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
inpipe = Pipe()
|
||||
outpipe = Pipe()
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '1')
|
||||
genesis = b.create_genesis_block()
|
||||
vote_pipeline = vote.create_pipeline()
|
||||
vote_pipeline.setup(indata=inpipe, outdata=outpipe)
|
||||
|
||||
block = dummy_block(b)
|
||||
|
||||
inpipe.put(block)
|
||||
vote_pipeline.start()
|
||||
vote_out = outpipe.get()
|
||||
vote_pipeline.terminate()
|
||||
|
||||
vote_rs = r.table('votes').get_all([block['id'], b.me],
|
||||
index='block_and_voter').run(b.conn)
|
||||
vote_doc = vote_rs.next()
|
||||
assert vote_out['vote'] == vote_doc['vote']
|
||||
assert vote_doc['vote'] == {'voting_for_block': block['id'],
|
||||
'previous_block': genesis['id'],
|
||||
'is_block_valid': True,
|
||||
'invalid_reason': None,
|
||||
'timestamp': '1'}
|
||||
|
||||
assert vote_doc['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote_doc['vote']),
|
||||
vote_doc['signature']) is True
|
||||
|
||||
|
||||
def test_valid_block_voting_with_create_transaction(b, monkeypatch):
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
genesis = b.create_genesis_block()
|
||||
|
||||
# create a `CREATE` transaction
|
||||
test_user_priv, test_user_pub = crypto.generate_key_pair()
|
||||
tx = b.create_transaction(b.me, test_user_pub, None, 'CREATE')
|
||||
tx_signed = b.sign_transaction(tx, b.me_private)
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '1')
|
||||
block = b.create_block([tx_signed])
|
||||
|
||||
inpipe = Pipe()
|
||||
outpipe = Pipe()
|
||||
|
||||
vote_pipeline = vote.create_pipeline()
|
||||
vote_pipeline.setup(indata=inpipe, outdata=outpipe)
|
||||
|
||||
inpipe.put(block)
|
||||
vote_pipeline.start()
|
||||
vote_out = outpipe.get()
|
||||
vote_pipeline.terminate()
|
||||
|
||||
vote_rs = r.table('votes').get_all([block['id'], b.me],
|
||||
index='block_and_voter').run(b.conn)
|
||||
vote_doc = vote_rs.next()
|
||||
assert vote_out['vote'] == vote_doc['vote']
|
||||
assert vote_doc['vote'] == {'voting_for_block': block['id'],
|
||||
'previous_block': genesis['id'],
|
||||
'is_block_valid': True,
|
||||
'invalid_reason': None,
|
||||
'timestamp': '1'}
|
||||
|
||||
assert vote_doc['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote_doc['vote']),
|
||||
vote_doc['signature']) is True
|
||||
|
||||
|
||||
def test_valid_block_voting_with_transfer_transactions(monkeypatch, b):
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
genesis = b.create_genesis_block()
|
||||
|
||||
# create a `CREATE` transaction
|
||||
test_user_priv, test_user_pub = crypto.generate_key_pair()
|
||||
tx = b.create_transaction(b.me, test_user_pub, None, 'CREATE')
|
||||
tx_signed = b.sign_transaction(tx, b.me_private)
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '1')
|
||||
block = b.create_block([tx_signed])
|
||||
b.write_block(block, durability='hard')
|
||||
|
||||
# create a `TRANSFER` transaction
|
||||
test_user2_priv, test_user2_pub = crypto.generate_key_pair()
|
||||
tx2 = b.create_transaction(test_user_pub, test_user2_pub,
|
||||
{'txid': tx['id'], 'cid': 0}, 'TRANSFER')
|
||||
tx2_signed = b.sign_transaction(tx2, test_user_priv)
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '2')
|
||||
block2 = b.create_block([tx2_signed])
|
||||
b.write_block(block2, durability='hard')
|
||||
|
||||
inpipe = Pipe()
|
||||
outpipe = Pipe()
|
||||
|
||||
vote_pipeline = vote.create_pipeline()
|
||||
vote_pipeline.setup(indata=inpipe, outdata=outpipe)
|
||||
|
||||
inpipe.put(block)
|
||||
inpipe.put(block2)
|
||||
vote_pipeline.start()
|
||||
vote_out = outpipe.get()
|
||||
vote2_out = outpipe.get()
|
||||
vote_pipeline.terminate()
|
||||
|
||||
vote_rs = r.table('votes').get_all([block['id'], b.me],
|
||||
index='block_and_voter').run(b.conn)
|
||||
vote_doc = vote_rs.next()
|
||||
assert vote_out['vote'] == vote_doc['vote']
|
||||
assert vote_doc['vote'] == {'voting_for_block': block['id'],
|
||||
'previous_block': genesis['id'],
|
||||
'is_block_valid': True,
|
||||
'invalid_reason': None,
|
||||
'timestamp': '2'}
|
||||
|
||||
assert vote_doc['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote_doc['vote']),
|
||||
vote_doc['signature']) is True
|
||||
|
||||
vote2_rs = r.table('votes').get_all([block2['id'], b.me],
|
||||
index='block_and_voter').run(b.conn)
|
||||
vote2_doc = vote2_rs.next()
|
||||
assert vote2_out['vote'] == vote2_doc['vote']
|
||||
assert vote2_doc['vote'] == {'voting_for_block': block2['id'],
|
||||
'previous_block': block['id'],
|
||||
'is_block_valid': True,
|
||||
'invalid_reason': None,
|
||||
'timestamp': '2'}
|
||||
|
||||
assert vote2_doc['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote2_doc['vote']),
|
||||
vote2_doc['signature']) is True
|
||||
|
||||
|
||||
def test_invalid_tx_in_block_voting(monkeypatch, b, user_vk):
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
inpipe = Pipe()
|
||||
outpipe = Pipe()
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '1')
|
||||
genesis = b.create_genesis_block()
|
||||
vote_pipeline = vote.create_pipeline()
|
||||
vote_pipeline.setup(indata=inpipe, outdata=outpipe)
|
||||
|
||||
block = dummy_block(b)
|
||||
block['block']['transactions'][0]['id'] = 'abc'
|
||||
|
||||
inpipe.put(block)
|
||||
vote_pipeline.start()
|
||||
vote_out = outpipe.get()
|
||||
vote_pipeline.terminate()
|
||||
|
||||
vote_rs = r.table('votes').get_all([block['id'], b.me],
|
||||
index='block_and_voter').run(b.conn)
|
||||
vote_doc = vote_rs.next()
|
||||
assert vote_out['vote'] == vote_doc['vote']
|
||||
assert vote_doc['vote'] == {'voting_for_block': block['id'],
|
||||
'previous_block': genesis['id'],
|
||||
'is_block_valid': False,
|
||||
'invalid_reason': None,
|
||||
'timestamp': '1'}
|
||||
|
||||
assert vote_doc['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote_doc['vote']),
|
||||
vote_doc['signature']) is True
|
||||
|
||||
|
||||
def test_invalid_block_voting(monkeypatch, b, user_vk):
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
inpipe = Pipe()
|
||||
outpipe = Pipe()
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '1')
|
||||
genesis = b.create_genesis_block()
|
||||
vote_pipeline = vote.create_pipeline()
|
||||
vote_pipeline.setup(indata=inpipe, outdata=outpipe)
|
||||
|
||||
block = dummy_block(b)
|
||||
block['block']['id'] = 'this-is-not-a-valid-hash'
|
||||
|
||||
inpipe.put(block)
|
||||
vote_pipeline.start()
|
||||
vote_out = outpipe.get()
|
||||
vote_pipeline.terminate()
|
||||
|
||||
vote_rs = r.table('votes').get_all([block['id'], b.me],
|
||||
index='block_and_voter').run(b.conn)
|
||||
vote_doc = vote_rs.next()
|
||||
assert vote_out['vote'] == vote_doc['vote']
|
||||
assert vote_doc['vote'] == {'voting_for_block': block['id'],
|
||||
'previous_block': genesis['id'],
|
||||
'is_block_valid': False,
|
||||
'invalid_reason': None,
|
||||
'timestamp': '1'}
|
||||
|
||||
assert vote_doc['node_pubkey'] == b.me
|
||||
assert crypto.VerifyingKey(b.me).verify(util.serialize(vote_doc['vote']),
|
||||
vote_doc['signature']) is True
|
||||
|
||||
|
||||
def test_voter_considers_unvoted_blocks_when_single_node(monkeypatch, b):
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
outpipe = Pipe()
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '1')
|
||||
b.create_genesis_block()
|
||||
|
||||
# insert blocks in the database while the voter process is not listening
|
||||
# (these blocks won't appear in the changefeed)
|
||||
block_1 = dummy_block(b)
|
||||
b.write_block(block_1, durability='hard')
|
||||
block_2 = dummy_block(b)
|
||||
b.write_block(block_2, durability='hard')
|
||||
|
||||
vote_pipeline = vote.create_pipeline()
|
||||
vote_pipeline.setup(indata=vote.get_changefeed(), outdata=outpipe)
|
||||
vote_pipeline.start()
|
||||
|
||||
# We expect two votes, so instead of waiting an arbitrary amount
|
||||
# of time, we can do two blocking calls to `get`
|
||||
outpipe.get()
|
||||
outpipe.get()
|
||||
|
||||
# create a new block that will appear in the changefeed
|
||||
block_3 = dummy_block(b)
|
||||
b.write_block(block_3, durability='hard')
|
||||
|
||||
# Same as before with the two `get`s
|
||||
outpipe.get()
|
||||
|
||||
vote_pipeline.terminate()
|
||||
|
||||
# retrieve blocks from bigchain
|
||||
blocks = list(r.table('bigchain')
|
||||
.order_by(r.asc((r.row['block']['timestamp'])))
|
||||
.run(b.conn))
|
||||
|
||||
# FIXME: remove genesis block, we don't vote on it
|
||||
# (might change in the future)
|
||||
blocks.pop(0)
|
||||
vote_pipeline.terminate()
|
||||
|
||||
# retrieve vote
|
||||
votes = r.table('votes').run(b.conn)
|
||||
votes = list(votes)
|
||||
|
||||
assert all(vote['node_pubkey'] == b.me for vote in votes)
|
||||
|
||||
|
||||
def test_voter_chains_blocks_with_the_previous_ones(monkeypatch, b):
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
outpipe = Pipe()
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '1')
|
||||
b.create_genesis_block()
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '2')
|
||||
block_1 = dummy_block(b)
|
||||
b.write_block(block_1, durability='hard')
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '3')
|
||||
block_2 = dummy_block(b)
|
||||
b.write_block(block_2, durability='hard')
|
||||
|
||||
vote_pipeline = vote.create_pipeline()
|
||||
vote_pipeline.setup(indata=vote.get_changefeed(), outdata=outpipe)
|
||||
vote_pipeline.start()
|
||||
|
||||
# We expect two votes, so instead of waiting an arbitrary amount
|
||||
# of time, we can do two blocking calls to `get`
|
||||
outpipe.get()
|
||||
outpipe.get()
|
||||
vote_pipeline.terminate()
|
||||
|
||||
# retrieve blocks from bigchain
|
||||
blocks = list(r.table('bigchain')
|
||||
.order_by(r.asc((r.row['block']['timestamp'])))
|
||||
.run(b.conn))
|
||||
|
||||
# retrieve votes
|
||||
votes = list(r.table('votes').run(b.conn))
|
||||
|
||||
assert votes[0]['vote']['voting_for_block'] in (blocks[1]['id'], blocks[2]['id'])
|
||||
assert votes[1]['vote']['voting_for_block'] in (blocks[1]['id'], blocks[2]['id'])
|
||||
|
||||
|
||||
def test_voter_checks_for_previous_vote(monkeypatch, b):
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
inpipe = Pipe()
|
||||
outpipe = Pipe()
|
||||
|
||||
monkeypatch.setattr(util, 'timestamp', lambda: '1')
|
||||
b.create_genesis_block()
|
||||
|
||||
block_1 = dummy_block(b)
|
||||
inpipe.put(block_1)
|
||||
|
||||
assert r.table('votes').count().run(b.conn) == 0
|
||||
|
||||
vote_pipeline = vote.create_pipeline()
|
||||
vote_pipeline.setup(indata=inpipe, outdata=outpipe)
|
||||
vote_pipeline.start()
|
||||
|
||||
# wait for the result
|
||||
outpipe.get()
|
||||
|
||||
# queue block for voting AGAIN
|
||||
inpipe.put(block_1)
|
||||
|
||||
# queue another block
|
||||
inpipe.put(dummy_block(b))
|
||||
|
||||
# wait for the result of the new block
|
||||
outpipe.get()
|
||||
|
||||
vote_pipeline.terminate()
|
||||
|
||||
assert r.table('votes').count().run(b.conn) == 2
|
||||
|
||||
|
||||
@patch.object(Pipeline, 'start')
|
||||
def test_start(mock_start, b):
|
||||
# TODO: `block.start` is just a wrapper around `vote.create_pipeline`,
|
||||
# that is tested by `test_full_pipeline`.
|
||||
# If anyone has better ideas on how to test this, please do a PR :)
|
||||
from bigchaindb.pipelines import vote
|
||||
|
||||
b.create_genesis_block()
|
||||
|
||||
vote.start()
|
||||
mock_start.assert_called_with()
|
@ -32,8 +32,8 @@ def mock_db_init_with_existing_db(monkeypatch):
|
||||
|
||||
@pytest.fixture
|
||||
def mock_processes_start(monkeypatch):
|
||||
from bigchaindb.processes import Processes
|
||||
monkeypatch.setattr(Processes, 'start', lambda *args: None)
|
||||
from bigchaindb import processes
|
||||
monkeypatch.setattr(processes, 'start', lambda *args: None)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
|