Merge branch 'master' into refactor-multiprocessing-for-vote

This commit is contained in:
vrde 2016-08-15 14:51:16 +02:00
commit 702fbe693f
No known key found for this signature in database
GPG Key ID: 6581C7C39B3D397D
44 changed files with 449 additions and 678 deletions

View File

@ -38,9 +38,9 @@ Familiarize yourself with how we do coding and documentation in the BigchainDB p
### Step 2 - Install some Dependencies
* [Install RethinkDB Server](https://rethinkdb.com/docs/install/)
* Make sure you have Python 3.4+ (maybe in a virtualenv)
* [Install BigchainDB Server's OS-level dependencies](http://bigchaindb.readthedocs.io/en/latest/nodes/setup-run-node.html#install-bigchaindb-server)
* [Make sure you have the latest version of pip](http://bigchaindb.readthedocs.io/en/latest/nodes/setup-run-node.html#how-to-install-bigchaindb-with-pip)
* Make sure you have Python 3.4+ (preferably in a virtualenv)
* [Install BigchainDB Server's OS-level dependencies](http://bigchaindb.readthedocs.io/en/latest/appendices/install-os-level-deps.html)
* [Make sure you have the latest Python 3 version of pip and setuptools](http://bigchaindb.readthedocs.io/en/latest/appendices/install-latest-pip.html)
### Step 3 - Fork bigchaindb on GitHub

View File

@ -16,8 +16,8 @@ We're hiring! [Learn more](https://github.com/bigchaindb/org/blob/master/engjob.
## Get Started
### [Quickstart](http://bigchaindb.readthedocs.io/en/latest/quickstart.html)
### [Set Up and Run a BigchainDB Node](http://bigchaindb.readthedocs.io/en/latest/nodes/setup-run-node.html)
### [Run BigchainDB with Docker](http://bigchaindb.readthedocs.io/en/latest/nodes/run-with-docker.html)
### [Set Up & Run a Dev/Test Node](http://bigchaindb.readthedocs.io/en/latest/dev-and-test/setup-run-node.html)
### [Run BigchainDB with Docker](http://bigchaindb.readthedocs.io/en/latest/appendices/run-with-docker.html)
## Links for Everyone
* [BigchainDB.com](https://www.bigchaindb.com/) - the main BigchainDB website, including newsletter signup

View File

@ -1,52 +0,0 @@
import logging
import multiprocessing as mp
import queue
import rethinkdb as r
import bigchaindb
from bigchaindb import Bigchain
from bigchaindb.monitor import Monitor
from bigchaindb.util import ProcessGroup
logger = logging.getLogger(__name__)
class BlockDeleteRevert(object):
def __init__(self, q_delete_to_revert):
self.q_delete_to_revert = q_delete_to_revert
def write_blocks(self):
"""
Write blocks to the bigchain
"""
# create bigchain instance
b = Bigchain()
# Write blocks
while True:
block = self.q_delete_to_revert.get()
# poison pill
if block == 'stop':
return
b.write_block(block)
def kill(self):
for i in range(mp.cpu_count()):
self.q_delete_to_revert.put('stop')
def start(self):
"""
Initialize, spawn, and start the processes
"""
# initialize the processes
p_write = ProcessGroup(name='write_blocks', target=self.write_blocks)
# start the processes
p_write.start()

View File

@ -24,7 +24,7 @@ from bigchaindb.exceptions import (StartupError,
DatabaseAlreadyExists,
KeypairNotFoundException)
from bigchaindb.commands import utils
from bigchaindb.processes import Processes
from bigchaindb import processes
from bigchaindb import crypto
@ -169,7 +169,6 @@ def run_start(args):
sys.exit("Can't start BigchainDB, no keypair found. "
'Did you run `bigchaindb configure`?')
processes = Processes()
logger.info('Starting BigchainDB main process')
processes.start()

View File

@ -209,7 +209,7 @@ def write_config(config, filename=None):
filename = CONFIG_DEFAULT_PATH
with open(filename, 'w') as f:
json.dump(config, f)
json.dump(config, f, indent=4)
def autoconfigure(filename=None, config=None, force=False):
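The only functional change here is the added `indent=4`, which makes the written config file human-readable. A minimal standalone illustration (the config dict below is made up, not BigchainDB's actual defaults):
```python
import json

config = {'server': {'bind': 'localhost:9984'}, 'keyring': []}

# Without indent, everything lands on one long line.
print(json.dumps(config))
# With indent=4, the same data is pretty-printed across indented lines,
# which is much easier to read and edit in $HOME/.bigchaindb.
print(json.dumps(config, indent=4))
```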

View File

@ -0,0 +1,65 @@
"""This module takes care of all the logic related to block status.
Specifically, what happens when a block becomes invalid. The logic is
encapsulated in the ``Election`` class, while the sequence of actions
is specified in ``create_pipeline``.
"""
import logging
import rethinkdb as r
from multipipes import Pipeline, Node
from bigchaindb.pipelines.utils import ChangeFeed
from bigchaindb import Bigchain
logger = logging.getLogger(__name__)
class Election:
def __init__(self):
self.bigchain = Bigchain()
def check_for_quorum(self, next_vote):
"""
Checks if block has enough invalid votes to make a decision
"""
next_block = r.table('bigchain')\
.get(next_vote['vote']['voting_for_block'])\
.run(self.bigchain.conn)
if self.bigchain.block_election_status(next_block) == self.bigchain.BLOCK_INVALID:
return next_block
def requeue_transactions(self, invalid_block):
"""
Liquidates transactions from invalid blocks so they can be processed again
"""
logger.info('Rewriting %s transactions from invalid block %s',
len(invalid_block['block']['transactions']),
invalid_block['id'])
for tx in invalid_block['block']['transactions']:
self.bigchain.write_transaction(tx)
return invalid_block
def get_changefeed():
return ChangeFeed(table='votes', operation='insert')
def create_pipeline():
election = Election()
election_pipeline = Pipeline([
Node(election.check_for_quorum),
Node(election.requeue_transactions)
])
return election_pipeline
def start():
pipeline = create_pipeline()
pipeline.setup(indata=get_changefeed())
pipeline.start()
return pipeline
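The two steps above form a filter-then-act chain: `check_for_quorum` returns a block only when the accumulated votes make it invalid (and implicitly returns `None` otherwise), and `requeue_transactions` rewrites that block's transactions. As a plain-Python illustration of that data flow, assuming the pipeline drops an item whenever a node returns `None` (the `run_chain` helper below is hypothetical, not part of multipipes):
```python
# Hypothetical helper, for illustration only: mimics a pipeline in which a
# step returning None drops the item, so only invalid blocks reach the
# requeue step.
def run_chain(steps, item):
    for step in steps:
        item = step(item)
        if item is None:  # e.g. the block isn't (yet) decidedly invalid
            return None
    return item

# Usage sketch:
# election = Election()
# run_chain([election.check_for_quorum, election.requeue_transactions], next_vote)
```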

View File

@ -1,13 +1,8 @@
import logging
import multiprocessing as mp
import rethinkdb as r
import bigchaindb
from bigchaindb.pipelines import block, vote
from bigchaindb import Bigchain
from bigchaindb.voter import Election
from bigchaindb.block import BlockDeleteRevert
from bigchaindb.pipelines import vote, block, election
from bigchaindb.web import server
@ -26,56 +21,23 @@ BANNER = """
"""
class Processes(object):
def start():
logger.info('Initializing BigchainDB...')
def __init__(self):
# initialize the class
self.q_block_new_vote = mp.Queue()
self.q_revert_delete = mp.Queue()
# start the processes
logger.info('Starting block')
block.start()
def map_bigchain(self):
# listen to changes on the bigchain and redirect the changes
# to the correct queues
logger.info('Starting voter')
vote.start()
# create a bigchain instance
b = Bigchain()
logger.info('Starting election')
election.start()
for change in r.table('bigchain').changes().run(b.conn):
# start the web api
app_server = server.create_server(bigchaindb.config['server'])
p_webapi = mp.Process(name='webapi', target=app_server.run)
p_webapi.start()
# delete
if change['new_val'] is None:
# this should never happen in regular operation
self.q_revert_delete.put(change['old_val'])
# update (new vote)
elif change['new_val'] is not None and change['old_val'] is not None:
self.q_block_new_vote.put(change['new_val'])
def start(self):
logger.info('Initializing BigchainDB...')
delete_reverter = BlockDeleteRevert(self.q_revert_delete)
# start the web api
app_server = server.create_server(bigchaindb.config['server'])
p_webapi = mp.Process(name='webapi', target=app_server.run)
p_webapi.start()
# initialize the processes
p_map_bigchain = mp.Process(name='bigchain_mapper', target=self.map_bigchain)
p_block_delete_revert = mp.Process(name='block_delete_revert', target=delete_reverter.start)
p_election = Election(self.q_block_new_vote)
# start the processes
logger.info('starting bigchain mapper')
p_map_bigchain.start()
logger.info('starting block')
block.start()
p_block_delete_revert.start()
logger.info('starting voter')
vote.start()
logger.info('starting election')
p_election.start()
# start message
logger.info(BANNER.format(bigchaindb.config['server']['bind']))
# start message
logger.info(BANNER.format(bigchaindb.config['server']['bind']))
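Because the removed class-based code and the new function-based code are interleaved in the hunk above, here is a best-effort reconstruction of the new module-level `start()`, assembled from the added lines (a sketch, not necessarily identical to the final `bigchaindb/processes.py`; `BANNER` stands in for the module's banner template, which is elided in the hunk):
```python
import logging
import multiprocessing as mp

import bigchaindb
from bigchaindb.pipelines import vote, block, election
from bigchaindb.web import server

logger = logging.getLogger(__name__)

BANNER = '{}'  # placeholder: the real multi-line banner template is elided above


def start():
    logger.info('Initializing BigchainDB...')

    # start the pipelines
    logger.info('Starting block')
    block.start()

    logger.info('Starting voter')
    vote.start()

    logger.info('Starting election')
    election.start()

    # start the web api
    app_server = server.create_server(bigchaindb.config['server'])
    p_webapi = mp.Process(name='webapi', target=app_server.run)
    p_webapi.start()

    # start message
    logger.info(BANNER.format(bigchaindb.config['server']['bind']))
```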

View File

@ -169,8 +169,8 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
Reference:
{
"id": "<sha3 hash>",
"version": "transaction version number",
"transaction": {
"version": "transaction version number",
"fulfillments": [
{
"current_owners": ["list of <pub-keys>"],
@ -278,6 +278,7 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
})
tx = {
'version': 1,
'fulfillments': fulfillments,
'conditions': conditions,
'operation': operation,
@ -291,7 +292,6 @@ def create_tx(current_owners, new_owners, inputs, operation, payload=None):
# create the transaction
transaction = {
'id': tx_hash,
'version': 1,
'transaction': tx
}
@ -479,7 +479,7 @@ def get_fulfillment_message(transaction, fulfillment, serialized=False):
'operation': transaction['transaction']['operation'],
'timestamp': transaction['transaction']['timestamp'],
'data': transaction['transaction']['data'],
'version': transaction['version'],
'version': transaction['transaction']['version'],
'id': transaction['id']
}
# and the condition which needs to be retrieved from the output of a previous transaction
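Taken together, these hunks move the `version` field from the top level of the transaction into the `transaction` body, so it becomes part of the body that gets hashed into `id`. A rough sketch of the resulting shape (every value below is a placeholder):
```python
# Shape only; all values are placeholders. After this change, 'version' sits
# inside the hashed 'transaction' body instead of next to 'id'.
transaction = {
    'id': '<sha3 hash of the serialized transaction body>',
    'transaction': {
        'version': 1,
        'fulfillments': ['<list of fulfillments>'],
        'conditions': ['<list of conditions>'],
        'operation': 'CREATE',
        'timestamp': '<unix timestamp, as a string>',
        'data': '<payload container>',
    },
}
```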

View File

@ -1,72 +0,0 @@
import logging
import multiprocessing as mp
from bigchaindb import Bigchain
logger = logging.getLogger(__name__)
class Election(object):
def __init__(self, q_block_new_vote):
"""
Initialize the class with the needed queues.
Initialize a queue where blocks with new votes will be held
"""
self.q_block_new_vote = q_block_new_vote
self.q_invalid_blocks = mp.Queue()
def check_for_quorum(self):
"""
Checks if block has enough invalid votes to make a decision
"""
b = Bigchain()
while True:
next_block = self.q_block_new_vote.get()
# poison pill
if next_block == 'stop':
self.q_invalid_blocks.put('stop')
logger.info('clean exit')
return
if b.block_election_status(next_block) == 'invalid':
self.q_invalid_blocks.put(next_block)
def requeue_transactions(self):
"""
Liquidates transactions from invalid blocks so they can be processed again
"""
while True:
invalid_block = self.q_invalid_blocks.get()
# poison pill
if invalid_block == 'stop':
logger.info('clean exit')
return
b = Bigchain()
for tx in invalid_block['block']['transactions']:
b.write_transaction(tx)
def kill(self):
"""
Terminate processes
"""
self.q_block_new_vote.put('stop')
def start(self):
"""
Initialize, spawn, and start the processes
"""
# initialize the processes
p_quorum_check = mp.Process(name='check_for_quorum', target=self.check_for_quorum)
p_requeue_tx = mp.Process(name='requeue_tx', target=self.requeue_transactions)
# start the processes
p_quorum_check.start()
p_requeue_tx.start()

View File

@ -2,7 +2,7 @@
## Example Amazon EC2 Setups
We have some scripts for [deploying a _test_ BigchainDB cluster on AWS](../clusters-feds/deploy-on-aws.html). Those scripts include command sequences to set up storage for RethinkDB.
We have some scripts for [deploying a _test_ BigchainDB cluster on AWS](../clusters-feds/aws-testing-cluster.html). Those scripts include command sequences to set up storage for RethinkDB.
In particular, look in the file [/deploy-cluster-aws/fabfile.py](https://github.com/bigchaindb/bigchaindb/blob/master/deploy-cluster-aws/fabfile.py), under `def prep_rethinkdb_storage(USING_EBS)`. Note that there are two cases:
1. **Using EBS ([Amazon Elastic Block Store](https://aws.amazon.com/ebs/)).** This is always an option, and for some instance types ("EBS-only"), it's the only option.

View File

@ -47,7 +47,7 @@ Port 8080 is the default port used by RethinkDB for its administrative web (HTTP)
Port 9984 is the default port for the BigchainDB client-server HTTP API (TCP), which is served by Gunicorn HTTP Server. It's _possible_ to allow port 9984 to accept inbound traffic from anyone, but we recommend against doing that. Instead, set up a reverse proxy server (e.g. using Nginx) and only allow traffic from there. Information about how to do that can be found [in the Gunicorn documentation](http://docs.gunicorn.org/en/stable/deploy.html). (They call it a proxy.)
If Gunicorn and the reverse proxy are running on the same server, then you'll have to tell Gunicorn to listen on some port other than 9984 (so that the reverse proxy can listen on port 9984). You can do that by setting `server.bind` to 'localhost:PORT' in the [BigchainDB Configuration Settings](../nodes/configuration.html), where PORT is whatever port you chose (e.g. 9983).
If Gunicorn and the reverse proxy are running on the same server, then you'll have to tell Gunicorn to listen on some port other than 9984 (so that the reverse proxy can listen on port 9984). You can do that by setting `server.bind` to 'localhost:PORT' in the [BigchainDB Configuration Settings](../server-reference/configuration.html), where PORT is whatever port you chose (e.g. 9983).
You may want to have Gunicorn and the reverse proxy running on different servers, so that both can listen on port 9984. That would also help isolate the effects of a denial-of-service attack.

View File

@ -7,6 +7,9 @@ Appendices
.. toctree::
:maxdepth: 1
install-os-level-deps
install-latest-pip
run-with-docker
json-serialization
cryptography
the-Bigchain-class
@ -15,4 +18,5 @@ Appendices
firewall-notes
ntp-notes
example-rethinkdb-storage-setups
licenses
licenses
install-with-lxd

View File

@ -0,0 +1,20 @@
# How to Install the Latest pip and setuptools
You can check the version of `pip` you're using (in your current virtualenv) by doing:
```text
pip -V
```
If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.4, then you must install a `pip` version associated with Python 3.4+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).
On Ubuntu 14.04, we found that this works:
```text
sudo apt-get install python3-pip
```
That should install a Python 3 version of `pip` named `pip3`. If that didn't work, then another way to get `pip3` is to do `sudo apt-get install python3-setuptools` followed by `sudo easy_install3 pip`.
You can upgrade `pip` (`pip3`) and `setuptools` to the latest versions using:
```text
pip3 install --upgrade pip setuptools
```

View File

@ -0,0 +1,17 @@
# How to Install OS-Level Dependencies
BigchainDB Server has some OS-level dependencies that must be installed.
On Ubuntu 14.04 and 16.04, we found that the following was enough:
```text
sudo apt-get update
sudo apt-get install g++ python3-dev
```
On Fedora 23 and 24, we found that the following was enough:
```text
sudo dnf update
sudo dnf install gcc-c++ redhat-rpm-config python3-devel
```
(If you're using a version of Fedora before version 22, you may have to use `yum` instead of `dnf`.)

View File

@ -0,0 +1,43 @@
# Installing BigchainDB on LXC containers using LXD
To install LXD, follow the [LXD installation instructions](https://linuxcontainers.org/lxd/getting-started-cli/).
(We assume you're using Ubuntu 14.04 for both the host and the container.)
Create an LXC container (via LXD) with the following command:
`lxc launch ubuntu:14.04 bigchaindb`
(`ubuntu:14.04` is the image to fetch from the remote image server, and `bigchaindb` is the name of the new container.)
Below is the `install.sh` script you'll need to install BigchainDB within your container:
```
#!/bin/bash
set -ex
export DEBIAN_FRONTEND=noninteractive
apt-get install -y wget
source /etc/lsb-release && echo "deb http://download.rethinkdb.com/apt $DISTRIB_CODENAME main" | sudo tee /etc/apt/sources.list.d/rethinkdb.list
wget -qO- https://download.rethinkdb.com/apt/pubkey.gpg | sudo apt-key add -
apt-get update
apt-get install -y rethinkdb python3-pip
pip3 install --upgrade pip wheel setuptools
pip install ptpython bigchaindb
```
Copy the above `install.sh` into the directory (on the host) from which you will run your LXD commands.
Make sure your container is running by typing:
`lxc list`
Now, from the host (and the correct directory) where you saved `install.sh`, run this command:
`cat install.sh | lxc exec bigchaindb /bin/bash`
If you followed the commands correctly, you will have successfully created an LXC container (using LXD) that can get you up and running with BigchainDB in <5 minutes (depending on how long it takes to download all the packages).
From this point onwards, you can follow the [Python Example](https://bigchaindb.readthedocs.io/en/latest/drivers-clients/python-server-api-examples.html).

View File

@ -96,7 +96,7 @@ docker run --rm -v "$HOME/bigchaindb_docker:/data" -ti \
Note the `--link` option to link to the first container (named `bigchaindb`).
Aside: The `bigchaindb load` command has several options (e.g. `-m`). You can read more about it in [the documentation about the BigchainDB command line interface](bigchaindb-cli.html).
Aside: The `bigchaindb load` command has several options (e.g. `-m`). You can read more about it in [the documentation about the BigchainDB command line interface](../server-reference/bigchaindb-cli.html).
If you look at the RethinkDB dashboard (in your web browser), you should see the effects of the load test. You can also see some effects in the Docker logs using:
```text

View File

@ -1,12 +1,16 @@
# Deploy a Cluster on AWS
# Deploy a Testing Cluster on AWS
This section explains a way to deploy a cluster of BigchainDB nodes on Amazon Web Services (AWS). We use some Bash and Python scripts to launch several instances (virtual servers) on Amazon Elastic Compute Cloud (EC2). Then we use Fabric to install RethinkDB and BigchainDB on all those instances.
This section explains a way to deploy a cluster of BigchainDB nodes on Amazon Web Services (AWS) for testing purposes.
## Why?
You might ask why one would want to deploy a centrally-controlled BigchainDB cluster. Isn't BigchainDB supposed to be decentralized, where each node is controlled by a different person or organization?
Why would anyone want to deploy a centrally-controlled BigchainDB cluster? Isn't BigchainDB supposed to be decentralized, where each node is controlled by a different person or organization?
Yes! These scripts are for deploying _test_ clusters, not production clusters.
Yes! These scripts are for deploying a testing cluster, not a production cluster.
## How?
We use some Bash and Python scripts to launch several instances (virtual servers) on Amazon Elastic Compute Cloud (EC2). Then we use Fabric to install RethinkDB and BigchainDB on all those instances.
## Python Setup
@ -74,7 +78,7 @@ fab --fabfile=fabfile-monitor.py --hosts=<EC2 hostname> run_monitor
For more information about monitoring (e.g. how to view the Grafana dashboard in your web browser), see the [Monitoring](monitoring.html) section of this documentation.
To configure a BigchainDB node to send monitoring data to the monitoring server, change the statsd host in the configuration of the BigchainDB node. The section on [Configuring a BigchainDB Node](../nodes/configuration.html) explains how you can do that. (For example, you can change the statsd host in `$HOME/.bigchaindb`.)
To configure a BigchainDB node to send monitoring data to the monitoring server, change the statsd host in the configuration of the BigchainDB node. The section on [Configuring a BigchainDB Node](../server-reference/configuration.html) explains how you can do that. (For example, you can change the statsd host in `$HOME/.bigchaindb`.)
## Deploy a BigchainDB Cluster

View File

@ -22,7 +22,7 @@ That's just one possible way of setting up the file system so as to provide extr
Another way to get similar reliability would be to mount the RethinkDB data directory on an [Amazon EBS](https://aws.amazon.com/ebs/) volume. Each Amazon EBS volume is, "automatically replicated within its Availability Zone to protect you from component failure, offering high availability and durability."
See [the section on setting up storage for RethinkDB](../nodes/setup-run-node.html#set-up-storage-for-rethinkdb-data) for more details.
See [the section on setting up storage for RethinkDB](../dev-and-test/setup-run-node.html#set-up-storage-for-rethinkdb-data) for more details.
As with shard replication, live file-system replication protects against many failure modes, but it doesn't protect against them all. You should still consider having normal, "cold" backups.
@ -39,7 +39,7 @@ rethinkdb dump -e bigchain.bigchain -e bigchain.votes
```
That should write a file named `rethinkdb_dump_<date>_<time>.tar.gz`. The `-e` option is used to specify which tables should be exported. You probably don't need to export the backlog table, but you definitely need to export the bigchain and votes tables.
`bigchain.votes` means the `votes` table in the RethinkDB database named `bigchain`. It's possible that your database has a different name: [the database name is a BigchainDB configuration setting](../nodes/configuration.html#database-host-database-port-database-name). The default name is `bigchain`. (Tip: you can see the values of all configuration settings using the `bigchaindb show-config` command.)
`bigchain.votes` means the `votes` table in the RethinkDB database named `bigchain`. It's possible that your database has a different name: [the database name is a BigchainDB configuration setting](../server-reference/configuration.html#database-host-database-port-database-name). The default name is `bigchain`. (Tip: you can see the values of all configuration settings using the `bigchaindb show-config` command.)
There's [more information about the `rethinkdb dump` command in the RethinkDB documentation](https://www.rethinkdb.com/docs/backup/). It also explains how to restore data to a cluster from an archive file.
@ -108,7 +108,7 @@ Considerations for BigchainDB:
Although it's not advertised as such, RethinkDB's built-in replication feature is similar to continuous backup, except the "backup" (i.e. the set of replica shards) is spread across all the nodes. One could take that idea a bit farther by creating a set of backup-only servers with one full backup:
* Give all the original BigchainDB nodes (RethinkDB nodes) the server tag `original`. This is the default if you used the RethinkDB config file suggested in the section titled [Configure RethinkDB Server](../nodes/setup-run-node.html#configure-rethinkdb-server).
* Give all the original BigchainDB nodes (RethinkDB nodes) the server tag `original`. This is the default if you used the RethinkDB config file suggested in the section titled [Configure RethinkDB Server](../dev-and-test/setup-run-node.html#configure-rethinkdb-server).
* Set up a group of servers running RethinkDB only, and give them the server tag `backup`. The `backup` servers could be geographically separated from all the `original` nodes (or not; it's up to the federation).
* Clients shouldn't be able to read from or write to servers in the `backup` set.
* Send a RethinkDB reconfigure command to the RethinkDB cluster to make it so that the `original` set has the same number of replicas as before (or maybe one less), and the `backup` set has one replica. Also, make sure the `primary_replica_tag='original'` so that all primary shards live on the `original` nodes.
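As a rough sketch of that last reconfiguration step (the shard and replica counts below are placeholders that depend on the federation's decisions), the command could be issued from the RethinkDB Python driver like this:
```python
import rethinkdb as r

conn = r.connect('localhost', 28015)

# Placeholder values: choose shard and replica counts to match the federation's
# decisions. primary_replica_tag='original' keeps all primary shards on the
# 'original' servers, while the 'backup' servers hold one replica of each shard.
# (The 'votes' and 'backlog' tables would be reconfigured the same way.)
r.db('bigchain').table('bigchain').reconfigure(
    shards=2,
    replicas={'original': 3, 'backup': 1},
    primary_replica_tag='original',
).run(conn)
```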

View File

@ -7,10 +7,9 @@ BigchainDB Clusters & Federations
.. toctree::
:maxdepth: 1
node-cluster-fed
set-up-a-federation
backup
deploy-on-aws
aws-testing-cluster
monitoring
future-docs

View File

@ -28,7 +28,7 @@ You can view the Grafana dashboard in your web browser at:
(You may want to replace `localhost` with another hostname in that URL, e.g. the hostname of a remote monitoring server.)
The login and password are `admin` by default. If BigchainDB is running and processing transactions, you should see analytics—if not, [start BigchainDB](../nodes/setup-run-node.html#run-bigchaindb) and load some test transactions:
The login and password are `admin` by default. If BigchainDB is running and processing transactions, you should see analytics—if not, [start BigchainDB](../dev-and-test/setup-run-node.html#run-bigchaindb) and load some test transactions:
```text
$ bigchaindb load
```

View File

@ -1,6 +1,6 @@
# Set Up a Federation
This section is about how to set up a BigchainDB _federation_, where each node is operated by a different operator. If you want to set up and run a BigchainDB cluster on AWS (where all nodes are operated by you), then see [the section about that](deploy-on-aws.html).
This section is about how to set up a BigchainDB _federation_, where each node is operated by a different operator. If you want to set up and run a testing cluster on AWS (where all nodes are operated by you), then see [the section about that](aws-testing-cluster.html).
## Initial Checklist
@ -19,8 +19,9 @@ The federation must decide some things before setting up the initial cluster (in
2. What will the replication factor be? (It must be 3 or more for [RethinkDB failover](https://rethinkdb.com/docs/failover/) to work.)
3. Which node will be responsible for sending the commands to configure the RethinkDB database?
Once those things have been decided, each node operator can begin setting up their BigchainDB node.
The steps to set up a cluster node are outlined in the section titled [Set Up and Run a Node](../nodes/setup-run-node.html). Each node operator will eventually need two pieces of information from all other nodes in the federation:
Once those things have been decided, each node operator can begin [setting up their BigchainDB (production) node](../prod-node-setup-mgmt/index.html).
Each node operator will eventually need two pieces of information from all other nodes in the federation:
1. Their RethinkDB hostname, e.g. `rdb.farm2.organization.org`
2. Their BigchainDB public key, e.g. `Eky3nkbxDTMgkmiJC8i5hKyVFiAQNmPP4a2G4JdDxJCK`

View File

@ -0,0 +1,11 @@
.. You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Develop & Test BigchainDB
=========================
.. toctree::
:maxdepth: 1
setup-run-node
running-unit-tests

View File

@ -0,0 +1,30 @@
# Set Up & Run a Dev/Test Node
This page explains how to set up a minimal local BigchainDB node for development and testing purposes.
The BigchainDB core dev team develops BigchainDB on recent Ubuntu and Fedora distributions, so we recommend you use one of those. BigchainDB Server doesn't work on Windows or Mac OS X (unless you use a VM or containers).
First, read through the BigchainDB [CONTRIBUTING.md file](https://github.com/bigchaindb/bigchaindb/blob/master/CONTRIBUTING.md). It outlines the steps to set up a machine for developing and testing BigchainDB.
Next, create a default BigchainDB config file (in `$HOME/.bigchaindb`):
```text
bigchaindb -y configure
```
Note: [The BigchainDB CLI](../server-reference/bigchaindb-cli.html) and the [BigchainDB Configuration Settings](../server-reference/configuration.html) are documented elsewhere. (Click the links.)
Start RethinkDB using:
```text
rethinkdb
```
You can verify that RethinkDB is running by opening the RethinkDB web interface in your web browser. It should be at [http://localhost:8080/](http://localhost:8080/).
To run BigchainDB Server, do:
```text
bigchaindb start
```
You can [run all the unit tests](running-unit-tests.html) to test your installation.
The BigchainDB [CONTRIBUTING.md file](https://github.com/bigchaindb/bigchaindb/blob/master/CONTRIBUTING.md) has more details about how to contribute.

View File

@ -9,7 +9,7 @@ One can also interact with a BigchainDB node via other APIs, including the HTTP
## Getting Started
First, make sure you have RethinkDB and BigchainDB _installed and running_, i.e. you [installed them](setup-run-node.html) and you ran:
First, make sure you have RethinkDB and BigchainDB _installed and running_, i.e. you [installed them](../dev-and-test/setup-run-node.html) and you ran:
```text
$ rethinkdb
$ bigchaindb configure

View File

@ -9,9 +9,13 @@ Table of Contents
introduction
quickstart
node-cluster-fed
nodes/index
clusters-feds/index
dev-and-test/index
prod-node-setup-mgmt/index
server-reference/index
drivers-clients/index
clusters-feds/index
topic-guides/index
release-notes
appendices/index

View File

@ -8,9 +8,9 @@ You can read about the motivations, goals and high-level architecture in the [Bi
## Setup Instructions for Various Cases
* [Set up a stand-alone BigchainDB node for learning and experimenting: Quickstart](quickstart.html)
* [Set up and run a dev/test node](dev-and-test/setup-run-node.html)
* [Deploy a testing cluster on AWS](clusters-feds/aws-testing-cluster.html)
* [Set up and run a federation](clusters-feds/set-up-a-federation.html) (i.e. an organization with a BigchainDB cluster)
* To set up a stand-alone node so you can help contribute to the development of BigchainDB Server, see [the CONTRIBUTING.md file](https://github.com/bigchaindb/bigchaindb/blob/master/CONTRIBUTING.md)
* [Deploy a cluster on AWS](clusters-feds/deploy-on-aws.html)
(Instructions for setting up a client will be provided once there's a public testnet.)

View File

@ -1,13 +1,13 @@
# Nodes, Clusters & Federations
A **BigchainDB node** is a server or set of closely-linked servers running RethinkDB Server, BigchainDB Server, and other BigchainDB-related software. Each node is controlled by one person or organization.
A **BigchainDB node** is a machine or set of closely-linked machines running RethinkDB Server, BigchainDB Server, and related software. (A "machine" might be a bare-metal server, a virtual machine or a container.) Each node is controlled by one person or organization.
A set of BigchainDB nodes can connect to each other to form a **cluster**. Each node in the cluster runs the same software. A cluster contains one logical RethinkDB datastore. A cluster may have additional servers to do things such as cluster monitoring.
A set of BigchainDB nodes can connect to each other to form a **cluster**. Each node in the cluster runs the same software. A cluster contains one logical RethinkDB datastore. A cluster may have additional machines to do things such as cluster monitoring.
The people and organizations that run the nodes in a cluster belong to a **federation** (i.e. another organization). A federation must have some sort of governance structure to make decisions. If a cluster is run by a single company, then the federation is just that company.
**What's the Difference Between a Cluster and a Federation?**
A cluster is just a bunch of connected nodes (computers). A cluster might be operated by just one person. A federation is an organization which has a cluster, and where each node in the cluster has a different operator.
A cluster is just a bunch of connected nodes. A federation is an organization which has a cluster, and where each node in the cluster has a different operator.
Confusingly, we sometimes call a federation's cluster its "federation." You can probably tell what we mean from context.

View File

@ -7,9 +7,5 @@ BigchainDB Nodes
.. toctree::
:maxdepth: 1
node-components
node-requirements
setup-run-node
run-with-docker
running-unit-tests
configuration
bigchaindb-cli

View File

@ -0,0 +1,17 @@
# Node Components
A BigchainDB node must include, at least:
* BigchainDB Server and
* RethinkDB Server.
When doing development and testing, it's common to install both on the same machine, but in a production environment, it may make more sense to install them on separate machines.
In a production environment, a BigchainDB node can have several other components, including:
* nginx or similar, as a reverse proxy and/or load balancer for the web server
* An NTP daemon running on all machines running BigchainDB code, and possibly other machines
* A RethinkDB proxy server
* Monitoring software, to monitor all the machines in the node
* Maybe more, e.g. a configuration management server and agents on all machines

View File

@ -1,21 +1,17 @@
# Node Requirements (OS, Memory, Storage, etc.)
For now, we will assume that a BigchainDB node is just one server. In the future, a node may consist of several closely-coupled servers run by one node operator (federation member).
## OS Requirements
* RethinkDB Server [will run on any modern OS](https://www.rethinkdb.com/docs/install/). Note that the Fedora package isn't officially supported. Also, official support for Windows is fairly recent ([April 2016](https://rethinkdb.com/blog/2.3-release/)).
* Python 3.4+ [will run on any modern OS](https://docs.python.org/3.4/using/index.html).
* [Some functionality in the `multiprocessing` package doesn't work on OS X](https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.qsize). You can still use Mac OS X if you use Docker or a virtual machine.
* ZeroMQ [will run on any modern OS](http://zeromq.org/area:download).
* BigchainDB Server requires Python 3.4+, which [will run on any modern OS](https://docs.python.org/3.4/using/index.html).
* BigchainDB Server uses the Python `multiprocessing` package, and [some functionality in the `multiprocessing` package doesn't work on OS X](https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.qsize). You can still use Mac OS X if you use Docker or a virtual machine.
The BigchainDB core dev team uses Ubuntu 14.04 or Fedora 23.
The BigchainDB core dev team uses Ubuntu 14.04, Ubuntu 16.04, Fedora 23, and Fedora 24.
We don't test BigchainDB on Windows or Mac OS X, but you can try.
* If you run into problems on Windows, then you may want to try using Vagrant. One of our community members ([@Mec-Is](https://github.com/Mec-iS)) wrote [a page about how to install BigchainDB on a VM with Vagrant](https://gist.github.com/Mec-iS/b84758397f1b21f21700).
* If you have Mac OS X and want to experiment with BigchainDB, then you could do that [using Docker](run-with-docker.html).
* If you have Mac OS X and want to experiment with BigchainDB, then you could do that [using Docker](../appendices/run-with-docker.html).
## Storage Requirements

View File

@ -1,208 +0,0 @@
# Set Up and Run a Cluster Node
If you want to set up a BigchainDB node that's intended to be one of the nodes in a BigchainDB cluster (i.e. where each node is operated by a different member of a federation), then this page is for you; otherwise, see [elsewhere](../introduction.html).
## Get a Server
The first step is to get a server (or equivalent) which meets [the requirements for a BigchainDB node](node-requirements.html).
## Secure Your Server
The steps that you must take to secure your server depend on your server OS and where your server is physically located. There are many articles and books about how to secure a server. Here we just cover special considerations when securing a BigchainDB node.
There are some [notes on BigchainDB-specific firewall setup](../appendices/firewall-notes.html) in the Appendices.
## Sync Your System Clock
A BigchainDB node uses its system clock to generate timestamps for blocks and votes, so that clock should be kept in sync with some standard clock(s). The standard way to do that is to run an NTP daemon (Network Time Protocol daemon) on the node. (You could also use tlsdate, which uses TLS timestamps rather than NTP, but don't: it's not very accurate and it will break with TLS 1.3, which removes the timestamp.)
NTP is a standard protocol. There are many NTP daemons implementing it. We don't recommend a particular one. On the contrary, we recommend that different nodes in a federation run different NTP daemons, so that a problem with one daemon won't affect all nodes.
Please see the [notes on NTP daemon setup](../appendices/ntp-notes.html) in the Appendices.
## Set Up Storage for RethinkDB Data
Below are some things to consider when setting up storage for the RethinkDB data. The Appendices have a [section with concrete examples](../appendices/example-rethinkdb-storage-setups.html).
We suggest you set up a separate storage "device" (partition, RAID array, or logical volume) to store the RethinkDB data. Here are some questions to ask:
* How easy will it be to add storage in the future? Will I have to shut down my server?
* How big can the storage get? (Remember that [RAID](https://en.wikipedia.org/wiki/RAID) can be used to make several physical drives look like one.)
* How fast can it read & write data? How many input/output operations per second (IOPS)?
* How does IOPS scale as more physical hard drives are added?
* What's the latency?
* What's the reliability? Is there replication?
* What's in the Service Level Agreement (SLA), if applicable?
* What's the cost?
There are many options and tradeoffs. Don't forget to look into Amazon Elastic Block Store (EBS) and Amazon Elastic File System (EFS), or their equivalents from other providers.
**Storage Notes Specific to RethinkDB**
* The RethinkDB storage engine has a number of SSD optimizations, so you _can_ benefit from using SSDs. ([source](https://www.rethinkdb.com/docs/architecture/))
* If you want a RethinkDB cluster to store an amount of data D, with a replication factor of R (on every table), and the cluster has N nodes, then each node will need to be able to store R×D/N data.
* RethinkDB tables can have [at most 64 shards](https://rethinkdb.com/limitations/). For example, if you have only one table and more than 64 nodes, some nodes won't have the primary of any shard, i.e. they will have replicas only. In other words, once you pass 64 nodes, adding more nodes won't provide more storage space for new data. If the biggest single-node storage available is d, then the most you can store in a RethinkDB cluster is < 64×d: accomplished by putting one primary shard in each of 64 nodes, with all replica shards on other nodes. (This is assuming one table. If there are T tables, then the most you can store is < 64×d×T.)
* When you set up storage for your RethinkDB data, you may have to select a filesystem. (Sometimes, the filesystem is already decided by the choice of storage.) We recommend using a filesystem that supports direct I/O (Input/Output). Many compressed or encrypted file systems don't support direct I/O. The ext4 filesystem supports direct I/O (but be careful: if you enable the data=journal mode, then direct I/O support will be disabled; the default is data=ordered). If your chosen filesystem supports direct I/O and you're using Linux, then you don't need to do anything to request or enable direct I/O. RethinkDB does that.
<p style="background-color: lightgrey;">What is direct I/O? It allows RethinkDB to write directly to the storage device (or use its own in-memory caching mechanisms), rather than relying on the operating system's file read and write caching mechanisms. (If you're using Linux, a write-to-file normally writes to the in-memory Page Cache first; only later does that Page Cache get flushed to disk. The Page Cache is also used when reading files.)</p>
* RethinkDB stores its data in a specific directory. You can tell RethinkDB _which_ directory using the RethinkDB config file, as explained below. In this documentation, we assume the directory is `/data`. If you set up a separate device (partition, RAID array, or logical volume) to store the RethinkDB data, then mount that device on `/data`.
## Install RethinkDB Server
If you don't already have RethinkDB Server installed, you must install it. The RethinkDB documentation has instructions for [how to install RethinkDB Server on a variety of operating systems](http://rethinkdb.com/docs/install/).
## Configure RethinkDB Server
Create a RethinkDB configuration file (text file) named `instance1.conf` with the following contents (explained below):
```text
directory=/data
bind=all
direct-io
# Replace node?_hostname with actual node hostnames below, e.g. rdb.examples.com
join=node0_hostname:29015
join=node1_hostname:29015
join=node2_hostname:29015
# continue until there's a join= line for each node in the federation
```
* `directory=/data` tells the RethinkDB node to store its share of the database data in `/data`.
* `bind=all` binds RethinkDB to all local network interfaces (e.g. loopback, Ethernet, wireless, whatever is available), so it can communicate with the outside world. (The default is to bind only to local interfaces.)
* `direct-io` tells RethinkDB to use direct I/O (explained earlier). Only include this line if your file system supports direct I/O.
* `join=hostname:29015` lines: A cluster node needs to find out the hostnames of all the other nodes somehow. You _could_ designate one node to be the one that every other node asks, and put that node's hostname in the config file, but that wouldn't be very decentralized. Instead, we include _every_ node in the list of nodes-to-ask.
If you're curious about the RethinkDB config file, there's [a RethinkDB documentation page about it](https://www.rethinkdb.com/docs/config-file/). The [explanations of the RethinkDB command-line options](https://rethinkdb.com/docs/cli-options/) are another useful reference.
TODO: Explain how to configure the RethinkDB cluster to be more secure. For now, see the [RethinkDB documentation on securing your cluster](https://rethinkdb.com/docs/security/).
## Install Python 3.4+
If you don't already have it, then you should [install Python 3.4+](https://www.python.org/downloads/).
If you're testing or developing BigchainDB on a stand-alone node, then you should probably create a Python 3.4+ virtual environment and activate it (e.g. using virtualenv or conda). Later we will install several Python packages and you probably only want those installed in the virtual environment.
## Install BigchainDB Server
BigchainDB Server has some OS-level dependencies that must be installed.
On Ubuntu 14.04, we found that the following was enough:
```text
sudo apt-get update
sudo apt-get install g++ python3-dev
```
On Fedora 23, we found that the following was enough (tested in February 2015):
```text
sudo dnf update
sudo dnf install gcc-c++ redhat-rpm-config python3-devel
```
(If you're using a version of Fedora before version 22, you may have to use `yum` instead of `dnf`.)
With OS-level dependencies installed, you can install BigchainDB Server with `pip` or from source.
### How to Install BigchainDB with pip
BigchainDB (i.e. both the Server and the officially-supported drivers) is distributed as a Python package on PyPI so you can install it using `pip`. First, make sure you have an up-to-date Python 3.4+ version of `pip` installed:
```text
pip -V
```
If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.4, then you must install a `pip` version associated with Python 3.4+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).
On Ubuntu 14.04, we found that this works:
```text
sudo apt-get install python3-pip
```
That should install a Python 3 version of `pip` named `pip3`. If that didn't work, then another way to get `pip3` is to do `sudo apt-get install python3-setuptools` followed by `sudo easy_install3 pip`.
You can upgrade `pip` (`pip3`) and `setuptools` to the latest versions using:
```text
pip3 install --upgrade pip setuptools
pip3 -V
```
Now you can install BigchainDB Server (and officially-supported BigchainDB drivers) using:
```text
pip3 install bigchaindb
```
(If you're not in a virtualenv and you want to install bigchaindb system-wide, then put `sudo` in front.)
Note: You can use `pip3` to upgrade the `bigchaindb` package to the latest version using `pip3 install --upgrade bigchaindb`.
### How to Install BigchainDB from Source
If you want to install BigchainDB from source because you want to use the very latest bleeding-edge code, clone the public repository:
```text
git clone git@github.com:bigchaindb/bigchaindb.git
cd bigchaindb
python setup.py install
```
## Configure BigchainDB Server
Start by creating a default BigchainDB config file:
```text
bigchaindb -y configure
```
(Documentation for the `bigchaindb` command is in the section on [the BigchainDB Command Line Interface (CLI)](bigchaindb-cli.html).)
Edit the created config file:
* Open `$HOME/.bigchaindb` (the created config file) in your text editor.
* Change `"server": {"bind": "localhost:9984", ... }` to `"server": {"bind": "0.0.0.0:9984", ... }`. This makes it so traffic can come from any IP address to port 9984 (the HTTP Client-Server API port).
* Change `"api_endpoint": "http://localhost:9984/api/v1"` to `"api_endpoint": "http://your_api_hostname:9984/api/v1"`
* Change `"keyring": []` to `"keyring": ["public_key_of_other_node_A", "public_key_of_other_node_B", "..."]` i.e. a list of the public keys of all the other nodes in the federation. The keyring should _not_ include your node's public key.
For more information about the BigchainDB config file, see [Configuring a BigchainDB Node](configuration.html).
## Run RethinkDB Server
Start RethinkDB using:
```text
rethinkdb --config-file path/to/instance1.conf
```
except replace the path with the actual path to `instance1.conf`.
Note: It's possible to [make RethinkDB start at system startup](https://www.rethinkdb.com/docs/start-on-startup/).
You can verify that RethinkDB is running by opening the RethinkDB web interface in your web browser. It should be at `http://rethinkdb-hostname:8080/`. If you're running RethinkDB on localhost, that would be [http://localhost:8080/](http://localhost:8080/).
## Run BigchainDB Server
After all node operators have started RethinkDB, but before they start BigchainDB, one designated node operator must configure the RethinkDB database by running the following commands:
```text
bigchaindb init
bigchaindb set-shards numshards
bigchaindb set-replicas numreplicas
```
where:
* `bigchaindb init` creates the database within RethinkDB, the tables, the indexes, and the genesis block.
* `numshards` should be set to the number of nodes in the initial cluster.
* `numreplicas` should be set to the database replication factor decided by the federation. It must be 3 or more for [RethinkDB failover](https://rethinkdb.com/docs/failover/) to work.
Once the RethinkDB database is configured, every node operator can start BigchainDB using:
```text
bigchaindb start
```

View File

@ -0,0 +1,10 @@
.. You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Production Node Setup & Management
==================================
.. toctree::
:maxdepth: 1
install-chef-dk

View File

@ -0,0 +1,5 @@
# Install Chef Dev Kit
TODO

View File

@ -1,6 +1,6 @@
# Quickstart
This page has instructions to set up a single stand-alone BigchainDB node for learning or experimenting. Instructions for other cases are [elsewhere](introduction.html). We will assume you're using Ubuntu 14.04 or similar. If you're not using Linux, then you might try [running BigchainDB with Docker](nodes/run-with-docker.html).
This page has instructions to set up a single stand-alone BigchainDB node for learning or experimenting. Instructions for other cases are [elsewhere](introduction.html). We will assume you're using Ubuntu 14.04 or similar. If you're not using Linux, then you might try [running BigchainDB with Docker](appendices/run-with-docker.html).
A. [Install RethinkDB Server](https://rethinkdb.com/docs/install/ubuntu/)
@ -15,16 +15,15 @@ sudo apt-get update
sudo apt-get install g++ python3-dev
```
D. Get the latest version of pip, wheel and setuptools:
D. Get the latest version of pip and setuptools:
```text
sudo apt-get install python3-setuptools
sudo easy_install3 pip
pip3 install --upgrade pip wheel setuptools
sudo apt-get install python3-pip
sudo pip3 install --upgrade pip setuptools
```
E. Install the `bigchaindb` Python package from PyPI:
```text
sudo pip install bigchaindb
sudo pip3 install bigchaindb
```
F. Configure and run BigchainDB:

View File

@ -1,4 +1,7 @@
# The BigchainDB Command Line Interface (CLI)
# BigchainDB Command Line Interface (CLI)
**Note: At the time of writing, BigchainDB Server and our BigchainDB client are combined, so the BigchainDB CLI includes some server-specific commands and some client-specific commands (e.g. `bigchaindb load`). Soon, BigchainDB Server will be separate from all BigchainDB clients, and they'll all have different CLIs.**
The command-line command to interact with BigchainDB is `bigchaindb`.

View File

@ -1,4 +1,6 @@
# Node Configuration Settings
# BigchainDB Configuration Settings
**Note: At the time of writing, BigchainDB Server code and BigchainDB Python driver code are mixed together, so the following settings are the settings used by BigchainDB Server and also by clients written using the Python driver code. Soon, the code will be separated into server, driver and shared modules, so that BigchainDB Server and BigchainDB clients will have different configuration settings.**
The value of each configuration setting is determined according to the following rules:

View File

@ -0,0 +1,11 @@
.. You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
BigchainDB Settings and CLI
===========================
.. toctree::
:maxdepth: 1
configuration
bigchaindb-cli

View File

@ -58,8 +58,8 @@ Assets can be mutable (changeable) or immutable. To change a mutable asset, you
```json
{
"id": "<hash of transaction, excluding signatures (see explanation)>",
"version": "<version number of the transaction model>",
"transaction": {
"version": "<version number of the transaction model>",
"fulfillments": ["<list of fulfillments>"],
"conditions": ["<list of conditions>"],
"operation": "<string>",
@ -75,8 +75,8 @@ Assets can be mutable (changeable) or immutable. To change a mutable asset, you
Here's some explanation of the contents of a transaction:
- `id`: The hash of everything inside the serialized `transaction` body (i.e. `fulfillments`, `conditions`, `operation`, `timestamp` and `data`; see below), with one wrinkle: for each fulfillment in `fulfillments`, `fulfillment` is set to `null`. The `id` is also the database primary key.
- `version`: Version number of the transaction model, so that software can support different transaction models.
- `transaction`:
- `version`: Version number of the transaction model, so that software can support different transaction models.
- `fulfillments`: List of fulfillments. Each _fulfillment_ contains a pointer to an unspent asset
and a _crypto fulfillment_ that satisfies a spending condition set on the unspent asset. A _fulfillment_
is usually a signature proving the ownership of the asset.

View File

@ -1,5 +1,4 @@
import copy
import multiprocessing as mp
import time
import pytest
@ -8,7 +7,6 @@ import cryptoconditions as cc
import bigchaindb
from bigchaindb import crypto, exceptions, util
from bigchaindb.block import BlockDeleteRevert
@pytest.mark.skipif(reason='Some tests throw a ResourceWarning that might result in some weird '
@ -36,8 +34,8 @@ class TestBigchainApi(object):
def test_create_transaction_create(self, b, user_sk):
tx = b.create_transaction(b.me, user_sk, None, 'CREATE')
assert sorted(tx) == ['id', 'transaction', 'version']
assert sorted(tx['transaction']) == ['conditions', 'data', 'fulfillments', 'operation', 'timestamp']
assert sorted(tx) == ['id', 'transaction']
assert sorted(tx['transaction']) == ['conditions', 'data', 'fulfillments', 'operation', 'timestamp', 'version']
def test_create_transaction_with_unsupported_payload_raises(self, b):
with pytest.raises(TypeError):
@ -77,8 +75,8 @@ class TestBigchainApi(object):
tx = b.create_transaction(user_vk, b.me, input_tx, 'TRANSFER')
assert sorted(tx) == ['id', 'transaction', 'version']
assert sorted(tx['transaction']) == ['conditions', 'data', 'fulfillments', 'operation', 'timestamp']
assert sorted(tx) == ['id', 'transaction']
assert sorted(tx['transaction']) == ['conditions', 'data', 'fulfillments', 'operation', 'timestamp', 'version']
tx_signed = b.sign_transaction(tx, user_sk)
@ -607,45 +605,6 @@ class TestBlockValidation(object):
b.validate_block(block)
class TestBigchainBlock(object):
def test_revert_delete_block(self, b):
b.create_genesis_block()
block_1 = dummy_block()
block_2 = dummy_block()
block_3 = dummy_block()
b.write_block(block_1, durability='hard')
b.write_block(block_2, durability='hard')
b.write_block(block_3, durability='hard')
b.write_vote(b.vote(block_1['id'], b.get_last_voted_block()['id'], True))
b.write_vote(b.vote(block_2['id'], b.get_last_voted_block()['id'], True))
b.write_vote(b.vote(block_3['id'], b.get_last_voted_block()['id'], True))
q_revert_delete = mp.Queue()
reverter = BlockDeleteRevert(q_revert_delete)
# simulate changefeed
r.table('bigchain').get(block_2['id']).delete().run(b.conn)
q_revert_delete.put(block_2)
assert r.table('bigchain').get(block_2['id']).run(b.conn) is None
reverter.start()
time.sleep(1)
reverter.kill()
reverted_block_2 = r.table('bigchain').get(block_2['id']).run(b.conn)
assert reverted_block_2 == block_2
def test_duplicated_transactions(self):
pytest.skip('We may have duplicates in the initial_results and changefeed')
class TestMultipleInputs(object):
def test_transfer_single_owners_single_input(self, b, user_sk, user_vk, inputs):
# create a new user
@ -1167,7 +1126,7 @@ class TestFulfillmentMessage(object):
assert fulfillment_message['fulfillment']['input'] == original_fulfillment['input']
assert fulfillment_message['operation'] == tx['transaction']['operation']
assert fulfillment_message['timestamp'] == tx['transaction']['timestamp']
assert fulfillment_message['version'] == tx['version']
assert fulfillment_message['version'] == tx['transaction']['version']
@pytest.mark.usefixtures('inputs')
def test_fulfillment_message_transfer(self, b, user_vk):
@ -1190,7 +1149,7 @@ class TestFulfillmentMessage(object):
assert fulfillment_message['fulfillment']['input'] == original_fulfillment['input']
assert fulfillment_message['operation'] == tx['transaction']['operation']
assert fulfillment_message['timestamp'] == tx['transaction']['timestamp']
assert fulfillment_message['version'] == tx['version']
assert fulfillment_message['version'] == tx['transaction']['version']
def test_fulfillment_message_multiple_current_owners_multiple_new_owners_multiple_inputs(self, b, user_vk):
# create a new users
@ -1228,7 +1187,7 @@ class TestFulfillmentMessage(object):
assert fulfillment_message['fulfillment']['input'] == original_fulfillment['input']
assert fulfillment_message['operation'] == tx['transaction']['operation']
assert fulfillment_message['timestamp'] == tx['transaction']['timestamp']
assert fulfillment_message['version'] == tx['version']
assert fulfillment_message['version'] == tx['transaction']['version']
class TestTransactionMalleability(object):
@ -1250,7 +1209,7 @@ class TestTransactionMalleability(object):
assert b.is_valid_transaction(tx_changed) is False
tx_changed = copy.deepcopy(tx_signed)
tx_changed['version'] = '0'
tx_changed['transaction']['version'] = '0'
assert b.validate_fulfillments(tx_changed) is False
assert b.is_valid_transaction(tx_changed) is False

View File

@ -1,181 +0,0 @@
import time
import rethinkdb as r
import multiprocessing as mp
from bigchaindb.voter import Election
from bigchaindb import crypto, Bigchain
# Some util functions
def dummy_tx():
b = Bigchain()
tx = b.create_transaction(b.me, b.me, None, 'CREATE')
tx_signed = b.sign_transaction(tx, b.me_private)
return tx_signed
def dummy_block():
b = Bigchain()
block = b.create_block([dummy_tx()])
return block
class TestBlockElection(object):
def test_quorum(self, b):
# create a new block
test_block = dummy_block()
# simulate a federation with four voters
key_pairs = [crypto.generate_key_pair() for _ in range(4)]
test_federation = [Bigchain(public_key=key_pair[1], private_key=key_pair[0])
for key_pair in key_pairs]
# dummy block with test federation public keys as voters
test_block['block']['voters'] = [key_pair[1] for key_pair in key_pairs]
# fake "yes" votes
valid_vote = [member.vote(test_block['id'], 'abc', True)
for member in test_federation]
# fake "no" votes
invalid_vote = [member.vote(test_block['id'], 'abc', False)
for member in test_federation]
# fake "yes" votes with incorrect signatures
improperly_signed_valid_vote = [member.vote(test_block['id'], 'abc', True) for
member in test_federation]
[vote['vote'].update(this_should_ruin_things='lol')
for vote in improperly_signed_valid_vote]
# test unanimously valid block
r.table('votes').insert(valid_vote, durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_VALID
r.table('votes').delete().run(b.conn)
# test partial quorum situations
r.table('votes').insert(valid_vote[:2], durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_UNDECIDED
r.table('votes').delete().run(b.conn)
#
r.table('votes').insert(valid_vote[:3], durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_VALID
r.table('votes').delete().run(b.conn)
#
r.table('votes').insert(invalid_vote[:2], durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_INVALID
r.table('votes').delete().run(b.conn)
# test unanimously valid block with one improperly signed vote -- should still succeed
r.table('votes').insert(valid_vote[:3] + improperly_signed_valid_vote[3:], durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_VALID
r.table('votes').delete().run(b.conn)
# test unanimously valid block with two improperly signed votes -- should fail
r.table('votes').insert(valid_vote[:2] + improperly_signed_valid_vote[2:], durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_INVALID
r.table('votes').delete().run(b.conn)
# test block with minority invalid vote
r.table('votes').insert(invalid_vote[:1] + valid_vote[1:], durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_VALID
r.table('votes').delete().run(b.conn)
# test split vote
r.table('votes').insert(invalid_vote[:2] + valid_vote[2:], durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_INVALID
r.table('votes').delete().run(b.conn)
# test undecided
r.table('votes').insert(valid_vote[:2], durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_UNDECIDED
r.table('votes').delete().run(b.conn)
# change two voters in the block; the cast votes no longer match, so the block is reported invalid
test_block['block']['voters'][0] = 'abc'
test_block['block']['voters'][1] = 'abc'
r.table('votes').insert(valid_vote, durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_INVALID
def test_quorum_odd(self, b):
# test partial quorum situations for odd numbers of voters
# create a new block
test_block = dummy_block()
# simulate a federation with five voters
key_pairs = [crypto.generate_key_pair() for _ in range(5)]
test_federation = [Bigchain(public_key=key_pair[1], private_key=key_pair[0])
for key_pair in key_pairs]
# dummy block with test federation public keys as voters
test_block['block']['voters'] = [key_pair[1] for key_pair in key_pairs]
# fake "yes" votes
valid_vote = [member.vote(test_block['id'], 'abc', True)
for member in test_federation]
# fake "no" votes
invalid_vote = [member.vote(test_block['id'], 'abc', False)
for member in test_federation]
r.table('votes').insert(valid_vote[:2], durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_UNDECIDED
r.table('votes').delete().run(b.conn)
r.table('votes').insert(invalid_vote[:2], durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_UNDECIDED
r.table('votes').delete().run(b.conn)
r.table('votes').insert(valid_vote[:3], durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_VALID
r.table('votes').delete().run(b.conn)
r.table('votes').insert(invalid_vote[:3], durability='hard').run(b.conn)
assert b.block_election_status(test_block) == Bigchain.BLOCK_INVALID
r.table('votes').delete().run(b.conn)
def test_tx_rewritten_after_invalid(self, b, user_vk):
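# queue that feeds blocks with fresh votes into the Election consumer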
q_block_new_vote = mp.Queue()
# create blocks with transactions
tx1 = b.create_transaction(b.me, user_vk, None, 'CREATE')
tx2 = b.create_transaction(b.me, user_vk, None, 'CREATE')
test_block_1 = b.create_block([tx1])
test_block_2 = b.create_block([tx2])
# simulate a federation with four voters
key_pairs = [crypto.generate_key_pair() for _ in range(4)]
test_federation = [Bigchain(public_key=key_pair[1], private_key=key_pair[0])
for key_pair in key_pairs]
# add the federation public keys as voters on both blocks
test_block_1['block']['voters'] = [key_pair[1] for key_pair in key_pairs]
test_block_2['block']['voters'] = [key_pair[1] for key_pair in key_pairs]
# votes for block one
vote_1 = [member.vote(test_block_1['id'], 'abc', True)
for member in test_federation]
# votes for block two
vote_2 = [member.vote(test_block_2['id'], 'abc', True) for member in test_federation[:2]] + \
[member.vote(test_block_2['id'], 'abc', False) for member in test_federation[2:]]
# construct valid block
r.table('votes').insert(vote_1, durability='hard').run(b.conn)
q_block_new_vote.put(test_block_1)
# construct invalid block
r.table('votes').insert(vote_2, durability='hard').run(b.conn)
q_block_new_vote.put(test_block_2)
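# run the Election process briefly so it handles both queued blocks, then stop it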
election = Election(q_block_new_vote)
election.start()
time.sleep(1)
election.kill()
# tx1 was in a valid block, and should not be in the backlog
assert r.table('backlog').get(tx1['id']).run(b.conn) is None
# tx2 was in an invalid block and SHOULD be in the backlog
assert r.table('backlog').get(tx2['id']).run(b.conn)['id'] == tx2['id']

View File

@ -0,0 +1,127 @@
import time
import random
from bigchaindb import crypto, Bigchain
from unittest.mock import patch
import rethinkdb as r
from bigchaindb.pipelines import election
from multipipes import Pipe, Pipeline
def test_check_for_quorum_invalid(b, user_vk):
e = election.Election()
# create a block with a transaction
tx1 = b.create_transaction(b.me, user_vk, None, 'CREATE')
test_block = b.create_block([tx1])
# simulate a federation with four voters
key_pairs = [crypto.generate_key_pair() for _ in range(4)]
test_federation = [Bigchain(public_key=key_pair[1], private_key=key_pair[0])
for key_pair in key_pairs]
# add voters to block and write
test_block['block']['voters'] = [key_pair[1] for key_pair in key_pairs]
b.write_block(test_block)
# a 2-2 split vote, so the block cannot reach a valid majority
votes = [member.vote(test_block['id'], 'abc', True) for member in test_federation[:2]] + \
[member.vote(test_block['id'], 'abc', False) for member in test_federation[2:]]
# cast votes
r.table('votes').insert(votes, durability='hard').run(b.conn)
# since this block is now invalid, should pass to the next process
assert e.check_for_quorum(votes[-1]) == test_block
def test_check_for_quorum_valid(b, user_vk):
e = election.Election()
# create a block with a transaction
tx1 = b.create_transaction(b.me, user_vk, None, 'CREATE')
test_block = b.create_block([tx1])
# simulate a federation with four voters
key_pairs = [crypto.generate_key_pair() for _ in range(4)]
test_federation = [Bigchain(public_key=key_pair[1], private_key=key_pair[0])
for key_pair in key_pairs]
# add voters to block and write
test_block['block']['voters'] = [key_pair[1] for key_pair in key_pairs]
b.write_block(test_block)
# unanimous 'yes' votes for the block
votes = [member.vote(test_block['id'], 'abc', True)
for member in test_federation]
# cast votes
r.table('votes').insert(votes, durability='hard').run(b.conn)
# since this block is valid, should go nowhere
assert e.check_for_quorum(votes[-1]) is None
def test_check_requeue_transaction(b, user_vk):
e = election.Election()
# create a block with a transaction
tx1 = b.create_transaction(b.me, user_vk, None, 'CREATE')
test_block = b.create_block([tx1])
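# requeueing a block should copy its transactions back into the backlog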
e.requeue_transactions(test_block)
assert r.table('backlog').get(tx1['id']).run(b.conn) == tx1
@patch.object(Pipeline, 'start')
def test_start(mock_start):
# TODO: `election.start` is just a wrapper around `election.create_pipeline`,
# which is tested by `test_full_pipeline`.
# If anyone has better ideas on how to test this, please do a PR :)
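# For reference, a minimal sketch of what `election.start` is assumed to do,
# mirroring what `test_full_pipeline` wires up by hand (an assumption, not
# the actual implementation):
#
#     def start():
#         pipeline = create_pipeline()
#         pipeline.setup(indata=get_changefeed())
#         pipeline.start()
#         return pipeline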
election.start()
mock_start.assert_called_with()
def test_full_pipeline(b, user_vk):
outpipe = Pipe()
# write two blocks, 100 transactions each
txs = []
for i in range(100):
tx = b.create_transaction(b.me, user_vk, None, 'CREATE')
tx = b.sign_transaction(tx, b.me_private)
txs.append(tx)
valid_block = b.create_block(txs)
b.write_block(valid_block)
txs = []
for i in range(100):
tx = b.create_transaction(b.me, user_vk, None, 'CREATE')
tx = b.sign_transaction(tx, b.me_private)
txs.append(tx)
invalid_block = b.create_block(txs)
b.write_block(invalid_block)
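# wire the election pipeline to the votes changefeed and capture what it emits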
pipeline = election.create_pipeline()
pipeline.setup(indata=election.get_changefeed(), outdata=outpipe)
pipeline.start()
time.sleep(1)
# vote one block valid, one invalid
vote_valid = b.vote(valid_block['id'], 'abc', True)
vote_invalid = b.vote(invalid_block['id'], 'abc', False)
r.table('votes').insert(vote_valid, durability='hard').run(b.conn)
r.table('votes').insert(vote_invalid, durability='hard').run(b.conn)
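# wait for the pipeline to emit the losing block, then shut it down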
outpipe.get()
pipeline.terminate()
# only transactions from the invalid block should be returned to
# the backlog
assert r.table('backlog').count().run(b.conn) == 100
tx_from_block = {tx['id'] for tx in invalid_block['block']['transactions']}
tx_from_backlog = {tx['id'] for tx in r.table('backlog').run(b.conn)}
assert tx_from_block == tx_from_backlog

View File

@ -32,8 +32,8 @@ def mock_db_init_with_existing_db(monkeypatch):
@pytest.fixture
def mock_processes_start(monkeypatch):
from bigchaindb.processes import Processes
monkeypatch.setattr(Processes, 'start', lambda *args: None)
from bigchaindb import processes
monkeypatch.setattr(processes, 'start', lambda *args: None)
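# stubbing out processes.start keeps `bigchaindb start` from spawning real worker processes during tests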
@pytest.fixture

View File

@ -151,7 +151,7 @@ def test_create_tx_with_empty_inputs():
tx = create_tx(None, None, [], None)
assert 'id' in tx
assert 'transaction' in tx
assert 'version' in tx
assert 'version' in tx['transaction']
assert 'fulfillments' in tx['transaction']
assert 'conditions' in tx['transaction']
assert 'operation' in tx['transaction']