mirror of
https://github.com/bigchaindb/bigchaindb.git
synced 2024-10-13 13:34:05 +00:00
Merge branch 'master' into docs/1170/move-http-api-docs-to-top-level-section
This commit is contained in:
commit
59be30187f
@ -145,6 +145,13 @@ Once you accept and submit the CLA, we'll email you with further instructions. (
|
|||||||
|
|
||||||
Someone will then merge your branch or suggest changes. If we suggest changes, you won't have to open a new pull request, you can just push new code to the same branch (on `origin`) as you did before creating the pull request.
|
Someone will then merge your branch or suggest changes. If we suggest changes, you won't have to open a new pull request, you can just push new code to the same branch (on `origin`) as you did before creating the pull request.
|
||||||
|
|
||||||
|
### Tip: Upgrading All BigchainDB Dependencies
|
||||||
|
|
||||||
|
Over time, your versions of the Python packages used by BigchainDB will get out of date. You can upgrade them using:
|
||||||
|
```text
|
||||||
|
pip install --upgrade -e .[dev]
|
||||||
|
```
|
||||||
|
|
||||||
## Quick Links
|
## Quick Links
|
||||||
|
|
||||||
* [BigchainDB Community links](https://www.bigchaindb.com/community)
|
* [BigchainDB Community links](https://www.bigchaindb.com/community)
|
||||||
|
@ -8,3 +8,7 @@ class CriticalDoubleSpend(BigchainDBError):
|
|||||||
|
|
||||||
class CriticalDoubleInclusion(BigchainDBError):
|
class CriticalDoubleInclusion(BigchainDBError):
|
||||||
"""Data integrity error that requires attention"""
|
"""Data integrity error that requires attention"""
|
||||||
|
|
||||||
|
|
||||||
|
class CriticalDuplicateVote(BigchainDBError):
|
||||||
|
"""Data integrity error that requires attention"""
|
||||||
|
@ -41,9 +41,11 @@ SUBSCRIBER_LOGGING_CONFIG = {
|
|||||||
'level': logging.INFO,
|
'level': logging.INFO,
|
||||||
},
|
},
|
||||||
'file': {
|
'file': {
|
||||||
'class': 'logging.FileHandler',
|
'class': 'logging.handlers.RotatingFileHandler',
|
||||||
'filename': join(DEFAULT_LOG_DIR, 'bigchaindb.log'),
|
'filename': join(DEFAULT_LOG_DIR, 'bigchaindb.log'),
|
||||||
'mode': 'w',
|
'mode': 'w',
|
||||||
|
'maxBytes': 209715200,
|
||||||
|
'backupCount': 5,
|
||||||
'formatter': 'file',
|
'formatter': 'file',
|
||||||
'level': logging.INFO,
|
'level': logging.INFO,
|
||||||
},
|
},
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
import collections
|
import collections
|
||||||
|
|
||||||
from bigchaindb.common.schema import SchemaValidationError, validate_vote_schema
|
from bigchaindb.common.schema import SchemaValidationError, validate_vote_schema
|
||||||
|
from bigchaindb.exceptions import CriticalDuplicateVote
|
||||||
from bigchaindb.common.utils import serialize
|
from bigchaindb.common.utils import serialize
|
||||||
from bigchaindb.common.crypto import PublicKey
|
from bigchaindb.common.crypto import PublicKey
|
||||||
|
|
||||||
@ -33,7 +34,8 @@ class Voting:
|
|||||||
n_voters = len(eligible_voters)
|
n_voters = len(eligible_voters)
|
||||||
eligible_votes, ineligible_votes = \
|
eligible_votes, ineligible_votes = \
|
||||||
cls.partition_eligible_votes(votes, eligible_voters)
|
cls.partition_eligible_votes(votes, eligible_voters)
|
||||||
results = cls.count_votes(eligible_votes)
|
by_voter = cls.dedupe_by_voter(eligible_votes)
|
||||||
|
results = cls.count_votes(by_voter)
|
||||||
results['block_id'] = block['id']
|
results['block_id'] = block['id']
|
||||||
results['status'] = cls.decide_votes(n_voters, **results['counts'])
|
results['status'] = cls.decide_votes(n_voters, **results['counts'])
|
||||||
results['ineligible'] = ineligible_votes
|
results['ineligible'] = ineligible_votes
|
||||||
@ -60,38 +62,29 @@ class Voting:
|
|||||||
return eligible, ineligible
|
return eligible, ineligible
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def count_votes(cls, eligible_votes):
|
def dedupe_by_voter(cls, eligible_votes):
|
||||||
|
"""
|
||||||
|
Throw a critical error if there is a duplicate vote
|
||||||
|
"""
|
||||||
|
by_voter = {}
|
||||||
|
for vote in eligible_votes:
|
||||||
|
pubkey = vote['node_pubkey']
|
||||||
|
if pubkey in by_voter:
|
||||||
|
raise CriticalDuplicateVote(pubkey)
|
||||||
|
by_voter[pubkey] = vote
|
||||||
|
return by_voter
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def count_votes(cls, by_voter):
|
||||||
"""
|
"""
|
||||||
Given a list of eligible votes, (votes from known nodes that are listed
|
Given a list of eligible votes, (votes from known nodes that are listed
|
||||||
as voters), produce the number that say valid and the number that say
|
as voters), produce the number that say valid and the number that say
|
||||||
invalid.
|
invalid. Votes must agree on previous block, otherwise they become invalid.
|
||||||
|
|
||||||
* Detect if there are multiple votes from a single node and return them
|
|
||||||
in a separate "cheat" dictionary.
|
|
||||||
* Votes must agree on previous block, otherwise they become invalid.
|
|
||||||
|
|
||||||
note:
|
|
||||||
The sum of votes returned by this function does not necessarily
|
|
||||||
equal the length of the list of votes fed in. It may differ for
|
|
||||||
example if there are found to be multiple votes submitted by a
|
|
||||||
single voter.
|
|
||||||
"""
|
"""
|
||||||
prev_blocks = collections.Counter()
|
prev_blocks = collections.Counter()
|
||||||
cheat = []
|
|
||||||
malformed = []
|
malformed = []
|
||||||
|
|
||||||
# Group by pubkey to detect duplicate voting
|
for vote in by_voter.values():
|
||||||
by_voter = collections.defaultdict(list)
|
|
||||||
for vote in eligible_votes:
|
|
||||||
by_voter[vote['node_pubkey']].append(vote)
|
|
||||||
|
|
||||||
for pubkey, votes in by_voter.items():
|
|
||||||
if len(votes) > 1:
|
|
||||||
cheat.append(votes)
|
|
||||||
continue
|
|
||||||
|
|
||||||
vote = votes[0]
|
|
||||||
|
|
||||||
if not cls.verify_vote_schema(vote):
|
if not cls.verify_vote_schema(vote):
|
||||||
malformed.append(vote)
|
malformed.append(vote)
|
||||||
continue
|
continue
|
||||||
@ -111,7 +104,6 @@ class Voting:
|
|||||||
'n_valid': n_valid,
|
'n_valid': n_valid,
|
||||||
'n_invalid': len(by_voter) - n_valid,
|
'n_invalid': len(by_voter) - n_valid,
|
||||||
},
|
},
|
||||||
'cheat': cheat,
|
|
||||||
'malformed': malformed,
|
'malformed': malformed,
|
||||||
'previous_block': prev_block,
|
'previous_block': prev_block,
|
||||||
'other_previous_block': dict(prev_blocks),
|
'other_previous_block': dict(prev_blocks),
|
||||||
|
@ -1,21 +1,21 @@
|
|||||||
# Terminology
|
# Terminology
|
||||||
|
|
||||||
There is some specialized terminology associated with BigchainDB. To get started, you should at least know what what we mean by a BigchainDB *node*, *cluster* and *consortium*.
|
There is some specialized terminology associated with BigchainDB. To get started, you should at least know the following:
|
||||||
|
|
||||||
|
|
||||||
## Node
|
## BigchainDB Node
|
||||||
|
|
||||||
A **BigchainDB node** is a machine or set of closely-linked machines running RethinkDB/MongoDB Server, BigchainDB Server, and related software. (A "machine" might be a bare-metal server, a virtual machine or a container.) Each node is controlled by one person or organization.
|
A **BigchainDB node** is a machine or set of closely-linked machines running RethinkDB/MongoDB Server, BigchainDB Server, and related software. Each node is controlled by one person or organization.
|
||||||
|
|
||||||
|
|
||||||
## Cluster
|
## BigchainDB Cluster
|
||||||
|
|
||||||
A set of BigchainDB nodes can connect to each other to form a **cluster**. Each node in the cluster runs the same software. A cluster contains one logical RethinkDB datastore. A cluster may have additional machines to do things such as cluster monitoring.
|
A set of BigchainDB nodes can connect to each other to form a **BigchainDB cluster**. Each node in the cluster runs the same software. A cluster contains one logical RethinkDB/MongoDB datastore. A cluster may have additional machines to do things such as cluster monitoring.
|
||||||
|
|
||||||
|
|
||||||
## Consortium
|
## BigchainDB Consortium
|
||||||
|
|
||||||
The people and organizations that run the nodes in a cluster belong to a **consortium** (i.e. another organization). A consortium must have some sort of governance structure to make decisions. If a cluster is run by a single company, then the "consortium" is just that company.
|
The people and organizations that run the nodes in a cluster belong to a **BigchainDB consortium** (i.e. another organization). A consortium must have some sort of governance structure to make decisions. If a cluster is run by a single company, then the "consortium" is just that company.
|
||||||
|
|
||||||
**What's the Difference Between a Cluster and a Consortium?**
|
**What's the Difference Between a Cluster and a Consortium?**
|
||||||
|
|
||||||
|
Binary file not shown.
Before Width: | Height: | Size: 82 KiB After Width: | Height: | Size: 38 KiB |
@ -18,7 +18,7 @@ pip install awscli
|
|||||||
|
|
||||||
## Create an AWS Access Key
|
## Create an AWS Access Key
|
||||||
|
|
||||||
The next thing you'll need is an AWS access key. If you don't have one, you can create one using the [instructions in the AWS documentation](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html). You should get an access key ID (e.g. AKIAIOSFODNN7EXAMPLE) and a secret access key (e.g. wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY).
|
The next thing you'll need is AWS access keys (access key ID and secret access key). If you don't have those, see [the AWS documentation about access keys](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
|
||||||
|
|
||||||
You should also pick a default AWS region name (e.g. `eu-central-1`). That's where your cluster will run. The AWS documentation has [a list of them](http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region).
|
You should also pick a default AWS region name (e.g. `eu-central-1`). That's where your cluster will run. The AWS documentation has [a list of them](http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region).
|
||||||
|
|
||||||
|
@ -1,5 +0,0 @@
|
|||||||
#########
|
|
||||||
Consensus
|
|
||||||
#########
|
|
||||||
|
|
||||||
.. automodule:: bigchaindb.consensus
|
|
101
docs/server/source/appendices/docker-on-mac.md
Normal file
101
docs/server/source/appendices/docker-on-mac.md
Normal file
@ -0,0 +1,101 @@
|
|||||||
|
# Run BigchainDB with Docker On Mac
|
||||||
|
|
||||||
|
**NOT for Production Use**
|
||||||
|
|
||||||
|
Those developing on Mac can follow this document to run BigchainDB in docker
|
||||||
|
containers for a quick dev setup.
|
||||||
|
Running BigchainDB on Mac (Docker or otherwise) is not officially supported.
|
||||||
|
|
||||||
|
Support is very much limited as there are certain things that work differently
|
||||||
|
in Docker for Mac than Docker for other platforms.
|
||||||
|
Also, we do not use mac for our development and testing. :)
|
||||||
|
|
||||||
|
This page may not be up to date with various settings and docker updates at
|
||||||
|
all the times.
|
||||||
|
|
||||||
|
These steps work as of this writing (2017.Mar.09) and might break in the
|
||||||
|
future with updates to Docker for mac.
|
||||||
|
Community contribution to make BigchainDB run on Docker for Mac will always be
|
||||||
|
welcome.
|
||||||
|
|
||||||
|
|
||||||
|
## Prerequisite
|
||||||
|
|
||||||
|
Install Docker for Mac.
|
||||||
|
|
||||||
|
## (Optional) For a clean start
|
||||||
|
|
||||||
|
1. Stop all BigchainDB and RethinkDB/MongoDB containers.
|
||||||
|
2. Delete all BigchainDB docker images.
|
||||||
|
3. Delete the ~/bigchaindb_docker folder.
|
||||||
|
|
||||||
|
|
||||||
|
## Pull the images
|
||||||
|
|
||||||
|
Pull the bigchaindb and other required docker images from docker hub.
|
||||||
|
|
||||||
|
```text
|
||||||
|
docker pull bigchaindb/bigchaindb:master
|
||||||
|
docker pull [rethinkdb:2.3|mongo:3.4.1]
|
||||||
|
```
|
||||||
|
|
||||||
|
## Create the BigchainDB configuration file on Mac
|
||||||
|
```text
|
||||||
|
docker run \
|
||||||
|
--rm \
|
||||||
|
--volume $HOME/bigchaindb_docker:/data \
|
||||||
|
bigchaindb/bigchaindb:master \
|
||||||
|
-y configure \
|
||||||
|
[mongodb|rethinkdb]
|
||||||
|
```
|
||||||
|
|
||||||
|
To ensure that BigchainDB connects to the backend database bound to the virtual
|
||||||
|
interface `172.17.0.1`, you must edit the BigchainDB configuration file
|
||||||
|
(`~/bigchaindb_docker/.bigchaindb`) and change database.host from `localhost`
|
||||||
|
to `172.17.0.1`.
|
||||||
|
|
||||||
|
|
||||||
|
## Run the backend database on Mac
|
||||||
|
|
||||||
|
From v0.9 onwards, you can run RethinkDB or MongoDB.
|
||||||
|
|
||||||
|
We use the virtual interface created by the Docker daemon to allow
|
||||||
|
communication between the BigchainDB and database containers.
|
||||||
|
It has an IP address of 172.17.0.1 by default.
|
||||||
|
|
||||||
|
You can also use docker host networking or bind to your primary (eth)
|
||||||
|
interface, if needed.
|
||||||
|
|
||||||
|
### For RethinkDB backend
|
||||||
|
```text
|
||||||
|
docker run \
|
||||||
|
--name=rethinkdb \
|
||||||
|
--publish=28015:28015 \
|
||||||
|
--publish=8080:8080 \
|
||||||
|
--restart=always \
|
||||||
|
--volume $HOME/bigchaindb_docker:/data \
|
||||||
|
rethinkdb:2.3
|
||||||
|
```
|
||||||
|
|
||||||
|
### For MongoDB backend
|
||||||
|
```text
|
||||||
|
docker run \
|
||||||
|
--name=mongodb \
|
||||||
|
--publish=27017:27017 \
|
||||||
|
--restart=always \
|
||||||
|
--volume=$HOME/bigchaindb_docker/db:/data/db \
|
||||||
|
--volume=$HOME/bigchaindb_docker/configdb:/data/configdb \
|
||||||
|
mongo:3.4.1 --replSet=bigchain-rs
|
||||||
|
```
|
||||||
|
|
||||||
|
### Run BigchainDB on Mac
|
||||||
|
```text
|
||||||
|
docker run \
|
||||||
|
--name=bigchaindb \
|
||||||
|
--publish=9984:9984 \
|
||||||
|
--restart=always \
|
||||||
|
--volume=$HOME/bigchaindb_docker:/data \
|
||||||
|
bigchaindb/bigchaindb \
|
||||||
|
start
|
||||||
|
```
|
||||||
|
|
@ -1,25 +0,0 @@
|
|||||||
# Example RethinkDB Storage Setups
|
|
||||||
|
|
||||||
## Example Amazon EC2 Setups
|
|
||||||
|
|
||||||
We have some scripts for [deploying a _test_ BigchainDB cluster on AWS](../clusters-feds/aws-testing-cluster.html). Those scripts include command sequences to set up storage for RethinkDB.
|
|
||||||
In particular, look in the file [/deploy-cluster-aws/fabfile.py](https://github.com/bigchaindb/bigchaindb/blob/master/deploy-cluster-aws/fabfile.py), under `def prep_rethinkdb_storage(USING_EBS)`. Note that there are two cases:
|
|
||||||
|
|
||||||
1. **Using EBS ([Amazon Elastic Block Store](https://aws.amazon.com/ebs/)).** This is always an option, and for some instance types ("EBS-only"), it's the only option.
|
|
||||||
2. **Using an "instance store" volume provided with an Amazon EC2 instance.** Note that our scripts only use one of the (possibly many) volumes in the instance store.
|
|
||||||
|
|
||||||
There's some explanation of the steps in the [Amazon EC2 documentation about making an Amazon EBS volume available for use](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html).
|
|
||||||
|
|
||||||
You shouldn't use an EC2 "instance store" to store RethinkDB data for a production node, because it's not replicated and it's only intended for temporary, ephemeral data. If the associated instance crashes, is stopped, or is terminated, the data in the instance store is lost forever. Amazon EBS storage is replicated, has incremental snapshots, and is low-latency.
|
|
||||||
|
|
||||||
|
|
||||||
## Example Using Amazon EFS
|
|
||||||
|
|
||||||
TODO
|
|
||||||
|
|
||||||
|
|
||||||
## Other Examples?
|
|
||||||
|
|
||||||
TODO
|
|
||||||
|
|
||||||
Maybe RAID, ZFS, ... (over EBS volumes, i.e. a DIY Amazon EFS)
|
|
@ -10,10 +10,10 @@ Appendices
|
|||||||
install-os-level-deps
|
install-os-level-deps
|
||||||
install-latest-pip
|
install-latest-pip
|
||||||
run-with-docker
|
run-with-docker
|
||||||
|
docker-on-mac
|
||||||
json-serialization
|
json-serialization
|
||||||
cryptography
|
cryptography
|
||||||
the-Bigchain-class
|
the-Bigchain-class
|
||||||
consensus
|
|
||||||
pipelines
|
pipelines
|
||||||
backend
|
backend
|
||||||
commands
|
commands
|
||||||
@ -21,6 +21,7 @@ Appendices
|
|||||||
generate-key-pair-for-ssh
|
generate-key-pair-for-ssh
|
||||||
firewall-notes
|
firewall-notes
|
||||||
ntp-notes
|
ntp-notes
|
||||||
example-rethinkdb-storage-setups
|
rethinkdb-reqs
|
||||||
|
rethinkdb-backup
|
||||||
licenses
|
licenses
|
||||||
install-with-lxd
|
install-with-lxd
|
||||||
|
@ -24,7 +24,7 @@ deserialize(serialize(data)) == data
|
|||||||
True
|
True
|
||||||
```
|
```
|
||||||
|
|
||||||
Since BigchainDB performs a lot of serialization we decided to use [python-rapidjson](https://github.com/kenrobbins/python-rapidjson)
|
Since BigchainDB performs a lot of serialization we decided to use [python-rapidjson](https://github.com/python-rapidjson/python-rapidjson)
|
||||||
which is a python wrapper for [rapidjson](https://github.com/miloyip/rapidjson) a fast and fully RFC complient JSON parser.
|
which is a python wrapper for [rapidjson](https://github.com/miloyip/rapidjson) a fast and fully RFC complient JSON parser.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
# Backing Up & Restoring Data
|
# Backing Up and Restoring Data
|
||||||
|
|
||||||
There are several ways to backup and restore the data in a BigchainDB cluster.
|
This page was written when BigchainDB only worked with RethinkDB, so its focus is on RethinkDB-based backup. BigchainDB now supports MongoDB as a backend database and we recommend that you use MongoDB in production. Nevertheless, some of the following backup ideas are still relevant regardless of the backend database being used, so we moved this page to the Appendices.
|
||||||
|
|
||||||
|
|
||||||
## RethinkDB's Replication as a form of Backup
|
## RethinkDB's Replication as a form of Backup
|
@ -1,20 +1,8 @@
|
|||||||
# Production Node Requirements
|
# RethinkDB Requirements
|
||||||
|
|
||||||
Note: This section will be broken apart into several pages, e.g. NTP requirements, RethinkDB requirements, BigchainDB requirements, etc. and those pages will add more details.
|
[The RethinkDB documentation](https://rethinkdb.com/docs/) should be your first source of information about its requirements. This page serves mostly to document some of its more obscure requirements.
|
||||||
|
|
||||||
|
RethinkDB Server [will run on any modern OS](https://www.rethinkdb.com/docs/install/). Note that the Fedora package isn't officially supported. Also, official support for Windows is fairly recent ([April 2016](https://rethinkdb.com/blog/2.3-release/)).
|
||||||
## OS Requirements
|
|
||||||
|
|
||||||
* RethinkDB Server [will run on any modern OS](https://www.rethinkdb.com/docs/install/). Note that the Fedora package isn't officially supported. Also, official support for Windows is fairly recent ([April 2016](https://rethinkdb.com/blog/2.3-release/)).
|
|
||||||
* BigchainDB Server requires Python 3.4+ and Python 3.4+ [will run on any modern OS](https://docs.python.org/3.4/using/index.html).
|
|
||||||
* BigchaindB Server uses the Python `multiprocessing` package and [some functionality in the `multiprocessing` package doesn't work on OS X](https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.qsize). You can still use Mac OS X if you use Docker or a virtual machine.
|
|
||||||
|
|
||||||
The BigchainDB core dev team uses recent LTS versions of Ubuntu and recent versions of Fedora.
|
|
||||||
|
|
||||||
We don't test BigchainDB on Windows or Mac OS X, but you can try.
|
|
||||||
|
|
||||||
* If you run into problems on Windows, then you may want to try using Vagrant. One of our community members ([@Mec-Is](https://github.com/Mec-iS)) wrote [a page about how to install BigchainDB on a VM with Vagrant](https://gist.github.com/Mec-iS/b84758397f1b21f21700).
|
|
||||||
* If you have Mac OS X and want to experiment with BigchainDB, then you could do that [using Docker](../appendices/run-with-docker.html).
|
|
||||||
|
|
||||||
|
|
||||||
## Storage Requirements
|
## Storage Requirements
|
||||||
@ -28,6 +16,20 @@ For RethinkDB's failover mechanisms to work, [every RethinkDB table must have at
|
|||||||
|
|
||||||
As for the read & write rates, what do you expect those to be for your situation? It's not enough for the storage system alone to handle those rates: the interconnects between the nodes must also be able to handle them.
|
As for the read & write rates, what do you expect those to be for your situation? It's not enough for the storage system alone to handle those rates: the interconnects between the nodes must also be able to handle them.
|
||||||
|
|
||||||
|
**Storage Notes Specific to RethinkDB**
|
||||||
|
|
||||||
|
* The RethinkDB storage engine has a number of SSD optimizations, so you _can_ benefit from using SSDs. ([source](https://www.rethinkdb.com/docs/architecture/))
|
||||||
|
|
||||||
|
* If you have an N-node RethinkDB cluster and 1) you want to use it to store an amount of data D (unique records, before replication), 2) you want the replication factor to be R (all tables), and 3) you want N shards (all tables), then each BigchainDB node must have storage space of at least R×D/N.
|
||||||
|
|
||||||
|
* RethinkDB tables can have [at most 64 shards](https://rethinkdb.com/limitations/). What does that imply? Suppose you only have one table, with 64 shards. How big could that table be? It depends on how much data can be stored in each node. If the maximum amount of data that a node can store is d, then the biggest-possible shard is d, and the biggest-possible table size is 64 times that. (All shard replicas would have to be stored on other nodes beyond the initial 64.) If there are two tables, the second table could also have 64 shards, stored on 64 other maxed-out nodes, so the total amount of unique data in the database would be (64 shards/table)×(2 tables)×d. In general, if you have T tables, the maximum amount of unique data that can be stored in the database (i.e. the amount of data before replication) is 64×T×d.
|
||||||
|
|
||||||
|
* When you set up storage for your RethinkDB data, you may have to select a filesystem. (Sometimes, the filesystem is already decided by the choice of storage.) We recommend using a filesystem that supports direct I/O (Input/Output). Many compressed or encrypted file systems don't support direct I/O. The ext4 filesystem supports direct I/O (but be careful: if you enable the data=journal mode, then direct I/O support will be disabled; the default is data=ordered). If your chosen filesystem supports direct I/O and you're using Linux, then you don't need to do anything to request or enable direct I/O. RethinkDB does that.
|
||||||
|
|
||||||
|
<p style="background-color: lightgrey;">What is direct I/O? It allows RethinkDB to write directly to the storage device (or use its own in-memory caching mechanisms), rather than relying on the operating system's file read and write caching mechanisms. (If you're using Linux, a write-to-file normally writes to the in-memory Page Cache first; only later does that Page Cache get flushed to disk. The Page Cache is also used when reading files.)</p>
|
||||||
|
|
||||||
|
* RethinkDB stores its data in a specific directory. You can tell RethinkDB _which_ directory using the RethinkDB config file, as explained below. In this documentation, we assume the directory is `/data`. If you set up a separate device (partition, RAID array, or logical volume) to store the RethinkDB data, then mount that device on `/data`.
|
||||||
|
|
||||||
|
|
||||||
## Memory (RAM) Requirements
|
## Memory (RAM) Requirements
|
||||||
|
|
@ -25,7 +25,7 @@ docker run \
|
|||||||
--interactive \
|
--interactive \
|
||||||
--rm \
|
--rm \
|
||||||
--tty \
|
--tty \
|
||||||
--volume "$HOME/bigchaindb_docker:/data" \
|
--volume $HOME/bigchaindb_docker:/data \
|
||||||
bigchaindb/bigchaindb \
|
bigchaindb/bigchaindb \
|
||||||
-y configure \
|
-y configure \
|
||||||
[mongodb|rethinkdb]
|
[mongodb|rethinkdb]
|
||||||
@ -45,7 +45,7 @@ Let's analyze that command:
|
|||||||
`$HOME/bigchaindb_docker` to the container directory `/data`;
|
`$HOME/bigchaindb_docker` to the container directory `/data`;
|
||||||
this allows us to have the data persisted on the host machine,
|
this allows us to have the data persisted on the host machine,
|
||||||
you can read more in the [official Docker
|
you can read more in the [official Docker
|
||||||
documentation](https://docs.docker.com/engine/tutorials/dockervolumes/#/mount-a-host-directory-as-a-data-volume)
|
documentation](https://docs.docker.com/engine/tutorials/dockervolumes)
|
||||||
* `bigchaindb/bigchaindb` the image to use. All the options after the container name are passed on to the entrypoint inside the container.
|
* `bigchaindb/bigchaindb` the image to use. All the options after the container name are passed on to the entrypoint inside the container.
|
||||||
* `-y configure` execute the `configure` sub-command (of the `bigchaindb`
|
* `-y configure` execute the `configure` sub-command (of the `bigchaindb`
|
||||||
command) inside the container, with the `-y` option to automatically use all the default config values
|
command) inside the container, with the `-y` option to automatically use all the default config values
|
||||||
@ -76,13 +76,13 @@ docker run \
|
|||||||
--publish=172.17.0.1:28015:28015 \
|
--publish=172.17.0.1:28015:28015 \
|
||||||
--publish=172.17.0.1:58080:8080 \
|
--publish=172.17.0.1:58080:8080 \
|
||||||
--restart=always \
|
--restart=always \
|
||||||
--volume "$HOME/bigchaindb_docker:/data" \
|
--volume $HOME/bigchaindb_docker:/data \
|
||||||
rethinkdb:2.3
|
rethinkdb:2.3
|
||||||
```
|
```
|
||||||
|
|
||||||
|
<!-- Don't hyperlink http://172.17.0.1:58080/ because Sphinx will fail when you do "make linkcheck" -->
|
||||||
|
|
||||||
You can also access the RethinkDB dashboard at
|
You can also access the RethinkDB dashboard at http://172.17.0.1:58080/
|
||||||
[http://172.17.0.1:58080/](http://172.17.0.1:58080/)
|
|
||||||
|
|
||||||
|
|
||||||
#### For MongoDB
|
#### For MongoDB
|
||||||
@ -95,7 +95,7 @@ be owned by this user in the host.
|
|||||||
If there is no owner with UID 999, you can create the corresponding user and
|
If there is no owner with UID 999, you can create the corresponding user and
|
||||||
group.
|
group.
|
||||||
|
|
||||||
`groupadd -r --gid 999 mongodb && useradd -r --uid 999 -g mongodb mongodb`
|
`useradd -r --uid 999 mongodb` OR `groupadd -r --gid 999 mongodb && useradd -r --uid 999 -g mongodb mongodb` should work.
|
||||||
|
|
||||||
|
|
||||||
```text
|
```text
|
||||||
@ -156,3 +156,4 @@ docker build --tag local-bigchaindb .
|
|||||||
```
|
```
|
||||||
|
|
||||||
Now you can use your own image to run BigchainDB containers.
|
Now you can use your own image to run BigchainDB containers.
|
||||||
|
|
||||||
|
@ -157,7 +157,7 @@ Step 5: Create the Config Map - Optional
|
|||||||
|
|
||||||
This step is required only if you are planning to set up multiple
|
This step is required only if you are planning to set up multiple
|
||||||
`BigchainDB nodes
|
`BigchainDB nodes
|
||||||
<https://docs.bigchaindb.com/en/latest/terminology.html#node>`_.
|
<https://docs.bigchaindb.com/en/latest/terminology.html>`_.
|
||||||
|
|
||||||
MongoDB reads the local ``/etc/hosts`` file while bootstrapping a replica set
|
MongoDB reads the local ``/etc/hosts`` file while bootstrapping a replica set
|
||||||
to resolve the hostname provided to the ``rs.initiate()`` command. It needs to
|
to resolve the hostname provided to the ``rs.initiate()`` command. It needs to
|
||||||
@ -268,7 +268,7 @@ Step 7: Initialize a MongoDB Replica Set - Optional
|
|||||||
|
|
||||||
This step is required only if you are planning to set up multiple
|
This step is required only if you are planning to set up multiple
|
||||||
`BigchainDB nodes
|
`BigchainDB nodes
|
||||||
<https://docs.bigchaindb.com/en/latest/terminology.html#node>`_.
|
<https://docs.bigchaindb.com/en/latest/terminology.html>`_.
|
||||||
|
|
||||||
|
|
||||||
Login to the running MongoDB instance and access the mongo shell using:
|
Login to the running MongoDB instance and access the mongo shell using:
|
||||||
@ -315,7 +315,7 @@ Step 8: Create a DNS record - Optional
|
|||||||
|
|
||||||
This step is required only if you are planning to set up multiple
|
This step is required only if you are planning to set up multiple
|
||||||
`BigchainDB nodes
|
`BigchainDB nodes
|
||||||
<https://docs.bigchaindb.com/en/latest/terminology.html#node>`_.
|
<https://docs.bigchaindb.com/en/latest/terminology.html>`_.
|
||||||
|
|
||||||
**Azure.** Select the current Azure resource group and look for the ``Public IP``
|
**Azure.** Select the current Azure resource group and look for the ``Public IP``
|
||||||
resource. You should see at least 2 entries there - one for the Kubernetes
|
resource. You should see at least 2 entries there - one for the Kubernetes
|
||||||
@ -426,9 +426,8 @@ on the cluster and query the internal DNS and IP endpoints.
|
|||||||
$ kubectl run -it toolbox -- image <docker image to run> --restart=Never --rm
|
$ kubectl run -it toolbox -- image <docker image to run> --restart=Never --rm
|
||||||
|
|
||||||
There is a generic image based on alpine:3.5 with the required utilities
|
There is a generic image based on alpine:3.5 with the required utilities
|
||||||
hosted at Docker Hub under ``bigchaindb/toolbox``.
|
hosted at Docker Hub under `bigchaindb/toolbox <https://hub.docker.com/r/bigchaindb/toolbox/>`_.
|
||||||
The corresponding Dockerfile is `here
|
The corresponding Dockerfile is in the bigchaindb/bigchaindb repository on GitHub, at `https://github.com/bigchaindb/bigchaindb/blob/master/k8s/toolbox/Dockerfile <https://github.com/bigchaindb/bigchaindb/blob/master/k8s/toolbox/Dockerfile>`_.
|
||||||
<https://github.com/bigchaindb/bigchaindb/k8s/toolbox/Dockerfile>`_.
|
|
||||||
|
|
||||||
You can use it as below to get started immediately:
|
You can use it as below to get started immediately:
|
||||||
|
|
||||||
|
@ -81,4 +81,4 @@ where, as before, `<key-name>` must be replaced.
|
|||||||
|
|
||||||
## Next Steps
|
## Next Steps
|
||||||
|
|
||||||
You could make changes to the Ansible playbook (and the resources it uses) to make the node more production-worthy. See [the section on production node assumptions, components and requirements](../nodes/index.html).
|
You could make changes to the Ansible playbook (and the resources it uses) to make the node more production-worthy. See [the section on production node assumptions, components and requirements](../production-nodes/index.html).
|
||||||
|
@ -53,7 +53,7 @@ on the node and mark it as unscheduleable
|
|||||||
|
|
||||||
kubectl drain $NODENAME
|
kubectl drain $NODENAME
|
||||||
|
|
||||||
There are `more details in the Kubernetes docs <https://kubernetes.io/docs/admin/cluster-management/#maintenance-on-a-node>`_,
|
There are `more details in the Kubernetes docs <https://kubernetes.io/docs/concepts/cluster-administration/cluster-management/#maintenance-on-a-node>`_,
|
||||||
including instructions to make the node scheduleable again.
|
including instructions to make the node scheduleable again.
|
||||||
|
|
||||||
To manually upgrade the host OS,
|
To manually upgrade the host OS,
|
||||||
@ -82,13 +82,13 @@ A typical upgrade workflow for a single Deployment would be:
|
|||||||
|
|
||||||
$ KUBE_EDITOR=nano kubectl edit deployment/<name of Deployment>
|
$ KUBE_EDITOR=nano kubectl edit deployment/<name of Deployment>
|
||||||
|
|
||||||
The `kubectl edit <https://kubernetes.io/docs/user-guide/kubectl/kubectl_edit/>`_
|
The ``kubectl edit`` command
|
||||||
command opens the specified editor (nano in the above example),
|
opens the specified editor (nano in the above example),
|
||||||
allowing you to edit the specified Deployment *in the Kubernetes cluster*.
|
allowing you to edit the specified Deployment *in the Kubernetes cluster*.
|
||||||
You can change the version tag on the Docker image, for example.
|
You can change the version tag on the Docker image, for example.
|
||||||
Don't forget to save your edits before exiting the editor.
|
Don't forget to save your edits before exiting the editor.
|
||||||
The Kubernetes docs have more information about
|
The Kubernetes docs have more information about
|
||||||
`updating a Deployment <https://kubernetes.io/docs/user-guide/deployments/#updating-a-deployment>`_.
|
`Deployments <https://kubernetes.io/docs/concepts/workloads/controllers/deployment/>`_ (including updating them).
|
||||||
|
|
||||||
|
|
||||||
The upgrade story for the MongoDB StatefulSet is *different*.
|
The upgrade story for the MongoDB StatefulSet is *different*.
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
# Deploy a Testing Cluster on AWS
|
# Deploy a RethinkDB-Based Testing Cluster on AWS
|
||||||
|
|
||||||
This section explains a way to deploy a cluster of BigchainDB nodes on Amazon Web Services (AWS) for testing purposes.
|
This section explains a way to deploy a _RethinkDB-based_ cluster of BigchainDB nodes on Amazon Web Services (AWS) for testing purposes.
|
||||||
|
|
||||||
## Why?
|
## Why?
|
||||||
|
|
||||||
|
@ -5,6 +5,5 @@ Clusters
|
|||||||
:maxdepth: 1
|
:maxdepth: 1
|
||||||
|
|
||||||
set-up-a-cluster
|
set-up-a-cluster
|
||||||
backup
|
|
||||||
aws-testing-cluster
|
aws-testing-cluster
|
||||||
|
|
||||||
|
@ -3,7 +3,9 @@
|
|||||||
This section is about how to set up a BigchainDB cluster where each node is operated by a different operator. If you want to set up and run a testing cluster on AWS (where all nodes are operated by you), then see [the section about that](aws-testing-cluster.html).
|
This section is about how to set up a BigchainDB cluster where each node is operated by a different operator. If you want to set up and run a testing cluster on AWS (where all nodes are operated by you), then see [the section about that](aws-testing-cluster.html).
|
||||||
|
|
||||||
|
|
||||||
## Initial Checklist
|
## Initial Questions
|
||||||
|
|
||||||
|
There are many questions that must be answered before setting up a BigchainDB cluster. For example:
|
||||||
|
|
||||||
* Do you have a governance process for making consortium-level decisions, such as how to admit new members?
|
* Do you have a governance process for making consortium-level decisions, such as how to admit new members?
|
||||||
* What will you store in creation transactions (data payload)? Is there a data schema?
|
* What will you store in creation transactions (data payload)? Is there a data schema?
|
||||||
@ -15,14 +17,12 @@ This section is about how to set up a BigchainDB cluster where each node is oper
|
|||||||
|
|
||||||
The consortium must decide some things before setting up the initial cluster (initial set of BigchainDB nodes):
|
The consortium must decide some things before setting up the initial cluster (initial set of BigchainDB nodes):
|
||||||
|
|
||||||
1. Who will operate a node in the initial cluster?
|
1. Who will operate each node in the initial cluster?
|
||||||
2. What will the replication factor be? (It must be 3 or more for [RethinkDB failover](https://rethinkdb.com/docs/failover/) to work.)
|
2. What will the replication factor be? (It should be 3 or more.)
|
||||||
3. Which node will be responsible for sending the commands to configure the RethinkDB database?
|
3. Who will deploy the first node, second node, etc.?
|
||||||
|
|
||||||
Once those things have been decided, each node operator can begin setting up their BigchainDB (production) node.
|
Once those things have been decided, the cluster deployment process can begin. The process for deploying a production node is outlined in [the section on production nodes](../production-nodes/index.html).
|
||||||
|
|
||||||
Each node operator will eventually need two pieces of information from all other nodes:
|
Every time a new BigchainDB node is added, every other node must update their [BigchainDB keyring](../server-reference/configuration.html#keyring) (one of the BigchainDB configuration settings): they must add the public key of the new node.
|
||||||
|
|
||||||
1. Their RethinkDB hostname, e.g. `rdb.farm2.organization.org`
|
|
||||||
2. Their BigchainDB public key, e.g. `Eky3nkbxDTMgkmiJC8i5hKyVFiAQNmPP4a2G4JdDxJCK`
|
|
||||||
|
|
||||||
|
To secure communications between BigchainDB nodes, each BigchainDB node can use a firewall or similar, and doing that will require additional coordination.
|
||||||
|
@ -23,7 +23,9 @@ Start RethinkDB using:
|
|||||||
$ rethinkdb
|
$ rethinkdb
|
||||||
```
|
```
|
||||||
|
|
||||||
You can verify that RethinkDB is running by opening the RethinkDB web interface in your web browser. It should be at [http://localhost:8080/](http://localhost:8080/).
|
You can verify that RethinkDB is running by opening the RethinkDB web interface in your web browser. It should be at http://localhost:8080/
|
||||||
|
|
||||||
|
<!-- Don't hyperlink http://localhost:8080/ because Sphinx will fail when you do "make linkcheck" -->
|
||||||
|
|
||||||
To run BigchainDB Server, do:
|
To run BigchainDB Server, do:
|
||||||
```text
|
```text
|
||||||
@ -87,11 +89,11 @@ Start RethinkDB:
|
|||||||
docker-compose up -d rdb
|
docker-compose up -d rdb
|
||||||
```
|
```
|
||||||
|
|
||||||
The RethinkDB web interface should be accessible at <http://localhost:58080/>.
|
The RethinkDB web interface should be accessible at http://localhost:58080/.
|
||||||
Depending on which platform, and/or how you are running docker, you may need
|
Depending on which platform, and/or how you are running docker, you may need
|
||||||
to change `localhost` for the `ip` of the machine that is running docker. As a
|
to change `localhost` for the `ip` of the machine that is running docker. As a
|
||||||
dummy example, if the `ip` of that machine was `0.0.0.0`, you would access the
|
dummy example, if the `ip` of that machine was `0.0.0.0`, you would access the
|
||||||
web interface at: <http://0.0.0.0:58080/>.
|
web interface at: http://0.0.0.0:58080/.
|
||||||
|
|
||||||
Start a BigchainDB node:
|
Start a BigchainDB node:
|
||||||
|
|
||||||
|
@ -25,6 +25,6 @@ Please note that some of these projects may be work in progress, but may
|
|||||||
nevertheless be very useful.
|
nevertheless be very useful.
|
||||||
|
|
||||||
* `Javascript transaction builder <https://github.com/sohkai/js-bigchaindb-quickstart>`_
|
* `Javascript transaction builder <https://github.com/sohkai/js-bigchaindb-quickstart>`_
|
||||||
* `Haskell transaction builder <https://github.com/libscott/bigchaindb-hs>`_
|
* `Haskell transaction builder <https://github.com/bigchaindb/bigchaindb-hs>`_
|
||||||
* `Go driver <https://github.com/zbo14/envoke/blob/master/bigchain/bigchain.go>`_
|
* `Go driver <https://github.com/zbo14/envoke/blob/master/bigchain/bigchain.go>`_
|
||||||
* `Java driver <https://github.com/mgrand/bigchaindb-java-driver>`_
|
* `Java driver <https://github.com/mgrand/bigchaindb-java-driver>`_
|
||||||
|
@ -406,7 +406,7 @@ Determining the API Root URL
|
|||||||
When you start BigchainDB Server using ``bigchaindb start``,
|
When you start BigchainDB Server using ``bigchaindb start``,
|
||||||
an HTTP API is exposed at some address. The default is:
|
an HTTP API is exposed at some address. The default is:
|
||||||
|
|
||||||
`http://localhost:9984/api/v1/ <http://localhost:9984/api/v1/>`_
|
``http://localhost:9984/api/v1/``
|
||||||
|
|
||||||
It's bound to ``localhost``,
|
It's bound to ``localhost``,
|
||||||
so you can access it from the same machine,
|
so you can access it from the same machine,
|
||||||
|
@ -8,7 +8,7 @@ BigchainDB Server Documentation
|
|||||||
introduction
|
introduction
|
||||||
quickstart
|
quickstart
|
||||||
cloud-deployment-templates/index
|
cloud-deployment-templates/index
|
||||||
nodes/index
|
production-nodes/index
|
||||||
dev-and-test/index
|
dev-and-test/index
|
||||||
server-reference/index
|
server-reference/index
|
||||||
http-client-server-api
|
http-client-server-api
|
||||||
|
@ -1,10 +0,0 @@
|
|||||||
Production Node Assumptions, Components & Requirements
|
|
||||||
======================================================
|
|
||||||
|
|
||||||
.. toctree::
|
|
||||||
:maxdepth: 1
|
|
||||||
|
|
||||||
node-assumptions
|
|
||||||
node-components
|
|
||||||
node-requirements
|
|
||||||
setup-run-node
|
|
@ -1,13 +0,0 @@
|
|||||||
# Production Node Assumptions
|
|
||||||
|
|
||||||
If you're not sure what we mean by a BigchainDB *node*, *cluster*, *consortium*, or *production node*, then see [the section in the Introduction where we defined those terms](../introduction.html#some-basic-vocabulary).
|
|
||||||
|
|
||||||
We make some assumptions about production nodes:
|
|
||||||
|
|
||||||
1. **Each production node is set up and managed by an experienced professional system administrator (or a team of them).**
|
|
||||||
|
|
||||||
2. Each production node in a cluster is managed by a different person or team.
|
|
||||||
|
|
||||||
Because of the first assumption, we don't provide a detailed cookbook explaining how to secure a server, or other things that a sysadmin should know. (We do provide some [templates](../cloud-deployment-templates/index.html), but those are just a starting point.)
|
|
||||||
|
|
||||||
|
|
@ -1,23 +0,0 @@
|
|||||||
# Production Node Components
|
|
||||||
|
|
||||||
A BigchainDB node must include, at least:
|
|
||||||
|
|
||||||
* BigchainDB Server and
|
|
||||||
* RethinkDB Server.
|
|
||||||
|
|
||||||
When doing development and testing, it's common to install both on the same machine, but in a production environment, it may make more sense to install them on separate machines.
|
|
||||||
|
|
||||||
In a production environment, a BigchainDB node should have several other components, including:
|
|
||||||
|
|
||||||
* nginx or similar, as a reverse proxy and/or load balancer for the Gunicorn server(s) inside the node
|
|
||||||
* An NTP daemon running on all machines running BigchainDB code, and possibly other machines
|
|
||||||
* A RethinkDB proxy server
|
|
||||||
* A RethinkDB "wire protocol firewall" (in the future: this component doesn't exist yet)
|
|
||||||
* Scalable storage for RethinkDB (e.g. using RAID)
|
|
||||||
* Monitoring software, to monitor all the machines in the node
|
|
||||||
* Configuration management agents (if you're using a configuration management system that uses agents)
|
|
||||||
* Maybe more
|
|
||||||
|
|
||||||
The relationship between these components is illustrated below.
|
|
||||||
|
|
||||||

|
|
@ -1,193 +0,0 @@
|
|||||||
# Set Up and Run a Cluster Node
|
|
||||||
|
|
||||||
This is a page of general guidelines for setting up a production node. It says nothing about how to upgrade software, storage, processing, etc. or other details of node management. It will be expanded more in the future.
|
|
||||||
|
|
||||||
|
|
||||||
## Get a Server
|
|
||||||
|
|
||||||
The first step is to get a server (or equivalent) which meets [the requirements for a BigchainDB node](node-requirements.html).
|
|
||||||
|
|
||||||
|
|
||||||
## Secure Your Server
|
|
||||||
|
|
||||||
The steps that you must take to secure your server depend on your server OS and where your server is physically located. There are many articles and books about how to secure a server. Here we just cover special considerations when securing a BigchainDB node.
|
|
||||||
|
|
||||||
There are some [notes on BigchainDB-specific firewall setup](../appendices/firewall-notes.html) in the Appendices.
|
|
||||||
|
|
||||||
|
|
||||||
## Sync Your System Clock
|
|
||||||
|
|
||||||
A BigchainDB node uses its system clock to generate timestamps for blocks and votes, so that clock should be kept in sync with some standard clock(s). The standard way to do that is to run an NTP daemon (Network Time Protocol daemon) on the node. (You could also use tlsdate, which uses TLS timestamps rather than NTP, but don't: it's not very accurate and it will break with TLS 1.3, which removes the timestamp.)
|
|
||||||
|
|
||||||
NTP is a standard protocol. There are many NTP daemons implementing it. We don't recommend a particular one. On the contrary, we recommend that different nodes in a cluster run different NTP daemons, so that a problem with one daemon won't affect all nodes.
|
|
||||||
|
|
||||||
Please see the [notes on NTP daemon setup](../appendices/ntp-notes.html) in the Appendices.
|
|
||||||
|
|
||||||
|
|
||||||
## Set Up Storage for RethinkDB Data
|
|
||||||
|
|
||||||
Below are some things to consider when setting up storage for the RethinkDB data. The Appendices have a [section with concrete examples](../appendices/example-rethinkdb-storage-setups.html).
|
|
||||||
|
|
||||||
We suggest you set up a separate storage "device" (partition, RAID array, or logical volume) to store the RethinkDB data. Here are some questions to ask:
|
|
||||||
|
|
||||||
* How easy will it be to add storage in the future? Will I have to shut down my server?
|
|
||||||
* How big can the storage get? (Remember that [RAID](https://en.wikipedia.org/wiki/RAID) can be used to make several physical drives look like one.)
|
|
||||||
* How fast can it read & write data? How many input/output operations per second (IOPS)?
|
|
||||||
* How does IOPS scale as more physical hard drives are added?
|
|
||||||
* What's the latency?
|
|
||||||
* What's the reliability? Is there replication?
|
|
||||||
* What's in the Service Level Agreement (SLA), if applicable?
|
|
||||||
* What's the cost?
|
|
||||||
|
|
||||||
There are many options and tradeoffs. Don't forget to look into Amazon Elastic Block Store (EBS) and Amazon Elastic File System (EFS), or their equivalents from other providers.
|
|
||||||
|
|
||||||
**Storage Notes Specific to RethinkDB**
|
|
||||||
|
|
||||||
* The RethinkDB storage engine has a number of SSD optimizations, so you _can_ benefit from using SSDs. ([source](https://www.rethinkdb.com/docs/architecture/))
|
|
||||||
|
|
||||||
* If you want a RethinkDB cluster to store an amount of data D, with a replication factor of R (on every table), and the cluster has N nodes, then each node will need to be able to store R×D/N data.
|
|
||||||
|
|
||||||
* RethinkDB tables can have [at most 64 shards](https://rethinkdb.com/limitations/). For example, if you have only one table and more than 64 nodes, some nodes won't have the primary of any shard, i.e. they will have replicas only. In other words, once you pass 64 nodes, adding more nodes won't provide more storage space for new data. If the biggest single-node storage available is d, then the most you can store in a RethinkDB cluster is < 64×d: accomplished by putting one primary shard in each of 64 nodes, with all replica shards on other nodes. (This is assuming one table. If there are T tables, then the most you can store is < 64×d×T.)
|
|
||||||
|
|
||||||
* When you set up storage for your RethinkDB data, you may have to select a filesystem. (Sometimes, the filesystem is already decided by the choice of storage.) We recommend using a filesystem that supports direct I/O (Input/Output). Many compressed or encrypted file systems don't support direct I/O. The ext4 filesystem supports direct I/O (but be careful: if you enable the data=journal mode, then direct I/O support will be disabled; the default is data=ordered). If your chosen filesystem supports direct I/O and you're using Linux, then you don't need to do anything to request or enable direct I/O. RethinkDB does that.
|
|
||||||
|
|
||||||
<p style="background-color: lightgrey;">What is direct I/O? It allows RethinkDB to write directly to the storage device (or use its own in-memory caching mechanisms), rather than relying on the operating system's file read and write caching mechanisms. (If you're using Linux, a write-to-file normally writes to the in-memory Page Cache first; only later does that Page Cache get flushed to disk. The Page Cache is also used when reading files.)</p>
|
|
||||||
|
|
||||||
* RethinkDB stores its data in a specific directory. You can tell RethinkDB _which_ directory using the RethinkDB config file, as explained below. In this documentation, we assume the directory is `/data`. If you set up a separate device (partition, RAID array, or logical volume) to store the RethinkDB data, then mount that device on `/data`.
|
|
||||||
|
|
||||||
|
|
||||||
## Install RethinkDB Server
|
|
||||||
|
|
||||||
If you don't already have RethinkDB Server installed, you must install it. The RethinkDB documentation has instructions for [how to install RethinkDB Server on a variety of operating systems](https://rethinkdb.com/docs/install/).
|
|
||||||
|
|
||||||
|
|
||||||
## Configure RethinkDB Server
|
|
||||||
|
|
||||||
Create a RethinkDB configuration file (text file) named `instance1.conf` with the following contents (explained below):
|
|
||||||
```text
|
|
||||||
directory=/data
|
|
||||||
bind=all
|
|
||||||
direct-io
|
|
||||||
# Replace node?_hostname with actual node hostnames below, e.g. rdb.examples.com
|
|
||||||
join=node0_hostname:29015
|
|
||||||
join=node1_hostname:29015
|
|
||||||
join=node2_hostname:29015
|
|
||||||
# continue until there's a join= line for each node in the cluster
|
|
||||||
```
|
|
||||||
|
|
||||||
* `directory=/data` tells the RethinkDB node to store its share of the database data in `/data`.
|
|
||||||
* `bind=all` binds RethinkDB to all local network interfaces (e.g. loopback, Ethernet, wireless, whatever is available), so it can communicate with the outside world. (The default is to bind only to local interfaces.)
|
|
||||||
* `direct-io` tells RethinkDB to use direct I/O (explained earlier). Only include this line if your file system supports direct I/O.
|
|
||||||
* `join=hostname:29015` lines: A cluster node needs to find out the hostnames of all the other nodes somehow. You _could_ designate one node to be the one that every other node asks, and put that node's hostname in the config file, but that wouldn't be very decentralized. Instead, we include _every_ node in the list of nodes-to-ask.
|
|
||||||
|
|
||||||
If you're curious about the RethinkDB config file, there's [a RethinkDB documentation page about it](https://www.rethinkdb.com/docs/config-file/). The [explanations of the RethinkDB command-line options](https://rethinkdb.com/docs/cli-options/) are another useful reference.
|
|
||||||
|
|
||||||
See the [RethinkDB documentation on securing your cluster](https://rethinkdb.com/docs/security/).
|
|
||||||
|
|
||||||
|
|
||||||
## Install Python 3.4+
|
|
||||||
|
|
||||||
If you don't already have it, then you should [install Python 3.4+](https://www.python.org/downloads/).
|
|
||||||
|
|
||||||
If you're testing or developing BigchainDB on a stand-alone node, then you should probably create a Python 3.4+ virtual environment and activate it (e.g. using virtualenv or conda). Later we will install several Python packages and you probably only want those installed in the virtual environment.
|
|
||||||
|
|
||||||
|
|
||||||
## Install BigchainDB Server
|
|
||||||
|
|
||||||
First, [install the OS-level dependencies of BigchainDB Server (link)](../appendices/install-os-level-deps.html).
|
|
||||||
|
|
||||||
With OS-level dependencies installed, you can install BigchainDB Server with `pip` or from source.
|
|
||||||
|
|
||||||
|
|
||||||
### How to Install BigchainDB with pip
|
|
||||||
|
|
||||||
BigchainDB (i.e. both the Server and the officially-supported drivers) is distributed as a Python package on PyPI so you can install it using `pip`. First, make sure you have an up-to-date Python 3.4+ version of `pip` installed:
|
|
||||||
```text
|
|
||||||
pip -V
|
|
||||||
```
|
|
||||||
|
|
||||||
If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.4, then you must install a `pip` version associated with Python 3.4+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).
|
|
||||||
|
|
||||||
On Ubuntu 16.04, we found that this works:
|
|
||||||
```text
|
|
||||||
sudo apt-get install python3-pip
|
|
||||||
```
|
|
||||||
|
|
||||||
That should install a Python 3 version of `pip` named `pip3`. If that didn't work, then another way to get `pip3` is to do `sudo apt-get install python3-setuptools` followed by `sudo easy_install3 pip`.
|
|
||||||
|
|
||||||
You can upgrade `pip` (`pip3`) and `setuptools` to the latest versions using:
|
|
||||||
```text
|
|
||||||
pip3 install --upgrade pip setuptools
|
|
||||||
pip3 -V
|
|
||||||
```
|
|
||||||
|
|
||||||
Now you can install BigchainDB Server (and officially-supported BigchainDB drivers) using:
|
|
||||||
```text
|
|
||||||
pip3 install bigchaindb
|
|
||||||
```
|
|
||||||
|
|
||||||
(If you're not in a virtualenv and you want to install bigchaindb system-wide, then put `sudo` in front.)
|
|
||||||
|
|
||||||
Note: You can use `pip3` to upgrade the `bigchaindb` package to the latest version using `pip3 install --upgrade bigchaindb`.
|
|
||||||
|
|
||||||
|
|
||||||
### How to Install BigchainDB from Source
|
|
||||||
|
|
||||||
If you want to install BigchainDB from source because you want to use the very latest bleeding-edge code, clone the public repository:
|
|
||||||
```text
|
|
||||||
git clone git@github.com:bigchaindb/bigchaindb.git
|
|
||||||
python setup.py install
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## Configure BigchainDB Server
|
|
||||||
|
|
||||||
Start by creating a default BigchainDB config file:
|
|
||||||
```text
|
|
||||||
bigchaindb -y configure rethinkdb
|
|
||||||
```
|
|
||||||
|
|
||||||
(There's documentation for the `bigchaindb` command in the section on [the BigchainDB Command Line Interface (CLI)](bigchaindb-cli.html).)
|
|
||||||
|
|
||||||
Edit the created config file:
|
|
||||||
|
|
||||||
* Open `$HOME/.bigchaindb` (the created config file) in your text editor.
|
|
||||||
* Change `"server": {"bind": "localhost:9984", ... }` to `"server": {"bind": "0.0.0.0:9984", ... }`. This makes it so traffic can come from any IP address to port 9984 (the HTTP Client-Server API port).
|
|
||||||
* Change `"keyring": []` to `"keyring": ["public_key_of_other_node_A", "public_key_of_other_node_B", "..."]` i.e. a list of the public keys of all the other nodes in the cluster. The keyring should _not_ include your node's public key.
|
|
||||||
|
|
||||||
For more information about the BigchainDB config file, see [Configuring a BigchainDB Node](configuration.html).
|
|
||||||
|
|
||||||
|
|
||||||
## Run RethinkDB Server
|
|
||||||
|
|
||||||
Start RethinkDB using:
|
|
||||||
```text
|
|
||||||
rethinkdb --config-file path/to/instance1.conf
|
|
||||||
```
|
|
||||||
|
|
||||||
except replace the path with the actual path to `instance1.conf`.
|
|
||||||
|
|
||||||
Note: It's possible to [make RethinkDB start at system startup](https://www.rethinkdb.com/docs/start-on-startup/).
|
|
||||||
|
|
||||||
You can verify that RethinkDB is running by opening the RethinkDB web interface in your web browser. It should be at `http://rethinkdb-hostname:8080/`. If you're running RethinkDB on localhost, that would be [http://localhost:8080/](http://localhost:8080/).
|
|
||||||
|
|
||||||
|
|
||||||
## Run BigchainDB Server
|
|
||||||
|
|
||||||
After all node operators have started RethinkDB, but before they start BigchainDB, one designated node operator must configure the RethinkDB database by running the following commands:
|
|
||||||
```text
|
|
||||||
bigchaindb init
|
|
||||||
bigchaindb set-shards numshards
|
|
||||||
bigchaindb set-replicas numreplicas
|
|
||||||
```
|
|
||||||
|
|
||||||
where:
|
|
||||||
|
|
||||||
* `bigchaindb init` creates the database within RethinkDB, the tables, the indexes, and the genesis block.
|
|
||||||
* `numshards` should be set to the number of nodes in the initial cluster.
|
|
||||||
* `numreplicas` should be set to the database replication factor decided by the consortium. It must be 3 or more for [RethinkDB failover](https://rethinkdb.com/docs/failover/) to work.
|
|
||||||
|
|
||||||
Once the RethinkDB database is configured, every node operator can start BigchainDB using:
|
|
||||||
```text
|
|
||||||
bigchaindb start
|
|
||||||
```
|
|
10
docs/server/source/production-nodes/index.rst
Normal file
10
docs/server/source/production-nodes/index.rst
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
Production Nodes
|
||||||
|
================
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:maxdepth: 1
|
||||||
|
|
||||||
|
node-assumptions
|
||||||
|
node-components
|
||||||
|
node-requirements
|
||||||
|
setup-run-node
|
16
docs/server/source/production-nodes/node-assumptions.md
Normal file
16
docs/server/source/production-nodes/node-assumptions.md
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
# Production Node Assumptions
|
||||||
|
|
||||||
|
Be sure you know the key BigchainDB terminology:
|
||||||
|
|
||||||
|
* [BigchainDB node, BigchainDB cluster and BigchainDB consortium](https://docs.bigchaindb.com/en/latest/terminology.html)
|
||||||
|
* [dev/test node, bare-bones node and production node](../introduction.html)
|
||||||
|
|
||||||
|
We make some assumptions about production nodes:
|
||||||
|
|
||||||
|
1. Production nodes use MongoDB, not RethinkDB.
|
||||||
|
1. Each production node is set up and managed by an experienced professional system administrator or a team of them.
|
||||||
|
1. Each production node in a cluster is managed by a different person or team.
|
||||||
|
|
||||||
|
You can use RethinkDB when building prototypes, but we don't advise or support using it in production.
|
||||||
|
|
||||||
|
We don't provide a detailed cookbook explaining how to secure a server, or other things that a sysadmin should know. (We do provide some [templates](../cloud-deployment-templates/index.html), but those are just a starting point.)
|
22
docs/server/source/production-nodes/node-components.md
Normal file
22
docs/server/source/production-nodes/node-components.md
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
# Production Node Components
|
||||||
|
|
||||||
|
A production BigchainDB node must include:
|
||||||
|
|
||||||
|
* BigchainDB Server
|
||||||
|
* MongoDB Server 3.4+ (mongod)
|
||||||
|
* Scalable storage for MongoDB
|
||||||
|
|
||||||
|
It could also include several other components, including:
|
||||||
|
|
||||||
|
* NGINX or similar, to provide authentication, rate limiting, etc.
|
||||||
|
* An NTP daemon running on all machines running BigchainDB Server or mongod, and possibly other machines
|
||||||
|
* **Not** MongoDB Automation Agent. It's for automating the deployment of an entire MongoDB cluster, not just one MongoDB node within a cluster.
|
||||||
|
* MongoDB Monitoring Agent
|
||||||
|
* MongoDB Backup Agent
|
||||||
|
* Log aggregation software
|
||||||
|
* Monitoring software
|
||||||
|
* Maybe more
|
||||||
|
|
||||||
|
The relationship between the main components is illustrated below. Note that BigchainDB Server must be able to communicate with the _primary_ MongoDB instance, and any of the MongoDB instances might be the primary, so BigchainDB Server must be able to communicate with all the MongoDB instances. Also, all MongoDB instances must be able to communicate with each other.
|
||||||
|
|
||||||
|

|
17
docs/server/source/production-nodes/node-requirements.md
Normal file
17
docs/server/source/production-nodes/node-requirements.md
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
# Production Node Requirements
|
||||||
|
|
||||||
|
**This page is about the requirements of BigchainDB Server.** You can find the requirements of MongoDB, NGINX, your NTP daemon, your monitoring software, and other [production node components](node-components.html) in the documentation for that software.
|
||||||
|
|
||||||
|
|
||||||
|
## OS Requirements
|
||||||
|
|
||||||
|
BigchainDB Server requires Python 3.4+ and Python 3.4+ [will run on any modern OS](https://docs.python.org/3.4/using/index.html), but we recommend using an LTS version of [Ubuntu Server](https://www.ubuntu.com/server) or a similarly server-grade Linux distribution.
|
||||||
|
|
||||||
|
_Don't use macOS_ (formerly OS X, formerly Mac OS X), because it's not a server-grade operating system. Also, BigchainDB Server uses the Python multiprocessing package and [some functionality in the multiprocessing package doesn't work on Mac OS X](https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.qsize).
|
||||||
|
|
||||||
|
|
||||||
|
## General Considerations
|
||||||
|
|
||||||
|
BigchainDB Server runs many concurrent processes, so more RAM and more CPU cores is better.
|
||||||
|
|
||||||
|
As mentioned on the page about [production node components](node-components.html), every machine running BigchainDB Server should be running an NTP daemon.
|
137
docs/server/source/production-nodes/setup-run-node.md
Normal file
137
docs/server/source/production-nodes/setup-run-node.md
Normal file
@ -0,0 +1,137 @@
|
|||||||
|
# Set Up and Run a Cluster Node
|
||||||
|
|
||||||
|
This is a page of general guidelines for setting up a production BigchainDB node. Before continuing, make sure you've read the pages about production node [assumptions](node-assumptions.html), [components](node-components.html) and [requirements](node-requirements.html).
|
||||||
|
|
||||||
|
Note: These are just guidelines. You can modify them to suit your needs. For example, if you want to initialize the MongoDB replica set before installing BigchainDB, you _can_ do that. If you'd prefer to use Docker and Kubernetes, you can (and [we have a template](../cloud-deployment-templates/node-on-kubernetes.html)). We don't cover all possible setup procedures here.
|
||||||
|
|
||||||
|
|
||||||
|
## Security Guidelines
|
||||||
|
|
||||||
|
There are many articles, websites and books about securing servers, virtual machines, networks, etc. Consult those.
|
||||||
|
There are some [notes on BigchainDB-specific firewall setup](../appendices/firewall-notes.html) in the Appendices.
|
||||||
|
|
||||||
|
|
||||||
|
## Sync Your System Clock
|
||||||
|
|
||||||
|
A BigchainDB node uses its system clock to generate timestamps for blocks and votes, so that clock should be kept in sync with some standard clock(s). The standard way to do that is to run an NTP daemon (Network Time Protocol daemon) on the node.
|
||||||
|
|
||||||
|
MongoDB also recommends having an NTP daemon running on all MongoDB nodes.
|
||||||
|
|
||||||
|
NTP is a standard protocol. There are many NTP daemons implementing it. We don't recommend a particular one. On the contrary, we recommend that different nodes in a cluster run different NTP daemons, so that a problem with one daemon won't affect all nodes.
|
||||||
|
|
||||||
|
Please see the [notes on NTP daemon setup](../appendices/ntp-notes.html) in the Appendices.
|
||||||
|
|
||||||
|
|
||||||
|
## Set Up Storage for MongoDB
|
||||||
|
|
||||||
|
We suggest you set up a separate storage device (partition, RAID array, or logical volume) to store the data in the MongoDB database. Here are some questions to ask:
|
||||||
|
|
||||||
|
* How easy will it be to add storage in the future? Will I have to shut down my server?
|
||||||
|
* How big can the storage get? (Remember that [RAID](https://en.wikipedia.org/wiki/RAID) can be used to make several physical drives look like one.)
|
||||||
|
* How fast can it read & write data? How many input/output operations per second (IOPS)?
|
||||||
|
* How does IOPS scale as more physical hard drives are added?
|
||||||
|
* What's the latency?
|
||||||
|
* What's the reliability? Is there replication?
|
||||||
|
* What's in the Service Level Agreement (SLA), if applicable?
|
||||||
|
* What's the cost?
|
||||||
|
|
||||||
|
There are many options and tradeoffs.
|
||||||
|
|
||||||
|
Consult the MongoDB documentation for its recommendations regarding storage hardware, software and settings, e.g. in the [MongoDB Production Notes](https://docs.mongodb.com/manual/administration/production-notes/).
|
||||||
|
|
||||||
|
|
||||||
|
## Install and Run MongoDB
|
||||||
|
|
||||||
|
* [Install MongoDB 3.4+](https://docs.mongodb.com/manual/installation/). (BigchainDB only works with MongoDB 3.4+.)
|
||||||
|
* [Run MongoDB (mongod)](https://docs.mongodb.com/manual/reference/program/mongod/)
|
||||||
|
|
||||||
|
|
||||||
|
## Install BigchainDB Server
|
||||||
|
|
||||||
|
### Install BigchainDB Server Dependencies
|
||||||
|
|
||||||
|
Before you can install BigchainDB Server, you must [install its OS-level dependencies](../appendices/install-os-level-deps.html) and you may have to [install Python 3.4+](https://www.python.org/downloads/).
|
||||||
|
|
||||||
|
### How to Install BigchainDB Server with pip
|
||||||
|
|
||||||
|
BigchainDB is distributed as a Python package on PyPI so you can install it using `pip`. First, make sure you have an up-to-date Python 3.4+ version of `pip` installed:
|
||||||
|
```text
|
||||||
|
pip -V
|
||||||
|
```
|
||||||
|
|
||||||
|
If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.4, then you must install a `pip` version associated with Python 3.4+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).
|
||||||
|
|
||||||
|
On Ubuntu 16.04, we found that this works:
|
||||||
|
```text
|
||||||
|
sudo apt-get install python3-pip
|
||||||
|
```
|
||||||
|
|
||||||
|
That should install a Python 3 version of `pip` named `pip3`. If that didn't work, then another way to get `pip3` is to do `sudo apt-get install python3-setuptools` followed by `sudo easy_install3 pip`.
|
||||||
|
|
||||||
|
You can upgrade `pip` (`pip3`) and `setuptools` to the latest versions using:
|
||||||
|
```text
|
||||||
|
pip3 install --upgrade pip setuptools
|
||||||
|
pip3 -V
|
||||||
|
```
|
||||||
|
|
||||||
|
Now you can install BigchainDB Server using:
|
||||||
|
```text
|
||||||
|
pip3 install bigchaindb
|
||||||
|
```
|
||||||
|
|
||||||
|
(If you're not in a virtualenv and you want to install bigchaindb system-wide, then put `sudo` in front.)
|
||||||
|
|
||||||
|
Note: You can use `pip3` to upgrade the `bigchaindb` package to the latest version using `pip3 install --upgrade bigchaindb`.
|
||||||
|
|
||||||
|
|
||||||
|
### How to Install BigchainDB Server from Source
|
||||||
|
|
||||||
|
If you want to install BigchainDB from source because you want to use the very latest bleeding-edge code, clone the public repository:
|
||||||
|
```text
|
||||||
|
git clone git@github.com:bigchaindb/bigchaindb.git
|
||||||
|
cd bigchaindb
|
||||||
|
python setup.py install
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Configure BigchainDB Server
|
||||||
|
|
||||||
|
Start by creating a default BigchainDB config file for a MongoDB backend:
|
||||||
|
```text
|
||||||
|
bigchaindb -y configure mongodb
|
||||||
|
```
|
||||||
|
|
||||||
|
(Documentation for the `bigchaindb` command is in the section on [the BigchainDB Command Line Interface (CLI)](../server-reference/bigchaindb-cli.html).)
|
||||||
|
|
||||||
|
Edit the created config file by opening `$HOME/.bigchaindb` (the created config file) in your text editor:
|
||||||
|
|
||||||
|
* Change `"server": {"bind": "localhost:9984", ... }` to `"server": {"bind": "0.0.0.0:9984", ... }`. This makes it so traffic can come from any IP address to port 9984 (the HTTP Client-Server API port).
|
||||||
|
* Change `"keyring": []` to `"keyring": ["public_key_of_other_node_A", "public_key_of_other_node_B", "..."]` i.e. a list of the public keys of all the other nodes in the cluster. The keyring should _not_ include your node's public key.
|
||||||
|
* Ensure that `database.host` and `database.port` are set to the hostname and port of your MongoDB instance. (The port is usually 27017, unless you changed it.)
|
||||||
|
|
||||||
|
For more information about the BigchainDB config file, see the page about the [BigchainDB configuration settings](../server-reference/configuration.html).
|
||||||
|
|
||||||
|
|
||||||
|
## Get All Other Nodes to Update Their Keyring
|
||||||
|
|
||||||
|
All other BigchainDB nodes in the cluster must add your new node's public key to their BigchainDB keyring. Currently, the only way to get BigchainDB Server to "notice" a changed keyring is to shut it down and start it back up again (with the new keyring).
|
||||||
|
|
||||||
|
|
||||||
|
## Maybe Update the MongoDB Replica Set
|
||||||
|
|
||||||
|
**If this isn't the first node in the BigchainDB cluster**, then someone with an existing BigchainDB node (not you) must add your MongoDB instance to the MongoDB replica set. They can do so (on their node) using:
|
||||||
|
```text
|
||||||
|
bigchaindb add-replicas your-mongod-hostname:27017
|
||||||
|
```
|
||||||
|
|
||||||
|
where they must replace `your-mongod-hostname` with the actual hostname of your MongoDB instance, and they may have to replace `27017` with the actual port.
|
||||||
|
|
||||||
|
|
||||||
|
## Start BigchainDB
|
||||||
|
|
||||||
|
**Warning: If you're not deploying the first node in the BigchainDB cluster, then don't start BigchainDB before your MongoDB instance has been added to the MongoDB replica set (as outlined above).**
|
||||||
|
|
||||||
|
```text
|
||||||
|
# See warning above
|
||||||
|
bigchaindb start
|
||||||
|
```
|
@ -21,7 +21,6 @@ For convenience, here's a list of all the relevant environment variables (docume
|
|||||||
`BIGCHAINDB_SERVER_THREADS`<br>
|
`BIGCHAINDB_SERVER_THREADS`<br>
|
||||||
`BIGCHAINDB_CONFIG_PATH`<br>
|
`BIGCHAINDB_CONFIG_PATH`<br>
|
||||||
`BIGCHAINDB_BACKLOG_REASSIGN_DELAY`<br>
|
`BIGCHAINDB_BACKLOG_REASSIGN_DELAY`<br>
|
||||||
`BIGCHAINDB_CONSENSUS_PLUGIN`<br>
|
|
||||||
`BIGCHAINDB_LOG`<br>
|
`BIGCHAINDB_LOG`<br>
|
||||||
`BIGCHAINDB_LOG_FILE`<br>
|
`BIGCHAINDB_LOG_FILE`<br>
|
||||||
`BIGCHAINDB_LOG_LEVEL_CONSOLE`<br>
|
`BIGCHAINDB_LOG_LEVEL_CONSOLE`<br>
|
||||||
@ -169,21 +168,9 @@ export BIGCHAINDB_BACKLOG_REASSIGN_DELAY=30
|
|||||||
"backlog_reassign_delay": 120
|
"backlog_reassign_delay": 120
|
||||||
```
|
```
|
||||||
|
|
||||||
## consensus_plugin
|
|
||||||
|
|
||||||
The [consensus plugin](../appendices/consensus.html) to use.
|
|
||||||
|
|
||||||
**Example using an environment variable**
|
|
||||||
```text
|
|
||||||
export BIGCHAINDB_CONSENSUS_PLUGIN=default
|
|
||||||
```
|
|
||||||
|
|
||||||
**Example config file snippet: the default**
|
|
||||||
```js
|
|
||||||
"consensus_plugin": "default"
|
|
||||||
```
|
|
||||||
|
|
||||||
## log
|
## log
|
||||||
|
|
||||||
The `log` key is expected to point to a mapping (set of key/value pairs)
|
The `log` key is expected to point to a mapping (set of key/value pairs)
|
||||||
holding the logging configuration.
|
holding the logging configuration.
|
||||||
|
|
||||||
|
13
k8s/nginx-3scale/nginx-3scale-cm.yaml
Normal file
13
k8s/nginx-3scale/nginx-3scale-cm.yaml
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
############################################################################
|
||||||
|
# This YAML file describes a ConfigMap with a valid list of ':' separated #
|
||||||
|
# IP addresses (or 'all' for all IP addresses) that can connect to the #
|
||||||
|
# MongoDB instance. We only support the value 'all' currently. #
|
||||||
|
############################################################################
|
||||||
|
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: mongodb-whitelist
|
||||||
|
namespace: default
|
||||||
|
data:
|
||||||
|
allowed-hosts: "all"
|
96
k8s/nginx-3scale/nginx-3scale-dep.yaml
Normal file
96
k8s/nginx-3scale/nginx-3scale-dep.yaml
Normal file
@ -0,0 +1,96 @@
|
|||||||
|
###############################################################
|
||||||
|
# This config file runs nginx as a k8s deployment and exposes #
|
||||||
|
# it using an external load balancer. #
|
||||||
|
# This deployment is used as a front end to both BigchainDB #
|
||||||
|
# and MongoDB. #
|
||||||
|
###############################################################
|
||||||
|
|
||||||
|
apiVersion: extensions/v1beta1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: ngx-instance-0-dep
|
||||||
|
spec:
|
||||||
|
replicas: 1
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app: ngx-instance-0-dep
|
||||||
|
spec:
|
||||||
|
terminationGracePeriodSeconds: 10
|
||||||
|
containers:
|
||||||
|
- name: nginx-3scale
|
||||||
|
image: bigchaindb/nginx_3scale:0.1
|
||||||
|
# TODO(Krish): Change later to IfNotPresent
|
||||||
|
imagePullPolicy: Always
|
||||||
|
env:
|
||||||
|
- name: MONGODB_FRONTEND_PORT
|
||||||
|
value: $(NGX_INSTANCE_0_SERVICE_PORT_NGX_PUBLIC_MDB_PORT)
|
||||||
|
- name: MONGODB_BACKEND_HOST
|
||||||
|
value: mdb-instance-0.default.svc.cluster.local
|
||||||
|
- name: MONGODB_BACKEND_PORT
|
||||||
|
value: "27017"
|
||||||
|
- name: BIGCHAINDB_FRONTEND_PORT
|
||||||
|
value: $(NGX_INSTANCE_0_SERVICE_PORT_NGX_PUBLIC_BDB_PORT)
|
||||||
|
- name: BIGCHAINDB_BACKEND_HOST
|
||||||
|
value: bdb-instance-0.default.svc.cluster.local
|
||||||
|
- name: BIGCHAINDB_BACKEND_PORT
|
||||||
|
value: "9984"
|
||||||
|
- name: MONGODB_WHITELIST
|
||||||
|
valueFrom:
|
||||||
|
configMapKeyRef:
|
||||||
|
name: mongodb-whitelist
|
||||||
|
key: allowed-hosts
|
||||||
|
- name: DNS_SERVER
|
||||||
|
value: "10.0.0.10"
|
||||||
|
- name: NGINX_HEALTH_CHECK_PORT
|
||||||
|
value: "8888"
|
||||||
|
# TODO(Krish): use secrets for sensitive info
|
||||||
|
- name: THREESCALE_SECRET_TOKEN
|
||||||
|
value: "<Secret Token Here>"
|
||||||
|
- name: THREESCALE_SERVICE_ID
|
||||||
|
value: "<Service ID Here>"
|
||||||
|
- name: THREESCALE_VERSION_HEADER
|
||||||
|
value: "<Version Header Here>"
|
||||||
|
- name: THREESCALE_PROVIDER_KEY
|
||||||
|
value: "<Provider Key Here>"
|
||||||
|
- name: THREESCALE_FRONTEND_API_DNS_NAME
|
||||||
|
value: "<Frontend API FQDN Here>"
|
||||||
|
- name: THREESCALE_UPSTREAM_API_PORT
|
||||||
|
value: "<Upstream API Port Here>"
|
||||||
|
ports:
|
||||||
|
- containerPort: 27017
|
||||||
|
hostPort: 27017
|
||||||
|
name: public-mdb-port
|
||||||
|
protocol: TCP
|
||||||
|
- containerPort: 443
|
||||||
|
hostPort: 443
|
||||||
|
name: public-bdb-port
|
||||||
|
protocol: TCP
|
||||||
|
- containerPort: 8888
|
||||||
|
hostPort: 8888
|
||||||
|
name: health-check
|
||||||
|
protocol: TCP
|
||||||
|
- containerPort: 8080
|
||||||
|
hostPort: 8080
|
||||||
|
name: public-api-port
|
||||||
|
protocol: TCP
|
||||||
|
volumeMounts:
|
||||||
|
- name: https
|
||||||
|
mountPath: /usr/local/openresty/nginx/conf/ssl/
|
||||||
|
readOnly: true
|
||||||
|
resources:
|
||||||
|
limits:
|
||||||
|
cpu: 200m
|
||||||
|
memory: 768Mi
|
||||||
|
livenessProbe:
|
||||||
|
httpGet:
|
||||||
|
path: /
|
||||||
|
port: 8888
|
||||||
|
initialDelaySeconds: 15
|
||||||
|
timeoutSeconds: 10
|
||||||
|
restartPolicy: Always
|
||||||
|
volumes:
|
||||||
|
- name: https
|
||||||
|
secret:
|
||||||
|
secretName: certs
|
||||||
|
defaultMode: 0400
|
13
k8s/nginx-3scale/nginx-3scale-secret.yaml
Normal file
13
k8s/nginx-3scale/nginx-3scale-secret.yaml
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
# Certificate data should be base64 encoded before embedding them here by using
|
||||||
|
# `cat cert.pem | base64 -w 0 > cert.pem.b64` and then copy the resulting
|
||||||
|
# value here. Same goes for cert.key.
|
||||||
|
# Ref: https://kubernetes.io/docs/concepts/configuration/secret/
|
||||||
|
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Secret
|
||||||
|
metadata:
|
||||||
|
name: certs
|
||||||
|
type: Opaque
|
||||||
|
data:
|
||||||
|
cert.pem: <certificate data here>
|
||||||
|
cert.key: <key data here>
|
29
k8s/nginx-3scale/nginx-3scale-svc.yaml
Normal file
29
k8s/nginx-3scale/nginx-3scale-svc.yaml
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: ngx-instance-0
|
||||||
|
namespace: default
|
||||||
|
labels:
|
||||||
|
name: ngx-instance-0
|
||||||
|
annotations:
|
||||||
|
# NOTE: the following annotation is a beta feature and
|
||||||
|
# only available in GCE/GKE and Azure as of now
|
||||||
|
# Ref: https://kubernetes.io/docs/tutorials/services/source-ip/
|
||||||
|
service.beta.kubernetes.io/external-traffic: OnlyLocal
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
app: ngx-instance-0-dep
|
||||||
|
ports:
|
||||||
|
- port: 443
|
||||||
|
targetPort: 443
|
||||||
|
name: ngx-public-bdb-port
|
||||||
|
protocol: TCP
|
||||||
|
- port: 8080
|
||||||
|
targetPort: 8080
|
||||||
|
name: ngx-public-3scale-port
|
||||||
|
protocol: TCP
|
||||||
|
- port: 27017
|
||||||
|
targetPort: 27017
|
||||||
|
name: ngx-public-mdb-port
|
||||||
|
protocol: TCP
|
||||||
|
type: LoadBalancer
|
@ -1,7 +1,9 @@
|
|||||||
import pytest
|
import pytest
|
||||||
|
from unittest.mock import patch
|
||||||
from collections import Counter
|
from collections import Counter
|
||||||
|
|
||||||
from bigchaindb.core import Bigchain
|
from bigchaindb.core import Bigchain
|
||||||
|
from bigchaindb.exceptions import CriticalDuplicateVote
|
||||||
from bigchaindb.voting import Voting, INVALID, VALID, UNDECIDED
|
from bigchaindb.voting import Voting, INVALID, VALID, UNDECIDED
|
||||||
|
|
||||||
|
|
||||||
@ -37,24 +39,22 @@ def test_count_votes():
|
|||||||
def verify_vote_schema(cls, vote):
|
def verify_vote_schema(cls, vote):
|
||||||
return vote['node_pubkey'] != 'malformed'
|
return vote['node_pubkey'] != 'malformed'
|
||||||
|
|
||||||
voters = (['cheat', 'cheat', 'says invalid', 'malformed'] +
|
voters = (['says invalid', 'malformed'] +
|
||||||
['kosher' + str(i) for i in range(10)])
|
['kosher' + str(i) for i in range(10)])
|
||||||
|
|
||||||
votes = [Bigchain(v).vote('block', 'a', True) for v in voters]
|
votes = [Bigchain(v).vote('block', 'a', True) for v in voters]
|
||||||
votes[2]['vote']['is_block_valid'] = False
|
votes[0]['vote']['is_block_valid'] = False
|
||||||
# Incorrect previous block subtracts from n_valid and adds to n_invalid
|
# Incorrect previous block subtracts from n_valid and adds to n_invalid
|
||||||
votes[-1]['vote']['previous_block'] = 'z'
|
votes[-1]['vote']['previous_block'] = 'z'
|
||||||
|
|
||||||
assert TestVoting.count_votes(votes) == {
|
by_voter = dict(enumerate(votes))
|
||||||
|
|
||||||
|
assert TestVoting.count_votes(by_voter) == {
|
||||||
'counts': {
|
'counts': {
|
||||||
'n_valid': 9, # 9 kosher votes
|
'n_valid': 9, # 9 kosher votes
|
||||||
'n_invalid': 4, # 1 cheat, 1 invalid, 1 malformed, 1 rogue prev block
|
'n_invalid': 3, # 1 invalid, 1 malformed, 1 rogue prev block
|
||||||
# One of the cheat votes counts towards n_invalid, the other is
|
|
||||||
# not counted here.
|
|
||||||
# len(cheat) + n_valid + n_invalid == len(votes)
|
|
||||||
},
|
},
|
||||||
'cheat': [votes[:2]],
|
'malformed': [votes[1]],
|
||||||
'malformed': [votes[3]],
|
|
||||||
'previous_block': 'a',
|
'previous_block': 'a',
|
||||||
'other_previous_block': {'z': 1},
|
'other_previous_block': {'z': 1},
|
||||||
}
|
}
|
||||||
@ -70,7 +70,8 @@ def test_must_agree_prev_block():
|
|||||||
votes = [Bigchain(v).vote('block', 'a', True) for v in voters]
|
votes = [Bigchain(v).vote('block', 'a', True) for v in voters]
|
||||||
votes[0]['vote']['previous_block'] = 'b'
|
votes[0]['vote']['previous_block'] = 'b'
|
||||||
votes[1]['vote']['previous_block'] = 'c'
|
votes[1]['vote']['previous_block'] = 'c'
|
||||||
assert TestVoting.count_votes(votes) == {
|
by_voter = dict(enumerate(votes))
|
||||||
|
assert TestVoting.count_votes(by_voter) == {
|
||||||
'counts': {
|
'counts': {
|
||||||
'n_valid': 2,
|
'n_valid': 2,
|
||||||
'n_invalid': 2,
|
'n_invalid': 2,
|
||||||
@ -78,7 +79,6 @@ def test_must_agree_prev_block():
|
|||||||
'previous_block': 'a',
|
'previous_block': 'a',
|
||||||
'other_previous_block': {'b': 1, 'c': 1},
|
'other_previous_block': {'b': 1, 'c': 1},
|
||||||
'malformed': [],
|
'malformed': [],
|
||||||
'cheat': [],
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -230,8 +230,19 @@ def test_block_election(b):
|
|||||||
'block_id': 'xyz',
|
'block_id': 'xyz',
|
||||||
'counts': {'n_valid': 2, 'n_invalid': 0},
|
'counts': {'n_valid': 2, 'n_invalid': 0},
|
||||||
'ineligible': [votes[-1]],
|
'ineligible': [votes[-1]],
|
||||||
'cheat': [],
|
|
||||||
'malformed': [],
|
'malformed': [],
|
||||||
'previous_block': 'a',
|
'previous_block': 'a',
|
||||||
'other_previous_block': {},
|
'other_previous_block': {},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@patch('bigchaindb.voting.Voting.verify_vote_signature', return_value=True)
|
||||||
|
def test_duplicate_vote_throws_critical_error(b):
|
||||||
|
keyring = 'abc'
|
||||||
|
block = {'id': 'xyz', 'block': {'voters': 'ab'}}
|
||||||
|
votes = [{
|
||||||
|
'node_pubkey': c,
|
||||||
|
'vote': {'is_block_valid': True, 'previous_block': 'a'}
|
||||||
|
} for c in 'aabc']
|
||||||
|
with pytest.raises(CriticalDuplicateVote):
|
||||||
|
Voting.block_election(block, votes, keyring)
|
||||||
|
Loading…
x
Reference in New Issue
Block a user