Mirror of https://github.com/bigchaindb/bigchaindb.git (synced 2024-10-13 13:34:05 +00:00)

Merge remote-tracking branch 'origin/master' into update-changelog-for-0.10.0
Commit b691d7da45

.gitignore (vendored), 2 lines changed
@@ -78,7 +78,7 @@ ntools/one-m/ansible/ansible.cfg
# Just in time documentation
docs/server/source/schema
docs/server/source/drivers-clients/samples
docs/server/source/http-samples

# Terraform state files
# See https://stackoverflow.com/a/41482391
@@ -145,6 +145,13 @@ Once you accept and submit the CLA, we'll email you with further instructions. (

Someone will then merge your branch or suggest changes. If we suggest changes, you won't have to open a new pull request, you can just push new code to the same branch (on `origin`) as you did before creating the pull request.

### Tip: Upgrading All BigchainDB Dependencies

Over time, your versions of the Python packages used by BigchainDB will get out of date. You can upgrade them using:
```text
pip install --upgrade -e .[dev]
```

## Quick Links

* [BigchainDB Community links](https://www.bigchaindb.com/community)
@@ -59,6 +59,10 @@ config = {
        'workers': None,  # if none, the value will be cpu_count * 2 + 1
        'threads': None,  # if none, the value will be cpu_count * 2 + 1
    },
    'wsserver': {
        'host': os.environ.get('BIGCHAINDB_WSSERVER_HOST') or 'localhost',
        'port': int(os.environ.get('BIGCHAINDB_WSSERVER_PORT', 9985)),
    },
    'database': _database_map[
        os.environ.get('BIGCHAINDB_DATABASE_BACKEND', 'rethinkdb')
    ],
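As the snippet shows, the new WebSocket settings can also come from the environment. A hedged illustration (the host/port values here are just examples, not defaults from the diff):

```text
export BIGCHAINDB_WSSERVER_HOST=0.0.0.0
export BIGCHAINDB_WSSERVER_PORT=9985
bigchaindb start
```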
@@ -96,6 +96,10 @@ def run_configure(args, skip_if_exists=False):
        val = conf['server'][key]
        conf['server'][key] = input_on_stderr('API Server {}? (default `{}`): '.format(key, val), val)

    for key in ('host', 'port'):
        val = conf['wsserver'][key]
        conf['wsserver'][key] = input_on_stderr('WebSocket Server {}? (default `{}`): '.format(key, val), val)

    for key in database_keys:
        val = conf['database'][key]
        conf['database'][key] = input_on_stderr('Database {}? (default `{}`): '.format(key, val), val)
bigchaindb/events.py (new file, 33 lines)

@@ -0,0 +1,33 @@
from enum import Enum
from multiprocessing import Queue


class EventTypes(Enum):
    BLOCK_VALID = 1
    BLOCK_INVALID = 2


class Event:

    def __init__(self, event_type, event_data):
        self.type = event_type
        self.data = event_data


class EventHandler:

    def __init__(self, events_queue):
        self.events_queue = events_queue

    def put_event(self, event, timeout=None):
        # TODO: handle timeouts
        self.events_queue.put(event, timeout=None)

    def get_event(self, timeout=None):
        # TODO: handle timeouts
        return self.events_queue.get(timeout=None)


def setup_events_queue():
    # TODO: set bounds to the queue
    return Queue()
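As wired up later in this changeset (see the `processes.py` and `election.py` hunks below), one process produces events and another consumes them through this shared queue. A minimal sketch of that flow, with a hypothetical event payload; note that `put_event`/`get_event` as written pass `timeout=None` through rather than forwarding the `timeout` argument, so timeouts are effectively ignored for now (hence the TODOs):

```python
from bigchaindb.events import (Event, EventTypes, EventHandler,
                               setup_events_queue)

# One queue, created once and shared between producer and consumer processes.
events_queue = setup_events_queue()

# Producer side (e.g. the election pipeline): publish a "block valid" event.
producer = EventHandler(events_queue)
producer.put_event(Event(EventTypes.BLOCK_VALID, {'id': 'some-block-id'}))

# Consumer side (e.g. the WebSocket server): block until an event arrives.
consumer = EventHandler(events_queue)
event = consumer.get_event()
print(event.type, event.data)
```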
@@ -8,3 +8,7 @@ class CriticalDoubleSpend(BigchainDBError):

class CriticalDoubleInclusion(BigchainDBError):
    """Data integrity error that requires attention"""


class CriticalDuplicateVote(BigchainDBError):
    """Data integrity error that requires attention"""
@@ -41,9 +41,11 @@ SUBSCRIBER_LOGGING_CONFIG = {
        'level': logging.INFO,
    },
    'file': {
        'class': 'logging.FileHandler',
        'class': 'logging.handlers.RotatingFileHandler',
        'filename': join(DEFAULT_LOG_DIR, 'bigchaindb.log'),
        'mode': 'w',
        'maxBytes': 209715200,
        'backupCount': 5,
        'formatter': 'file',
        'level': logging.INFO,
    },
@@ -13,6 +13,7 @@ from bigchaindb import backend
from bigchaindb.backend.changefeed import ChangeFeed
from bigchaindb.models import Block
from bigchaindb import Bigchain
from bigchaindb.events import EventHandler, Event, EventTypes


logger = logging.getLogger(__name__)

@@ -22,8 +23,11 @@ logger_results = logging.getLogger('pipeline.election.results')
class Election:
    """Election class."""

    def __init__(self):
    def __init__(self, events_queue=None):
        self.bigchain = Bigchain()
        self.event_handler = None
        if events_queue:
            self.event_handler = EventHandler(events_queue)

    def check_for_quorum(self, next_vote):
        """

@@ -42,6 +46,7 @@ class Election:
        next_block = self.bigchain.get_block(block_id)

        result = self.bigchain.block_election(next_block)
        self.handle_block_events(result, block_id)
        if result['status'] == self.bigchain.BLOCK_INVALID:
            return Block.from_dict(next_block)

@@ -67,9 +72,21 @@ class Election:
        self.bigchain.write_transaction(tx)
        return invalid_block

    def handle_block_events(self, result, block_id):
        if self.event_handler:
            if result['status'] == self.bigchain.BLOCK_UNDECIDED:
                return
            elif result['status'] == self.bigchain.BLOCK_INVALID:
                event_type = EventTypes.BLOCK_INVALID
            elif result['status'] == self.bigchain.BLOCK_VALID:
                event_type = EventTypes.BLOCK_VALID

            event = Event(event_type, self.bigchain.get_block(block_id))
            self.event_handler.put_event(event)


def create_pipeline():
    election = Election()
def create_pipeline(events_queue=None):
    election = Election(events_queue=events_queue)

    election_pipeline = Pipeline([
        Node(election.check_for_quorum),

@@ -84,8 +101,8 @@ def get_changefeed():
    return backend.get_changefeed(connection, 'votes', ChangeFeed.INSERT)


def start():
    pipeline = create_pipeline()
def start(events_queue=None):
    pipeline = create_pipeline(events_queue=events_queue)
    pipeline.setup(indata=get_changefeed())
    pipeline.start()
    return pipeline
@@ -3,7 +3,8 @@ import multiprocessing as mp

import bigchaindb
from bigchaindb.pipelines import vote, block, election, stale
from bigchaindb.web import server
from bigchaindb.events import setup_events_queue
from bigchaindb.web import server, websocket_server


logger = logging.getLogger(__name__)

@@ -25,6 +26,13 @@ BANNER = """
def start():
    logger.info('Initializing BigchainDB...')

    # Create the events queue
    # The events queue needs to be initialized once and shared between
    # processes. This seems the best way to do it
    # At this point only the election processs and the event consumer require
    # this queue.
    events_queue = setup_events_queue()

    # start the processes
    logger.info('Starting block')
    block.start()

@@ -36,12 +44,18 @@ def start():
    stale.start()

    logger.info('Starting election')
    election.start()
    election.start(events_queue=events_queue)

    # start the web api
    app_server = server.create_server(bigchaindb.config['server'])
    p_webapi = mp.Process(name='webapi', target=app_server.run)
    p_webapi.start()

    logger.info('WebSocket server started')
    p_websocket_server = mp.Process(name='ws',
                                    target=websocket_server.start,
                                    args=(events_queue,))
    p_websocket_server.start()

    # start message
    logger.info(BANNER.format(bigchaindb.config['server']['bind']))
@@ -1,6 +1,7 @@
import collections

from bigchaindb.common.schema import SchemaValidationError, validate_vote_schema
from bigchaindb.exceptions import CriticalDuplicateVote
from bigchaindb.common.utils import serialize
from bigchaindb.common.crypto import PublicKey

@@ -33,7 +34,8 @@ class Voting:
        n_voters = len(eligible_voters)
        eligible_votes, ineligible_votes = \
            cls.partition_eligible_votes(votes, eligible_voters)
        results = cls.count_votes(eligible_votes)
        by_voter = cls.dedupe_by_voter(eligible_votes)
        results = cls.count_votes(by_voter)
        results['block_id'] = block['id']
        results['status'] = cls.decide_votes(n_voters, **results['counts'])
        results['ineligible'] = ineligible_votes

@@ -60,38 +62,29 @@ class Voting:
        return eligible, ineligible

    @classmethod
    def count_votes(cls, eligible_votes):
    def dedupe_by_voter(cls, eligible_votes):
        """
        Throw a critical error if there is a duplicate vote
        """
        by_voter = {}
        for vote in eligible_votes:
            pubkey = vote['node_pubkey']
            if pubkey in by_voter:
                raise CriticalDuplicateVote(pubkey)
            by_voter[pubkey] = vote
        return by_voter

    @classmethod
    def count_votes(cls, by_voter):
        """
        Given a list of eligible votes, (votes from known nodes that are listed
        as voters), produce the number that say valid and the number that say
        invalid.

        * Detect if there are multiple votes from a single node and return them
          in a separate "cheat" dictionary.
        * Votes must agree on previous block, otherwise they become invalid.

        note:
            The sum of votes returned by this function does not necessarily
            equal the length of the list of votes fed in. It may differ for
            example if there are found to be multiple votes submitted by a
            single voter.
        invalid. Votes must agree on previous block, otherwise they become invalid.
        """
        prev_blocks = collections.Counter()
        cheat = []
        malformed = []

        # Group by pubkey to detect duplicate voting
        by_voter = collections.defaultdict(list)
        for vote in eligible_votes:
            by_voter[vote['node_pubkey']].append(vote)

        for pubkey, votes in by_voter.items():
            if len(votes) > 1:
                cheat.append(votes)
                continue

            vote = votes[0]

        for vote in by_voter.values():
            if not cls.verify_vote_schema(vote):
                malformed.append(vote)
                continue

@@ -111,7 +104,6 @@ class Voting:
                'n_valid': n_valid,
                'n_invalid': len(by_voter) - n_valid,
            },
            'cheat': cheat,
            'malformed': malformed,
            'previous_block': prev_block,
            'other_previous_block': dict(prev_blocks),
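A quick hedged illustration of the new behaviour (the vote dictionaries are simplified, hypothetical ones; only `node_pubkey` matters here): a second vote from the same public key now raises `CriticalDuplicateVote` during deduplication instead of being tallied in a "cheat" list.

```python
from bigchaindb.voting import Voting
from bigchaindb.exceptions import CriticalDuplicateVote

votes = [{'node_pubkey': 'pubkey-A', 'vote': {'is_block_valid': True}},
         {'node_pubkey': 'pubkey-A', 'vote': {'is_block_valid': False}}]

try:
    Voting.dedupe_by_voter(votes)
except CriticalDuplicateVote as exc:
    # The offending public key is carried in the exception.
    print('duplicate vote from', exc)
```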
@@ -5,6 +5,9 @@ import logging

from flask import jsonify, request

from bigchaindb import config


logger = logging.getLogger(__name__)


@@ -21,3 +24,8 @@ def make_error(status_code, message=None):
def base_url():
    return '%s://%s/' % (request.environ['wsgi.url_scheme'],
                         request.environ['HTTP_HOST'])


def base_ws_uri():
    """Base websocket uri."""
    return 'ws://{host}:{port}'.format(**config['wsserver'])
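With the default `wsserver` settings added earlier in this changeset (`host: 'localhost'`, `port: 9985`), `base_ws_uri()` would evaluate roughly as follows (a sketch, not part of the diff):

```python
>>> from bigchaindb.web.views.base import base_ws_uri
>>> base_ws_uri()
'ws://localhost:9985'
```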
@@ -1,8 +1,6 @@
"""This module provides the blueprint for the blocks API endpoints.

For more information please refer to the documentation on ReadTheDocs:
 - https://docs.bigchaindb.com/projects/server/en/latest/drivers-clients/
   http-client-server-api.html
For more information please refer to the documentation: http://bigchaindb.com/http-api
"""
from flask import current_app
from flask_restful import Resource, reqparse
@@ -4,8 +4,9 @@ import flask
from flask_restful import Resource

import bigchaindb
from bigchaindb.web.views.base import base_url
from bigchaindb.web.views.base import base_url, base_ws_uri
from bigchaindb import version
from bigchaindb.web.websocket_server import EVENTS_ENDPOINT


class RootIndex(Resource):

@@ -30,16 +31,18 @@ class RootIndex(Resource):
class ApiV1Index(Resource):
    def get(self):
        api_root = base_url() + 'api/v1/'
        websocket_root = base_ws_uri() + EVENTS_ENDPOINT
        docs_url = [
            'https://docs.bigchaindb.com/projects/server/en/v',
            version.__version__,
            '/drivers-clients/http-client-server-api.html',
            '/http-client-server-api.html',
        ]
        return {
        return flask.jsonify({
            '_links': {
                'docs': ''.join(docs_url),
                'self': api_root,
                'statuses': api_root + 'statuses/',
                'transactions': api_root + 'transactions/',
                'streams_v1': websocket_root,
            },
        }
        })
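Putting the pieces together, a GET on `/api/v1/` served from a default local configuration would now return a document along these lines (illustrative values; the exact docs URL depends on the running version):

```text
{
  "_links": {
    "docs": "https://docs.bigchaindb.com/projects/server/en/v0.10.0/http-client-server-api.html",
    "self": "http://localhost:9984/api/v1/",
    "statuses": "http://localhost:9984/api/v1/statuses/",
    "transactions": "http://localhost:9984/api/v1/transactions/",
    "streams_v1": "ws://localhost:9985/api/v1/streams/valid_tx"
  }
}
```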
@@ -1,8 +1,6 @@
"""This module provides the blueprint for the statuses API endpoints.

For more information please refer to the documentation on ReadTheDocs:
 - https://docs.bigchaindb.com/projects/server/en/latest/drivers-clients/
   http-client-server-api.html
For more information please refer to the documentation: http://bigchaindb.com/http-api
"""
from flask import current_app
from flask_restful import Resource, reqparse

@@ -1,8 +1,6 @@
"""This module provides the blueprint for some basic API endpoints.

For more information please refer to the documentation on ReadTheDocs:
 - https://docs.bigchaindb.com/projects/server/en/latest/drivers-clients/
   http-client-server-api.html
For more information please refer to the documentation: http://bigchaindb.com/http-api
"""
import logging


@@ -1,8 +1,6 @@
"""This module provides the blueprint for the votes API endpoints.

For more information please refer to the documentation on ReadTheDocs:
 - https://docs.bigchaindb.com/projects/server/en/latest/drivers-clients/
   http-client-server-api.html
For more information please refer to the documentation: http://bigchaindb.com/http-api
"""
from flask import current_app
from flask_restful import Resource, reqparse
bigchaindb/web/websocket_server.py (new file, 174 lines)

@@ -0,0 +1,174 @@
"""WebSocket server for the BigchainDB Event Stream API."""

# NOTE
#
# This module contains some functions and utilities that might belong to other
# modules. For now, I prefer to keep everything in this module. Why? Because
# those functions are needed only here.
#
# When we will extend this part of the project and we find that we need those
# functionalities elsewhere, we can start creating new modules and organizing
# things in a better way.


import json
import asyncio
import logging
import threading
from uuid import uuid4

import aiohttp
from aiohttp import web

from bigchaindb import config
from bigchaindb.events import EventTypes


logger = logging.getLogger(__name__)
POISON_PILL = 'POISON_PILL'
EVENTS_ENDPOINT = '/api/v1/streams/valid_tx'


def _put_into_capped_queue(queue, value):
    """Put a new item in a capped queue.

    If the queue reached its limit, get the first element
    ready and put the new one. Note that the first element
    will be lost (that's the purpose of a capped queue).

    Args:
        queue: a queue
        value: the value to put
    """
    while True:
        try:
            queue.put_nowait(value)
        except asyncio.QueueFull:
            queue.get_nowait()
        else:
            return


def _multiprocessing_to_asyncio(in_queue, out_queue, loop):
    """Bridge between a synchronous multiprocessing queue
    and an asynchronous asyncio queue.

    Args:
        in_queue (multiprocessing.Queue): input queue
        out_queue (asyncio.Queue): output queue
    """

    while True:
        value = in_queue.get()
        loop.call_soon_threadsafe(_put_into_capped_queue, out_queue, value)


class Dispatcher:
    """Dispatch events to websockets.

    This class implements a simple publish/subscribe pattern.
    """

    def __init__(self, event_source):
        """Create a new instance.

        Args:
            event_source: a source of events. Elements in the queue
                should be strings.
        """

        self.event_source = event_source
        self.subscribers = {}

    def subscribe(self, uuid, websocket):
        """Add a websocket to the list of subscribers.

        Args:
            uuid (str): a unique identifier for the websocket.
            websocket: the websocket to publish information.
        """

        self.subscribers[uuid] = websocket

    @asyncio.coroutine
    def publish(self):
        """Publish new events to the subscribers."""

        while True:
            event = yield from self.event_source.get()
            str_buffer = []

            if event == POISON_PILL:
                return

            if isinstance(event, str):
                str_buffer.append(event)

            elif event.type == EventTypes.BLOCK_VALID:
                block = event.data

                for tx in block['block']['transactions']:
                    asset_id = tx['id'] if tx['operation'] == 'CREATE' else tx['asset']['id']
                    data = {'block_id': block['id'],
                            'asset_id': asset_id,
                            'tx_id': tx['id']}
                    str_buffer.append(json.dumps(data))

            for _, websocket in self.subscribers.items():
                for str_item in str_buffer:
                    websocket.send_str(str_item)


@asyncio.coroutine
def websocket_handler(request):
    """Handle a new socket connection."""

    logger.debug('New websocket connection.')
    websocket = web.WebSocketResponse()
    yield from websocket.prepare(request)
    uuid = uuid4()
    request.app['dispatcher'].subscribe(uuid, websocket)

    while True:
        # Consume input buffer
        msg = yield from websocket.receive()
        if msg.type == aiohttp.WSMsgType.ERROR:
            logger.debug('Websocket exception: %s', websocket.exception())
            return


def init_app(event_source, *, loop=None):
    """Init the application server.

    Return:
        An aiohttp application.
    """

    dispatcher = Dispatcher(event_source)

    # Schedule the dispatcher
    loop.create_task(dispatcher.publish())

    app = web.Application(loop=loop)
    app['dispatcher'] = dispatcher
    app.router.add_get(EVENTS_ENDPOINT, websocket_handler)
    return app


def start(sync_event_source, loop=None):
    """Create and start the WebSocket server."""

    if not loop:
        loop = asyncio.get_event_loop()

    event_source = asyncio.Queue(maxsize=1024, loop=loop)

    bridge = threading.Thread(target=_multiprocessing_to_asyncio,
                              args=(sync_event_source, event_source, loop),
                              daemon=True)
    bridge.start()

    app = init_app(event_source, loop=loop)
    aiohttp.web.run_app(app,
                        host=config['wsserver']['host'],
                        port=config['wsserver']['port'])
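For context, a hedged sketch of a client consuming this stream with aiohttp, written in the same `yield from` style as the module above. The endpoint path and the default port come from `EVENTS_ENDPOINT` and the `wsserver` config in this changeset; everything else is illustrative:

```python
import asyncio

import aiohttp


@asyncio.coroutine
def listen():
    # ws://localhost:9985 is the default wsserver host/port in this changeset;
    # the path is EVENTS_ENDPOINT from bigchaindb.web.websocket_server.
    session = aiohttp.ClientSession()
    ws = yield from session.ws_connect('ws://localhost:9985/api/v1/streams/valid_tx')
    while True:
        msg = yield from ws.receive()
        if msg.type != aiohttp.WSMsgType.TEXT:
            break
        print(msg.data)  # JSON string with block_id, asset_id and tx_id


asyncio.get_event_loop().run_until_complete(listen())
```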
@@ -2,7 +2,7 @@ version: '2'

services:
  mdb:
    image: mongo:3.4.1
    image: mongo:3.4.3
    ports:
      - "27017"
    command: mongod --replSet=bigchain-rs

@@ -28,7 +28,7 @@ services:
      - /data
    command: "true"

  bdb:
  bdb-rdb:
    build:
      context: .
      dockerfile: Dockerfile-dev

@@ -50,7 +50,7 @@ services:
      - "9984"
    command: bigchaindb start

  bdb-mdb:
  bdb:
    build:
      context: .
      dockerfile: Dockerfile-dev
@@ -53,7 +53,7 @@ At a high level, one can communicate with a BigchainDB cluster (set of nodes) us
</style>

<div class="buttondiv">
<a class="button" href="http://docs.bigchaindb.com/projects/server/en/latest/drivers-clients/http-client-server-api.html">HTTP API Docs</a>
<a class="button" href="http://bigchaindb.com/http-api">HTTP API Docs</a>
</div>
<div class="buttondiv">
<a class="button" href="http://docs.bigchaindb.com/projects/py-driver/en/latest/index.html">Python Driver Docs</a>
@@ -1,21 +1,21 @@
# Terminology

There is some specialized terminology associated with BigchainDB. To get started, you should at least know what what we mean by a BigchainDB *node*, *cluster* and *consortium*.
There is some specialized terminology associated with BigchainDB. To get started, you should at least know the following:


## Node
## BigchainDB Node

A **BigchainDB node** is a machine or set of closely-linked machines running RethinkDB/MongoDB Server, BigchainDB Server, and related software. (A "machine" might be a bare-metal server, a virtual machine or a container.) Each node is controlled by one person or organization.
A **BigchainDB node** is a machine or set of closely-linked machines running RethinkDB/MongoDB Server, BigchainDB Server, and related software. Each node is controlled by one person or organization.


## Cluster
## BigchainDB Cluster

A set of BigchainDB nodes can connect to each other to form a **cluster**. Each node in the cluster runs the same software. A cluster contains one logical RethinkDB datastore. A cluster may have additional machines to do things such as cluster monitoring.
A set of BigchainDB nodes can connect to each other to form a **BigchainDB cluster**. Each node in the cluster runs the same software. A cluster contains one logical RethinkDB/MongoDB datastore. A cluster may have additional machines to do things such as cluster monitoring.


## Consortium
## BigchainDB Consortium

The people and organizations that run the nodes in a cluster belong to a **consortium** (i.e. another organization). A consortium must have some sort of governance structure to make decisions. If a cluster is run by a single company, then the "consortium" is just that company.
The people and organizations that run the nodes in a cluster belong to a **BigchainDB consortium** (i.e. another organization). A consortium must have some sort of governance structure to make decisions. If a cluster is run by a single company, then the "consortium" is just that company.

**What's the Difference Between a Cluster and a Consortium?**
@@ -269,7 +269,7 @@ def main():
    ctx['block_list'] = pretty_json(block_list)

    base_path = os.path.join(os.path.dirname(__file__),
                             'source/drivers-clients/samples')
                             'source/http-samples')
    if not os.path.exists(base_path):
        os.makedirs(base_path)


Binary image file not shown (before: 82 KiB, after: 38 KiB).
@@ -18,7 +18,7 @@ pip install awscli

## Create an AWS Access Key

The next thing you'll need is an AWS access key. If you don't have one, you can create one using the [instructions in the AWS documentation](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html). You should get an access key ID (e.g. AKIAIOSFODNN7EXAMPLE) and a secret access key (e.g. wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY).
The next thing you'll need is AWS access keys (access key ID and secret access key). If you don't have those, see [the AWS documentation about access keys](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).

You should also pick a default AWS region name (e.g. `eu-central-1`). That's where your cluster will run. The AWS documentation has [a list of them](http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region).
@@ -1,5 +0,0 @@
#########
Consensus
#########

.. automodule:: bigchaindb.consensus
docs/server/source/appendices/docker-on-mac.md (new file, 101 lines)

@@ -0,0 +1,101 @@
# Run BigchainDB with Docker On Mac

**NOT for Production Use**

Those developing on Mac can follow this document to run BigchainDB in docker
containers for a quick dev setup.
Running BigchainDB on Mac (Docker or otherwise) is not officially supported.

Support is very much limited as there are certain things that work differently
in Docker for Mac than Docker for other platforms.
Also, we do not use mac for our development and testing. :)

This page may not be up to date with various settings and docker updates at
all the times.

These steps work as of this writing (2017.Mar.09) and might break in the
future with updates to Docker for mac.
Community contribution to make BigchainDB run on Docker for Mac will always be
welcome.


## Prerequisite

Install Docker for Mac.

## (Optional) For a clean start

1. Stop all BigchainDB and RethinkDB/MongoDB containers.
2. Delete all BigchainDB docker images.
3. Delete the ~/bigchaindb_docker folder.


## Pull the images

Pull the bigchaindb and other required docker images from docker hub.

```text
docker pull bigchaindb/bigchaindb:master
docker pull [rethinkdb:2.3|mongo:3.4.1]
```

## Create the BigchainDB configuration file on Mac
```text
docker run \
  --rm \
  --volume $HOME/bigchaindb_docker:/data \
  bigchaindb/bigchaindb:master \
  -y configure \
  [mongodb|rethinkdb]
```

To ensure that BigchainDB connects to the backend database bound to the virtual
interface `172.17.0.1`, you must edit the BigchainDB configuration file
(`~/bigchaindb_docker/.bigchaindb`) and change database.host from `localhost`
to `172.17.0.1`.
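For illustration, the relevant part of `~/bigchaindb_docker/.bigchaindb` (a JSON file) would end up looking roughly like this; other keys are omitted and the exact contents depend on the backend chosen above:

```text
{
    "database": {
        "host": "172.17.0.1",
        ...
    }
}
```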
## Run the backend database on Mac

From v0.9 onwards, you can run RethinkDB or MongoDB.

We use the virtual interface created by the Docker daemon to allow
communication between the BigchainDB and database containers.
It has an IP address of 172.17.0.1 by default.

You can also use docker host networking or bind to your primary (eth)
interface, if needed.

### For RethinkDB backend
```text
docker run \
  --name=rethinkdb \
  --publish=28015:28015 \
  --publish=8080:8080 \
  --restart=always \
  --volume $HOME/bigchaindb_docker:/data \
  rethinkdb:2.3
```

### For MongoDB backend
```text
docker run \
  --name=mongodb \
  --publish=27017:27017 \
  --restart=always \
  --volume=$HOME/bigchaindb_docker/db:/data/db \
  --volume=$HOME/bigchaindb_docker/configdb:/data/configdb \
  mongo:3.4.1 --replSet=bigchain-rs
```

### Run BigchainDB on Mac
```text
docker run \
  --name=bigchaindb \
  --publish=9984:9984 \
  --restart=always \
  --volume=$HOME/bigchaindb_docker:/data \
  bigchaindb/bigchaindb \
  start
```
@@ -1,25 +0,0 @@
# Example RethinkDB Storage Setups

## Example Amazon EC2 Setups

We have some scripts for [deploying a _test_ BigchainDB cluster on AWS](../clusters-feds/aws-testing-cluster.html). Those scripts include command sequences to set up storage for RethinkDB.
In particular, look in the file [/deploy-cluster-aws/fabfile.py](https://github.com/bigchaindb/bigchaindb/blob/master/deploy-cluster-aws/fabfile.py), under `def prep_rethinkdb_storage(USING_EBS)`. Note that there are two cases:

1. **Using EBS ([Amazon Elastic Block Store](https://aws.amazon.com/ebs/)).** This is always an option, and for some instance types ("EBS-only"), it's the only option.
2. **Using an "instance store" volume provided with an Amazon EC2 instance.** Note that our scripts only use one of the (possibly many) volumes in the instance store.

There's some explanation of the steps in the [Amazon EC2 documentation about making an Amazon EBS volume available for use](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html).

You shouldn't use an EC2 "instance store" to store RethinkDB data for a production node, because it's not replicated and it's only intended for temporary, ephemeral data. If the associated instance crashes, is stopped, or is terminated, the data in the instance store is lost forever. Amazon EBS storage is replicated, has incremental snapshots, and is low-latency.


## Example Using Amazon EFS

TODO


## Other Examples?

TODO

Maybe RAID, ZFS, ... (over EBS volumes, i.e. a DIY Amazon EFS)
@@ -8,9 +8,10 @@ This is a page of notes on the ports potentially used by BigchainDB nodes and th
Assuming you aren't exposing the RethinkDB web interface on port 8080 (or any other port, because [there are more secure ways to access it](https://www.rethinkdb.com/docs/security/#binding-the-web-interface-port)), there are only three ports that should expect unsolicited inbound traffic:

1. **Port 22** can expect inbound SSH (TCP) traffic from the node administrator (i.e. a small set of IP addresses).
2. **Port 9984** can expect inbound HTTP (TCP) traffic from BigchainDB clients sending transactions to the BigchainDB HTTP API.
3. If you're using RethinkDB, **Port 29015** can expect inbound TCP traffic from other RethinkDB nodes in the RethinkDB cluster (for RethinkDB intracluster communications).
4. If you're using MongoDB, **Port 27017** can expect inbound TCP traffic from other nodes.
1. **Port 9984** can expect inbound HTTP (TCP) traffic from BigchainDB clients sending transactions to the BigchainDB HTTP API.
1. **Port 9985** can expect inbound WebSocket traffic from BigchainDB clients.
1. If you're using RethinkDB, **Port 29015** can expect inbound TCP traffic from other RethinkDB nodes in the RethinkDB cluster (for RethinkDB intracluster communications).
1. If you're using MongoDB, **Port 27017** can expect inbound TCP traffic from other nodes.

All other ports should only get inbound traffic in response to specific requests from inside the node.

@@ -59,6 +60,11 @@ If Gunicorn and the reverse proxy are running on the same server, then you'll ha
You may want to have Gunicorn and the reverse proxy running on different servers, so that both can listen on port 9984. That would also help isolate the effects of a denial-of-service attack.


## Port 9985

Port 9985 is the default port for the [BigchainDB WebSocket Event Stream API](../websocket-event-stream-api.html).


## Port 28015

Port 28015 is the default port used by RethinkDB client driver connections (TCP). If your BigchainDB node is just one server, then Port 28015 only needs to listen on localhost, because all the client drivers will be running on localhost. Port 28015 doesn't need to accept inbound traffic from the outside world.
@@ -10,10 +10,10 @@ Appendices
   install-os-level-deps
   install-latest-pip
   run-with-docker
   docker-on-mac
   json-serialization
   cryptography
   the-Bigchain-class
   consensus
   pipelines
   backend
   commands

@@ -21,6 +21,7 @@ Appendices
   generate-key-pair-for-ssh
   firewall-notes
   ntp-notes
   example-rethinkdb-storage-setups
   rethinkdb-reqs
   rethinkdb-backup
   licenses
   install-with-lxd
@@ -24,7 +24,7 @@ deserialize(serialize(data)) == data
True
```

Since BigchainDB performs a lot of serialization we decided to use [python-rapidjson](https://github.com/kenrobbins/python-rapidjson)
Since BigchainDB performs a lot of serialization we decided to use [python-rapidjson](https://github.com/python-rapidjson/python-rapidjson)
which is a python wrapper for [rapidjson](https://github.com/miloyip/rapidjson) a fast and fully RFC complient JSON parser.

```python
@@ -1,6 +1,6 @@
# Backing Up & Restoring Data
# Backing Up and Restoring Data

There are several ways to backup and restore the data in a BigchainDB cluster.
This page was written when BigchainDB only worked with RethinkDB, so its focus is on RethinkDB-based backup. BigchainDB now supports MongoDB as a backend database and we recommend that you use MongoDB in production. Nevertheless, some of the following backup ideas are still relevant regardless of the backend database being used, so we moved this page to the Appendices.


## RethinkDB's Replication as a form of Backup
@@ -1,20 +1,8 @@
# Production Node Requirements
# RethinkDB Requirements

Note: This section will be broken apart into several pages, e.g. NTP requirements, RethinkDB requirements, BigchainDB requirements, etc. and those pages will add more details.
[The RethinkDB documentation](https://rethinkdb.com/docs/) should be your first source of information about its requirements. This page serves mostly to document some of its more obscure requirements.


## OS Requirements

* RethinkDB Server [will run on any modern OS](https://www.rethinkdb.com/docs/install/). Note that the Fedora package isn't officially supported. Also, official support for Windows is fairly recent ([April 2016](https://rethinkdb.com/blog/2.3-release/)).
* BigchainDB Server requires Python 3.4+ and Python 3.4+ [will run on any modern OS](https://docs.python.org/3.4/using/index.html).
* BigchaindB Server uses the Python `multiprocessing` package and [some functionality in the `multiprocessing` package doesn't work on OS X](https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.qsize). You can still use Mac OS X if you use Docker or a virtual machine.

The BigchainDB core dev team uses recent LTS versions of Ubuntu and recent versions of Fedora.

We don't test BigchainDB on Windows or Mac OS X, but you can try.

* If you run into problems on Windows, then you may want to try using Vagrant. One of our community members ([@Mec-Is](https://github.com/Mec-iS)) wrote [a page about how to install BigchainDB on a VM with Vagrant](https://gist.github.com/Mec-iS/b84758397f1b21f21700).
* If you have Mac OS X and want to experiment with BigchainDB, then you could do that [using Docker](../appendices/run-with-docker.html).
RethinkDB Server [will run on any modern OS](https://www.rethinkdb.com/docs/install/). Note that the Fedora package isn't officially supported. Also, official support for Windows is fairly recent ([April 2016](https://rethinkdb.com/blog/2.3-release/)).


## Storage Requirements

@@ -28,6 +16,20 @@ For RethinkDB's failover mechanisms to work, [every RethinkDB table must have at

As for the read & write rates, what do you expect those to be for your situation? It's not enough for the storage system alone to handle those rates: the interconnects between the nodes must also be able to handle them.

**Storage Notes Specific to RethinkDB**

* The RethinkDB storage engine has a number of SSD optimizations, so you _can_ benefit from using SSDs. ([source](https://www.rethinkdb.com/docs/architecture/))

* If you have an N-node RethinkDB cluster and 1) you want to use it to store an amount of data D (unique records, before replication), 2) you want the replication factor to be R (all tables), and 3) you want N shards (all tables), then each BigchainDB node must have storage space of at least R×D/N.
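  To make that concrete (an illustrative calculation, not from the original page): with D = 10 TB of unique data, a replication factor R = 3 and N = 5 nodes, each node would need at least 3×10/5 = 6 TB of storage.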
* RethinkDB tables can have [at most 64 shards](https://rethinkdb.com/limitations/). What does that imply? Suppose you only have one table, with 64 shards. How big could that table be? It depends on how much data can be stored in each node. If the maximum amount of data that a node can store is d, then the biggest-possible shard is d, and the biggest-possible table size is 64 times that. (All shard replicas would have to be stored on other nodes beyond the initial 64.) If there are two tables, the second table could also have 64 shards, stored on 64 other maxed-out nodes, so the total amount of unique data in the database would be (64 shards/table)×(2 tables)×d. In general, if you have T tables, the maximum amount of unique data that can be stored in the database (i.e. the amount of data before replication) is 64×T×d.

* When you set up storage for your RethinkDB data, you may have to select a filesystem. (Sometimes, the filesystem is already decided by the choice of storage.) We recommend using a filesystem that supports direct I/O (Input/Output). Many compressed or encrypted file systems don't support direct I/O. The ext4 filesystem supports direct I/O (but be careful: if you enable the data=journal mode, then direct I/O support will be disabled; the default is data=ordered). If your chosen filesystem supports direct I/O and you're using Linux, then you don't need to do anything to request or enable direct I/O. RethinkDB does that.

<p style="background-color: lightgrey;">What is direct I/O? It allows RethinkDB to write directly to the storage device (or use its own in-memory caching mechanisms), rather than relying on the operating system's file read and write caching mechanisms. (If you're using Linux, a write-to-file normally writes to the in-memory Page Cache first; only later does that Page Cache get flushed to disk. The Page Cache is also used when reading files.)</p>

* RethinkDB stores its data in a specific directory. You can tell RethinkDB _which_ directory using the RethinkDB config file, as explained below. In this documentation, we assume the directory is `/data`. If you set up a separate device (partition, RAID array, or logical volume) to store the RethinkDB data, then mount that device on `/data`.


## Memory (RAM) Requirements
@@ -25,7 +25,7 @@ docker run \
  --interactive \
  --rm \
  --tty \
  --volume "$HOME/bigchaindb_docker:/data" \
  --volume $HOME/bigchaindb_docker:/data \
  bigchaindb/bigchaindb \
  -y configure \
  [mongodb|rethinkdb]

@@ -45,7 +45,7 @@ Let's analyze that command:
  `$HOME/bigchaindb_docker` to the container directory `/data`;
  this allows us to have the data persisted on the host machine,
  you can read more in the [official Docker
  documentation](https://docs.docker.com/engine/tutorials/dockervolumes/#/mount-a-host-directory-as-a-data-volume)
  documentation](https://docs.docker.com/engine/tutorials/dockervolumes)
* `bigchaindb/bigchaindb` the image to use. All the options after the container name are passed on to the entrypoint inside the container.
* `-y configure` execute the `configure` sub-command (of the `bigchaindb`
  command) inside the container, with the `-y` option to automatically use all the default config values

@@ -76,13 +76,13 @@ docker run \
  --publish=172.17.0.1:28015:28015 \
  --publish=172.17.0.1:58080:8080 \
  --restart=always \
  --volume "$HOME/bigchaindb_docker:/data" \
  --volume $HOME/bigchaindb_docker:/data \
  rethinkdb:2.3
```

<!-- Don't hyperlink http://172.17.0.1:58080/ because Sphinx will fail when you do "make linkcheck" -->

You can also access the RethinkDB dashboard at
[http://172.17.0.1:58080/](http://172.17.0.1:58080/)
You can also access the RethinkDB dashboard at http://172.17.0.1:58080/


#### For MongoDB

@@ -95,7 +95,7 @@ be owned by this user in the host.
If there is no owner with UID 999, you can create the corresponding user and
group.

`groupadd -r --gid 999 mongodb && useradd -r --uid 999 -g mongodb mongodb`
`useradd -r --uid 999 mongodb` OR `groupadd -r --gid 999 mongodb && useradd -r --uid 999 -g mongodb mongodb` should work.


```text

@@ -156,3 +156,4 @@ docker build --tag local-bigchaindb .
```

Now you can use your own image to run BigchainDB containers.
@@ -157,7 +157,7 @@ Step 5: Create the Config Map - Optional

This step is required only if you are planning to set up multiple
`BigchainDB nodes
<https://docs.bigchaindb.com/en/latest/terminology.html#node>`_.
<https://docs.bigchaindb.com/en/latest/terminology.html>`_.

MongoDB reads the local ``/etc/hosts`` file while bootstrapping a replica set
to resolve the hostname provided to the ``rs.initiate()`` command. It needs to

@@ -268,7 +268,7 @@ Step 7: Initialize a MongoDB Replica Set - Optional

This step is required only if you are planning to set up multiple
`BigchainDB nodes
<https://docs.bigchaindb.com/en/latest/terminology.html#node>`_.
<https://docs.bigchaindb.com/en/latest/terminology.html>`_.


Login to the running MongoDB instance and access the mongo shell using:

@@ -315,7 +315,7 @@ Step 8: Create a DNS record - Optional

This step is required only if you are planning to set up multiple
`BigchainDB nodes
<https://docs.bigchaindb.com/en/latest/terminology.html#node>`_.
<https://docs.bigchaindb.com/en/latest/terminology.html>`_.

**Azure.** Select the current Azure resource group and look for the ``Public IP``
resource. You should see at least 2 entries there - one for the Kubernetes

@@ -426,9 +426,8 @@ on the cluster and query the internal DNS and IP endpoints.

$ kubectl run -it toolbox -- image <docker image to run> --restart=Never --rm

There is a generic image based on alpine:3.5 with the required utilities
hosted at Docker Hub under ``bigchaindb/toolbox``.
The corresponding Dockerfile is `here
<https://github.com/bigchaindb/bigchaindb/k8s/toolbox/Dockerfile>`_.
hosted at Docker Hub under `bigchaindb/toolbox <https://hub.docker.com/r/bigchaindb/toolbox/>`_.
The corresponding Dockerfile is in the bigchaindb/bigchaindb repository on GitHub, at `https://github.com/bigchaindb/bigchaindb/blob/master/k8s/toolbox/Dockerfile <https://github.com/bigchaindb/bigchaindb/blob/master/k8s/toolbox/Dockerfile>`_.

You can use it as below to get started immediately:
|
||||
|
||||
## Next Steps
|
||||
|
||||
You could make changes to the Ansible playbook (and the resources it uses) to make the node more production-worthy. See [the section on production node assumptions, components and requirements](../nodes/index.html).
|
||||
You could make changes to the Ansible playbook (and the resources it uses) to make the node more production-worthy. See [the section on production node assumptions, components and requirements](../production-nodes/index.html).
|
||||
|
@@ -53,7 +53,7 @@ on the node and mark it as unscheduleable

kubectl drain $NODENAME

There are `more details in the Kubernetes docs <https://kubernetes.io/docs/admin/cluster-management/#maintenance-on-a-node>`_,
There are `more details in the Kubernetes docs <https://kubernetes.io/docs/concepts/cluster-administration/cluster-management/#maintenance-on-a-node>`_,
including instructions to make the node scheduleable again.

To manually upgrade the host OS,

@@ -82,13 +82,13 @@ A typical upgrade workflow for a single Deployment would be:

$ KUBE_EDITOR=nano kubectl edit deployment/<name of Deployment>

The `kubectl edit <https://kubernetes.io/docs/user-guide/kubectl/kubectl_edit/>`_
command opens the specified editor (nano in the above example),
The ``kubectl edit`` command
opens the specified editor (nano in the above example),
allowing you to edit the specified Deployment *in the Kubernetes cluster*.
You can change the version tag on the Docker image, for example.
Don't forget to save your edits before exiting the editor.
The Kubernetes docs have more information about
`updating a Deployment <https://kubernetes.io/docs/user-guide/deployments/#updating-a-deployment>`_.
`Deployments <https://kubernetes.io/docs/concepts/workloads/controllers/deployment/>`_ (including updating them).


The upgrade story for the MongoDB StatefulSet is *different*.
@@ -1,6 +1,6 @@
# Deploy a Testing Cluster on AWS
# Deploy a RethinkDB-Based Testing Cluster on AWS

This section explains a way to deploy a cluster of BigchainDB nodes on Amazon Web Services (AWS) for testing purposes.
This section explains a way to deploy a _RethinkDB-based_ cluster of BigchainDB nodes on Amazon Web Services (AWS) for testing purposes.

## Why?
@@ -5,6 +5,5 @@ Clusters
   :maxdepth: 1

   set-up-a-cluster
   backup
   aws-testing-cluster
@@ -3,7 +3,9 @@
This section is about how to set up a BigchainDB cluster where each node is operated by a different operator. If you want to set up and run a testing cluster on AWS (where all nodes are operated by you), then see [the section about that](aws-testing-cluster.html).


## Initial Checklist
## Initial Questions

There are many questions that must be answered before setting up a BigchainDB cluster. For example:

* Do you have a governance process for making consortium-level decisions, such as how to admit new members?
* What will you store in creation transactions (data payload)? Is there a data schema?

@@ -15,14 +17,12 @@ This section is about how to set up a BigchainDB cluster where each node is oper

The consortium must decide some things before setting up the initial cluster (initial set of BigchainDB nodes):

1. Who will operate a node in the initial cluster?
2. What will the replication factor be? (It must be 3 or more for [RethinkDB failover](https://rethinkdb.com/docs/failover/) to work.)
3. Which node will be responsible for sending the commands to configure the RethinkDB database?
1. Who will operate each node in the initial cluster?
2. What will the replication factor be? (It should be 3 or more.)
3. Who will deploy the first node, second node, etc.?

Once those things have been decided, each node operator can begin setting up their BigchainDB (production) node.
Once those things have been decided, the cluster deployment process can begin. The process for deploying a production node is outlined in [the section on production nodes](../production-nodes/index.html).

Each node operator will eventually need two pieces of information from all other nodes:

1. Their RethinkDB hostname, e.g. `rdb.farm2.organization.org`
2. Their BigchainDB public key, e.g. `Eky3nkbxDTMgkmiJC8i5hKyVFiAQNmPP4a2G4JdDxJCK`

Every time a new BigchainDB node is added, every other node must update their [BigchainDB keyring](../server-reference/configuration.html#keyring) (one of the BigchainDB configuration settings): they must add the public key of the new node.
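As a hedged illustration (the first value is just the example public key from above, the second is a placeholder), the keyring setting in each node's BigchainDB configuration would then look something like:

```text
"keyring": ["Eky3nkbxDTMgkmiJC8i5hKyVFiAQNmPP4a2G4JdDxJCK",
            "<public key of the newly added node>"]
```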
To secure communications between BigchainDB nodes, each BigchainDB node can use a firewall or similar, and doing that will require additional coordination.
@@ -25,9 +25,16 @@ The (single) output of a threshold condition can be used as one of the inputs of
When one creates a condition, one can calculate its fulfillment length (e.g.
96). The more complex the condition, the larger its fulfillment length will be.
A BigchainDB federation can put an upper limit on the complexity of the
conditions, either directly by setting an allowed maximum fulfillment length,
or indirectly by setting a maximum allowed transaction size which would limit
conditions, either directly by setting a maximum allowed fulfillment length,
or
`indirectly <https://github.com/bigchaindb/bigchaindb/issues/356#issuecomment-288085251>`_
by :ref:`setting a maximum allowed transaction size <Enforcing a Max Transaction Size>`
which would limit
the overall complexity accross all inputs and outputs of a transaction.
Note: At the time of writing, there was no configuration setting
to set a maximum allowed fulfillment length,
so the only real option was to
:ref:`set a maximum allowed transaction size <Enforcing a Max Transaction Size>`.

If someone tries to make a condition where the output of a threshold condition feeds into the input of another “earlier” threshold condition (i.e. in a closed logical circuit), then their computer will take forever to calculate the (infinite) “condition URI”, at least in theory. In practice, their computer will run out of memory or their client software will timeout after a while.
@@ -23,7 +23,9 @@ Start RethinkDB using:
$ rethinkdb
```

You can verify that RethinkDB is running by opening the RethinkDB web interface in your web browser. It should be at [http://localhost:8080/](http://localhost:8080/).
You can verify that RethinkDB is running by opening the RethinkDB web interface in your web browser. It should be at http://localhost:8080/

<!-- Don't hyperlink http://localhost:8080/ because Sphinx will fail when you do "make linkcheck" -->

To run BigchainDB Server, do:
```text

@@ -87,28 +89,28 @@ Start RethinkDB:
docker-compose up -d rdb
```

The RethinkDB web interface should be accessible at <http://localhost:58080/>.
The RethinkDB web interface should be accessible at http://localhost:58080/.
Depending on which platform, and/or how you are running docker, you may need
to change `localhost` for the `ip` of the machine that is running docker. As a
dummy example, if the `ip` of that machine was `0.0.0.0`, you would access the
web interface at: <http://0.0.0.0:58080/>.
web interface at: http://0.0.0.0:58080/.

Start a BigchainDB node:

```bash
docker-compose up -d bdb
docker-compose up -d bdb-rdb
```

You can monitor the logs:

```bash
docker-compose logs -f bdb
docker-compose logs -f bdb-rdb
```

If you wish to run the tests:

```bash
docker-compose run --rm bdb py.test -v -n auto
docker-compose run --rm bdb-rdb py.test -v -n auto
```

### Docker with MongoDB

@@ -128,19 +130,19 @@ $ docker-compose port mdb 27017
Start a BigchainDB node:

```bash
docker-compose up -d bdb-mdb
docker-compose up -d bdb
```

You can monitor the logs:

```bash
docker-compose logs -f bdb-mdb
docker-compose logs -f bdb
```

If you wish to run the tests:

```bash
docker-compose run --rm bdb-mdb py.test -v --database-backend=mongodb
docker-compose run --rm bdb py.test -v --database-backend=mongodb
```

### Accessing the HTTP API
@@ -14,18 +14,16 @@ community projects listed below.
.. toctree::
   :maxdepth: 1

   http-client-server-api
   websocket-event-stream-api
   The Python Driver <https://docs.bigchaindb.com/projects/py-driver/en/latest/index.html>
   Transaction CLI <https://docs.bigchaindb.com/projects/cli/en/latest/>


Community Driven Libraries and Tools
Community-Driven Libraries and Tools
------------------------------------
Please note that some of these projects may be work in progress, but may
nevertheless be very useful.

* `Javascript transaction builder <https://github.com/sohkai/js-bigchaindb-quickstart>`_
* `Haskell transaction builder <https://github.com/libscott/bigchaindb-hs>`_
* `Haskell transaction builder <https://github.com/bigchaindb/bigchaindb-hs>`_
* `Go driver <https://github.com/zbo14/envoke/blob/master/bigchain/bigchain.go>`_
* `Java driver <https://github.com/mgrand/bigchaindb-java-driver>`_
@@ -22,7 +22,7 @@ or ``https://example.com:9984``
then you should get an HTTP response
with something like the following in the body:

.. literalinclude:: samples/index-response.http
.. literalinclude:: http-samples/index-response.http
   :language: http


@@ -35,7 +35,7 @@ or ``https://example.com:9984/api/v1/``,
then you should get an HTTP response
that allows you to discover the BigchainDB API endpoints:

.. literalinclude:: samples/api-index-response.http
.. literalinclude:: http-samples/api-index-response.http
   :language: http


@@ -58,12 +58,12 @@ Transactions

   **Example request**:

   .. literalinclude:: samples/get-tx-id-request.http
   .. literalinclude:: http-samples/get-tx-id-request.http
      :language: http

   **Example response**:

   .. literalinclude:: samples/get-tx-id-response.http
   .. literalinclude:: http-samples/get-tx-id-response.http
      :language: http

   :resheader Content-Type: ``application/json``

@@ -110,12 +110,12 @@ Transactions

   **Example request**:

   .. literalinclude:: samples/get-tx-by-asset-request.http
   .. literalinclude:: http-samples/get-tx-by-asset-request.http
      :language: http

   **Example response**:

   .. literalinclude:: samples/get-tx-by-asset-response.http
   .. literalinclude:: http-samples/get-tx-by-asset-response.http
      :language: http

   :resheader Content-Type: ``application/json``

@@ -139,12 +139,12 @@ Transactions

   **Example request**:

   .. literalinclude:: samples/post-tx-request.http
   .. literalinclude:: http-samples/post-tx-request.http
      :language: http

   **Example response**:

   .. literalinclude:: samples/post-tx-response.http
   .. literalinclude:: http-samples/post-tx-response.http
      :language: http

   :resheader Content-Type: ``application/json``

@@ -227,12 +227,12 @@ Statuses

   **Example request**:

   .. literalinclude:: samples/get-statuses-tx-request.http
   .. literalinclude:: http-samples/get-statuses-tx-request.http
      :language: http

   **Example response**:

   .. literalinclude:: samples/get-statuses-tx-valid-response.http
   .. literalinclude:: http-samples/get-statuses-tx-valid-response.http
      :language: http

   :resheader Content-Type: ``application/json``

@@ -250,17 +250,17 @@ Statuses

   **Example request**:

   .. literalinclude:: samples/get-statuses-block-request.http
   .. literalinclude:: http-samples/get-statuses-block-request.http
      :language: http

   **Example response**:

   .. literalinclude:: samples/get-statuses-block-invalid-response.http
   .. literalinclude:: http-samples/get-statuses-block-invalid-response.http
      :language: http

   **Example response**:

   .. literalinclude:: samples/get-statuses-block-valid-response.http
   .. literalinclude:: http-samples/get-statuses-block-valid-response.http
      :language: http

   :resheader Content-Type: ``application/json``

@@ -298,12 +298,12 @@ Blocks

   **Example request**:

   .. literalinclude:: samples/get-block-request.http
   .. literalinclude:: http-samples/get-block-request.http
      :language: http

   **Example response**:

   .. literalinclude:: samples/get-block-response.http
   .. literalinclude:: http-samples/get-block-response.http
      :language: http


@@ -353,12 +353,12 @@ Blocks

   **Example request**:

   .. literalinclude:: samples/get-block-txid-request.http
   .. literalinclude:: http-samples/get-block-txid-request.http
      :language: http

   **Example response**:

   .. literalinclude:: samples/get-block-txid-response.http
   .. literalinclude:: http-samples/get-block-txid-response.http
      :language: http

   :resheader Content-Type: ``application/json``

@@ -384,12 +384,12 @@ Votes

   **Example request**:

   .. literalinclude:: samples/get-vote-request.http
   .. literalinclude:: http-samples/get-vote-request.http
      :language: http

   **Example response**:

   .. literalinclude:: samples/get-vote-response.http
   .. literalinclude:: http-samples/get-vote-response.http
      :language: http

   :resheader Content-Type: ``application/json``

@@ -406,7 +406,7 @@ Determining the API Root URL
When you start BigchainDB Server using ``bigchaindb start``,
an HTTP API is exposed at some address. The default is:

`http://localhost:9984/api/v1/ <http://localhost:9984/api/v1/>`_
``http://localhost:9984/api/v1/``

It's bound to ``localhost``,
so you can access it from the same machine,
@ -8,9 +8,11 @@ BigchainDB Server Documentation
|
||||
introduction
|
||||
quickstart
|
||||
cloud-deployment-templates/index
|
||||
nodes/index
|
||||
production-nodes/index
|
||||
dev-and-test/index
|
||||
server-reference/index
|
||||
http-client-server-api
|
||||
websocket-event-stream-api
|
||||
drivers-clients/index
|
||||
clusters-feds/index
|
||||
data-models/index
|
||||
|
@ -1,10 +0,0 @@
|
||||
Production Node Assumptions, Components & Requirements
|
||||
======================================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
node-assumptions
|
||||
node-components
|
||||
node-requirements
|
||||
setup-run-node
|
@ -1,13 +0,0 @@
|
||||
# Production Node Assumptions
|
||||
|
||||
If you're not sure what we mean by a BigchainDB *node*, *cluster*, *consortium*, or *production node*, then see [the section in the Introduction where we defined those terms](../introduction.html#some-basic-vocabulary).
|
||||
|
||||
We make some assumptions about production nodes:
|
||||
|
||||
1. **Each production node is set up and managed by an experienced professional system administrator (or a team of them).**
|
||||
|
||||
2. Each production node in a cluster is managed by a different person or team.
|
||||
|
||||
Because of the first assumption, we don't provide a detailed cookbook explaining how to secure a server, or other things that a sysadmin should know. (We do provide some [templates](../cloud-deployment-templates/index.html), but those are just a starting point.)
|
||||
|
||||
|
@ -1,23 +0,0 @@
|
||||
# Production Node Components
|
||||
|
||||
A BigchainDB node must include, at least:
|
||||
|
||||
* BigchainDB Server and
|
||||
* RethinkDB Server.
|
||||
|
||||
When doing development and testing, it's common to install both on the same machine, but in a production environment, it may make more sense to install them on separate machines.
|
||||
|
||||
In a production environment, a BigchainDB node should have several other components, including:
|
||||
|
||||
* nginx or similar, as a reverse proxy and/or load balancer for the Gunicorn server(s) inside the node
|
||||
* An NTP daemon running on all machines running BigchainDB code, and possibly other machines
|
||||
* A RethinkDB proxy server
|
||||
* A RethinkDB "wire protocol firewall" (in the future: this component doesn't exist yet)
|
||||
* Scalable storage for RethinkDB (e.g. using RAID)
|
||||
* Monitoring software, to monitor all the machines in the node
|
||||
* Configuration management agents (if you're using a configuration management system that uses agents)
|
||||
* Maybe more
|
||||
|
||||
The relationship between these components is illustrated below.
|
||||
|
||||

|
@ -1,193 +0,0 @@
|
||||
# Set Up and Run a Cluster Node
|
||||
|
||||
This is a page of general guidelines for setting up a production node. It says nothing about how to upgrade software, storage, processing, etc. or other details of node management. It will be expanded more in the future.
|
||||
|
||||
|
||||
## Get a Server
|
||||
|
||||
The first step is to get a server (or equivalent) which meets [the requirements for a BigchainDB node](node-requirements.html).
|
||||
|
||||
|
||||
## Secure Your Server
|
||||
|
||||
The steps that you must take to secure your server depend on your server OS and where your server is physically located. There are many articles and books about how to secure a server. Here we just cover special considerations when securing a BigchainDB node.
|
||||
|
||||
There are some [notes on BigchainDB-specific firewall setup](../appendices/firewall-notes.html) in the Appendices.
|
||||
|
||||
|
||||
## Sync Your System Clock
|
||||
|
||||
A BigchainDB node uses its system clock to generate timestamps for blocks and votes, so that clock should be kept in sync with some standard clock(s). The standard way to do that is to run an NTP daemon (Network Time Protocol daemon) on the node. (You could also use tlsdate, which uses TLS timestamps rather than NTP, but don't: it's not very accurate and it will break with TLS 1.3, which removes the timestamp.)
|
||||
|
||||
NTP is a standard protocol. There are many NTP daemons implementing it. We don't recommend a particular one. On the contrary, we recommend that different nodes in a cluster run different NTP daemons, so that a problem with one daemon won't affect all nodes.
|
||||
|
||||
Please see the [notes on NTP daemon setup](../appendices/ntp-notes.html) in the Appendices.
|
||||
|
||||
|
||||
## Set Up Storage for RethinkDB Data
|
||||
|
||||
Below are some things to consider when setting up storage for the RethinkDB data. The Appendices have a [section with concrete examples](../appendices/example-rethinkdb-storage-setups.html).
|
||||
|
||||
We suggest you set up a separate storage "device" (partition, RAID array, or logical volume) to store the RethinkDB data. Here are some questions to ask:
|
||||
|
||||
* How easy will it be to add storage in the future? Will I have to shut down my server?
|
||||
* How big can the storage get? (Remember that [RAID](https://en.wikipedia.org/wiki/RAID) can be used to make several physical drives look like one.)
|
||||
* How fast can it read & write data? How many input/output operations per second (IOPS)?
|
||||
* How does IOPS scale as more physical hard drives are added?
|
||||
* What's the latency?
|
||||
* What's the reliability? Is there replication?
|
||||
* What's in the Service Level Agreement (SLA), if applicable?
|
||||
* What's the cost?
|
||||
|
||||
There are many options and tradeoffs. Don't forget to look into Amazon Elastic Block Store (EBS) and Amazon Elastic File System (EFS), or their equivalents from other providers.
|
||||
|
||||
**Storage Notes Specific to RethinkDB**
|
||||
|
||||
* The RethinkDB storage engine has a number of SSD optimizations, so you _can_ benefit from using SSDs. ([source](https://www.rethinkdb.com/docs/architecture/))
|
||||
|
||||
* If you want a RethinkDB cluster to store an amount of data D, with a replication factor of R (on every table), and the cluster has N nodes, then each node will need to be able to store R×D/N data.
|
||||
|
||||
* RethinkDB tables can have [at most 64 shards](https://rethinkdb.com/limitations/). For example, if you have only one table and more than 64 nodes, some nodes won't have the primary of any shard, i.e. they will have replicas only. In other words, once you pass 64 nodes, adding more nodes won't provide more storage space for new data. If the biggest single-node storage available is d, then the most you can store in a RethinkDB cluster is < 64×d: accomplished by putting one primary shard in each of 64 nodes, with all replica shards on other nodes. (This is assuming one table. If there are T tables, then the most you can store is < 64×d×T.)
|
||||
|
||||
* When you set up storage for your RethinkDB data, you may have to select a filesystem. (Sometimes, the filesystem is already decided by the choice of storage.) We recommend using a filesystem that supports direct I/O (Input/Output). Many compressed or encrypted file systems don't support direct I/O. The ext4 filesystem supports direct I/O (but be careful: if you enable the data=journal mode, then direct I/O support will be disabled; the default is data=ordered). If your chosen filesystem supports direct I/O and you're using Linux, then you don't need to do anything to request or enable direct I/O. RethinkDB does that.
|
||||
|
||||
<p style="background-color: lightgrey;">What is direct I/O? It allows RethinkDB to write directly to the storage device (or use its own in-memory caching mechanisms), rather than relying on the operating system's file read and write caching mechanisms. (If you're using Linux, a write-to-file normally writes to the in-memory Page Cache first; only later does that Page Cache get flushed to disk. The Page Cache is also used when reading files.)</p>
|
||||
|
||||
* RethinkDB stores its data in a specific directory. You can tell RethinkDB _which_ directory using the RethinkDB config file, as explained below. In this documentation, we assume the directory is `/data`. If you set up a separate device (partition, RAID array, or logical volume) to store the RethinkDB data, then mount that device on `/data`.
|
||||
|
||||
|
||||
## Install RethinkDB Server
|
||||
|
||||
If you don't already have RethinkDB Server installed, you must install it. The RethinkDB documentation has instructions for [how to install RethinkDB Server on a variety of operating systems](https://rethinkdb.com/docs/install/).
|
||||
|
||||
|
||||
## Configure RethinkDB Server
|
||||
|
||||
Create a RethinkDB configuration file (text file) named `instance1.conf` with the following contents (explained below):
|
||||
```text
|
||||
directory=/data
|
||||
bind=all
|
||||
direct-io
|
||||
# Replace node?_hostname with actual node hostnames below, e.g. rdb.examples.com
|
||||
join=node0_hostname:29015
|
||||
join=node1_hostname:29015
|
||||
join=node2_hostname:29015
|
||||
# continue until there's a join= line for each node in the cluster
|
||||
```
|
||||
|
||||
* `directory=/data` tells the RethinkDB node to store its share of the database data in `/data`.
|
||||
* `bind=all` binds RethinkDB to all local network interfaces (e.g. loopback, Ethernet, wireless, whatever is available), so it can communicate with the outside world. (The default is to bind only to local interfaces.)
|
||||
* `direct-io` tells RethinkDB to use direct I/O (explained earlier). Only include this line if your file system supports direct I/O.
|
||||
* `join=hostname:29015` lines: A cluster node needs to find out the hostnames of all the other nodes somehow. You _could_ designate one node to be the one that every other node asks, and put that node's hostname in the config file, but that wouldn't be very decentralized. Instead, we include _every_ node in the list of nodes-to-ask.
|
||||
|
||||
If you're curious about the RethinkDB config file, there's [a RethinkDB documentation page about it](https://www.rethinkdb.com/docs/config-file/). The [explanations of the RethinkDB command-line options](https://rethinkdb.com/docs/cli-options/) are another useful reference.
|
||||
|
||||
See the [RethinkDB documentation on securing your cluster](https://rethinkdb.com/docs/security/).
|
||||
|
||||
|
||||
## Install Python 3.4+
|
||||
|
||||
If you don't already have it, then you should [install Python 3.4+](https://www.python.org/downloads/).
|
||||
|
||||
If you're testing or developing BigchainDB on a stand-alone node, then you should probably create a Python 3.4+ virtual environment and activate it (e.g. using virtualenv or conda). Later we will install several Python packages and you probably only want those installed in the virtual environment.
|
||||
|
||||
|
||||
## Install BigchainDB Server
|
||||
|
||||
First, [install the OS-level dependencies of BigchainDB Server (link)](../appendices/install-os-level-deps.html).
|
||||
|
||||
With OS-level dependencies installed, you can install BigchainDB Server with `pip` or from source.
|
||||
|
||||
|
||||
### How to Install BigchainDB with pip
|
||||
|
||||
BigchainDB (i.e. both the Server and the officially-supported drivers) is distributed as a Python package on PyPI so you can install it using `pip`. First, make sure you have an up-to-date Python 3.4+ version of `pip` installed:
|
||||
```text
|
||||
pip -V
|
||||
```
|
||||
|
||||
If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.4, then you must install a `pip` version associated with Python 3.4+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).
|
||||
|
||||
On Ubuntu 16.04, we found that this works:
|
||||
```text
|
||||
sudo apt-get install python3-pip
|
||||
```
|
||||
|
||||
That should install a Python 3 version of `pip` named `pip3`. If that didn't work, then another way to get `pip3` is to do `sudo apt-get install python3-setuptools` followed by `sudo easy_install3 pip`.
|
||||
|
||||
You can upgrade `pip` (`pip3`) and `setuptools` to the latest versions using:
|
||||
```text
|
||||
pip3 install --upgrade pip setuptools
|
||||
pip3 -V
|
||||
```
|
||||
|
||||
Now you can install BigchainDB Server (and officially-supported BigchainDB drivers) using:
|
||||
```text
|
||||
pip3 install bigchaindb
|
||||
```
|
||||
|
||||
(If you're not in a virtualenv and you want to install bigchaindb system-wide, then put `sudo` in front.)
|
||||
|
||||
Note: You can use `pip3` to upgrade the `bigchaindb` package to the latest version using `pip3 install --upgrade bigchaindb`.
|
||||
|
||||
|
||||
### How to Install BigchainDB from Source
|
||||
|
||||
If you want to install BigchainDB from source because you want to use the very latest bleeding-edge code, clone the public repository:
|
||||
```text
|
||||
git clone git@github.com:bigchaindb/bigchaindb.git
|
||||
python setup.py install
|
||||
```
|
||||
|
||||
|
||||
## Configure BigchainDB Server
|
||||
|
||||
Start by creating a default BigchainDB config file:
|
||||
```text
|
||||
bigchaindb -y configure rethinkdb
|
||||
```
|
||||
|
||||
(There's documentation for the `bigchaindb` command in the section on [the BigchainDB Command Line Interface (CLI)](bigchaindb-cli.html).)
|
||||
|
||||
Edit the created config file:
|
||||
|
||||
* Open `$HOME/.bigchaindb` (the created config file) in your text editor.
|
||||
* Change `"server": {"bind": "localhost:9984", ... }` to `"server": {"bind": "0.0.0.0:9984", ... }`. This makes it so traffic can come from any IP address to port 9984 (the HTTP Client-Server API port).
|
||||
* Change `"keyring": []` to `"keyring": ["public_key_of_other_node_A", "public_key_of_other_node_B", "..."]` i.e. a list of the public keys of all the other nodes in the cluster. The keyring should _not_ include your node's public key.
|
||||
|
||||
For more information about the BigchainDB config file, see [Configuring a BigchainDB Node](configuration.html).
|
||||
|
||||
|
||||
## Run RethinkDB Server
|
||||
|
||||
Start RethinkDB using:
|
||||
```text
|
||||
rethinkdb --config-file path/to/instance1.conf
|
||||
```
|
||||
|
||||
except replace the path with the actual path to `instance1.conf`.
|
||||
|
||||
Note: It's possible to [make RethinkDB start at system startup](https://www.rethinkdb.com/docs/start-on-startup/).
|
||||
|
||||
You can verify that RethinkDB is running by opening the RethinkDB web interface in your web browser. It should be at `http://rethinkdb-hostname:8080/`. If you're running RethinkDB on localhost, that would be [http://localhost:8080/](http://localhost:8080/).
|
||||
|
||||
|
||||
## Run BigchainDB Server
|
||||
|
||||
After all node operators have started RethinkDB, but before they start BigchainDB, one designated node operator must configure the RethinkDB database by running the following commands:
|
||||
```text
|
||||
bigchaindb init
|
||||
bigchaindb set-shards numshards
|
||||
bigchaindb set-replicas numreplicas
|
||||
```
|
||||
|
||||
where:
|
||||
|
||||
* `bigchaindb init` creates the database within RethinkDB, the tables, the indexes, and the genesis block.
|
||||
* `numshards` should be set to the number of nodes in the initial cluster.
|
||||
* `numreplicas` should be set to the database replication factor decided by the consortium. It must be 3 or more for [RethinkDB failover](https://rethinkdb.com/docs/failover/) to work.
|
||||
|
||||
Once the RethinkDB database is configured, every node operator can start BigchainDB using:
|
||||
```text
|
||||
bigchaindb start
|
||||
```
|
12
docs/server/source/production-nodes/index.rst
Normal file
12
docs/server/source/production-nodes/index.rst
Normal file
@ -0,0 +1,12 @@
|
||||
Production Nodes
|
||||
================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
node-assumptions
|
||||
node-components
|
||||
node-requirements
|
||||
setup-run-node
|
||||
reverse-proxy-notes
|
||||
|
16
docs/server/source/production-nodes/node-assumptions.md
Normal file
16
docs/server/source/production-nodes/node-assumptions.md
Normal file
@ -0,0 +1,16 @@
|
||||
# Production Node Assumptions
|
||||
|
||||
Be sure you know the key BigchainDB terminology:
|
||||
|
||||
* [BigchainDB node, BigchainDB cluster and BigchainDB consortium](https://docs.bigchaindb.com/en/latest/terminology.html)
|
||||
* [dev/test node, bare-bones node and production node](../introduction.html)
|
||||
|
||||
We make some assumptions about production nodes:
|
||||
|
||||
1. Production nodes use MongoDB, not RethinkDB.
|
||||
1. Each production node is set up and managed by an experienced professional system administrator or a team of them.
|
||||
1. Each production node in a cluster is managed by a different person or team.
|
||||
|
||||
You can use RethinkDB when building prototypes, but we don't advise or support using it in production.
|
||||
|
||||
We don't provide a detailed cookbook explaining how to secure a server, or other things that a sysadmin should know. (We do provide some [templates](../cloud-deployment-templates/index.html), but those are just a starting point.)
|
22
docs/server/source/production-nodes/node-components.md
Normal file
22
docs/server/source/production-nodes/node-components.md
Normal file
@ -0,0 +1,22 @@
|
||||
# Production Node Components
|
||||
|
||||
A production BigchainDB node must include:
|
||||
|
||||
* BigchainDB Server
|
||||
* MongoDB Server 3.4+ (mongod)
|
||||
* Scalable storage for MongoDB
|
||||
|
||||
It could also include several other components, such as:
|
||||
|
||||
* NGINX or similar, to provide authentication, rate limiting, etc.
|
||||
* An NTP daemon running on all machines running BigchainDB Server or mongod, and possibly other machines
|
||||
* **Not** MongoDB Automation Agent. It's for automating the deployment of an entire MongoDB cluster, not just one MongoDB node within a cluster.
|
||||
* MongoDB Monitoring Agent
|
||||
* MongoDB Backup Agent
|
||||
* Log aggregation software
|
||||
* Monitoring software
|
||||
* Maybe more
|
||||
|
||||
The relationship between the main components is illustrated below. Note that BigchainDB Server must be able to communicate with the _primary_ MongoDB instance, and any of the MongoDB instances might be the primary, so BigchainDB Server must be able to communicate with all the MongoDB instances. Also, all MongoDB instances must be able to communicate with each other.
|
||||
|
||||

|
17
docs/server/source/production-nodes/node-requirements.md
Normal file
17
docs/server/source/production-nodes/node-requirements.md
Normal file
@ -0,0 +1,17 @@
|
||||
# Production Node Requirements
|
||||
|
||||
**This page is about the requirements of BigchainDB Server.** You can find the requirements of MongoDB, NGINX, your NTP daemon, your monitoring software, and other [production node components](node-components.html) in the documentation for that software.
|
||||
|
||||
|
||||
## OS Requirements
|
||||
|
||||
BigchainDB Server requires Python 3.4+ and Python 3.4+ [will run on any modern OS](https://docs.python.org/3.4/using/index.html), but we recommend using an LTS version of [Ubuntu Server](https://www.ubuntu.com/server) or a similarly server-grade Linux distribution.
|
||||
|
||||
_Don't use macOS_ (formerly OS X, formerly Mac OS X), because it's not a server-grade operating system. Also, BigchainDB Server uses the Python multiprocessing package and [some functionality in the multiprocessing package doesn't work on Mac OS X](https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.qsize).
|
||||
|
||||
|
||||
## General Considerations
|
||||
|
||||
BigchainDB Server runs many concurrent processes, so more RAM and more CPU cores is better.
|
||||
|
||||
As mentioned on the page about [production node components](node-components.html), every machine running BigchainDB Server should be running an NTP daemon.
|
72
docs/server/source/production-nodes/reverse-proxy-notes.md
Normal file
72
docs/server/source/production-nodes/reverse-proxy-notes.md
Normal file
@ -0,0 +1,72 @@
|
||||
# Using a Reverse Proxy
|
||||
|
||||
You may want to:
|
||||
|
||||
* rate limit inbound HTTP requests,
|
||||
* authenticate/authorize inbound HTTP requests,
|
||||
* block requests with an HTTP request body that's too large, or
|
||||
* enable HTTPS (TLS) between your users and your node.
|
||||
|
||||
While we could have built all that into BigchainDB Server,
|
||||
we didn't, because you can do all that (and more)
|
||||
using a reverse proxy such as NGINX or HAProxy.
|
||||
(You would put it in front of your BigchainDB Server,
|
||||
so that all inbound HTTP requests would arrive
|
||||
at the reverse proxy before *maybe* being proxied
|
||||
onwards to your BigchainDB Server.)
|
||||
For detailed instructions, see the documentation
|
||||
for your reverse proxy.
|
||||
|
||||
Below, we note how a reverse proxy can be used
|
||||
to do some BigchainDB-specific things.
|
||||
|
||||
You may also be interested in
|
||||
[our NGINX configuration file template](https://github.com/bigchaindb/nginx_3scale/blob/master/nginx.conf.template)
|
||||
(open source, on GitHub).
|
||||
|
||||
|
||||
## Enforcing a Max Transaction Size
|
||||
|
||||
The BigchainDB HTTP API has several endpoints,
|
||||
but only one of them, the `POST /transactions` endpoint,
|
||||
expects a non-empty HTTP request body:
|
||||
the transaction (JSON) being submitted by the user.
|
||||
|
||||
If you want to enforce a maximum-allowed transaction size
|
||||
(discarding any that are larger),
|
||||
then you can do so by configuring a maximum request body size
|
||||
in your reverse proxy.
|
||||
For example, NGINX has the `client_max_body_size`
|
||||
configuration setting. You could set it to 15 kB
|
||||
with the following line in your NGINX config file:
|
||||
|
||||
```text
|
||||
client_max_body_size 15k;
|
||||
```
|
||||
|
||||
For more information, see
|
||||
[the NGINX docs about client_max_body_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size).
|
||||
|
||||
Note: By enforcing a maximum transaction size, you
|
||||
[indirectly enforce a maximum crypto-conditions complexity](https://github.com/bigchaindb/bigchaindb/issues/356#issuecomment-288085251).
|
||||
|
||||
|
||||
**Aside: Why 15 kB?**
|
||||
|
||||
Both [RethinkDB](https://rethinkdb.com/limitations/) and
|
||||
[MongoDB have a maximum document size of 16 MB](https://docs.mongodb.com/manual/reference/limits/#limit-bson-document-size).
|
||||
In BigchainDB, the biggest documents are the blocks.
|
||||
A BigchainDB block can contain up to 1000 transactions,
|
||||
plus some other data (e.g. the timestamp).
|
||||
If we ignore the other data as negligible relative to all the transactions,
|
||||
then a block of size 16 MB
|
||||
will have an average transaction size of (16 MB)/1000 = 16 kB.
|
||||
Therefore by limiting the max transaction size to 15 kB,
|
||||
you can be fairly sure that no blocks will ever be
|
||||
bigger than 16 MB.
|
||||
|
||||
Note: Technically, the documents that MongoDB stores aren't the JSON
|
||||
that BigchainDB users think of; they're JSON converted to BSON.
|
||||
Moreover, [one can use GridFS with MongoDB to store larger documents](https://docs.mongodb.com/manual/core/gridfs/).
|
||||
Therefore the above calculation should be seen as a rough guide,
|
||||
not the last word.
|
137
docs/server/source/production-nodes/setup-run-node.md
Normal file
137
docs/server/source/production-nodes/setup-run-node.md
Normal file
@ -0,0 +1,137 @@
|
||||
# Set Up and Run a Cluster Node
|
||||
|
||||
This is a page of general guidelines for setting up a production BigchainDB node. Before continuing, make sure you've read the pages about production node [assumptions](node-assumptions.html), [components](node-components.html) and [requirements](node-requirements.html).
|
||||
|
||||
Note: These are just guidelines. You can modify them to suit your needs. For example, if you want to initialize the MongoDB replica set before installing BigchainDB, you _can_ do that. If you'd prefer to use Docker and Kubernetes, you can (and [we have a template](../cloud-deployment-templates/node-on-kubernetes.html)). We don't cover all possible setup procedures here.
|
||||
|
||||
|
||||
## Security Guidelines
|
||||
|
||||
There are many articles, websites and books about securing servers, virtual machines, networks, etc. Consult those.
|
||||
There are some [notes on BigchainDB-specific firewall setup](../appendices/firewall-notes.html) in the Appendices.
|
||||
|
||||
|
||||
## Sync Your System Clock
|
||||
|
||||
A BigchainDB node uses its system clock to generate timestamps for blocks and votes, so that clock should be kept in sync with some standard clock(s). The standard way to do that is to run an NTP daemon (Network Time Protocol daemon) on the node.
|
||||
|
||||
MongoDB also recommends having an NTP daemon running on all MongoDB nodes.
|
||||
|
||||
NTP is a standard protocol. There are many NTP daemons implementing it. We don't recommend a particular one. On the contrary, we recommend that different nodes in a cluster run different NTP daemons, so that a problem with one daemon won't affect all nodes.
|
||||
|
||||
Please see the [notes on NTP daemon setup](../appendices/ntp-notes.html) in the Appendices.
|
||||
|
||||
|
||||
## Set Up Storage for MongoDB
|
||||
|
||||
We suggest you set up a separate storage device (partition, RAID array, or logical volume) to store the data in the MongoDB database. Here are some questions to ask:
|
||||
|
||||
* How easy will it be to add storage in the future? Will I have to shut down my server?
|
||||
* How big can the storage get? (Remember that [RAID](https://en.wikipedia.org/wiki/RAID) can be used to make several physical drives look like one.)
|
||||
* How fast can it read & write data? How many input/output operations per second (IOPS)?
|
||||
* How does IOPS scale as more physical hard drives are added?
|
||||
* What's the latency?
|
||||
* What's the reliability? Is there replication?
|
||||
* What's in the Service Level Agreement (SLA), if applicable?
|
||||
* What's the cost?
|
||||
|
||||
There are many options and tradeoffs.
|
||||
|
||||
Consult the MongoDB documentation for its recommendations regarding storage hardware, software and settings, e.g. in the [MongoDB Production Notes](https://docs.mongodb.com/manual/administration/production-notes/).
|
||||
|
||||
|
||||
## Install and Run MongoDB
|
||||
|
||||
* [Install MongoDB 3.4+](https://docs.mongodb.com/manual/installation/). (BigchainDB only works with MongoDB 3.4+.)
|
||||
* [Run MongoDB (mongod)](https://docs.mongodb.com/manual/reference/program/mongod/)
|
||||
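As a rough illustration only (the replica set name and data path below are assumptions, not recommendations; consult the mongod documentation for the options that fit your deployment), starting mongod for a replica set named `bigchain-rs` might look like:
```text
mongod --replSet bigchain-rs --dbpath /data/db
```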
|
||||
|
||||
## Install BigchainDB Server
|
||||
|
||||
### Install BigchainDB Server Dependencies
|
||||
|
||||
Before you can install BigchainDB Server, you must [install its OS-level dependencies](../appendices/install-os-level-deps.html) and you may have to [install Python 3.4+](https://www.python.org/downloads/).
|
||||
|
||||
### How to Install BigchainDB Server with pip
|
||||
|
||||
BigchainDB is distributed as a Python package on PyPI so you can install it using `pip`. First, make sure you have an up-to-date Python 3.4+ version of `pip` installed:
|
||||
```text
|
||||
pip -V
|
||||
```
|
||||
|
||||
If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.4, then you must install a `pip` version associated with Python 3.4+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).
|
||||
|
||||
On Ubuntu 16.04, we found that this works:
|
||||
```text
|
||||
sudo apt-get install python3-pip
|
||||
```
|
||||
|
||||
That should install a Python 3 version of `pip` named `pip3`. If that didn't work, then another way to get `pip3` is to do `sudo apt-get install python3-setuptools` followed by `sudo easy_install3 pip`.
|
||||
|
||||
You can upgrade `pip` (`pip3`) and `setuptools` to the latest versions using:
|
||||
```text
|
||||
pip3 install --upgrade pip setuptools
|
||||
pip3 -V
|
||||
```
|
||||
|
||||
Now you can install BigchainDB Server using:
|
||||
```text
|
||||
pip3 install bigchaindb
|
||||
```
|
||||
|
||||
(If you're not in a virtualenv and you want to install bigchaindb system-wide, then put `sudo` in front.)
|
||||
|
||||
Note: You can use `pip3` to upgrade the `bigchaindb` package to the latest version using `pip3 install --upgrade bigchaindb`.
|
||||
|
||||
|
||||
### How to Install BigchainDB Server from Source
|
||||
|
||||
If you want to install BigchainDB from source because you want to use the very latest bleeding-edge code, clone the public repository:
|
||||
```text
|
||||
git clone git@github.com:bigchaindb/bigchaindb.git
|
||||
cd bigchaindb
|
||||
python setup.py install
|
||||
```
|
||||
|
||||
|
||||
## Configure BigchainDB Server
|
||||
|
||||
Start by creating a default BigchainDB config file for a MongoDB backend:
|
||||
```text
|
||||
bigchaindb -y configure mongodb
|
||||
```
|
||||
|
||||
(There's documentation for the `bigchaindb` command in the section on [the BigchainDB Command Line Interface (CLI)](../server-reference/bigchaindb-cli.html).)
|
||||
|
||||
Edit the created config file by opening `$HOME/.bigchaindb` in your text editor:
|
||||
|
||||
* Change `"server": {"bind": "localhost:9984", ... }` to `"server": {"bind": "0.0.0.0:9984", ... }`. This makes it so traffic can come from any IP address to port 9984 (the HTTP Client-Server API port).
|
||||
* Change `"keyring": []` to `"keyring": ["public_key_of_other_node_A", "public_key_of_other_node_B", "..."]` i.e. a list of the public keys of all the other nodes in the cluster. The keyring should _not_ include your node's public key.
|
||||
* Ensure that `database.host` and `database.port` are set to the hostname and port of your MongoDB instance. (The port is usually 27017, unless you changed it.)
|
||||
|
||||
For more information about the BigchainDB config file, see the page about the [BigchainDB configuration settings](../server-reference/configuration.html).
|
||||
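As a sketch only (the values below are placeholders, not recommendations; your bind address, public keys, hostname and port will differ), the relevant parts of the edited config file might end up looking something like this:
```text
{
    "server": {"bind": "0.0.0.0:9984", ...},
    "keyring": ["public_key_of_other_node_A", "public_key_of_other_node_B"],
    "database": {"backend": "mongodb", "host": "localhost", "port": 27017, ...},
    ...
}
```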
|
||||
|
||||
## Get All Other Nodes to Update Their Keyring
|
||||
|
||||
All other BigchainDB nodes in the cluster must add your new node's public key to their BigchainDB keyring. Currently, the only way to get BigchainDB Server to "notice" a changed keyring is to shut it down and start it back up again (with the new keyring).
|
||||
|
||||
|
||||
## Maybe Update the MongoDB Replica Set
|
||||
|
||||
**If this isn't the first node in the BigchainDB cluster**, then someone with an existing BigchainDB node (not you) must add your MongoDB instance to the MongoDB replica set. They can do so (on their node) using:
|
||||
```text
|
||||
bigchaindb add-replicas your-mongod-hostname:27017
|
||||
```
|
||||
|
||||
where they must replace `your-mongod-hostname` with the actual hostname of your MongoDB instance, and they may have to replace `27017` with the actual port.
|
||||
|
||||
|
||||
## Start BigchainDB
|
||||
|
||||
**Warning: If you're not deploying the first node in the BigchainDB cluster, then don't start BigchainDB before your MongoDB instance has been added to the MongoDB replica set (as outlined above).**
|
||||
|
||||
```text
|
||||
# See warning above
|
||||
bigchaindb start
|
||||
```
|
@ -16,12 +16,14 @@ For convenience, here's a list of all the relevant environment variables (docume
|
||||
`BIGCHAINDB_DATABASE_PORT`<br>
|
||||
`BIGCHAINDB_DATABASE_NAME`<br>
|
||||
`BIGCHAINDB_DATABASE_REPLICASET`<br>
|
||||
`BIGCHAINDB_DATABASE_CONNECTION_TIMEOUT`<br>
|
||||
`BIGCHAINDB_DATABASE_MAX_TRIES`<br>
|
||||
`BIGCHAINDB_SERVER_BIND`<br>
|
||||
`BIGCHAINDB_SERVER_LOGLEVEL`<br>
|
||||
`BIGCHAINDB_SERVER_WORKERS`<br>
|
||||
`BIGCHAINDB_SERVER_THREADS`<br>
|
||||
`BIGCHAINDB_CONFIG_PATH`<br>
|
||||
`BIGCHAINDB_BACKLOG_REASSIGN_DELAY`<br>
|
||||
`BIGCHAINDB_CONSENSUS_PLUGIN`<br>
|
||||
`BIGCHAINDB_LOG`<br>
|
||||
`BIGCHAINDB_LOG_FILE`<br>
|
||||
`BIGCHAINDB_LOG_LEVEL_CONSOLE`<br>
|
||||
@ -85,9 +87,18 @@ Note how the keys in the list are separated by colons.
|
||||
```
|
||||
|
||||
|
||||
## database.backend, database.host, database.port, database.name & database.replicaset
|
||||
## database.*
|
||||
|
||||
The database backend to use (`rethinkdb` or `mongodb`) and its hostname, port and name. If the database backend is `mongodb`, then there's a fifth setting: the name of the replica set. If the database backend is `rethinkdb`, you *can* set the name of the replica set, but it won't be used for anything.
|
||||
The settings with names of the form `database.*` are for the database backend
|
||||
(currently either RethinkDB or MongoDB). They are:
|
||||
|
||||
* `database.backend` is either `rethinkdb` or `mongodb`.
|
||||
* `database.host` is the hostname (FQDN) of the backend database.
|
||||
* `database.port` is self-explanatory.
|
||||
* `database.name` is a user-chosen name for the database inside RethinkDB or MongoDB, e.g. `bigchain`.
|
||||
* `database.replicaset` is only relevant if using MongoDB; it's the name of the MongoDB replica set, e.g. `bigchain-rs`.
|
||||
* `database.connection_timeout` is the maximum number of milliseconds that BigchainDB will wait before giving up on one attempt to connect to the database backend. Note: At the time of writing, this setting was only used by MongoDB; there was an open [issue to make RethinkDB use it as well](https://github.com/bigchaindb/bigchaindb/issues/1337).
|
||||
* `database.max_tries` is the maximum number of times that BigchainDB will try to establish a connection with the database backend. If 0, then it will try forever.
|
||||
|
||||
**Example using environment variables**
|
||||
```text
|
||||
@ -96,6 +107,8 @@ export BIGCHAINDB_DATABASE_HOST=localhost
|
||||
export BIGCHAINDB_DATABASE_PORT=27017
|
||||
export BIGCHAINDB_DATABASE_NAME=bigchain
|
||||
export BIGCHAINDB_DATABASE_REPLICASET=bigchain-rs
|
||||
export BIGCHAINDB_DATABASE_CONNECTION_TIMEOUT=5000
|
||||
export BIGCHAINDB_DATABASE_MAX_TRIES=3
|
||||
```
|
||||
|
||||
**Default values**
|
||||
@ -105,8 +118,10 @@ If (no environment variables were set and there's no local config file), or you
|
||||
"database": {
|
||||
"backend": "rethinkdb",
|
||||
"host": "localhost",
|
||||
"port": 28015,
|
||||
"name": "bigchain",
|
||||
"port": 28015
|
||||
"connection_timeout": 5000,
|
||||
"max_tries": 3
|
||||
}
|
||||
```
|
||||
|
||||
@ -115,24 +130,31 @@ If you used `bigchaindb -y configure mongodb` to create a default local config f
|
||||
"database": {
|
||||
"backend": "mongodb",
|
||||
"host": "localhost",
|
||||
"name": "bigchain",
|
||||
"port": 27017,
|
||||
"replicaset": "bigchain-rs"
|
||||
"name": "bigchain",
|
||||
"replicaset": "bigchain-rs",
|
||||
"connection_timeout": 5000,
|
||||
"max_tries": 3
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## server.bind, server.workers & server.threads
|
||||
## server.bind, server.loglevel, server.workers & server.threads
|
||||
|
||||
These settings are for the [Gunicorn HTTP server](http://gunicorn.org/), which is used to serve the [HTTP client-server API](../drivers-clients/http-client-server-api.html).
|
||||
These settings are for the [Gunicorn HTTP server](http://gunicorn.org/), which is used to serve the [HTTP client-server API](../http-client-server-api.html).
|
||||
|
||||
`server.bind` is where to bind the Gunicorn HTTP server socket. It's a string. It can be any valid value for [Gunicorn's bind setting](http://docs.gunicorn.org/en/stable/settings.html#bind). If you want to allow IPv4 connections from anyone, on port 9984, use '0.0.0.0:9984'. In a production setting, we recommend you use Gunicorn behind a reverse proxy server. If Gunicorn and the reverse proxy are running on the same machine, then use 'localhost:PORT' where PORT is _not_ 9984 (because the reverse proxy needs to listen on port 9984). Maybe use PORT=9983 in that case because we know 9983 isn't used. If Gunicorn and the reverse proxy are running on different machines, then use 'A.B.C.D:9984' where A.B.C.D is the IP address of the reverse proxy. There's [more information about deploying behind a reverse proxy in the Gunicorn documentation](http://docs.gunicorn.org/en/stable/deploy.html). (They call it a proxy.)
|
||||
|
||||
`server.loglevel` sets the log level of Gunicorn's Error log outputs. See
|
||||
[Gunicorn's documentation](http://docs.gunicorn.org/en/latest/settings.html#loglevel)
|
||||
for more information.
|
||||
|
||||
`server.workers` is [the number of worker processes](http://docs.gunicorn.org/en/stable/settings.html#workers) for handling requests. If `None` (the default), the value will be (cpu_count * 2 + 1). `server.threads` is [the number of threads-per-worker](http://docs.gunicorn.org/en/stable/settings.html#threads) for handling requests. If `None` (the default), the value will be (cpu_count * 2 + 1). The HTTP server will be able to handle `server.workers` * `server.threads` requests simultaneously.
|
||||
|
||||
**Example using environment variables**
|
||||
```text
|
||||
export BIGCHAINDB_SERVER_BIND=0.0.0.0:9984
|
||||
export BIGCHAINDB_SERVER_LOGLEVEL=debug
|
||||
export BIGCHAINDB_SERVER_WORKERS=5
|
||||
export BIGCHAINDB_SERVER_THREADS=5
|
||||
```
|
||||
@ -141,6 +163,7 @@ export BIGCHAINDB_SERVER_THREADS=5
|
||||
```js
|
||||
"server": {
|
||||
"bind": "0.0.0.0:9984",
|
||||
"loglevel": "debug",
|
||||
"workers": 5,
|
||||
"threads": 5
|
||||
}
|
||||
@ -150,6 +173,7 @@ export BIGCHAINDB_SERVER_THREADS=5
|
||||
```js
|
||||
"server": {
|
||||
"bind": "localhost:9984",
|
||||
"loglevel": "info",
|
||||
"workers": null,
|
||||
"threads": null
|
||||
}
|
||||
@ -169,21 +193,9 @@ export BIGCHAINDB_BACKLOG_REASSIGN_DELAY=30
|
||||
"backlog_reassign_delay": 120
|
||||
```
|
||||
|
||||
## consensus_plugin
|
||||
|
||||
The [consensus plugin](../appendices/consensus.html) to use.
|
||||
|
||||
**Example using an environment variable**
|
||||
```text
|
||||
export BIGCHAINDB_CONSENSUS_PLUGIN=default
|
||||
```
|
||||
|
||||
**Example config file snippet: the default**
|
||||
```js
|
||||
"consensus_plugin": "default"
|
||||
```
|
||||
|
||||
## log
|
||||
|
||||
The `log` key is expected to point to a mapping (set of key/value pairs)
|
||||
holding the logging configuration.
|
||||
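For illustration only, such a mapping might look something like the sketch below. The key names shown here are assumptions inferred from the `BIGCHAINDB_LOG_FILE` and `BIGCHAINDB_LOG_LEVEL_CONSOLE` environment variables listed earlier, so check the rest of this section for the authoritative list of keys.
```js
"log": {
    "file": "/home/user/bigchaindb.log",
    "level_console": "info"
}
```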
|
||||
|
@ -2,7 +2,9 @@ The WebSocket Event Stream API
|
||||
==============================
|
||||
|
||||
.. important::
|
||||
This is currently scheduled to be implemented in BigchainDB Server 0.10.
|
||||
The WebSocket Event Stream runs on a different port than the Web API. The
|
||||
default port for the Web API is `9984`, while the one for the Event Stream
|
||||
is `9985`.
|
||||
|
||||
BigchainDB provides real-time event streams over the WebSocket protocol with
|
||||
the Event Stream API.
|
||||
@ -28,7 +30,7 @@ response contains a ``streams_<version>`` property in ``_links``::
|
||||
|
||||
{
|
||||
"_links": {
|
||||
"streams_v1": "ws://example.com:9984/api/v1/streams/"
|
||||
"streams_v1": "ws://example.com:9985/api/v1/streams/"
|
||||
}
|
||||
}
|
||||
|
||||
@ -80,9 +82,9 @@ the transaction's ID, associated asset ID, and containing block's ID.
|
||||
Example message::
|
||||
|
||||
{
|
||||
"txid": "<sha3-256 hash>",
|
||||
"assetid": "<sha3-256 hash>",
|
||||
"blockid": "<sha3-256 hash>"
|
||||
"tx_id": "<sha3-256 hash>",
|
||||
"asset_id": "<sha3-256 hash>",
|
||||
"block_id": "<sha3-256 hash>"
|
||||
}
|
||||
|
||||
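A minimal client sketch (an illustration, not part of the official drivers; it assumes
Python 3.5+ and the ``aiohttp`` package, and hard-codes the local ``valid_tx`` stream
URL that would normally be discovered from the API root) that connects to a node's
event stream and prints the fields of each event message::

    import asyncio
    import json

    import aiohttp


    async def listen(url):
        # Open a WebSocket connection and print each event as it arrives.
        async with aiohttp.ClientSession() as session:
            async with session.ws_connect(url) as ws:
                async for msg in ws:
                    if msg.type == aiohttp.WSMsgType.TEXT:
                        event = json.loads(msg.data)
                        print(event['tx_id'], event['asset_id'], event['block_id'])


    # Assumes a node running locally with the default Event Stream port (9985).
    asyncio.get_event_loop().run_until_complete(
        listen('ws://localhost:9985/api/v1/streams/valid_tx'))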
|
13
k8s/nginx-3scale/nginx-3scale-cm.yaml
Normal file
13
k8s/nginx-3scale/nginx-3scale-cm.yaml
Normal file
@ -0,0 +1,13 @@
|
||||
############################################################################
|
||||
# This YAML file describes a ConfigMap with a valid list of ':' separated #
|
||||
# IP addresses (or 'all' for all IP addresses) that can connect to the #
|
||||
# MongoDB instance. We only support the value 'all' currently. #
|
||||
############################################################################
|
||||
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: mongodb-whitelist
|
||||
namespace: default
|
||||
data:
|
||||
allowed-hosts: "all"
|
96
k8s/nginx-3scale/nginx-3scale-dep.yaml
Normal file
96
k8s/nginx-3scale/nginx-3scale-dep.yaml
Normal file
@ -0,0 +1,96 @@
|
||||
###############################################################
|
||||
# This config file runs nginx as a k8s deployment and exposes #
|
||||
# it using an external load balancer. #
|
||||
# This deployment is used as a front end to both BigchainDB #
|
||||
# and MongoDB. #
|
||||
###############################################################
|
||||
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: ngx-instance-0-dep
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: ngx-instance-0-dep
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: nginx-3scale
|
||||
image: bigchaindb/nginx_3scale:0.1
|
||||
# TODO(Krish): Change later to IfNotPresent
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
- name: MONGODB_FRONTEND_PORT
|
||||
value: $(NGX_INSTANCE_0_SERVICE_PORT_NGX_PUBLIC_MDB_PORT)
|
||||
- name: MONGODB_BACKEND_HOST
|
||||
value: mdb-instance-0.default.svc.cluster.local
|
||||
- name: MONGODB_BACKEND_PORT
|
||||
value: "27017"
|
||||
- name: BIGCHAINDB_FRONTEND_PORT
|
||||
value: $(NGX_INSTANCE_0_SERVICE_PORT_NGX_PUBLIC_BDB_PORT)
|
||||
- name: BIGCHAINDB_BACKEND_HOST
|
||||
value: bdb-instance-0.default.svc.cluster.local
|
||||
- name: BIGCHAINDB_BACKEND_PORT
|
||||
value: "9984"
|
||||
- name: MONGODB_WHITELIST
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: mongodb-whitelist
|
||||
key: allowed-hosts
|
||||
- name: DNS_SERVER
|
||||
value: "10.0.0.10"
|
||||
- name: NGINX_HEALTH_CHECK_PORT
|
||||
value: "8888"
|
||||
# TODO(Krish): use secrets for sensitive info
|
||||
- name: THREESCALE_SECRET_TOKEN
|
||||
value: "<Secret Token Here>"
|
||||
- name: THREESCALE_SERVICE_ID
|
||||
value: "<Service ID Here>"
|
||||
- name: THREESCALE_VERSION_HEADER
|
||||
value: "<Version Header Here>"
|
||||
- name: THREESCALE_PROVIDER_KEY
|
||||
value: "<Provider Key Here>"
|
||||
- name: THREESCALE_FRONTEND_API_DNS_NAME
|
||||
value: "<Frontend API FQDN Here>"
|
||||
- name: THREESCALE_UPSTREAM_API_PORT
|
||||
value: "<Upstream API Port Here>"
|
||||
ports:
|
||||
- containerPort: 27017
|
||||
hostPort: 27017
|
||||
name: public-mdb-port
|
||||
protocol: TCP
|
||||
- containerPort: 443
|
||||
hostPort: 443
|
||||
name: public-bdb-port
|
||||
protocol: TCP
|
||||
- containerPort: 8888
|
||||
hostPort: 8888
|
||||
name: health-check
|
||||
protocol: TCP
|
||||
- containerPort: 8080
|
||||
hostPort: 8080
|
||||
name: public-api-port
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: https
|
||||
mountPath: /usr/local/openresty/nginx/conf/ssl/
|
||||
readOnly: true
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 8888
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: https
|
||||
secret:
|
||||
secretName: certs
|
||||
defaultMode: 0400
|
13
k8s/nginx-3scale/nginx-3scale-secret.yaml
Normal file
13
k8s/nginx-3scale/nginx-3scale-secret.yaml
Normal file
@ -0,0 +1,13 @@
|
||||
# Certificate data should be base64 encoded before being embedded here, e.g. by
# using `cat cert.pem | base64 -w 0 > cert.pem.b64` and then copying the
# resulting value here. The same goes for cert.key.
|
||||
# Ref: https://kubernetes.io/docs/concepts/configuration/secret/
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: certs
|
||||
type: Opaque
|
||||
data:
|
||||
cert.pem: <certificate data here>
|
||||
cert.key: <key data here>
|
29
k8s/nginx-3scale/nginx-3scale-svc.yaml
Normal file
29
k8s/nginx-3scale/nginx-3scale-svc.yaml
Normal file
@ -0,0 +1,29 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: ngx-instance-0
|
||||
namespace: default
|
||||
labels:
|
||||
name: ngx-instance-0
|
||||
annotations:
|
||||
# NOTE: the following annotation is a beta feature and
|
||||
# only available in GCE/GKE and Azure as of now
|
||||
# Ref: https://kubernetes.io/docs/tutorials/services/source-ip/
|
||||
service.beta.kubernetes.io/external-traffic: OnlyLocal
|
||||
spec:
|
||||
selector:
|
||||
app: ngx-instance-0-dep
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 443
|
||||
name: ngx-public-bdb-port
|
||||
protocol: TCP
|
||||
- port: 8080
|
||||
targetPort: 8080
|
||||
name: ngx-public-3scale-port
|
||||
protocol: TCP
|
||||
- port: 27017
|
||||
targetPort: 27017
|
||||
name: ngx-public-mdb-port
|
||||
protocol: TCP
|
||||
type: LoadBalancer
|
2
setup.py
2
setup.py
@ -54,6 +54,7 @@ tests_require = [
|
||||
'pytest-mock',
|
||||
'pytest-xdist',
|
||||
'pytest-flask',
|
||||
'pytest-aiohttp',
|
||||
'tox',
|
||||
] + docs_require
|
||||
|
||||
@ -76,6 +77,7 @@ install_requires = [
|
||||
'multipipes~=0.1.0',
|
||||
'jsonschema~=2.5.1',
|
||||
'pyyaml~=3.12',
|
||||
'aiohttp~=2.0',
|
||||
]
|
||||
|
||||
setup(
|
||||
|
@ -68,20 +68,6 @@ The `pytest` command has many options. If you want to learn about all the things
|
||||
|
||||
You can also use [Docker Compose](https://docs.docker.com/compose/) to run all the tests.
|
||||
|
||||
#### With RethinkDB as the backend
|
||||
|
||||
First, start `RethinkDB` in the background:
|
||||
|
||||
```text
|
||||
$ docker-compose up -d rdb
|
||||
```
|
||||
|
||||
then run the tests using:
|
||||
|
||||
```text
|
||||
$ docker-compose run --rm bdb py.test -v
|
||||
```
|
||||
|
||||
#### With MongoDB as the backend
|
||||
|
||||
First, start `MongoDB` in the background:
|
||||
@ -93,7 +79,7 @@ $ docker-compose up -d mdb
|
||||
then run the tests using:
|
||||
|
||||
```text
|
||||
$ docker-compose run --rm bdb-mdb py.test -v
|
||||
$ docker-compose run --rm bdb py.test -v
|
||||
```
|
||||
|
||||
If you've upgraded to a newer version of BigchainDB, you might have to rebuild
|
||||
@ -103,8 +89,22 @@ the images before being able to run the tests. Run:
|
||||
$ docker-compose build
|
||||
```
|
||||
|
||||
#### With RethinkDB as the backend
|
||||
|
||||
First, start `RethinkDB` in the background:
|
||||
|
||||
```text
|
||||
$ docker-compose up -d rdb
|
||||
```
|
||||
|
||||
then run the tests using:
|
||||
|
||||
```text
|
||||
$ docker-compose run --rm bdb-rdb py.test -v
|
||||
```
|
||||
|
||||
to rebuild all the images (usually you only need to rebuild the `bdb` and
|
||||
`bdb-mdb` images).
|
||||
`bdb-rdb` images).
|
||||
|
||||
## Automated Testing of All Pull Requests
|
||||
|
||||
|
@ -124,10 +124,39 @@ def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch):
|
||||
"This node's public key wasn't set anywhere so it can't be exported"
|
||||
|
||||
|
||||
def test_bigchain_run_init_when_db_exists(mock_db_init_with_existing_db):
|
||||
def test_bigchain_run_init_when_db_exists(mocker, capsys):
|
||||
from bigchaindb.commands.bigchaindb import run_init
|
||||
from bigchaindb.common.exceptions import DatabaseAlreadyExists
|
||||
init_db_mock = mocker.patch(
|
||||
'bigchaindb.commands.bigchaindb.schema.init_database',
|
||||
autospec=True,
|
||||
spec_set=True,
|
||||
)
|
||||
init_db_mock.side_effect = DatabaseAlreadyExists
|
||||
args = Namespace(config=None)
|
||||
run_init(args)
|
||||
output_message = capsys.readouterr()[1]
|
||||
print(output_message)
|
||||
assert output_message == (
|
||||
'The database already exists.\n'
|
||||
'If you wish to re-initialize it, first drop it.\n'
|
||||
)
|
||||
|
||||
|
||||
def test__run_init(mocker):
|
||||
from bigchaindb.commands.bigchaindb import _run_init
|
||||
bigchain_mock = mocker.patch(
|
||||
'bigchaindb.commands.bigchaindb.bigchaindb.Bigchain')
|
||||
init_db_mock = mocker.patch(
|
||||
'bigchaindb.commands.bigchaindb.schema.init_database',
|
||||
autospec=True,
|
||||
spec_set=True,
|
||||
)
|
||||
_run_init()
|
||||
bigchain_mock.assert_called_once_with()
|
||||
init_db_mock.assert_called_once_with(
|
||||
connection=bigchain_mock.return_value.connection)
|
||||
bigchain_mock.return_value.create_genesis_block.assert_called_once_with()
|
||||
|
||||
|
||||
@patch('bigchaindb.backend.schema.drop_database')
|
||||
|
@ -199,3 +199,27 @@ def test_full_pipeline(b, user_pk):
|
||||
tx_from_block = set([tx.id for tx in invalid_block.transactions])
|
||||
tx_from_backlog = set([tx['id'] for tx in list(query.get_stale_transactions(b.connection, 0))])
|
||||
assert tx_from_block == tx_from_backlog
|
||||
|
||||
|
||||
def test_handle_block_events():
|
||||
from bigchaindb.events import setup_events_queue, EventTypes
|
||||
|
||||
events_queue = setup_events_queue()
|
||||
e = election.Election(events_queue=events_queue)
|
||||
block_id = 'a' * 64
|
||||
|
||||
assert events_queue.qsize() == 0
|
||||
|
||||
# no event should be emitted in case a block is undecided
|
||||
e.handle_block_events({'status': Bigchain.BLOCK_UNDECIDED}, block_id)
|
||||
assert events_queue.qsize() == 0
|
||||
|
||||
# put an invalid block event in the queue
|
||||
e.handle_block_events({'status': Bigchain.BLOCK_INVALID}, block_id)
|
||||
event = e.event_handler.get_event()
|
||||
assert event.type == EventTypes.BLOCK_INVALID
|
||||
|
||||
# put a valid block event in the queue
|
||||
e.handle_block_events({'status': Bigchain.BLOCK_VALID}, block_id)
|
||||
event = e.event_handler.get_event()
|
||||
assert event.type == EventTypes.BLOCK_VALID
|
||||
|
@ -144,6 +144,8 @@ def test_autoconfigure_read_both_from_file_and_env(monkeypatch, request):
|
||||
DATABASE_PORT = 4242
|
||||
DATABASE_BACKEND = request.config.getoption('--database-backend')
|
||||
SERVER_BIND = '1.2.3.4:56'
|
||||
WSSERVER_HOST = '1.2.3.4'
|
||||
WSSERVER_PORT = 57
|
||||
KEYRING = 'pubkey_0:pubkey_1:pubkey_2'
|
||||
|
||||
file_config = {
|
||||
@ -157,6 +159,8 @@ def test_autoconfigure_read_both_from_file_and_env(monkeypatch, request):
|
||||
'BIGCHAINDB_DATABASE_PORT': str(DATABASE_PORT),
|
||||
'BIGCHAINDB_DATABASE_BACKEND': DATABASE_BACKEND,
|
||||
'BIGCHAINDB_SERVER_BIND': SERVER_BIND,
|
||||
'BIGCHAINDB_WSSERVER_HOST': WSSERVER_HOST,
|
||||
'BIGCHAINDB_WSSERVER_PORT': WSSERVER_PORT,
|
||||
'BIGCHAINDB_KEYRING': KEYRING})
|
||||
|
||||
import bigchaindb
|
||||
@ -198,6 +202,10 @@ def test_autoconfigure_read_both_from_file_and_env(monkeypatch, request):
|
||||
'workers': None,
|
||||
'threads': None,
|
||||
},
|
||||
'wsserver': {
|
||||
'host': WSSERVER_HOST,
|
||||
'port': WSSERVER_PORT,
|
||||
},
|
||||
'database': database,
|
||||
'keypair': {
|
||||
'public': None,
|
||||
|
21
tests/test_events.py
Normal file
21
tests/test_events.py
Normal file
@ -0,0 +1,21 @@
|
||||
def tests_event_handler():
|
||||
from bigchaindb.events import (EventTypes, Event, EventHandler,
|
||||
setup_events_queue)
|
||||
|
||||
# create an event
|
||||
event_data = {'msg': 'some data'}
|
||||
event = Event(EventTypes.BLOCK_VALID, event_data)
|
||||
# create the events queue
|
||||
events_queue = setup_events_queue()
|
||||
|
||||
# create event handler
|
||||
event_handler = EventHandler(events_queue)
|
||||
|
||||
# push an event to the queue
|
||||
event_handler.put_event(event)
|
||||
|
||||
# get the event from the queue
|
||||
event_from_queue = event_handler.get_event()
|
||||
|
||||
assert event_from_queue.type == event.type
|
||||
assert event_from_queue.data == event.data
|
@ -9,14 +9,16 @@ from bigchaindb.pipelines import vote, block, election, stale
|
||||
@patch.object(block, 'start')
|
||||
@patch.object(vote, 'start')
|
||||
@patch.object(Process, 'start')
|
||||
def test_processes_start(mock_vote, mock_block, mock_election, mock_stale,
|
||||
mock_process):
|
||||
@patch('bigchaindb.events.setup_events_queue', spec_set=True, autospec=True)
|
||||
def test_processes_start(mock_setup_events_queue, mock_process, mock_vote,
|
||||
mock_block, mock_election, mock_stale):
|
||||
from bigchaindb import processes
|
||||
|
||||
processes.start()
|
||||
|
||||
mock_vote.assert_called_with()
|
||||
mock_block.assert_called_with()
|
||||
mock_election.assert_called_with()
|
||||
mock_stale.assert_called_with()
|
||||
mock_process.assert_called_with()
|
||||
mock_election.assert_called_once_with(
|
||||
events_queue=mock_setup_events_queue.return_value)
|
||||
|
@ -1,7 +1,9 @@
import pytest
from unittest.mock import patch
from collections import Counter

from bigchaindb.core import Bigchain
from bigchaindb.exceptions import CriticalDuplicateVote
from bigchaindb.voting import Voting, INVALID, VALID, UNDECIDED


@ -37,24 +39,22 @@ def test_count_votes():
        def verify_vote_schema(cls, vote):
            return vote['node_pubkey'] != 'malformed'

    voters = (['cheat', 'cheat', 'says invalid', 'malformed'] +
    voters = (['says invalid', 'malformed'] +
              ['kosher' + str(i) for i in range(10)])

    votes = [Bigchain(v).vote('block', 'a', True) for v in voters]
    votes[2]['vote']['is_block_valid'] = False
    votes[0]['vote']['is_block_valid'] = False
    # Incorrect previous block subtracts from n_valid and adds to n_invalid
    votes[-1]['vote']['previous_block'] = 'z'

    assert TestVoting.count_votes(votes) == {
    by_voter = dict(enumerate(votes))

    assert TestVoting.count_votes(by_voter) == {
        'counts': {
            'n_valid': 9,  # 9 kosher votes
            'n_invalid': 4,  # 1 cheat, 1 invalid, 1 malformed, 1 rogue prev block
            # One of the cheat votes counts towards n_invalid, the other is
            # not counted here.
            # len(cheat) + n_valid + n_invalid == len(votes)
            'n_invalid': 3,  # 1 invalid, 1 malformed, 1 rogue prev block
        },
        'cheat': [votes[:2]],
        'malformed': [votes[3]],
        'malformed': [votes[1]],
        'previous_block': 'a',
        'other_previous_block': {'z': 1},
    }
@ -70,7 +70,8 @@ def test_must_agree_prev_block():
    votes = [Bigchain(v).vote('block', 'a', True) for v in voters]
    votes[0]['vote']['previous_block'] = 'b'
    votes[1]['vote']['previous_block'] = 'c'
    assert TestVoting.count_votes(votes) == {
    by_voter = dict(enumerate(votes))
    assert TestVoting.count_votes(by_voter) == {
        'counts': {
            'n_valid': 2,
            'n_invalid': 2,
@ -78,7 +79,6 @@ def test_must_agree_prev_block():
        'previous_block': 'a',
        'other_previous_block': {'b': 1, 'c': 1},
        'malformed': [],
        'cheat': [],
    }


@ -230,8 +230,19 @@ def test_block_election(b):
        'block_id': 'xyz',
        'counts': {'n_valid': 2, 'n_invalid': 0},
        'ineligible': [votes[-1]],
        'cheat': [],
        'malformed': [],
        'previous_block': 'a',
        'other_previous_block': {},
    }


@patch('bigchaindb.voting.Voting.verify_vote_signature', return_value=True)
def test_duplicate_vote_throws_critical_error(b):
    keyring = 'abc'
    block = {'id': 'xyz', 'block': {'voters': 'ab'}}
    votes = [{
        'node_pubkey': c,
        'vote': {'is_block_valid': True, 'previous_block': 'a'}
    } for c in 'aabc']
    with pytest.raises(CriticalDuplicateVote):
        Voting.block_election(block, votes, keyring)
@ -23,7 +23,7 @@ def test_api_root_endpoint(client):
def test_api_v1_endpoint(client):
    res = client.get('/api/v1')
    docs_url = ['https://docs.bigchaindb.com/projects/server/en/vtsttst',
                '/drivers-clients/http-client-server-api.html',
                '/http-client-server-api.html',
                ]
    assert res.json == {
        '_links': {
@ -31,5 +31,6 @@ def test_api_v1_endpoint(client):
            'self': 'http://localhost/api/v1/',
            'statuses': 'http://localhost/api/v1/statuses/',
            'transactions': 'http://localhost/api/v1/transactions/',
            'streams_v1': 'ws://localhost:9985/api/v1/streams/valid_tx',
        }
    }
238
tests/web/test_websocket_server.py
Normal file
@ -0,0 +1,238 @@
import asyncio
import json
import queue
import random
import threading
import time
from unittest.mock import patch

import pytest


@pytest.fixture
def _block(b, request):
    from bigchaindb.models import Transaction
    total = getattr(request, 'param', 1)
    transactions = [
        Transaction.create(
            [b.me],
            [([b.me], 1)],
            metadata={'msg': random.random()},
        ).sign([b.me_private])
        for _ in range(total)
    ]
    return b.create_block(transactions)


class MockWebSocket:
    def __init__(self):
        self.received = []

    def send_str(self, s):
        self.received.append(s)


@asyncio.coroutine
def test_bridge_sync_async_queue(loop):
    from bigchaindb.web.websocket_server import _multiprocessing_to_asyncio

    sync_queue = queue.Queue()
    async_queue = asyncio.Queue(loop=loop)

    bridge = threading.Thread(target=_multiprocessing_to_asyncio,
                              args=(sync_queue, async_queue, loop),
                              daemon=True)
    bridge.start()

    sync_queue.put('fahren')
    sync_queue.put('auf')
    sync_queue.put('der')
    sync_queue.put('Autobahn')

    result = yield from async_queue.get()
    assert result == 'fahren'

    result = yield from async_queue.get()
    assert result == 'auf'

    result = yield from async_queue.get()
    assert result == 'der'

    result = yield from async_queue.get()
    assert result == 'Autobahn'

    assert async_queue.qsize() == 0
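For orientation, here is a minimal sketch of what the `_multiprocessing_to_asyncio` bridge exercised above presumably does; it is an illustration under that assumption, not the implementation from this commit. A daemon thread blocks on the synchronous queue and hands each item over to the asyncio queue on the event loop's own thread, using the capped-put helper tested next.

```python
# Hypothetical sketch of the bridge behaviour the test above relies on.
def _multiprocessing_to_asyncio(in_queue, out_queue, loop):
    while True:
        value = in_queue.get()  # blocking read from the synchronous queue
        # Hand the item to the asyncio world on the loop's own thread.
        loop.call_soon_threadsafe(_put_into_capped_queue, out_queue, value)
```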
@asyncio.coroutine
def test_put_into_capped_queue(loop):
    from bigchaindb.web.websocket_server import _put_into_capped_queue
    q = asyncio.Queue(maxsize=2, loop=loop)

    _put_into_capped_queue(q, 'Friday')
    assert q._queue[0] == 'Friday'

    _put_into_capped_queue(q, "I'm")
    assert q._queue[0] == 'Friday'
    assert q._queue[1] == "I'm"

    _put_into_capped_queue(q, 'in')
    assert q._queue[0] == "I'm"
    assert q._queue[1] == 'in'

    _put_into_capped_queue(q, 'love')
    assert q._queue[0] == 'in'
    assert q._queue[1] == 'love'
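A plausible reading of `_put_into_capped_queue`, matching the behaviour asserted above (an illustrative sketch, not necessarily the code added by this commit): when the bounded queue is full, the oldest entry is discarded to make room for the newest one.

```python
# Hypothetical sketch: keep only the most recent items in a bounded asyncio.Queue.
def _put_into_capped_queue(queue, value):
    while True:
        try:
            queue.put_nowait(value)
        except asyncio.QueueFull:
            queue.get_nowait()  # drop the oldest item and retry
        else:
            return
```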
@asyncio.coroutine
def test_capped_queue(loop):
    from bigchaindb.web.websocket_server import _multiprocessing_to_asyncio

    sync_queue = queue.Queue()
    async_queue = asyncio.Queue(maxsize=2, loop=loop)

    bridge = threading.Thread(target=_multiprocessing_to_asyncio,
                              args=(sync_queue, async_queue, loop),
                              daemon=True)
    bridge.start()

    sync_queue.put('we')
    sync_queue.put('are')
    sync_queue.put('the')
    sync_queue.put('robots')

    # Wait until the thread processes all the items
    time.sleep(1)

    result = yield from async_queue.get()
    assert result == 'the'

    result = yield from async_queue.get()
    assert result == 'robots'

    assert async_queue.qsize() == 0


@patch('threading.Thread')
@patch('aiohttp.web.run_app')
@patch('bigchaindb.web.websocket_server.init_app')
@patch('asyncio.get_event_loop', return_value='event-loop')
@patch('asyncio.Queue', return_value='event-queue')
def test_start_creates_an_event_loop(queue_mock, get_event_loop_mock,
                                     init_app_mock, run_app_mock,
                                     thread_mock):
    from bigchaindb import config
    from bigchaindb.web.websocket_server import start, _multiprocessing_to_asyncio

    start(None)
    thread_mock.assert_called_once_with(
        target=_multiprocessing_to_asyncio,
        args=(None, queue_mock.return_value, get_event_loop_mock.return_value),
        daemon=True,
    )
    thread_mock.return_value.start.assert_called_once_with()
    init_app_mock.assert_called_with('event-queue', loop='event-loop')
    run_app_mock.assert_called_once_with(
        init_app_mock.return_value,
        host=config['wsserver']['host'],
        port=config['wsserver']['port'],
    )


@asyncio.coroutine
def test_websocket_string_event(test_client, loop):
    from bigchaindb.web.websocket_server import init_app, POISON_PILL, EVENTS_ENDPOINT

    event_source = asyncio.Queue(loop=loop)
    app = init_app(event_source, loop=loop)
    client = yield from test_client(app)
    ws = yield from client.ws_connect(EVENTS_ENDPOINT)

    yield from event_source.put('hack')
    yield from event_source.put('the')
    yield from event_source.put('planet!')

    result = yield from ws.receive()
    assert result.data == 'hack'

    result = yield from ws.receive()
    assert result.data == 'the'

    result = yield from ws.receive()
    assert result.data == 'planet!'

    yield from event_source.put(POISON_PILL)


@asyncio.coroutine
@pytest.mark.parametrize('_block', (10,), indirect=('_block',), ids=('block',))
def test_websocket_block_event(b, _block, test_client, loop):
    from bigchaindb import events
    from bigchaindb.web.websocket_server import init_app, POISON_PILL, EVENTS_ENDPOINT

    event_source = asyncio.Queue(loop=loop)
    app = init_app(event_source, loop=loop)
    client = yield from test_client(app)
    ws = yield from client.ws_connect(EVENTS_ENDPOINT)
    block = _block.to_dict()
    block_event = events.Event(events.EventTypes.BLOCK_VALID, block)

    yield from event_source.put(block_event)

    for tx in block['block']['transactions']:
        result = yield from ws.receive()
        json_result = json.loads(result.data)
        assert json_result['tx_id'] == tx['id']
        # Since the transactions are all CREATEs, asset id == transaction id
        assert json_result['asset_id'] == tx['id']
        assert json_result['block_id'] == block['id']

    yield from event_source.put(POISON_PILL)
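Read together, the assertions above pin down the shape of the message a WebSocket client receives for each transaction in a valid block. As a reference for consumers, the payload presumably looks like the following; the field names come from the assertions, while the values here are placeholders.

```python
# Hypothetical example of one streamed event, as a Python dict before JSON encoding.
stream_event = {
    'tx_id': '<transaction id>',
    'asset_id': '<asset id; equal to tx_id for CREATE transactions>',
    'block_id': '<id of the block that contains the transaction>',
}
```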
@pytest.mark.skip('Processes are not stopping properly, and the whole test suite would hang')
@pytest.mark.genesis
def test_integration_from_webapi_to_websocket(monkeypatch, client, loop):
    # XXX: I think that the `pytest-aiohttp` plugin is sprinkling too much
    # magic in the `asyncio` module: running this test without monkey-patching
    # `asyncio.get_event_loop` (and without the `loop` fixture) raises a:
    # RuntimeError: There is no current event loop in thread 'MainThread'.
    #
    # That's pretty weird because this test doesn't use the pytest-aiohttp
    # plugin explicitly.
    monkeypatch.setattr('asyncio.get_event_loop', lambda: loop)

    import json
    import random
    import aiohttp

    from bigchaindb.common import crypto
    from bigchaindb import processes
    from bigchaindb.models import Transaction

    # Start BigchainDB
    processes.start()

    loop = asyncio.get_event_loop()

    import time
    time.sleep(1)

    ws_url = client.get('http://localhost:9984/api/v1/').json['_links']['streams_v1']

    # Connect to the WebSocket endpoint
    session = aiohttp.ClientSession()
    ws = loop.run_until_complete(session.ws_connect(ws_url))

    # Create a keypair and generate a new asset
    user_priv, user_pub = crypto.generate_key_pair()
    asset = {'random': random.random()}
    tx = Transaction.create([user_pub], [([user_pub], 1)], asset=asset)
    tx = tx.sign([user_priv])
    # Post the transaction to the BigchainDB Web API
    client.post('/api/v1/transactions/', data=json.dumps(tx.to_dict()))

    result = loop.run_until_complete(ws.receive())
    json_result = json.loads(result.data)
    assert json_result['tx_id'] == tx.id