mirror of
https://github.com/bigchaindb/bigchaindb.git
synced 2024-10-13 13:34:05 +00:00
Merge branch 'tendermint' into proposal-integration-testing
This commit is contained in:
commit
883ba3832c
@ -2,30 +2,26 @@
|
||||
|
||||
set -e -x
|
||||
|
||||
if [[ "${BIGCHAINDB_DATABASE_BACKEND}" == rethinkdb ]]; then
|
||||
docker pull rethinkdb:2.3.5
|
||||
docker run -d --publish=28015:28015 --name rdb rethinkdb:2.3.5
|
||||
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb && \
|
||||
if [[ "${BIGCHAINDB_DATABASE_BACKEND}" == localmongodb && \
|
||||
-z "${BIGCHAINDB_DATABASE_SSL}" ]]; then
|
||||
# Connect to MongoDB on port 27017 via a normal, unsecure connection if
|
||||
# BIGCHAINDB_DATABASE_SSL is unset.
|
||||
# It is unset in this case in .travis.yml.
|
||||
docker pull mongo:3.4.4
|
||||
docker run -d --publish=27017:27017 --name mdb-without-ssl mongo:3.4.4 \
|
||||
--replSet=bigchain-rs
|
||||
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb && \
|
||||
docker pull mongo:3.4
|
||||
docker run -d --publish=27017:27017 --name mdb-without-ssl mongo:3.4 # --replSet=bigchain-rs
|
||||
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == localmongodb && \
|
||||
"${BIGCHAINDB_DATABASE_SSL}" == true ]]; then
|
||||
# Connect to MongoDB on port 27017 via TLS/SSL connection if
|
||||
# BIGCHAINDB_DATABASE_SSL is set.
|
||||
# It is set to 'true' here in .travis.yml. Dummy certificates for testing
|
||||
# are stored under bigchaindb/tests/backend/mongodb-ssl/certs/ directory.
|
||||
docker pull mongo:3.4.4
|
||||
docker pull mongo:3.4
|
||||
docker run -d \
|
||||
--name mdb-with-ssl \
|
||||
--publish=27017:27017 \
|
||||
--volume=${TRAVIS_BUILD_DIR}/tests/backend/mongodb-ssl/certs:/certs \
|
||||
mongo:3.4.4 \
|
||||
--replSet=bigchain-rs \
|
||||
mongo:3.4 \
|
||||
# --replSet=bigchain-rs \
|
||||
--sslAllowInvalidHostnames \
|
||||
--sslMode=requireSSL \
|
||||
--sslCAFile=/certs/ca.crt \
|
||||
|
@ -4,15 +4,14 @@ set -e -x
|
||||
|
||||
if [[ -n ${TOXENV} ]]; then
|
||||
tox -e ${TOXENV}
|
||||
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb && \
|
||||
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == localmongodb && \
|
||||
-z "${BIGCHAINDB_DATABASE_SSL}" ]]; then
|
||||
# Run the full suite of tests for MongoDB over an unsecure connection
|
||||
pytest -sv --database-backend=mongodb -m "serial"
|
||||
pytest -sv --database-backend=mongodb --cov=bigchaindb -m "not serial"
|
||||
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb && \
|
||||
pytest -sv --database-backend=localmongodb --cov=bigchaindb -m tendermint
|
||||
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == localmongodb && \
|
||||
"${BIGCHAINDB_DATABASE_SSL}" == true ]]; then
|
||||
# Run a sub-set of tests over SSL; those marked as 'pytest.mark.bdb_ssl'.
|
||||
pytest -sv --database-backend=mongodb-ssl --cov=bigchaindb -m bdb_ssl
|
||||
pytest -sv --database-backend=localmongodb-ssl --cov=bigchaindb -m bdb_ssl
|
||||
else
|
||||
# Run the full suite of tests for RethinkDB (the default backend when testing)
|
||||
pytest -sv -m "serial"
|
||||
|
9
.gitattributes
vendored
9
.gitattributes
vendored
@ -1,9 +0,0 @@
|
||||
deploy-cluster-aws export-ignore
|
||||
docs export-ignore
|
||||
ntools export-ignore
|
||||
tests export-ignore
|
||||
.gitattributes export-ignore
|
||||
.gitignore export-ignore
|
||||
.travis.yml export-ignore
|
||||
*.md export-ignore
|
||||
codecov.yml export-ignore
|
15
.gitignore
vendored
15
.gitignore
vendored
@ -65,17 +65,6 @@ target/
|
||||
# pyenv
|
||||
.python-version
|
||||
|
||||
# Some files created when deploying a cluster on AWS
|
||||
deploy-cluster-aws/conf/rethinkdb.conf
|
||||
deploy-cluster-aws/confiles/
|
||||
deploy-cluster-aws/client_confile
|
||||
deploy-cluster-aws/hostlist.py
|
||||
deploy-cluster-aws/ssh_key.py
|
||||
|
||||
# Ansible-specific files
|
||||
ntools/one-m/ansible/hosts
|
||||
ntools/one-m/ansible/ansible.cfg
|
||||
|
||||
# Just in time documentation
|
||||
docs/server/source/http-samples
|
||||
|
||||
@ -83,3 +72,7 @@ docs/server/source/http-samples
|
||||
# See https://stackoverflow.com/a/41482391
|
||||
terraform.tfstate
|
||||
terraform.tfstate.backup
|
||||
|
||||
# tendermint data
|
||||
tmdata/data
|
||||
network/*/data
|
||||
|
30
.travis.yml
30
.travis.yml
@ -9,9 +9,8 @@ language: python
|
||||
cache: pip
|
||||
|
||||
python:
|
||||
- 3.5
|
||||
- 3.6
|
||||
|
||||
|
||||
env:
|
||||
- TOXENV=flake8
|
||||
- TOXENV=docsroot
|
||||
@ -19,34 +18,11 @@ env:
|
||||
|
||||
matrix:
|
||||
fast_finish: true
|
||||
exclude:
|
||||
- python: 3.5
|
||||
env: TOXENV=flake8
|
||||
- python: 3.5
|
||||
env: TOXENV=docsroot
|
||||
- python: 3.5
|
||||
env: TOXENV=docsserver
|
||||
include:
|
||||
- python: 3.5
|
||||
env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb
|
||||
- python: 3.5
|
||||
- python: 3.6
|
||||
env:
|
||||
- BIGCHAINDB_DATABASE_BACKEND=mongodb
|
||||
- BIGCHAINDB_DATABASE_BACKEND=localmongodb
|
||||
- BIGCHAINDB_DATABASE_SSL=
|
||||
- python: 3.6
|
||||
env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb
|
||||
- python: 3.6
|
||||
env:
|
||||
- BIGCHAINDB_DATABASE_BACKEND=mongodb
|
||||
- BIGCHAINDB_DATABASE_SSL=
|
||||
- python: 3.5
|
||||
env:
|
||||
- BIGCHAINDB_DATABASE_BACKEND=mongodb
|
||||
- BIGCHAINDB_DATABASE_SSL=true
|
||||
- python: 3.6
|
||||
env:
|
||||
- BIGCHAINDB_DATABASE_BACKEND=mongodb
|
||||
- BIGCHAINDB_DATABASE_SSL=true
|
||||
|
||||
before_install: sudo .ci/travis-before-install.sh
|
||||
|
||||
|
@ -2,15 +2,9 @@
|
||||
|
||||
There are many ways you can contribute to the BigchainDB project, some very easy and others more involved. We want to be friendly and welcoming to all potential contributors, so we ask that everyone involved abide by some simple guidelines outlined in our [Code of Conduct](./CODE_OF_CONDUCT.md).
|
||||
|
||||
Or, are you interested in contributing full-time? BigchainDB is hiring. See [here](https://github.com/bigchaindb/org/blob/master/engjob.md).
|
||||
|
||||
## Easy Ways to Contribute
|
||||
|
||||
The BigchainDB community has a Google Group and a Gitter chatroom. Our [Community page](https://www.bigchaindb.com/community) has more information about those.
|
||||
|
||||
You can also follow us on Twitter [@BigchainDB](https://twitter.com/BigchainDB) or read [our blog on Medium](https://medium.com/the-bigchaindb-blog).
|
||||
|
||||
If you want to file a bug report, suggest a feature, or ask a code-related question, please go to the `bigchaindb/bigchaindb` repository on GitHub and [create a new Issue](https://github.com/bigchaindb/bigchaindb/issues/new). (You will need a [GitHub account](https://github.com/signup/free) (free).) Please describe the issue clearly, including steps to reproduce when it is a bug.
|
||||
If you want to file a bug report or suggest a feature, please go to the `bigchaindb/bigchaindb` repository on GitHub and [create a new Issue](https://github.com/bigchaindb/bigchaindb/issues/new). (You will need a [GitHub account](https://github.com/signup/free) (free).) Please describe the issue clearly, including steps to reproduce it, if it's a bug.
|
||||
|
||||
## How to Contribute Code or Documentation
|
||||
|
||||
@ -34,24 +28,22 @@ Familiarize yourself with how we do coding and documentation in the BigchainDB p
|
||||
* [how we write and run tests](./tests/README.md)
|
||||
* [our documentation strategy](./docs/README.md) (including in-code documentation)
|
||||
* the GitHub Flow (workflow)
|
||||
* [GitHub Guide: Understanding the GitHub Flow](https://guides.github.com/introduction/flow/)
|
||||
* [Scott Chacon's blog post about GitHub Flow](http://scottchacon.com/2011/08/31/github-flow.html)
|
||||
* [GitHub Guide: Understanding the GitHub Flow](https://guides.github.com/introduction/flow/)
|
||||
* [Scott Chacon's blog post about GitHub Flow](http://scottchacon.com/2011/08/31/github-flow.html)
|
||||
* [semantic versioning](http://semver.org/)
|
||||
|
||||
### Step 2 - Install some Dependencies
|
||||
|
||||
* [Install RethinkDB Server](https://rethinkdb.com/docs/install/)
|
||||
* Make sure you have Python 3.5+ (preferably in a virtualenv)
|
||||
* [Install BigchaindB Server's OS-level dependencies](https://docs.bigchaindb.com/projects/server/en/latest/appendices/install-os-level-deps.html)
|
||||
* [Make sure you have the latest Python 3 version of pip and setuptools](https://docs.bigchaindb.com/projects/server/en/latest/appendices/install-latest-pip.html)
|
||||
Install MongoDB, Tendermint, and all of BigchainDB Server's dependencies. The [Quickstart page](https://docs.bigchaindb.com/projects/server/en/latest/quickstart.html) has some pointers. In fact, you could do everything in the Quickstart page short of installing BigchainDB with pip (since you will install from the source on GitHub), and you shouldn't run MongoDB or Tendermint yet.
|
||||
|
||||
### Step 3 - Fork bigchaindb on GitHub
|
||||
### Step 3 - Fork the bigchaindb/bigchaindb GitHub Repository
|
||||
|
||||
In your web browser, go to [the BigchainDB repository on GitHub](https://github.com/bigchaindb/bigchaindb) and click the `Fork` button in the top right corner. This creates a new Git repository named `bigchaindb` in _your_ GitHub account.
|
||||
In your web browser, go to [the bigchaindb/bigchaindb repository on GitHub](https://github.com/bigchaindb/bigchaindb) and click the `Fork` button in the top right corner. This creates a new Git repository named `bigchaindb` in _your_ GitHub account.
|
||||
|
||||
### Step 4 - Clone Your Fork
|
||||
|
||||
(This only has to be done once.) In your local terminal, use Git to clone _your_ `bigchaindb` repository to your local computer. Also add the original GitHub bigchaindb/bigchaindb repository as a remote named `upstream` (a convention):
|
||||
|
||||
```text
|
||||
git clone git@github.com:your-github-username/bigchaindb.git
|
||||
cd bigchaindb
|
||||
@ -61,6 +53,7 @@ git remote add upstream git@github.com:bigchaindb/bigchaindb.git
|
||||
### Step 5 - Fetch and Merge the Latest from `upstream/master`
|
||||
|
||||
Switch to the `master` branch locally, fetch all `upstream` branches, and merge the just-fetched `upstream/master` branch with the local `master` branch:
|
||||
|
||||
```text
|
||||
git checkout master
|
||||
git fetch upstream
|
||||
@ -69,27 +62,28 @@ git merge upstream/master
|
||||
|
||||
### Step 6 - Install the Python module and the CLI
|
||||
|
||||
In order to use and run the source you just cloned from your fork, you need to install BigchainDB on your computer.
|
||||
To use and run the source code you just cloned from your fork, you need to install BigchainDB on your computer.
|
||||
The core of BigchainDB is a Python module you can install using the standard [Python packaging tools](http://python-packaging-user-guide.readthedocs.org/en/latest/).
|
||||
We highly suggest you use `pip` and `virtualenv` to manage your local development.
|
||||
If you need more information on how to do that, refer to the *Python Packaging User Guide* to [install `pip`](http://python-packaging-user-guide.readthedocs.org/en/latest/installing/#requirements-for-installing-packages) and to [create your first `virtualenv`](http://python-packaging-user-guide.readthedocs.org/en/latest/installing/#creating-virtual-environments).
|
||||
|
||||
Once you have `pip` installed and (optionally) you are in a virtualenv, go to the root of the repository (i.e. where the `setup.py` file is), and type:
|
||||
|
||||
```text
|
||||
pip install -e .[dev]
|
||||
```
|
||||
|
||||
This will install the BigchainDB Python module, the CLI, and all the dependencies useful for contributing to the development of BigchainDB.
|
||||
This will install the `bigchaindb` Python module, the BigchainDB Server CLI, and all the dependencies useful for contributing to the development of BigchainDB.
|
||||
How? Let's split the command down into its components:
|
||||
- `pip` is the Python command to install packages
|
||||
- `install` tells pip to use the *install* action
|
||||
- `-e` installs a project in [editable mode](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs)
|
||||
- `.` installs what's in the current directory
|
||||
- `[dev]` adds some [extra requirements](https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies) to the installation. (If you are curious, open `setup.py` and look for `dev` in the `extras_require` section.)
|
||||
|
||||
* `pip` is the Python command to install packages
|
||||
* `install` tells pip to use the *install* action
|
||||
* `-e` installs a project in [editable mode](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs)
|
||||
* `.` installs what's in the current directory
|
||||
* `[dev]` adds some [extra requirements](https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies) to the installation. (If you are curious, open `setup.py` and look for `dev` in the `extras_require` section.)
|
||||
|
||||
Aside: An alternative to `pip install -e .[dev]` is `python setup.py develop`.
|
||||
|
||||
|
||||
### Step 7 - Create a New Branch for Each Bug/Feature
|
||||
|
||||
If your new branch is to **fix a bug** identified in a specific GitHub Issue with number `ISSNO`, then name your new branch `bug/ISSNO/short-description-here`. For example, `bug/67/fix-leap-year-crash`.
|
||||
@ -97,6 +91,7 @@ If your new branch is to **fix a bug** identified in a specific GitHub Issue wit
|
||||
If your new branch is to **add a feature** requested in a specific GitHub Issue with number `ISSNO`, then name your new branch `feat/ISSNO/short-description-here`. For example, `feat/135/blue-background-on-mondays`.
|
||||
|
||||
Otherwise, please give your new branch a short, descriptive, all-lowercase name.
|
||||
|
||||
```text
|
||||
git checkout -b new-branch-name
|
||||
```
|
||||
@ -110,16 +105,18 @@ With your new branch checked out locally, make changes or additions to the code
|
||||
* add or update documentation as necessary. Follow [our documentation strategy](./docs/README.md).
|
||||
|
||||
As you go, git add and git commit your changes or additions, e.g.
|
||||
|
||||
```text
|
||||
git add new-or-changed-file-1
|
||||
git add new-or-changed-file-2
|
||||
git commit -m "Short description of new or changed things"
|
||||
```
|
||||
We use [pre-commit](http://pre-commit.com/) which should be triggered with every commit. Some hooks will change files but others will give errors that needs to be fixed. Every time a hook is failing you need to add the changed files again.
|
||||
|
||||
We use [pre-commit](http://pre-commit.com/) which should be triggered with every commit. Some hooks will change files but others will give errors that need to be fixed. Every time a hook is failing you need to add the changed files again.
|
||||
The hooks we use can be found in the [.pre-commit-config.yaml](https://github.com/bigchaindb/bigchaindb/blob/master/.pre-commit-config.yaml) file.
|
||||
|
||||
|
||||
You will want to merge changes from upstream (i.e. the original repository) into your new branch from time to time, using something like:
|
||||
|
||||
```text
|
||||
git fetch upstream
|
||||
git merge upstream/master
|
||||
@ -132,6 +129,7 @@ Once you're done commiting a set of new things and you're ready to submit them f
|
||||
### Step 9 - Push Your New Branch to origin
|
||||
|
||||
Make sure you've commited all the additions or changes you want to include in your pull request. Then push your new branch to origin (i.e. _your_ remote bigchaindb repository).
|
||||
|
||||
```text
|
||||
git push origin new-branch-name
|
||||
```
|
||||
@ -153,28 +151,28 @@ Someone will then merge your branch or suggest changes. If we suggest changes, y
|
||||
Before you submit a pull request, check that it meets these guidelines:
|
||||
|
||||
1. The pull request should include tests.
|
||||
2. If the pull request adds functionality, the docs should be updated. Put
|
||||
1. If the pull request adds functionality, the docs should be updated. Put
|
||||
your new functionality into a function with a docstring, and add the
|
||||
feature to the list in README.rst.
|
||||
3. The pull request should work for Python 3.5, and pass the flake8 check.
|
||||
Check https://travis-ci.org/bigchaindb/bigchaindb-driver/pull_requests
|
||||
and make sure that the tests pass for all supported Python versions.
|
||||
4. Follow the pull request template while creating new PRs, the template will
|
||||
be visible to you when you create a new pull request.
|
||||
1. The pull request should work for Python 3.5, and pass the flake8 check.
|
||||
1. Follow the pull request template when creating new PRs. The template will
|
||||
be inserted when you create a new pull request.
|
||||
|
||||
### Tip: Upgrading All BigchainDB Dependencies
|
||||
|
||||
Over time, your versions of the Python packages used by BigchainDB will get out of date. You can upgrade them using:
|
||||
|
||||
```text
|
||||
pip install --upgrade -e .[dev]
|
||||
```
|
||||
|
||||
## Quick Links
|
||||
|
||||
* [BigchainDB Community links](https://www.bigchaindb.com/community)
|
||||
* [BigchainDB chatroom on Gitter](https://gitter.im/bigchaindb/bigchaindb)
|
||||
* [BigchainDB on Twitter](https://twitter.com/BigchainDB)
|
||||
* [General GitHub Documentation](https://help.github.com/)
|
||||
* [Code of Conduct](./CODE_OF_CONDUCT.md)
|
||||
* [BigchainDB Licenses](./LICENSES.md)
|
||||
* [Contributor License Agreement](https://www.bigchaindb.com/cla)
|
||||
|
||||
(Note: GitHub automatically links to CONTRIBUTING.md when a contributor creates an Issue or opens a Pull Request.)
|
||||
(Note: GitHub automatically links to this file [CONTRIBUTING.md] when a contributor creates a new issue or pull request.)
|
||||
|
2
Makefile
2
Makefile
@ -57,7 +57,7 @@ test-all: ## run tests on every Python version with tox
|
||||
tox
|
||||
|
||||
coverage: ## check code coverage quickly with the default Python
|
||||
pytest -v -n auto --cov=bigchaindb --cov-report term --cov-report html
|
||||
pytest -v -n auto --database-backend=localmongodb --cov=bigchaindb --cov-report term --cov-report html
|
||||
$(BROWSER) htmlcov/index.html
|
||||
|
||||
docs: ## generate Sphinx HTML documentation, including API docs
|
||||
|
55
TENDERMINT_INTEGRATION.rst
Normal file
55
TENDERMINT_INTEGRATION.rst
Normal file
@ -0,0 +1,55 @@
|
||||
**********************
|
||||
Tendermint Integration
|
||||
**********************
|
||||
Quick reference for developers working on the Tendermint integration in
|
||||
BigchainDB.
|
||||
|
||||
Running a single node with ``docker-compose``
|
||||
=============================================
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker-compose -f docker-compose.tendermint.yml up bdb
|
||||
|
||||
The above command will launch all 3 main required services/processes:
|
||||
|
||||
* ``mongodb``
|
||||
* ``tendermint``
|
||||
* ``bigchaindb``
|
||||
|
||||
To follow the logs of the ``tendermint`` service:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker-compose -f docker-compose.tendermint.yml logs -f tendermint
|
||||
|
||||
Simple health check:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker-compose -f docker-compose.tendermint.yml up curl-client
|
||||
|
||||
Post and retrieve a transaction -- copy/paste a driver basic example of a
|
||||
``CREATE`` transaction:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker-compose -f docker-compose.tendermint.yml run --rm driver ipython
|
||||
|
||||
.. todo:: small python script to post and retrieve a transaction.
|
||||
|
||||
|
||||
Running a 4-node cluster with ``docker-compose``
|
||||
================================================
|
||||
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker-compose -f docker-compose.network.yml up -d bdb-one bdb-two bdb-three bdb-four
|
||||
|
||||
|
||||
Simple health check:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker-compose -f docker-compose.network.yml up curl-client
|
@ -21,10 +21,20 @@ _base_database_rethinkdb = {
|
||||
# because dicts are unordered. I tried to configure
|
||||
|
||||
_database_keys_map = {
|
||||
'localmongodb': ('host', 'port', 'name'),
|
||||
'mongodb': ('host', 'port', 'name', 'replicaset'),
|
||||
'rethinkdb': ('host', 'port', 'name')
|
||||
}
|
||||
|
||||
_base_database_localmongodb = {
|
||||
'host': os.environ.get('BIGCHAINDB_DATABASE_HOST', 'localhost'),
|
||||
'port': int(os.environ.get('BIGCHAINDB_DATABASE_PORT', 27017)),
|
||||
'name': os.environ.get('BIGCHAINDB_DATABASE_NAME', 'bigchain'),
|
||||
'replicaset': os.environ.get('BIGCHAINDB_DATABASE_REPLICASET'),
|
||||
'login': os.environ.get('BIGCHAINDB_DATABASE_LOGIN'),
|
||||
'password': os.environ.get('BIGCHAINDB_DATABASE_PASSWORD')
|
||||
}
|
||||
|
||||
_base_database_mongodb = {
|
||||
'host': os.environ.get('BIGCHAINDB_DATABASE_HOST', 'localhost'),
|
||||
'port': int(os.environ.get('BIGCHAINDB_DATABASE_PORT', 27017)),
|
||||
@ -54,7 +64,21 @@ _database_mongodb = {
|
||||
}
|
||||
_database_mongodb.update(_base_database_mongodb)
|
||||
|
||||
_database_localmongodb = {
|
||||
'backend': os.environ.get('BIGCHAINDB_DATABASE_BACKEND', 'localmongodb'),
|
||||
'connection_timeout': 5000,
|
||||
'max_tries': 3,
|
||||
'ssl': bool(os.environ.get('BIGCHAINDB_DATABASE_SSL', False)),
|
||||
'ca_cert': os.environ.get('BIGCHAINDB_DATABASE_CA_CERT'),
|
||||
'certfile': os.environ.get('BIGCHAINDB_DATABASE_CERTFILE'),
|
||||
'keyfile': os.environ.get('BIGCHAINDB_DATABASE_KEYFILE'),
|
||||
'keyfile_passphrase': os.environ.get('BIGCHAINDB_DATABASE_KEYFILE_PASSPHRASE'),
|
||||
'crlfile': os.environ.get('BIGCHAINDB_DATABASE_CRLFILE')
|
||||
}
|
||||
_database_localmongodb.update(_base_database_localmongodb)
|
||||
|
||||
_database_map = {
|
||||
'localmongodb': _database_localmongodb,
|
||||
'mongodb': _database_mongodb,
|
||||
'rethinkdb': _database_rethinkdb
|
||||
}
|
||||
|
@ -8,6 +8,7 @@ from bigchaindb.backend.exceptions import ConnectionError
|
||||
|
||||
|
||||
BACKENDS = {
|
||||
'localmongodb': 'bigchaindb.backend.localmongodb.connection.LocalMongoDBConnection',
|
||||
'mongodb': 'bigchaindb.backend.mongodb.connection.MongoDBConnection',
|
||||
'rethinkdb': 'bigchaindb.backend.rethinkdb.connection.RethinkDBConnection'
|
||||
}
|
||||
|
21
bigchaindb/backend/localmongodb/__init__.py
Normal file
21
bigchaindb/backend/localmongodb/__init__.py
Normal file
@ -0,0 +1,21 @@
|
||||
"""MongoDB backend implementation.
|
||||
|
||||
Contains a MongoDB-specific implementation of the
|
||||
:mod:`~bigchaindb.backend.schema` interface.
|
||||
|
||||
You can specify BigchainDB to use MongoDB as its database backend by either
|
||||
setting ``database.backend`` to ``'localmongodb'`` in your configuration file, or
|
||||
setting the ``BIGCHAINDB_DATABASE_BACKEND`` environment variable to
|
||||
``'localmongodb'``.
|
||||
|
||||
If configured to use MongoDB, BigchainDB will automatically return instances
|
||||
of :class:`~bigchaindb.backend.localmongodb.LocalMongoDBConnection` for
|
||||
:func:`~bigchaindb.backend.connection.connect` and dispatch calls of the
|
||||
generic backend interfaces to the implementations in this module.
|
||||
"""
|
||||
|
||||
# Register the single dispatched modules on import.
|
||||
from bigchaindb.backend.localmongodb import schema, query # noqa
|
||||
|
||||
# MongoDBConnection should always be accessed via
|
||||
# ``bigchaindb.backend.connect()``.
|
5
bigchaindb/backend/localmongodb/connection.py
Normal file
5
bigchaindb/backend/localmongodb/connection.py
Normal file
@ -0,0 +1,5 @@
|
||||
from bigchaindb.backend.mongodb.connection import MongoDBConnection
|
||||
|
||||
|
||||
class LocalMongoDBConnection(MongoDBConnection):
|
||||
pass
|
137
bigchaindb/backend/localmongodb/query.py
Normal file
137
bigchaindb/backend/localmongodb/query.py
Normal file
@ -0,0 +1,137 @@
|
||||
"""Query implementation for MongoDB"""
|
||||
|
||||
from pymongo import DESCENDING
|
||||
|
||||
from bigchaindb import backend
|
||||
from bigchaindb.backend.exceptions import DuplicateKeyError
|
||||
from bigchaindb.backend.utils import module_dispatch_registrar
|
||||
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
|
||||
from bigchaindb.common.transaction import Transaction
|
||||
from bigchaindb.backend import mongodb
|
||||
|
||||
register_query = module_dispatch_registrar(backend.query)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def store_transaction(conn, signed_transaction):
|
||||
try:
|
||||
return conn.run(
|
||||
conn.collection('transactions')
|
||||
.insert_one(signed_transaction))
|
||||
except DuplicateKeyError:
|
||||
pass
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_transaction(conn, transaction_id):
|
||||
try:
|
||||
return conn.run(
|
||||
conn.collection('transactions')
|
||||
.find_one({'id': transaction_id}, {'_id': 0}))
|
||||
except IndexError:
|
||||
pass
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def store_asset(conn, asset):
|
||||
try:
|
||||
return conn.run(
|
||||
conn.collection('assets')
|
||||
.insert_one(asset))
|
||||
except DuplicateKeyError:
|
||||
pass
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_asset(conn, asset_id):
|
||||
try:
|
||||
return conn.run(
|
||||
conn.collection('assets')
|
||||
.find_one({'id': asset_id}, {'_id': 0, 'id': 0}))
|
||||
except IndexError:
|
||||
pass
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_spent(conn, transaction_id, output):
|
||||
try:
|
||||
return conn.run(
|
||||
conn.collection('transactions')
|
||||
.find_one({'inputs.fulfills.transaction_id': transaction_id,
|
||||
'inputs.fulfills.output_index': output},
|
||||
{'_id': 0}))
|
||||
except IndexError:
|
||||
pass
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_latest_block(conn):
|
||||
return conn.run(
|
||||
conn.collection('blocks')
|
||||
.find_one(sort=[('height', DESCENDING)]))
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def store_block(conn, block):
|
||||
try:
|
||||
return conn.run(
|
||||
conn.collection('blocks')
|
||||
.insert_one(block))
|
||||
except DuplicateKeyError:
|
||||
pass
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_txids_filtered(conn, asset_id, operation=None):
|
||||
match_create = {
|
||||
'operation': 'CREATE',
|
||||
'id': asset_id
|
||||
}
|
||||
match_transfer = {
|
||||
'operation': 'TRANSFER',
|
||||
'asset.id': asset_id
|
||||
}
|
||||
|
||||
if operation == Transaction.CREATE:
|
||||
match = match_create
|
||||
elif operation == Transaction.TRANSFER:
|
||||
match = match_transfer
|
||||
else:
|
||||
match = {'$or': [match_create, match_transfer]}
|
||||
|
||||
pipeline = [
|
||||
{'$match': match}
|
||||
]
|
||||
cursor = conn.run(
|
||||
conn.collection('transactions')
|
||||
.aggregate(pipeline))
|
||||
return (elem['id'] for elem in cursor)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def text_search(*args, **kwargs):
|
||||
return mongodb.query.text_search(*args, **kwargs)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_owned_ids(conn, owner):
|
||||
cursor = conn.run(
|
||||
conn.collection('transactions').aggregate([
|
||||
{'$match': {'outputs.public_keys': owner}},
|
||||
{'$project': {'_id': False}}
|
||||
]))
|
||||
return cursor
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_spending_transactions(conn, inputs):
|
||||
cursor = conn.run(
|
||||
conn.collection('transactions').aggregate([
|
||||
{'$match': {
|
||||
'inputs.fulfills': {
|
||||
'$in': inputs,
|
||||
},
|
||||
}},
|
||||
{'$project': {'_id': False}}
|
||||
]))
|
||||
return cursor
|
88
bigchaindb/backend/localmongodb/schema.py
Normal file
88
bigchaindb/backend/localmongodb/schema.py
Normal file
@ -0,0 +1,88 @@
|
||||
"""Utils to initialize and drop the database."""
|
||||
|
||||
import logging
|
||||
|
||||
from pymongo import ASCENDING, DESCENDING, TEXT
|
||||
|
||||
from bigchaindb import backend
|
||||
from bigchaindb.common import exceptions
|
||||
from bigchaindb.backend.utils import module_dispatch_registrar
|
||||
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
register_schema = module_dispatch_registrar(backend.schema)
|
||||
|
||||
|
||||
@register_schema(LocalMongoDBConnection)
|
||||
def create_database(conn, dbname):
|
||||
if dbname in conn.conn.database_names():
|
||||
raise exceptions.DatabaseAlreadyExists('Database `{}` already exists'
|
||||
.format(dbname))
|
||||
|
||||
logger.info('Create database `%s`.', dbname)
|
||||
# TODO: read and write concerns can be declared here
|
||||
conn.conn.get_database(dbname)
|
||||
|
||||
|
||||
@register_schema(LocalMongoDBConnection)
|
||||
def create_tables(conn, dbname):
|
||||
for table_name in ['transactions', 'assets', 'blocks']:
|
||||
logger.info('Create `%s` table.', table_name)
|
||||
# create the table
|
||||
# TODO: read and write concerns can be declared here
|
||||
conn.conn[dbname].create_collection(table_name)
|
||||
|
||||
|
||||
@register_schema(LocalMongoDBConnection)
|
||||
def create_indexes(conn, dbname):
|
||||
create_transactions_secondary_index(conn, dbname)
|
||||
create_assets_secondary_index(conn, dbname)
|
||||
create_blocks_secondary_index(conn, dbname)
|
||||
|
||||
|
||||
@register_schema(LocalMongoDBConnection)
|
||||
def drop_database(conn, dbname):
|
||||
conn.conn.drop_database(dbname)
|
||||
|
||||
|
||||
def create_transactions_secondary_index(conn, dbname):
|
||||
logger.info('Create `transactions` secondary index.')
|
||||
|
||||
# to query the transactions for a transaction id, this field is unique
|
||||
conn.conn[dbname]['transactions'].create_index('transactions.id',
|
||||
name='transaction_id')
|
||||
|
||||
# secondary index for asset uuid, this field is unique
|
||||
conn.conn[dbname]['transactions']\
|
||||
.create_index('asset.id', name='asset_id')
|
||||
|
||||
# secondary index on the public keys of outputs
|
||||
conn.conn[dbname]['transactions']\
|
||||
.create_index('outputs.public_keys',
|
||||
name='outputs')
|
||||
|
||||
# secondary index on inputs/transaction links (transaction_id, output)
|
||||
conn.conn[dbname]['transactions']\
|
||||
.create_index([
|
||||
('inputs.fulfills.transaction_id', ASCENDING),
|
||||
('inputs.fulfills.output_index', ASCENDING),
|
||||
], name='inputs')
|
||||
|
||||
|
||||
def create_assets_secondary_index(conn, dbname):
|
||||
logger.info('Create `assets` secondary index.')
|
||||
|
||||
# unique index on the id of the asset.
|
||||
# the id is the txid of the transaction that created the asset
|
||||
conn.conn[dbname]['assets'].create_index('id',
|
||||
name='asset_id',
|
||||
unique=True)
|
||||
|
||||
# full text search index
|
||||
conn.conn[dbname]['assets'].create_index([('$**', TEXT)], name='text')
|
||||
|
||||
|
||||
def create_blocks_secondary_index(conn, dbname):
|
||||
conn.conn[dbname]['blocks']\
|
||||
.create_index([('height', DESCENDING)], name='height')
|
@ -30,7 +30,7 @@ class MongoDBConnection(Connection):
|
||||
"""
|
||||
|
||||
super().__init__(**kwargs)
|
||||
self.replicaset = replicaset or bigchaindb.config['database']['replicaset']
|
||||
self.replicaset = replicaset or bigchaindb.config['database'].get('replicaset')
|
||||
self.ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
|
||||
self.login = login or bigchaindb.config['database'].get('login')
|
||||
self.password = password or bigchaindb.config['database'].get('password')
|
||||
@ -83,21 +83,22 @@ class MongoDBConnection(Connection):
|
||||
"""
|
||||
|
||||
try:
|
||||
# we should only return a connection if the replica set is
|
||||
# initialized. initialize_replica_set will check if the
|
||||
# replica set is initialized else it will initialize it.
|
||||
initialize_replica_set(self.host,
|
||||
self.port,
|
||||
self.connection_timeout,
|
||||
self.dbname,
|
||||
self.ssl,
|
||||
self.login,
|
||||
self.password,
|
||||
self.ca_cert,
|
||||
self.certfile,
|
||||
self.keyfile,
|
||||
self.keyfile_passphrase,
|
||||
self.crlfile)
|
||||
if self.replicaset:
|
||||
# we should only return a connection if the replica set is
|
||||
# initialized. initialize_replica_set will check if the
|
||||
# replica set is initialized else it will initialize it.
|
||||
initialize_replica_set(self.host,
|
||||
self.port,
|
||||
self.connection_timeout,
|
||||
self.dbname,
|
||||
self.ssl,
|
||||
self.login,
|
||||
self.password,
|
||||
self.ca_cert,
|
||||
self.certfile,
|
||||
self.keyfile,
|
||||
self.keyfile_passphrase,
|
||||
self.crlfile)
|
||||
|
||||
# FYI: the connection process might raise a
|
||||
# `ServerSelectionTimeoutError`, that is a subclass of
|
||||
|
@ -19,6 +19,55 @@ def write_transaction(connection, signed_transaction):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def store_asset(connection, asset):
|
||||
"""Write an asset to the asset table.
|
||||
|
||||
Args:
|
||||
asset (dict): the asset.
|
||||
|
||||
Returns:
|
||||
The result of the operation.
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def store_transaction(connection, signed_transaction):
|
||||
"""Same as write_transaction."""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def get_transaction(connection, transaction_id):
|
||||
"""Get a transaction from the transactions table.
|
||||
|
||||
Args:
|
||||
transaction_id (str): the id of the transaction.
|
||||
|
||||
Returns:
|
||||
The result of the operation.
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def get_asset(connection, asset_id):
|
||||
"""Get a transaction from the transactions table.
|
||||
|
||||
Args:
|
||||
asset_id (str): the id of the asset
|
||||
|
||||
Returns:
|
||||
The result of the operation.
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def update_transaction(connection, transaction_id, doc):
|
||||
"""Update a transaction in the backlog table.
|
||||
@ -414,3 +463,24 @@ def text_search(conn, search, *, language='english', case_sensitive=False,
|
||||
|
||||
raise OperationError('This query is only supported when running '
|
||||
'BigchainDB with MongoDB as the backend.')
|
||||
|
||||
|
||||
@singledispatch
|
||||
def get_latest_block(conn):
|
||||
"""Get the latest commited block i.e. block with largest height """
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def store_block(conn, block):
|
||||
"""Write a new block to the `blocks` table
|
||||
|
||||
Args:
|
||||
block (dict): block with current height and block hash.
|
||||
|
||||
Returns:
|
||||
The result of the operation.
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
@ -15,7 +15,7 @@ from bigchaindb.common.exceptions import (StartupError,
|
||||
KeypairNotFoundException,
|
||||
DatabaseDoesNotExist)
|
||||
import bigchaindb
|
||||
from bigchaindb import backend, processes
|
||||
from bigchaindb import backend
|
||||
from bigchaindb.backend import schema
|
||||
from bigchaindb.backend.admin import (set_replicas, set_shards, add_replicas,
|
||||
remove_replicas)
|
||||
@ -206,7 +206,8 @@ def run_start(args):
|
||||
|
||||
logger.info('Starting BigchainDB main process with public key %s',
|
||||
bigchaindb.config['keypair']['public'])
|
||||
processes.start()
|
||||
from bigchaindb.tendermint.commands import start
|
||||
start()
|
||||
|
||||
|
||||
@configure_bigchaindb
|
||||
@ -270,7 +271,7 @@ def create_parser():
|
||||
help='Prepare the config file '
|
||||
'and create the node keypair')
|
||||
config_parser.add_argument('backend',
|
||||
choices=['rethinkdb', 'mongodb'],
|
||||
choices=['rethinkdb', 'mongodb', 'localmongodb'],
|
||||
help='The backend to use. It can be either '
|
||||
'rethinkdb or mongodb.')
|
||||
|
||||
|
@ -30,7 +30,11 @@ class Transaction(Transaction):
|
||||
"""
|
||||
input_conditions = []
|
||||
|
||||
if self.operation == Transaction.TRANSFER:
|
||||
if self.operation == Transaction.CREATE:
|
||||
if bigchain.get_transaction(self.to_dict()['id']):
|
||||
raise DuplicateTransaction('transaction `{}` already exists'
|
||||
.format(self.id))
|
||||
elif self.operation == Transaction.TRANSFER:
|
||||
# store the inputs so that we can check if the asset ids match
|
||||
input_txs = []
|
||||
for input_ in self.inputs:
|
||||
|
5
bigchaindb/tendermint/__init__.py
Normal file
5
bigchaindb/tendermint/__init__.py
Normal file
@ -0,0 +1,5 @@
|
||||
# Order is important!
|
||||
# If we import core first, core will try to load BigchainDB from
|
||||
# __init__ itself, causing a loop.
|
||||
from bigchaindb.tendermint.lib import BigchainDB # noqa
|
||||
from bigchaindb.tendermint.core import App # noqa
|
57
bigchaindb/tendermint/commands.py
Normal file
57
bigchaindb/tendermint/commands.py
Normal file
@ -0,0 +1,57 @@
|
||||
import logging
|
||||
import subprocess
|
||||
import multiprocessing as mp
|
||||
from os import getenv
|
||||
|
||||
import bigchaindb
|
||||
from bigchaindb.tendermint.lib import BigchainDB
|
||||
from bigchaindb.tendermint.core import App
|
||||
from bigchaindb.web import server
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
BANNER = """
|
||||
****************************************************************************
|
||||
* *
|
||||
* Initialization complete. BigchainDB Server is ready and waiting. *
|
||||
* You can send HTTP requests via the HTTP API documented in the *
|
||||
* BigchainDB Server docs at: *
|
||||
* https://bigchaindb.com/http-api *
|
||||
* *
|
||||
* Listening to client connections on: {:<15} *
|
||||
* *
|
||||
****************************************************************************
|
||||
"""
|
||||
|
||||
|
||||
def start():
|
||||
# start the web api
|
||||
app_server = server.create_server(
|
||||
settings=bigchaindb.config['server'],
|
||||
log_config=bigchaindb.config['log'],
|
||||
bigchaindb_factory=BigchainDB)
|
||||
p_webapi = mp.Process(name='webapi', target=app_server.run)
|
||||
p_webapi.start()
|
||||
|
||||
# start message
|
||||
logger.info(BANNER.format(bigchaindb.config['server']['bind']))
|
||||
|
||||
if int(getenv('BIGCHAINDB_START_TENDERMINT', 1)):
|
||||
subprocess.Popen([
|
||||
'tendermint',
|
||||
'node',
|
||||
'--consensus.create_empty_blocks=false',
|
||||
])
|
||||
|
||||
# We need to import this after spawning the web server
|
||||
# because import ABCIServer will monkeypatch all sockets
|
||||
# for gevent.
|
||||
from abci import ABCIServer
|
||||
|
||||
app = ABCIServer(app=App())
|
||||
app.run()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
start()
|
119
bigchaindb/tendermint/core.py
Normal file
119
bigchaindb/tendermint/core.py
Normal file
@ -0,0 +1,119 @@
|
||||
"""This module contains all the goodness to integrate BigchainDB
|
||||
with Tendermint."""
|
||||
import logging
|
||||
|
||||
from abci import BaseApplication, Result
|
||||
from abci.types_pb2 import ResponseEndBlock, ResponseInfo
|
||||
|
||||
from bigchaindb.tendermint import BigchainDB
|
||||
from bigchaindb.tendermint.utils import decode_transaction, calculate_hash
|
||||
from bigchaindb.tendermint.lib import Block
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class App(BaseApplication):
|
||||
"""Bridge between BigchainDB and Tendermint.
|
||||
|
||||
The role of this class is to expose the BigchainDB
|
||||
transactional logic to the Tendermint Consensus
|
||||
State Machine."""
|
||||
|
||||
def __init__(self, bigchaindb=None):
|
||||
if not bigchaindb:
|
||||
bigchaindb = BigchainDB()
|
||||
self.bigchaindb = bigchaindb
|
||||
self.block_txn_ids = []
|
||||
self.block_txn_hash = ''
|
||||
self.validators = None
|
||||
self.new_height = None
|
||||
|
||||
def init_chain(self, validators):
|
||||
"""Initialize chain with block of height 0"""
|
||||
|
||||
block = Block(app_hash='', height=0)
|
||||
self.bigchaindb.store_block(block._asdict())
|
||||
|
||||
def info(self):
|
||||
"""Return height of the latest committed block."""
|
||||
|
||||
r = ResponseInfo()
|
||||
block = self.bigchaindb.get_latest_block()
|
||||
if block:
|
||||
r.last_block_height = block['height']
|
||||
r.last_block_app_hash = block['app_hash'].encode('utf-8')
|
||||
else:
|
||||
r.last_block_height = 0
|
||||
r.last_block_app_hash = b''
|
||||
return r
|
||||
|
||||
def check_tx(self, raw_transaction):
|
||||
"""Validate the transaction before entry into
|
||||
the mempool.
|
||||
|
||||
Args:
|
||||
raw_tx: a raw string (in bytes) transaction."""
|
||||
logger.debug('check_tx: %s', raw_transaction)
|
||||
transaction = decode_transaction(raw_transaction)
|
||||
if self.bigchaindb.validate_transaction(transaction):
|
||||
logger.debug('check_tx: VALID')
|
||||
return Result.ok()
|
||||
else:
|
||||
logger.debug('check_tx: INVALID')
|
||||
return Result.error()
|
||||
|
||||
def begin_block(self, req_begin_block):
|
||||
"""Initialize list of transaction.
|
||||
Args:
|
||||
req_begin_block: block object which contains block header
|
||||
and block hash.
|
||||
"""
|
||||
|
||||
self.block_txn_ids = []
|
||||
|
||||
def deliver_tx(self, raw_transaction):
|
||||
"""Validate the transaction before mutating the state.
|
||||
|
||||
Args:
|
||||
raw_tx: a raw string (in bytes) transaction."""
|
||||
logger.debug('deliver_tx: %s', raw_transaction)
|
||||
transaction = self.bigchaindb.validate_transaction(
|
||||
decode_transaction(raw_transaction))
|
||||
|
||||
if not transaction:
|
||||
logger.debug('deliver_tx: INVALID')
|
||||
return Result.error(log='Invalid transaction')
|
||||
else:
|
||||
logger.debug('storing tx')
|
||||
self.bigchaindb.store_transaction(transaction)
|
||||
self.block_txn_ids.append(transaction.id)
|
||||
return Result.ok()
|
||||
|
||||
def end_block(self, height):
|
||||
"""Calculate block hash using transaction ids and previous block
|
||||
hash to be stored in the next block.
|
||||
|
||||
Args:
|
||||
height (int): new height of the chain."""
|
||||
|
||||
self.new_height = height
|
||||
block_txn_hash = calculate_hash(self.block_txn_ids)
|
||||
block = self.bigchaindb.get_latest_block()
|
||||
|
||||
if self.block_txn_ids:
|
||||
self.block_txn_hash = calculate_hash([block['app_hash'], block_txn_hash])
|
||||
else:
|
||||
self.block_txn_hash = block['app_hash']
|
||||
|
||||
return ResponseEndBlock()
|
||||
|
||||
def commit(self):
|
||||
"""Store the new height and along with block hash."""
|
||||
|
||||
# register a new block only when new transactions are received
|
||||
if self.block_txn_ids:
|
||||
block = Block(app_hash=self.block_txn_hash, height=self.new_height)
|
||||
self.bigchaindb.store_block(block._asdict())
|
||||
|
||||
data = self.block_txn_hash.encode('utf-8')
|
||||
return Result.ok(data=data)
|
48
bigchaindb/tendermint/fastquery.py
Normal file
48
bigchaindb/tendermint/fastquery.py
Normal file
@ -0,0 +1,48 @@
|
||||
from bigchaindb.utils import condition_details_has_owner
|
||||
from bigchaindb.backend import query
|
||||
from bigchaindb.common.transaction import TransactionLink
|
||||
|
||||
|
||||
class FastQuery():
|
||||
"""
|
||||
Database queries that join on block results from a single node.
|
||||
"""
|
||||
|
||||
def get_outputs_by_public_key(self, public_key):
|
||||
"""
|
||||
Get outputs for a public key
|
||||
"""
|
||||
txs = list(query.get_owned_ids(self.connection, public_key))
|
||||
return [TransactionLink(tx['id'], index)
|
||||
for tx in txs
|
||||
for index, output in enumerate(tx['outputs'])
|
||||
if condition_details_has_owner(output['condition']['details'],
|
||||
public_key)]
|
||||
|
||||
def filter_spent_outputs(self, outputs):
|
||||
"""
|
||||
Remove outputs that have been spent
|
||||
|
||||
Args:
|
||||
outputs: list of TransactionLink
|
||||
"""
|
||||
links = [o.to_dict() for o in outputs]
|
||||
txs = list(query.get_spending_transactions(self.connection, links))
|
||||
spends = {TransactionLink.from_dict(input_['fulfills'])
|
||||
for tx in txs
|
||||
for input_ in tx['inputs']}
|
||||
return [ff for ff in outputs if ff not in spends]
|
||||
|
||||
def filter_unspent_outputs(self, outputs):
|
||||
"""
|
||||
Remove outputs that have not been spent
|
||||
|
||||
Args:
|
||||
outputs: list of TransactionLink
|
||||
"""
|
||||
links = [o.to_dict() for o in outputs]
|
||||
txs = list(query.get_spending_transactions(self.connection, links))
|
||||
spends = {TransactionLink.from_dict(input_['fulfills'])
|
||||
for tx in txs
|
||||
for input_ in tx['inputs']}
|
||||
return [ff for ff in outputs if ff in spends]
|
126
bigchaindb/tendermint/lib.py
Normal file
126
bigchaindb/tendermint/lib.py
Normal file
@ -0,0 +1,126 @@
|
||||
import logging
|
||||
from collections import namedtuple
|
||||
from copy import deepcopy
|
||||
from os import getenv
|
||||
from uuid import uuid4
|
||||
|
||||
import requests
|
||||
|
||||
from bigchaindb import backend
|
||||
from bigchaindb import Bigchain
|
||||
from bigchaindb.models import Transaction
|
||||
from bigchaindb.common.exceptions import SchemaValidationError, ValidationError
|
||||
from bigchaindb.tendermint.utils import encode_transaction
|
||||
from bigchaindb.tendermint import fastquery
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
TENDERMINT_HOST = getenv('TENDERMINT_HOST', 'localhost')
|
||||
TENDERMINT_PORT = getenv('TENDERMINT_PORT', '46657')
|
||||
ENDPOINT = 'http://{}:{}/'.format(TENDERMINT_HOST, TENDERMINT_PORT)
|
||||
|
||||
|
||||
class BigchainDB(Bigchain):
|
||||
|
||||
def post_transaction(self, transaction):
|
||||
"""Submit a valid transaction to the mempool."""
|
||||
|
||||
payload = {
|
||||
'method': 'broadcast_tx_async',
|
||||
'jsonrpc': '2.0',
|
||||
'params': [encode_transaction(transaction.to_dict())],
|
||||
'id': str(uuid4())
|
||||
}
|
||||
# TODO: handle connection errors!
|
||||
requests.post(ENDPOINT, json=payload)
|
||||
|
||||
def write_transaction(self, transaction):
|
||||
# This method offers backward compatibility with the Web API.
|
||||
"""Submit a valid transaction to the mempool."""
|
||||
|
||||
self.post_transaction(transaction)
|
||||
|
||||
def store_transaction(self, transaction):
|
||||
"""Store a valid transaction to the transactions collection."""
|
||||
|
||||
transaction = deepcopy(transaction.to_dict())
|
||||
if transaction['operation'] == 'CREATE':
|
||||
asset = transaction.pop('asset')
|
||||
asset['id'] = transaction['id']
|
||||
if asset['data']:
|
||||
backend.query.store_asset(self.connection, asset)
|
||||
|
||||
return backend.query.store_transaction(self.connection, transaction)
|
||||
|
||||
def get_transaction(self, transaction_id, include_status=False):
|
||||
transaction = backend.query.get_transaction(self.connection, transaction_id)
|
||||
asset = backend.query.get_asset(self.connection, transaction_id)
|
||||
|
||||
if transaction:
|
||||
if asset:
|
||||
transaction['asset'] = asset
|
||||
else:
|
||||
transaction['asset'] = {'data': None}
|
||||
|
||||
transaction = Transaction.from_dict(transaction)
|
||||
|
||||
if include_status:
|
||||
return transaction, self.TX_VALID if transaction else None
|
||||
else:
|
||||
return transaction
|
||||
|
||||
def get_spent(self, txid, output):
|
||||
transaction = backend.query.get_spent(self.connection, txid,
|
||||
output)
|
||||
if transaction and transaction['operation'] == 'CREATE':
|
||||
asset = backend.query.get_asset(self.connection, transaction['id'])
|
||||
|
||||
if asset:
|
||||
transaction['asset'] = asset
|
||||
else:
|
||||
transaction['asset'] = {'data': None}
|
||||
|
||||
return Transaction.from_dict(transaction)
|
||||
elif transaction and transaction['operation'] == 'TRANSFER':
|
||||
return Transaction.from_dict(transaction)
|
||||
else:
|
||||
return None
|
||||
|
||||
def store_block(self, block):
|
||||
"""Create a new block."""
|
||||
|
||||
return backend.query.store_block(self.connection, block)
|
||||
|
||||
def get_latest_block(self):
|
||||
"""Get the block with largest height."""
|
||||
|
||||
return backend.query.get_latest_block(self.connection)
|
||||
|
||||
def validate_transaction(self, tx):
|
||||
"""Validate a transaction against the current status of the database."""
|
||||
|
||||
transaction = tx
|
||||
|
||||
if not isinstance(transaction, Transaction):
|
||||
try:
|
||||
transaction = Transaction.from_dict(tx)
|
||||
except SchemaValidationError as e:
|
||||
logger.warning('Invalid transaction schema: %s', e.__cause__.message)
|
||||
return False
|
||||
except ValidationError as e:
|
||||
logger.warning('Invalid transaction (%s): %s', type(e).__name__, e)
|
||||
return False
|
||||
try:
|
||||
return transaction.validate(self)
|
||||
except ValidationError as e:
|
||||
logger.warning('Invalid transaction (%s): %s', type(e).__name__, e)
|
||||
return False
|
||||
return transaction
|
||||
|
||||
@property
|
||||
def fastquery(self):
|
||||
return fastquery.FastQuery(self.connection, self.me)
|
||||
|
||||
|
||||
Block = namedtuple('Block', ('app_hash', 'height'))
|
26
bigchaindb/tendermint/utils.py
Normal file
26
bigchaindb/tendermint/utils.py
Normal file
@ -0,0 +1,26 @@
|
||||
import base64
|
||||
import json
|
||||
import sha3
|
||||
|
||||
|
||||
def encode_transaction(value):
|
||||
"""Encode a transaction (dict) to Base64."""
|
||||
|
||||
return base64.b64encode(json.dumps(value).encode('utf8')).decode('utf8')
|
||||
|
||||
|
||||
def decode_transaction(raw):
|
||||
"""Decode a transaction from bytes to a dict."""
|
||||
|
||||
return json.loads(raw.decode('utf8'))
|
||||
|
||||
|
||||
def calculate_hash(key_list):
|
||||
if not key_list:
|
||||
return ''
|
||||
|
||||
full_hash = sha3.sha3_256()
|
||||
for key in key_list:
|
||||
full_hash.update(key.encode('utf8'))
|
||||
|
||||
return full_hash.hexdigest()
|
@ -54,7 +54,7 @@ class StandaloneApplication(gunicorn.app.base.BaseApplication):
|
||||
return self.application
|
||||
|
||||
|
||||
def create_app(*, debug=False, threads=1):
|
||||
def create_app(*, debug=False, threads=1, bigchaindb_factory=None):
|
||||
"""Return an instance of the Flask application.
|
||||
|
||||
Args:
|
||||
@ -65,6 +65,9 @@ def create_app(*, debug=False, threads=1):
|
||||
an instance of the Flask application.
|
||||
"""
|
||||
|
||||
if not bigchaindb_factory:
|
||||
bigchaindb_factory = Bigchain
|
||||
|
||||
app = Flask(__name__)
|
||||
app.wsgi_app = StripContentTypeMiddleware(app.wsgi_app)
|
||||
|
||||
@ -72,14 +75,14 @@ def create_app(*, debug=False, threads=1):
|
||||
|
||||
app.debug = debug
|
||||
|
||||
app.config['bigchain_pool'] = utils.pool(Bigchain, size=threads)
|
||||
app.config['bigchain_pool'] = utils.pool(bigchaindb_factory, size=threads)
|
||||
|
||||
add_routes(app)
|
||||
|
||||
return app
|
||||
|
||||
|
||||
def create_server(settings, log_config=None):
|
||||
def create_server(settings, log_config=None, bigchaindb_factory=None):
|
||||
"""Wrap and return an application ready to be run.
|
||||
|
||||
Args:
|
||||
@ -104,6 +107,7 @@ def create_server(settings, log_config=None):
|
||||
settings['logger_class'] = 'bigchaindb.log.loggers.HttpServerLogger'
|
||||
settings['custom_log_config'] = log_config
|
||||
app = create_app(debug=settings.get('debug', False),
|
||||
threads=settings['threads'])
|
||||
threads=settings['threads'],
|
||||
bigchaindb_factory=bigchaindb_factory)
|
||||
standalone = StandaloneApplication(app, options=settings)
|
||||
return standalone
|
||||
|
@ -25,11 +25,9 @@ coverage:
|
||||
if_no_uploads: error
|
||||
|
||||
ignore: # files and folders that will be removed during processing
|
||||
- "deploy-cluster-aws/*"
|
||||
- "docs/*"
|
||||
- "tests/*"
|
||||
- "bigchaindb/version.py"
|
||||
- "ntools/*"
|
||||
- "k8s/*"
|
||||
|
||||
comment:
|
||||
|
8
compose/bigchaindb-driver/Dockerfile
Normal file
8
compose/bigchaindb-driver/Dockerfile
Normal file
@ -0,0 +1,8 @@
|
||||
FROM python:3.6.3
|
||||
|
||||
RUN apt-get update && apt-get install -y vim
|
||||
|
||||
RUN mkdir -p /usr/src/app
|
||||
WORKDIR /usr/src/app
|
||||
|
||||
RUN pip install --upgrade pip ipython bigchaindb-driver
|
32
compose/bigchaindb-server/Dockerfile
Normal file
32
compose/bigchaindb-server/Dockerfile
Normal file
@ -0,0 +1,32 @@
|
||||
FROM python:3.6
|
||||
LABEL maintainer "dev@bigchaindb.com"
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y vim \
|
||||
&& pip install -U pip \
|
||||
&& pip install pynacl \
|
||||
&& apt-get autoremove \
|
||||
&& apt-get clean
|
||||
|
||||
ARG backend
|
||||
|
||||
ENV PYTHONUNBUFFERED 0
|
||||
|
||||
ENV BIGCHAINDB_DATABASE_PORT 27017
|
||||
ENV BIGCHAINDB_DATABASE_BACKEND $backend
|
||||
ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984
|
||||
ENV BIGCHAINDB_WSSERVER_HOST 0.0.0.0
|
||||
ENV BIGCHAINDB_WSSERVER_SCHEME ws
|
||||
|
||||
ENV BIGCHAINDB_WSSERVER_ADVERTISED_HOST 0.0.0.0
|
||||
ENV BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME ws
|
||||
|
||||
ENV BIGCHAINDB_START_TENDERMINT 0
|
||||
ENV TENDERMINT_PORT 46657
|
||||
|
||||
|
||||
RUN mkdir -p /usr/src/app
|
||||
COPY . /usr/src/app/
|
||||
WORKDIR /usr/src/app
|
||||
RUN pip install --no-cache-dir -e .[dev]
|
||||
RUN bigchaindb -y configure "$backend"
|
@ -1,34 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Shared AWS-related global constants and functions.
|
||||
"""
|
||||
|
||||
from __future__ import unicode_literals
|
||||
|
||||
|
||||
# Global constants
|
||||
# None yet
|
||||
|
||||
|
||||
# Functions
|
||||
def get_naeips(client0):
|
||||
"""Get a list of (allocated) non-associated elastic IP addresses
|
||||
(NAEIPs) on EC2.
|
||||
|
||||
Args:
|
||||
client0: A client created from an EC2 resource.
|
||||
e.g. client0 = ec2.meta.client
|
||||
See http://boto3.readthedocs.org/en/latest/guide/clients.html
|
||||
|
||||
Returns:
|
||||
A list of NAEIPs in the EC2 account associated with the client.
|
||||
To interpret the contents, see http://tinyurl.com/hrnuy74
|
||||
"""
|
||||
# response is a dict with 2 keys: Addresses and ResponseMetadata
|
||||
# See http://tinyurl.com/hrnuy74
|
||||
response = client0.describe_addresses()
|
||||
allocated_eips = response['Addresses']
|
||||
non_associated_eips = []
|
||||
for eip in allocated_eips:
|
||||
if 'InstanceId' not in eip:
|
||||
non_associated_eips.append(eip)
|
||||
return non_associated_eips
|
@ -1,193 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
# -e Abort at the first failed line (i.e. if exit status is not 0)
|
||||
# -u Abort when undefined variable is used
|
||||
# -o pipefail (Bash-only) Piped commands return the status
|
||||
# of the last failed command, rather than the status of the last command
|
||||
|
||||
# Check for the first command-line argument
|
||||
# (the name of the AWS deployment config file)
|
||||
if [ -z "$1" ]; then
|
||||
# no first argument was provided
|
||||
echo "awsdeploy: missing file operand"
|
||||
echo "Usage: awsdeploy DEPLOY_CONF_FILE"
|
||||
echo "Deploy BigchainDB on AWS using the specified AWS deployment configuration file"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DEPLOY_CONF_FILE=$1
|
||||
|
||||
# Check to make sure DEPLOY_CONF_FILE exists
|
||||
if [ ! -f "$DEPLOY_CONF_FILE" ]; then
|
||||
echo "AWS deployment configuration file not found: "$DEPLOY_CONF_FILE
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Read DEPLOY_CONF_FILE
|
||||
# to set environment variables related to AWS deployment
|
||||
echo "Reading "$DEPLOY_CONF_FILE
|
||||
source $DEPLOY_CONF_FILE
|
||||
|
||||
# Check if SSH_KEY_NAME got set
|
||||
if [ "$SSH_KEY_NAME" == "not-set-yet" ] || \
|
||||
[ "$SSH_KEY_NAME" == "" ] || \
|
||||
[ -z ${SSH_KEY_NAME+x} ]; then
|
||||
echo "SSH_KEY_NAME was not set in that file"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "NUM_NODES = "$NUM_NODES
|
||||
echo "BRANCH = "$BRANCH
|
||||
echo "SSH_KEY_NAME" = $SSH_KEY_NAME
|
||||
echo "USE_KEYPAIRS_FILE = "$USE_KEYPAIRS_FILE
|
||||
echo "IMAGE_ID = "$IMAGE_ID
|
||||
echo "INSTANCE_TYPE = "$INSTANCE_TYPE
|
||||
echo "SECURITY_GROUP = "$SECURITY_GROUP
|
||||
echo "USING_EBS = "$USING_EBS
|
||||
# Treat booleans as strings which must be either "True" or "False"
|
||||
if [ "$USING_EBS" == "True" ]; then
|
||||
echo "EBS_VOLUME_SIZE = "$EBS_VOLUME_SIZE
|
||||
echo "EBS_OPTIMIZED = "$EBS_OPTIMIZED
|
||||
fi
|
||||
echo "ENABLE_WEB_ADMIN = "$ENABLE_WEB_ADMIN
|
||||
if [ "$ENABLE_WEB_ADMIN" == "True" ]; then
|
||||
echo "BIND_HTTP_TO_LOCALHOST = "$BIND_HTTP_TO_LOCALHOST
|
||||
fi
|
||||
|
||||
# Check for the SSH private key file
|
||||
if [ ! -f "$HOME/.ssh/$SSH_KEY_NAME" ]; then
|
||||
echo "The SSH private key file "$HOME"/.ssh/"$SSH_KEY_NAME" is missing"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check for the confiles directory
|
||||
if [ ! -d "confiles" ]; then
|
||||
echo "Directory confiles is needed but does not exist"
|
||||
echo "See make_confiles.sh to find out how to make it"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if NUM_NODES got set
|
||||
if [ -z "$NUM_NODES" ]; then
|
||||
echo "NUM_NODES is not set in the AWS deployment configuration file "$DEPLOY_CONF_FILE
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if the number of files in confiles directory == NUM_NODES
|
||||
CONFILES_COUNT=`ls confiles | wc -l`
|
||||
if [[ $CONFILES_COUNT != $NUM_NODES ]]; then
|
||||
echo "ERROR: CONFILES_COUNT = "$CONFILES_COUNT
|
||||
echo "but NUM_NODES = "$NUM_NODES
|
||||
echo "so there should be "$NUM_NODES" files in the confiles directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Auto-generate the tag to apply to all nodes in the cluster
|
||||
TAG="BDB-Server-"`date +%m-%d@%H:%M`
|
||||
echo "TAG = "$TAG
|
||||
|
||||
# Change the file permissions on the SSH private key file
|
||||
# so that the owner can read it, but that's all
|
||||
chmod 0400 $HOME/.ssh/$SSH_KEY_NAME
|
||||
|
||||
# The following Python script does these things:
|
||||
# 0. allocates more elastic IP addresses if necessary,
|
||||
# 1. launches the specified number of nodes (instances) on Amazon EC2,
|
||||
# 2. tags them with the specified tag,
|
||||
# 3. waits until those instances exist and are running,
|
||||
# 4. for each instance, it associates an elastic IP address
|
||||
# with that instance,
|
||||
# 5. writes the shellscript add2known_hosts.sh
|
||||
# 6. (over)writes a file named hostlist.py
|
||||
# containing a list of all public DNS names.
|
||||
# 7. (over)writes a file named ssh_key.py
|
||||
# containing the location of the private SSH key file.
|
||||
python launch_ec2_nodes.py --deploy-conf-file $DEPLOY_CONF_FILE --tag $TAG
|
||||
|
||||
# Make add2known_hosts.sh executable then execute it.
|
||||
# This adds remote keys to ~/.ssh/known_hosts
|
||||
chmod +x add2known_hosts.sh
|
||||
./add2known_hosts.sh
|
||||
|
||||
# Test an SSH connection to one of the hosts
|
||||
# and prompt the user for their SSH password if necessary
|
||||
fab set_host:0 test_ssh
|
||||
|
||||
# Rollout base packages (dependencies) needed before
|
||||
# storage backend (RethinkDB) and BigchainDB can be rolled out
|
||||
fab install_base_software
|
||||
fab get_pip3
|
||||
fab upgrade_setuptools
|
||||
|
||||
# (Re)create the RethinkDB configuration file conf/rethinkdb.conf
|
||||
if [ "$ENABLE_WEB_ADMIN" == "True" ]; then
|
||||
if [ "$BIND_HTTP_TO_LOCALHOST" == "True" ]; then
|
||||
python create_rethinkdb_conf.py --enable-web-admin --bind-http-to-localhost
|
||||
else
|
||||
python create_rethinkdb_conf.py --enable-web-admin
|
||||
fi
|
||||
else
|
||||
python create_rethinkdb_conf.py
|
||||
fi
|
||||
|
||||
# Rollout RethinkDB and start it
|
||||
fab prep_rethinkdb_storage:$USING_EBS
|
||||
fab install_rethinkdb
|
||||
fab configure_rethinkdb
|
||||
fab delete_rethinkdb_data
|
||||
fab start_rethinkdb
|
||||
|
||||
# Rollout BigchainDB (but don't start it yet)
|
||||
if [ "$BRANCH" == "pypi" ]; then
|
||||
fab install_bigchaindb_from_pypi
|
||||
else
|
||||
cd ..
|
||||
rm -f bigchaindb-archive.tar.gz
|
||||
git archive $BRANCH --format=tar --output=bigchaindb-archive.tar
|
||||
gzip bigchaindb-archive.tar
|
||||
mv bigchaindb-archive.tar.gz deploy-cluster-aws
|
||||
cd deploy-cluster-aws
|
||||
fab install_bigchaindb_from_git_archive
|
||||
rm bigchaindb-archive.tar.gz
|
||||
fi
|
||||
|
||||
# Configure BigchainDB on all nodes
|
||||
|
||||
# The idea is to send a bunch of locally-created configuration
|
||||
# files out to each of the instances / nodes.
|
||||
|
||||
# Assume a set of $NUM_NODES BigchainDB config files
|
||||
# already exists in the confiles directory.
|
||||
# One can create a set using a command like
|
||||
# ./make_confiles.sh confiles $NUM_NODES
|
||||
# (We can't do that here now because this virtual environment
|
||||
# is a Python 2 environment that may not even have
|
||||
# bigchaindb installed, so bigchaindb configure can't be called)
|
||||
|
||||
# Transform the config files in the confiles directory
|
||||
# to have proper keyrings etc.
|
||||
if [ "$USE_KEYPAIRS_FILE" == "True" ]; then
|
||||
python clusterize_confiles.py -k confiles $NUM_NODES
|
||||
else
|
||||
python clusterize_confiles.py confiles $NUM_NODES
|
||||
fi
|
||||
|
||||
# Send one of the config files to each instance
|
||||
for (( HOST=0 ; HOST<$NUM_NODES ; HOST++ )); do
|
||||
CONFILE="bcdb_conf"$HOST
|
||||
echo "Sending "$CONFILE
|
||||
fab set_host:$HOST send_confile:$CONFILE
|
||||
done
|
||||
|
||||
# Initialize BigchainDB (i.e. Create the RethinkDB database,
|
||||
# the tables, the indexes, and the genesis block). Note that
|
||||
# this will only be sent to one of the nodes, see the
|
||||
# definition of init_bigchaindb() in fabfile.py to see why.
|
||||
fab init_bigchaindb
|
||||
fab set_shards:$NUM_NODES
|
||||
echo "To set the replication factor to 3, do: fab set_replicas:3"
|
||||
echo "To start BigchainDB on all the nodes, do: fab start_bigchaindb"
|
||||
|
||||
# cleanup
|
||||
rm add2known_hosts.sh
|
@ -1,108 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Given a directory full of default BigchainDB config files,
|
||||
transform them into config files for a cluster with proper
|
||||
keyrings, API endpoint values, etc. This script is meant to
|
||||
be interpreted as a Python 2 script.
|
||||
|
||||
Note 1: This script assumes that there is a file named hostlist.py
|
||||
containing public_dns_names = a list of the public DNS names of
|
||||
all the hosts in the cluster.
|
||||
|
||||
Note 2: If the optional -k argument is included, then a keypairs.py
|
||||
file must exist and must have enough keypairs in it to assign one
|
||||
to each of the config files in the directory of config files.
|
||||
You can create a keypairs.py file using write_keypairs_file.py
|
||||
|
||||
Usage:
|
||||
python clusterize_confiles.py [-h] [-k] dir number_of_files
|
||||
"""
|
||||
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import os
|
||||
import json
|
||||
import argparse
|
||||
|
||||
from hostlist import public_dns_names
|
||||
|
||||
if os.path.isfile('keypairs.py'):
|
||||
from keypairs import keypairs_list
|
||||
|
||||
|
||||
# Parse the command-line arguments
|
||||
desc = 'Transform a directory of default BigchainDB config files '
|
||||
desc += 'into config files for a cluster'
|
||||
parser = argparse.ArgumentParser(description=desc)
|
||||
parser.add_argument('dir',
|
||||
help='Directory containing the config files')
|
||||
parser.add_argument('number_of_files',
|
||||
help='Number of config files expected in dir',
|
||||
type=int)
|
||||
parser.add_argument('-k', '--use-keypairs',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Use public and private keys from keypairs.py')
|
||||
args = parser.parse_args()
|
||||
|
||||
conf_dir = args.dir
|
||||
num_files_expected = int(args.number_of_files)
|
||||
use_keypairs = args.use_keypairs
|
||||
|
||||
# Check if the number of files in conf_dir is what was expected
|
||||
conf_files = sorted(os.listdir(conf_dir))
|
||||
num_files = len(conf_files)
|
||||
if num_files != num_files_expected:
|
||||
raise ValueError('There are {} files in {} but {} were expected'.
|
||||
format(num_files, conf_dir, num_files_expected))
|
||||
|
||||
# If the -k option was included, check to make sure there are enough keypairs
|
||||
# in keypairs_list
|
||||
num_keypairs = len(keypairs_list)
|
||||
if use_keypairs:
|
||||
if num_keypairs < num_files:
|
||||
raise ValueError('There are {} config files in {} but '
|
||||
'there are only {} keypairs in keypairs.py'.
|
||||
format(num_files, conf_dir, num_keypairs))
|
||||
|
||||
# Make a list containing all the public keys
|
||||
if use_keypairs:
|
||||
print('Using keypairs from keypairs.py')
|
||||
pubkeys = [keypair[1] for keypair in keypairs_list[:num_files]]
|
||||
else:
|
||||
# read the pubkeys from the config files in conf_dir
|
||||
pubkeys = []
|
||||
for filename in conf_files:
|
||||
file_path = os.path.join(conf_dir, filename)
|
||||
with open(file_path, 'r') as f:
|
||||
conf_dict = json.load(f)
|
||||
pubkey = conf_dict['keypair']['public']
|
||||
pubkeys.append(pubkey)
|
||||
|
||||
# Rewrite each config file, one at a time
|
||||
for i, filename in enumerate(conf_files):
|
||||
file_path = os.path.join(conf_dir, filename)
|
||||
with open(file_path, 'r') as f:
|
||||
conf_dict = json.load(f)
|
||||
# If the -k option was included
|
||||
# then replace the private and public keys
|
||||
# with those from keypairs_list
|
||||
if use_keypairs:
|
||||
keypair = keypairs_list[i]
|
||||
conf_dict['keypair']['private'] = keypair[0]
|
||||
conf_dict['keypair']['public'] = keypair[1]
|
||||
# The keyring is the list of *all* public keys
|
||||
# minus the config file's own public key
|
||||
keyring = list(pubkeys)
|
||||
keyring.remove(conf_dict['keypair']['public'])
|
||||
conf_dict['keyring'] = keyring
|
||||
# Allow incoming server traffic from any IP address
|
||||
# to port 9984
|
||||
conf_dict['server']['bind'] = '0.0.0.0:9984'
|
||||
|
||||
# Delete the config file
|
||||
os.remove(file_path)
|
||||
|
||||
# Write new config file with the same filename
|
||||
print('Rewriting {}'.format(file_path))
|
||||
with open(file_path, 'w') as f2:
|
||||
json.dump(conf_dict, f2)
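# Worked example (hypothetical keys, for illustration only): with three config
# files whose public keys are A, B and C, the rewritten files end up with
#     keyring = [B, C]   in the file whose own public key is A
#     keyring = [A, C]   in the file whose own public key is B
#     keyring = [A, B]   in the file whose own public key is C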
|
@ -1,105 +0,0 @@
|
||||
#
|
||||
# RethinkDB instance configuration sample
|
||||
#
|
||||
# - Give this file the extension .conf and put it in /etc/rethinkdb/instances.d in order to enable it.
|
||||
# - See http://www.rethinkdb.com/docs/guides/startup/ for the complete documentation
|
||||
# - Uncomment an option to change its value.
|
||||
#
|
||||
|
||||
###############################
|
||||
## RethinkDB configuration
|
||||
###############################
|
||||
|
||||
### Process options
|
||||
|
||||
## User and group used to run rethinkdb
|
||||
## Command line default: do not change user or group
|
||||
## Init script default: rethinkdb user and group
|
||||
# runuser=rethinkdb
|
||||
# rungroup=rethinkdb
|
||||
|
||||
## Stash the pid in this file when the process is running
|
||||
## Note for systemd users: Systemd uses its own internal mechanism. Do not set this parameter.
|
||||
## Command line default: none
|
||||
## Init script default: /var/run/rethinkdb/<name>/pid_file (where <name> is the name of this config file without the extension)
|
||||
# pid-file=/var/run/rethinkdb/rethinkdb.pid
|
||||
|
||||
### File path options
|
||||
|
||||
## Directory to store data and metadata
|
||||
## Command line default: ./rethinkdb_data
|
||||
## Init script default: /var/lib/rethinkdb/<name>/ (where <name> is the name of this file without the extension)
|
||||
directory=/data
|
||||
|
||||
## Log file options
|
||||
## Default: <directory>/log_file
|
||||
#log-file=/var/log/rethinkdb
|
||||
|
||||
### Network options
|
||||
|
||||
## Address of local interfaces to listen on when accepting connections
|
||||
## May be 'all' or an IP address, loopback addresses are enabled by default
|
||||
## Default: all local addresses
|
||||
# bind=127.0.0.1
|
||||
bind=all
|
||||
|
||||
## Address that other rethinkdb instances will use to connect to this server.
|
||||
## It can be specified multiple times
|
||||
# canonical-address=
|
||||
|
||||
## The port for rethinkdb protocol for client drivers
|
||||
## Default: 28015 + port-offset
|
||||
# driver-port=28015
|
||||
|
||||
## The port for receiving connections from other nodes
|
||||
## Default: 29015 + port-offset
|
||||
# cluster-port=29015
|
||||
|
||||
## The host:port of a node that rethinkdb will connect to
|
||||
## This option can be specified multiple times.
|
||||
## Default: none
|
||||
# join=example.com:29015
|
||||
|
||||
## All ports used locally will have this value added
|
||||
## Default: 0
|
||||
# port-offset=0
|
||||
|
||||
## r.http(...) queries will use the given server as a web proxy
|
||||
## Default: no proxy
|
||||
# reql-http-proxy=socks5://example.com:1080
|
||||
|
||||
### Web options
|
||||
|
||||
## Port for the http admin console
|
||||
## Default: 8080 + port-offset
|
||||
# http-port=8080
|
||||
|
||||
## Disable web administration console
|
||||
# no-http-admin
|
||||
|
||||
### CPU options
|
||||
|
||||
## The number of cores to use
|
||||
## Default: total number of cores of the CPU
|
||||
# cores=2
|
||||
|
||||
### Memory options
|
||||
|
||||
## Size of the cache in MB
|
||||
## Default: Half of the available RAM on startup
|
||||
# cache-size=1024
|
||||
|
||||
### Disk
|
||||
|
||||
## How many simultaneous I/O operations can happen at the same time
|
||||
# io-threads=64
|
||||
#io-threads=128
|
||||
|
||||
## Enable direct I/O
|
||||
direct-io
|
||||
|
||||
### Meta
|
||||
|
||||
## The name for this server (as will appear in the metadata).
|
||||
## If not specified, it will be randomly chosen from a short list of names.
|
||||
# server-name=server1
|
@ -1,69 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""(Re)create the RethinkDB configuration file conf/rethinkdb.conf.
|
||||
Start with conf/rethinkdb.conf.template
|
||||
then append additional configuration settings (lines).
|
||||
"""
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import os
|
||||
import os.path
|
||||
import shutil
|
||||
import argparse
|
||||
from hostlist import public_dns_names
|
||||
|
||||
# Parse the command-line arguments
|
||||
parser = argparse.ArgumentParser()
|
||||
# The next line isn't strictly necessary, but it clarifies the default case:
|
||||
parser.set_defaults(enable_web_admin=False)
|
||||
parser.add_argument('--enable-web-admin',
|
||||
action='store_true',
|
||||
help='should the RethinkDB web interface be enabled?')
|
||||
# The next line isn't strictly necessary, but it clarifies the default case:
|
||||
parser.set_defaults(bind_http_to_localhost=False)
|
||||
parser.add_argument('--bind-http-to-localhost',
|
||||
action='store_true',
|
||||
help='should RethinkDB web interface be bound to localhost?')
|
||||
args = parser.parse_args()
|
||||
enable_web_admin = args.enable_web_admin
|
||||
bind_http_to_localhost = args.bind_http_to_localhost
|
||||
|
||||
# cwd = current working directory
|
||||
old_cwd = os.getcwd()
|
||||
os.chdir('conf')
|
||||
if os.path.isfile('rethinkdb.conf'):
|
||||
os.remove('rethinkdb.conf')
|
||||
|
||||
# Create the initial rethinkdb.conf using rethinkdb.conf.template
|
||||
shutil.copy2('rethinkdb.conf.template', 'rethinkdb.conf')
|
||||
|
||||
# Append additional lines to rethinkdb.conf
|
||||
with open('rethinkdb.conf', 'a') as f:
|
||||
f.write('## The host:port of a node that RethinkDB will connect to\n')
|
||||
for public_dns_name in public_dns_names:
|
||||
f.write('join=' + public_dns_name + ':29015\n')
|
||||
if not enable_web_admin:
|
||||
f.write('## Disable the RethinkDB web administration console\n')
|
||||
f.write('no-http-admin\n')
|
||||
else:
|
||||
# enable the web admin, i.e. don't disable it (the default), and:
|
||||
if bind_http_to_localhost:
|
||||
f.write('## Bind the web interface port to localhost\n')
|
||||
# 127.0.0.1 is the usual IP address for localhost
|
||||
f.write('bind-http=127.0.0.1\n')
|
||||
|
||||
os.chdir(old_cwd)
|
||||
|
||||
# Note: The original code by Andreas wrote a file with lines of the form
|
||||
# join=public_dns_name_0:29015
|
||||
# join=public_dns_name_1:29015
|
||||
# but it stopped about halfway through the list of public_dns_names
|
||||
# (publist). In principle, it's only strictly necessary to
|
||||
# have one join= line.
|
||||
# Maybe Andreas thought that more is better, but all is too much?
|
||||
# Below is Andreas' original code. -Troy
|
||||
# lfile = open('add2dbconf', 'w')
|
||||
# before = 'join='
|
||||
# after = ':29015'
|
||||
# lfile.write('## The host:port of a node that rethinkdb will connect to\n')
|
||||
# for entry in range(0,int(len(publist)/2)):
|
||||
# lfile.write(before + publist[entry] + after + '\n')
|
@ -1,82 +0,0 @@
|
||||
# AWS deployment config file
|
||||
|
||||
# To use in a Bash shell script:
|
||||
# source example_deploy_conf.py
|
||||
# # $EXAMPLEVAR now has a value
|
||||
|
||||
# To use in a Python script:
|
||||
# from example_deploy_conf import *
|
||||
# or
|
||||
# import importlib
|
||||
# cf = importlib.import_module('example_deploy_conf')
|
||||
# # cf.EXAMPLEVAR now has a value
|
||||
|
||||
# DON'T PUT SPACES AROUND THE =
|
||||
# because that would confuse Bash.
|
||||
# Example values: "string in double quotes", 32, True, False
|
||||
|
||||
# NUM_NODES is the number of nodes to deploy
|
||||
NUM_NODES=3
|
||||
|
||||
# BRANCH is either "pypi" or the name of a local Git branch
|
||||
# (e.g. "master" or "feat/3627/optional-delimiter-in-txfile")
|
||||
# It's where to get the BigchainDB code to be deployed on the nodes
|
||||
BRANCH="master"
|
||||
|
||||
# SSH_KEY_NAME is the name of the SSH private key file
|
||||
# in $HOME/.ssh/
|
||||
# It is used for SSH communications with AWS instances.
|
||||
SSH_KEY_NAME="not-set-yet"
|
||||
|
||||
# USE_KEYPAIRS_FILE is either True or False
|
||||
# Should node keypairs be read from keypairs.py?
|
||||
# (If False, then the keypairs will be whatever is in the
|
||||
# BigchainDB config files in the confiles directory.)
|
||||
USE_KEYPAIRS_FILE=False
|
||||
|
||||
# IMAGE_ID is the Amazon Machine Image (AMI) id to use
|
||||
# in all the servers/instances to be launched.
|
||||
# Canonical (the company behind Ubuntu) generates many AMIs
|
||||
# and you can search for one that meets your needs at:
|
||||
# https://cloud-images.ubuntu.com/locator/ec2/
|
||||
# Example: At one point, if you searched for
|
||||
# eu-central-1 16.04 LTS amd64 hvm:ebs-ssd
|
||||
# you would get this AMI ID:
|
||||
IMAGE_ID="ami-8504fdea"
|
||||
|
||||
# INSTANCE_TYPE is the type of AWS instance to launch
|
||||
# i.e. How many CPUs do you want? How much storage? etc.
|
||||
# Examples: "t2.medium", "m3.2xlarge", "c3.8xlarge", "c4.8xlarge"
|
||||
# For all options, see https://aws.amazon.com/ec2/instance-types/
|
||||
INSTANCE_TYPE="t2.medium"
|
||||
|
||||
# SECURITY_GROUP is the name of the AWS security group to use.
|
||||
# That security group must exist.
|
||||
# Examples: "bigchaindb", "bcdbsecure"
|
||||
SECURITY_GROUP="bigchaindb"
|
||||
|
||||
# USING_EBS is True if you want to attach an Amazon EBS volume
|
||||
USING_EBS=True
|
||||
|
||||
# EBS_VOLUME_SIZE is the size of the EBS volume to attach, in GiB
|
||||
# Since we assume 'gp2' volumes (for now), the possible range is 1 to 16384
|
||||
# If USING_EBS=False, EBS_VOLUME_SIZE is irrelevant and not used
|
||||
EBS_VOLUME_SIZE=30
|
||||
|
||||
# EBS_OPTIMIZED is True or False, depending on whether you want
|
||||
# EBS-optimized instances. See:
|
||||
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html
|
||||
# Not all instance types support EBS optimization.
|
||||
# Setting EBS_OPTIMIZED=True may cost more, but not always.
|
||||
# If USING_EBS=False, EBS_OPTIMIZED is irrelevant and not used
|
||||
EBS_OPTIMIZED=False
|
||||
|
||||
# ENABLE_WEB_ADMIN is True or False, depending on whether you want
|
||||
# the RethinkDB web administration console / interface to be enabled.
|
||||
ENABLE_WEB_ADMIN=True
|
||||
|
||||
# BIND_HTTP_TO_LOCALHOST is True or False, depending on whether
|
||||
# you want the RethinkDB web interface port to be bound to localhost
|
||||
# (which is more secure). See https://www.rethinkdb.com/docs/security/
|
||||
# If ENABLE_WEB_ADMIN is False, BIND_HTTP_TO_LOCALHOST is irrelevant and not used
|
||||
BIND_HTTP_TO_LOCALHOST=True
|
367
deploy-cluster-aws/fabfile.py
vendored
@ -1,367 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""A Fabric fabfile with functionality to prepare, install, and configure
|
||||
BigchainDB, including its storage backend (RethinkDB).
|
||||
"""
|
||||
|
||||
from __future__ import with_statement, unicode_literals
|
||||
|
||||
from os import environ # a mapping (like a dict)
|
||||
import sys
|
||||
|
||||
from fabric.api import sudo, env, hosts
|
||||
from fabric.api import task, parallel
|
||||
from fabric.contrib.files import sed
|
||||
from fabric.operations import run, put
|
||||
from fabric.context_managers import settings
|
||||
|
||||
from hostlist import public_dns_names
|
||||
from ssh_key import ssh_key_path
|
||||
|
||||
# Ignore known_hosts
|
||||
# http://docs.fabfile.org/en/1.10/usage/env.html#disable-known-hosts
|
||||
env.disable_known_hosts = True
|
||||
|
||||
# What remote servers should Fabric connect to? With what usernames?
|
||||
env.user = 'ubuntu'
|
||||
env.hosts = public_dns_names
|
||||
|
||||
# SSH key files to try when connecting:
|
||||
# http://docs.fabfile.org/en/1.10/usage/env.html#key-filename
|
||||
env.key_filename = ssh_key_path
|
||||
|
||||
|
||||
######################################################################
|
||||
|
||||
# DON'T PUT @parallel
|
||||
@task
|
||||
def set_host(host_index):
|
||||
"""A helper task to change env.hosts from the
|
||||
command line. It will only "stick" for the duration
|
||||
of the fab command that called it.
|
||||
|
||||
Args:
|
||||
host_index (int): 0, 1, 2, 3, etc.
|
||||
Example:
|
||||
fab set_host:4 fab_task_A fab_task_B
|
||||
will set env.hosts = [public_dns_names[4]]
|
||||
but only for doing fab_task_A and fab_task_B
|
||||
"""
|
||||
env.hosts = [public_dns_names[int(host_index)]]
|
||||
|
||||
|
||||
@task
|
||||
def test_ssh():
|
||||
run('echo "If you see this, then SSH to a remote host worked."')
|
||||
|
||||
|
||||
# Install base software
|
||||
@task
|
||||
@parallel
|
||||
def install_base_software():
|
||||
# This deletes the dir where "apt-get update" stores the list of packages
|
||||
sudo('rm -rf /var/lib/apt/lists/')
|
||||
# Re-create that directory, and its subdirectory named "partial"
|
||||
sudo('mkdir -p /var/lib/apt/lists/partial/')
|
||||
# Repopulate the list of packages in /var/lib/apt/lists/
|
||||
# See https://tinyurl.com/zjvj9g3
|
||||
sudo('apt-get -y update')
|
||||
# Configure all unpacked but unconfigured packages.
|
||||
# See https://tinyurl.com/zf24hm5
|
||||
sudo('dpkg --configure -a')
|
||||
# Attempt to correct a system with broken dependencies in place.
|
||||
# See https://tinyurl.com/zpktd7l
|
||||
sudo('apt-get -y -f install')
|
||||
# For some reason, repeating the last three things makes this
|
||||
# installation process more reliable...
|
||||
sudo('apt-get -y update')
|
||||
sudo('dpkg --configure -a')
|
||||
sudo('apt-get -y -f install')
|
||||
# Install the base dependencies not already installed.
|
||||
sudo('apt-get -y install git g++ python3-dev libffi-dev')
|
||||
sudo('apt-get -y -f install')
|
||||
|
||||
|
||||
# Get an up-to-date Python 3 version of pip
|
||||
@task
|
||||
@parallel
|
||||
def get_pip3():
|
||||
# One way:
|
||||
# sudo('apt-get -y install python3-setuptools')
|
||||
# sudo('easy_install3 pip')
|
||||
# Another way:
|
||||
sudo('apt-get -y install python3-pip')
|
||||
# Upgrade pip
|
||||
sudo('pip3 install --upgrade pip')
|
||||
# Check the version of pip3
|
||||
run('pip3 --version')
|
||||
|
||||
|
||||
# Upgrade setuptools
|
||||
@task
|
||||
@parallel
|
||||
def upgrade_setuptools():
|
||||
sudo('pip3 install --upgrade setuptools')
|
||||
|
||||
|
||||
# Prepare RethinkDB storage
|
||||
@task
|
||||
@parallel
|
||||
def prep_rethinkdb_storage(USING_EBS):
|
||||
"""Prepare RethinkDB storage"""
|
||||
# Convert USING_EBS from a string to a bool
|
||||
USING_EBS = (USING_EBS.lower() == 'true')
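# (Illustrative note, not in the original file: Fabric passes task arguments
# as strings. awsdeploy.sh calls `fab prep_rethinkdb_storage:$USING_EBS`, so
# the value arrives as the string 'True' or 'False', hence the string-to-bool
# conversion above.)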
|
||||
|
||||
# Make the /data directory for RethinkDB data
|
||||
sudo("mkdir -p /data")
|
||||
|
||||
# OLD: with settings(warn_only=True):
|
||||
if USING_EBS: # on /dev/xvdp
|
||||
# See https://tinyurl.com/h2nut68
|
||||
sudo("mkfs -t ext4 /dev/xvdp")
|
||||
sudo("mount /dev/xvdp /data")
|
||||
# To mount this EBS volume on every system reboot,
|
||||
# add an entry for the device to the /etc/fstab file.
|
||||
# First, make a copy of the current /etc/fstab file
|
||||
sudo("cp /etc/fstab /etc/fstab.orig")
|
||||
# Append a line to /etc/fstab
|
||||
sudo("echo '/dev/xvdp /data ext4 defaults,nofail,nobootwait 0 2' >> /etc/fstab")
|
||||
# Verify the /etc/fstab file. If something is wrong with it,
|
||||
# then this should produce an error:
|
||||
sudo("mount -a")
|
||||
# Set the I/O scheduler for /dev/xvdp to deadline
|
||||
with settings(sudo_user='root'):
|
||||
sudo("echo deadline > /sys/block/xvdp/queue/scheduler")
|
||||
else: # not using EBS.
|
||||
# Using the "instance store" that comes with the instance.
|
||||
# If the instance store comes with more than one volume,
|
||||
# this only mounts ONE of them: /dev/xvdb
|
||||
# For example, m3.2xlarge instances have /dev/xvdb and /dev/xvdc
|
||||
# and /mnt is mounted on /dev/xvdb by default.
|
||||
try:
|
||||
sudo("umount /mnt")
|
||||
sudo("mkfs -t ext4 /dev/xvdb")
|
||||
sudo("mount /dev/xvdb /data")
|
||||
except:
|
||||
pass
|
||||
sudo("rm -rf /etc/fstab")
|
||||
sudo("echo 'LABEL=cloudimg-rootfs / ext4 defaults,discard 0 0' >> /etc/fstab")
|
||||
sudo("echo '/dev/xvdb /data ext4 defaults,noatime 0 0' >> /etc/fstab")
|
||||
# Set the I/O scheduler for /dev/xvdb to deadline
|
||||
with settings(sudo_user='root'):
|
||||
sudo("echo deadline > /sys/block/xvdb/queue/scheduler")
|
||||
|
||||
|
||||
# Install RethinkDB
|
||||
@task
|
||||
@parallel
|
||||
def install_rethinkdb():
|
||||
"""Install RethinkDB"""
|
||||
# Old way:
|
||||
# sudo("echo 'deb http://download.rethinkdb.com/apt trusty main' | sudo tee /etc/apt/sources.list.d/rethinkdb.list")
|
||||
# New way: (from https://www.rethinkdb.com/docs/install/ubuntu/ )
|
||||
sudo('source /etc/lsb-release && '
|
||||
'echo "deb http://download.rethinkdb.com/apt $DISTRIB_CODENAME main" | '
|
||||
'sudo tee /etc/apt/sources.list.d/rethinkdb.list')
|
||||
sudo("wget -qO- http://download.rethinkdb.com/apt/pubkey.gpg | sudo apt-key add -")
|
||||
sudo("apt-get update")
|
||||
sudo("apt-get -y install rethinkdb")
|
||||
# Change owner:group of the RethinkDB data directory to rethinkdb:rethinkdb
|
||||
sudo('chown -R rethinkdb:rethinkdb /data')
|
||||
|
||||
|
||||
# Configure RethinkDB
|
||||
@task
|
||||
@parallel
|
||||
def configure_rethinkdb():
|
||||
"""Copy the RethinkDB config file to the remote host"""
|
||||
put('conf/rethinkdb.conf',
|
||||
'/etc/rethinkdb/instances.d/instance1.conf',
|
||||
mode=0600,
|
||||
use_sudo=True)
|
||||
|
||||
|
||||
# Delete RethinkDB data
|
||||
@task
|
||||
@parallel
|
||||
def delete_rethinkdb_data():
|
||||
"""Delete the contents of the RethinkDB /data directory
|
||||
but not the directory itself.
|
||||
"""
|
||||
sudo('rm -rf /data/*')
|
||||
|
||||
|
||||
# Start RethinkDB
|
||||
@task
|
||||
@parallel
|
||||
def start_rethinkdb():
|
||||
"""Start RethinkDB"""
|
||||
sudo('/etc/init.d/rethinkdb restart')
|
||||
|
||||
|
||||
# Install BigchainDB from PyPI
|
||||
@task
|
||||
@parallel
|
||||
def install_bigchaindb_from_pypi():
|
||||
sudo('pip3 install bigchaindb')
|
||||
|
||||
|
||||
# Install BigchainDB from a Git archive file
|
||||
# named bigchaindb-archive.tar.gz
|
||||
@task
|
||||
@parallel
|
||||
def install_bigchaindb_from_git_archive():
|
||||
put('bigchaindb-archive.tar.gz')
|
||||
run('tar xvfz bigchaindb-archive.tar.gz')
|
||||
sudo('pip3 install .')
|
||||
# sudo('python3 setup.py install')
|
||||
run('rm bigchaindb-archive.tar.gz')
|
||||
|
||||
|
||||
# Configure BigchainDB
|
||||
@task
|
||||
@parallel
|
||||
def configure_bigchaindb():
|
||||
run('bigchaindb -y configure rethinkdb', pty=False)
|
||||
|
||||
|
||||
# Send the specified configuration file to
|
||||
# the remote host and save it there in
|
||||
# ~/.bigchaindb
|
||||
# Use in conjunction with set_host()
|
||||
# No @parallel
|
||||
@task
|
||||
def send_confile(confile):
|
||||
put('confiles/' + confile, 'tempfile')
|
||||
run('mv tempfile ~/.bigchaindb')
|
||||
print('For this node, bigchaindb show-config says:')
|
||||
run('bigchaindb show-config')
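# Example invocation (as done in awsdeploy.sh for the node at index 0):
#     fab set_host:0 send_confile:bcdb_conf0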
|
||||
|
||||
|
||||
# Initialize BigchainDB
|
||||
# i.e. create the database, the tables,
|
||||
# the indexes, and the genesis block.
|
||||
# (The @hosts decorator is used to make this
|
||||
# task run on only one node. See http://tinyurl.com/h9qqf3t )
|
||||
@task
|
||||
@hosts(public_dns_names[0])
|
||||
def init_bigchaindb():
|
||||
run('bigchaindb init', pty=False)
|
||||
|
||||
|
||||
# Set the number of shards (in all tables)
|
||||
@task
|
||||
@hosts(public_dns_names[0])
|
||||
def set_shards(num_shards):
|
||||
run('bigchaindb set-shards {}'.format(num_shards))
|
||||
|
||||
|
||||
# Set the number of replicas (in all tables)
|
||||
@task
|
||||
@hosts(public_dns_names[0])
|
||||
def set_replicas(num_replicas):
|
||||
run('bigchaindb set-replicas {}'.format(num_replicas))
|
||||
|
||||
|
||||
# Start BigchainDB using screen
|
||||
@task
|
||||
@parallel
|
||||
def start_bigchaindb():
|
||||
sudo('screen -d -m bigchaindb -y start &', pty=False)
|
||||
|
||||
|
||||
# Install and run New Relic
|
||||
@task
|
||||
@parallel
|
||||
def install_newrelic():
|
||||
newrelic_license_key = environ.get('NEWRELIC_KEY')
|
||||
if newrelic_license_key is None:
|
||||
sys.exit('The NEWRELIC_KEY environment variable is not set')
|
||||
else:
|
||||
# Andreas had this "with settings(..." line, but I'm not sure why:
|
||||
# with settings(warn_only=True):
|
||||
# Use the installation instructions from NewRelic:
|
||||
# http://tinyurl.com/q9kyrud
|
||||
# ...with some modifications
|
||||
sudo("echo 'deb http://apt.newrelic.com/debian/ newrelic non-free' >> "
|
||||
"/etc/apt/sources.list.d/newrelic.list")
|
||||
sudo('wget -O- https://download.newrelic.com/548C16BF.gpg | '
|
||||
'apt-key add -')
|
||||
sudo('apt-get update')
|
||||
sudo('apt-get -y --force-yes install newrelic-sysmond')
|
||||
sudo('nrsysmond-config --set license_key=' + newrelic_license_key)
|
||||
sudo('/etc/init.d/newrelic-sysmond start')
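# Illustrative usage (an assumption about the operator's shell, not original
# code): export the license key before invoking the task, e.g.
#     export NEWRELIC_KEY=<your-newrelic-license-key>
#     fab install_newrelic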
|
||||
|
||||
|
||||
###########################
|
||||
# Security / Firewall Stuff
|
||||
###########################
|
||||
|
||||
@task
|
||||
def harden_sshd():
|
||||
"""Security harden sshd.
|
||||
"""
|
||||
# Disable password authentication
|
||||
sed('/etc/ssh/sshd_config',
|
||||
'#PasswordAuthentication yes',
|
||||
'PasswordAuthentication no',
|
||||
use_sudo=True)
|
||||
# Deny root login
|
||||
sed('/etc/ssh/sshd_config',
|
||||
'PermitRootLogin yes',
|
||||
'PermitRootLogin no',
|
||||
use_sudo=True)
|
||||
|
||||
|
||||
@task
|
||||
def disable_root_login():
|
||||
"""Disable `root` login for even more security. Access to `root` account
|
||||
is now possible by first connecting with your dedicated maintenance
|
||||
account and then running ``sudo su -``.
|
||||
"""
|
||||
sudo('passwd --lock root')
|
||||
|
||||
|
||||
@task
|
||||
def set_fw():
|
||||
# snmp
|
||||
sudo('iptables -A INPUT -p tcp --dport 161 -j ACCEPT')
|
||||
sudo('iptables -A INPUT -p udp --dport 161 -j ACCEPT')
|
||||
# dns
|
||||
sudo('iptables -A OUTPUT -p udp -o eth0 --dport 53 -j ACCEPT')
|
||||
sudo('iptables -A INPUT -p udp -i eth0 --sport 53 -j ACCEPT')
|
||||
# rethinkdb
|
||||
sudo('iptables -A INPUT -p tcp --dport 28015 -j ACCEPT')
|
||||
sudo('iptables -A INPUT -p udp --dport 28015 -j ACCEPT')
|
||||
sudo('iptables -A INPUT -p tcp --dport 29015 -j ACCEPT')
|
||||
sudo('iptables -A INPUT -p udp --dport 29015 -j ACCEPT')
|
||||
sudo('iptables -A INPUT -p tcp --dport 8080 -j ACCEPT')
|
||||
sudo('iptables -A INPUT -i eth0 -p tcp --dport 8080 -j DROP')
|
||||
sudo('iptables -I INPUT -i eth0 -s 127.0.0.1 -p tcp --dport 8080 -j ACCEPT')
|
||||
# save rules
|
||||
sudo('iptables-save > /etc/sysconfig/iptables')
|
||||
|
||||
|
||||
#########################################################
|
||||
# Some helper-functions to handle bad behavior of cluster
|
||||
#########################################################
|
||||
|
||||
# rebuild indexes
|
||||
@task
|
||||
@parallel
|
||||
def rebuild_indexes():
|
||||
run('rethinkdb index-rebuild -n 2')
|
||||
|
||||
|
||||
@task
|
||||
def stopdb():
|
||||
sudo('service rethinkdb stop')
|
||||
|
||||
|
||||
@task
|
||||
def startdb():
|
||||
sudo('service rethinkdb start')
|
||||
|
||||
|
||||
@task
|
||||
def restartdb():
|
||||
sudo('/etc/init.d/rethinkdb restart')
|
@ -1,264 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""A set of keypairs for use in deploying
|
||||
BigchainDB servers with a predictable set of keys.
|
||||
"""
|
||||
|
||||
from __future__ import unicode_literals
|
||||
|
||||
keypairs_list = [('E72MmhHGiwywGMHdensZreNPTtAKvRxYQEyQEqUpLvXL',
|
||||
'Ar1Xt6bpmeyNnWoBUAAi8VqLPboK84bvB417FKmxcJzp'),
|
||||
('BYupx6PLnAqcTgrqsYopKeHYjmSY5F4rpSVoFv6vK3r6',
|
||||
'6UkRsEhRW7RT6WYPJkW4j4aiqLiXhpyP7H1WRj2toCv3'),
|
||||
('A3FwThyWmydgjukcpF9SmTzWQ4yoRoV9jTni1t4oicz4',
|
||||
'91cuZ3GvQkEkR8UVV456fVxiujBSqd9JMp7p3XaHnVUT'),
|
||||
('CkA7fS6aGmo8JPw3yuchz31AnP7KcxncQiu3pQ81X2Mj',
|
||||
'PDuBGWm4BnSSkTTxSWVd59PAzFqidaLfFo86aTLZoub'),
|
||||
('7aoKCGN4QK82yVpErN1EJyn8ciQXBUkVBe1snx7wypEf',
|
||||
'AXoar7qdJZF2kaTJb19PhqY7iSdX3AEef7GBwb9N8WT6'),
|
||||
('1GGrwAx34CbTfaV55KdCKah2G5FThjrTdTQk3gTD97x',
|
||||
'853HbGVt6hT7q17cN6CtDycuTyoncUDWscdo4GUMKntp'),
|
||||
('7C6BZbk3Xi4nB1o4mUXMty4rs22CeF8dLQ2dUKhyi9qs',
|
||||
'GVu5QTqKeMGhgz8AgzfVRYP5F3HopkqgabQhRqjujEdN'),
|
||||
('2WXPBsMGmwjMv7Eg5iqgLAq2VQW1GF6AVAWuveYLXy3Z',
|
||||
'AuBnVm277newtkgfyjGQrr3X8ykKwzVzrcpqF67muQ4N'),
|
||||
('H67sRSm8W6gVmR1N3SqWXF3WTx9Arhc1RtwvLEjmhm9t',
|
||||
'5SQamPP4dWUhu2L247TMnf8vX1C5vuB3jtfh1BpVSsPg'),
|
||||
('GYztiuCLEvG4wrVszbXKs9AXbKBbDZVhw35xsq8XF63S',
|
||||
'6pxa9WydnD1xRFReR1yHruXL8VtFu3c6kCNBXkwAyDXA'),
|
||||
('G7x9iHnJkjKkdLEsrV2kGZ7tFBm9wj2Pive7vRZss47',
|
||||
'23MvXauT6cKMLrNyxN41jnZv83aKghLP4B37bvemjENa'),
|
||||
('3MhdzHYRrFrPmQZfXfpNKLw9xEdFNZNfUcehgBmboH43',
|
||||
'Buqfw4nFfuLoHJcfYZvxXBJf5rTm5ypSyuJfL11jDFkn'),
|
||||
('B2MbWPXDPSWQAwNirepSws7a4sgKNCVtmduGh5f54Koq',
|
||||
'Cus791pRcuoVJe24WME2QYeEAX1R4uiTGNxa3HwzwtQY'),
|
||||
('7yHSmHHX4WwsZ4H6oQxxytkGRqKPiMdqftSvRqXiomYj',
|
||||
'2qVE6baeD57raXJnNwyUeWi1VyfpQ21QW1J374zMGD6o'),
|
||||
('E2V7mzxce6J8PZw8rUEZXYYVnTFRkMSfTty7duohox6V',
|
||||
'HSs1oWnvTfjrMmVouRtFJYLjfgeC1uxEiA8MX9F98A34'),
|
||||
('4yP4RH18nt3DDFzhpLGborEJuS7hx4cKaz6AAe1xNChe',
|
||||
'FziConq7CF4h6TYc1W4wYtmJbhNnrAGoareRkeoRLKTi'),
|
||||
('HGgVjtNG2U6odxbAAR31UAcHknzenY88GxYhnURy2S5C',
|
||||
'82miL67GzT9fTVt8hFiE2XJBRr7iNXAvFLnuiFj5HyjV'),
|
||||
('AWY2DyCDbMQqx6v5KtcoW1f9qQd5NqiibeLFpABwibEn',
|
||||
'9KgHN7xTLa34hfwGq4RpW71jsKjyPKRtaAdAvjHuATtb'),
|
||||
('BYE1oV6Dyf49Qedrtf3UaVny9D7NEUhtx78pD1X38M6x',
|
||||
'3ve8upjPmX9vvdEqvir7QBxnXQAyBWiZKwWyEhq47ptx'),
|
||||
('BiZLPsA8Q3faqLPxrcMP1TT77XUYd2jceAkuB9bFCzUa',
|
||||
'DrL1j2ZXLvBzk2TmA4DxsRmoR3oCSpW8YPvDCMCei1dU'),
|
||||
('FNPkTYojwJ4F4psnzbWt8XnNRBqRhwHXog8Pb8Kuuq7V',
|
||||
'FRxatYaiuuKBtvvERSADKNtSGPDY7iPzCmMaLDnPSuG8'),
|
||||
('2LiAeAJHjGsrkUmd42jdgPAyKEfzWVttrmnXu6rzfyYM',
|
||||
'FwQ3jTBnJpY62paSLdudyymZPUDSWy3827wY13jTJUmC'),
|
||||
('Gcu8TPtFM2tv9gtUo5yQYyBg7KHQWxnW9Dk3bp4zh5VC',
|
||||
'G3UrGxBB4UCUnxFNKrhmS1rpj3Z7bq484hZUUfNqprx1'),
|
||||
('HQGHpzMDdB3sYqfJJC5GuyreqaSjpqfoGytZL9NVtg8T',
|
||||
'GA9eu5RDuReBnjUcSSg9CK4X4A678YTrxHFxCpmWhDjM'),
|
||||
('2of61RBw2ARZcPD4XJFcnpx18Aj1mU4viUMVN2AXaJsE',
|
||||
'3aDSyU3E5Kmn9emoUXrktFKH4i7t4uaKBmHNFFhErYU8'),
|
||||
('J8oF1sfJzXxJL1bDxPwCtDYw1FEh1prVAhWZF8dw1fRa',
|
||||
'2atybus8CnehWNVj1DcCEidc3uD2Q7q4tiP5ok2CuNSD'),
|
||||
('AxMvjM1w3i3XQVjH8oVVGhiic9kgtvrrDzxnWKdwhdQo',
|
||||
'DXYvSgETSxy4vfU2rqPoZFumKw5aLWErkmEDR2w2sR7h'),
|
||||
('GBuyEpUQTf2v21NAKozUbUQnwwiugHNY9Uh2kPqBwqXK',
|
||||
'CLDPdckwDKa3qiLnoKuNFW8yHRjJdU37XE6skAmageJK'),
|
||||
('Bc8ykuXeq7HutQkveQfYXQ28BbFkjRpZCAEuRsAMtxuF',
|
||||
'B45qxKWDPnoE1C5KzunsMvfHmRgZHfz2LzxaM1LTqVwF'),
|
||||
('9H9v7uKAWScvy34ZQfWJW2NoJ3SWf2NuaqzabcaVuh4h',
|
||||
'4Kj9wUpHKfgJbjyLNmMYwEwnotUmsgTDKMCusHbM5gcz'),
|
||||
('2kWx8nor8McDSZsg8vJ7hZrc3aUVtZhcVcvNeT14iSFo',
|
||||
'3S9ase3dQd5oz3L7ELGivAsUyaTosK9C5X1aiZNtgcwi'),
|
||||
('ENEDnokpqJhziw9CPiGDCnNRwSDgnGjAPh1L7XABWP6s',
|
||||
'2sUKDdtfVaUXZCN6V6WecweBL8ZEY5mCfPBTj4xzhQtq'),
|
||||
('FPUYgS4VvQ5WaZaQqnrJppBZQasoSMwZ4LyhUBKYnE6Q',
|
||||
'FtP6Zak6EEWpuptqxSoPAySfm4yA6rWAQqxMCi6s6RYp'),
|
||||
('FhQjcEjy36p27YGjKzWicdABNWzEYGciSU5Eht98o2eg',
|
||||
'2hZ3Fby9K5jYQdtrhvehKTeJgq4NDJY46p4oBT7NUAv5'),
|
||||
('5JD7STAtYDUeMqvA75FxTGUw6mSFmFvnVMJZJkTHLafH',
|
||||
'HCGf4nWF7q4v4GBPXxTdWMjU7o3SifxfmKzTQ1dWmFqo'),
|
||||
('3VLPrCmUog6mBVqkTuSJzXP7ZABCA6ejQKu9LpzkJs6s',
|
||||
'Bap6iTjmZb781zLxSmELoqVA25mbMuL7B8WdAz5wDygG'),
|
||||
('EiQ57ZLHQvtLbEbiJ41ViZmPctFfd51EFEaK6Y3HZcYb',
|
||||
'5uu84u8um1CfuT1pvpdFKMy5oWuU4BfWRbpRHzG4eW4A'),
|
||||
('3hM9hy2LSqe2SsrcE7XeNz1aPPPZgK5bnTeboyFgFsyj',
|
||||
'3ptDB8YwcU9EiafreJnFSyfNuoKMMws7U7auMadhRxdr'),
|
||||
('3LoFwupCNbPk4cMYVS58UHtkDhvKpdYNmMJPEbK5hnat',
|
||||
'CQ56mX3agjJoWwt2uDSa7gtzHWyg3y4Lqp16rZL9qUdF'),
|
||||
('F9X1XsTcFwJN196D1PdCc88WrVrBGhfDgQnezeXW9Vjx',
|
||||
'79cg39iLMZHPFbXrWo6aJAbsXFPk7kgqgBxijDbDLKA'),
|
||||
('Hf1XCRfcXc6sQZVchcvv54Sod8BjBFqsiU5Wu4eX6bTd',
|
||||
'4o8pJV5jaNVqbQhw1u9EzoHT9m69bkfDSGVGugBYeiPC'),
|
||||
('2hamLVNSruGH8uT7YvXa3AUcsspg2ERqFqT11gbKpbUK',
|
||||
'3SziPezcFQbhPrGVJrm5D8HVAZSjduBhFanaXBZcGr3s'),
|
||||
('6u92HEbihHiorTANWBs5nYsHJSJ21SfSqsD4FwZy8UZr',
|
||||
'9jo5yogiEVYwxCkzYoHrn7WMnxpRqqJxbAFuMA2TuzmW'),
|
||||
('4YJJNsfEz3eiBE48w8kihENuwDXGbS1vYLi27663EDvw',
|
||||
'xcAieBttVYi8g9NQBBjf9jPoaMoWx3hA1h3iCcB11jC'),
|
||||
('CUSUaZiUyy8f9yf59RSeorwQJGGnVgR6humfvmzpBMmS',
|
||||
'EbR1dthGhu82wPJT7MmqKu84eKNKQXEuUm6Lqdf4NLXu'),
|
||||
('5RBfhrADkYu5yFKtgdVZPq1k78VcQc3VZr4kjWpXmACs',
|
||||
'Ev4PviNfb87KH5HSXEj7gN7uBYLbHWFSFqQPsoYcMHK7'),
|
||||
('4M4UiTmPLY6H4AhSkgUkKQ6cRceixyL6oT86AUvK9tTs',
|
||||
'4VuGTkZ62PbgKEotnMtyG6L2M76v3qabhPZKXeJ1npca'),
|
||||
('BDAWs8i2GbRySDC5SCStzFdEvnfiqCTEbu9mpZRoKdA8',
|
||||
'FoyMqh9tcY6xCyLxdByrW8xgzAqGNJKR9dPEN7CjPmQ2'),
|
||||
('Dm1HwCxzLm76hBGAG2NEziNRiPBiYnQoKivPm5JC3477',
|
||||
'Ap747d6xaUofhsFoyXbg7SCpH53tsD8zbLY39QS2jWfC'),
|
||||
('6dRpaKGL3pzaoBX1dKsduuDkkPfUB1yBb1taCYZoNGw2',
|
||||
'7PoRrQTBXmCkKuwvLxQceBbUwqo4eReNTxVaGVT6npdn'),
|
||||
('Cb6sghYERbQA5VMdxKiZx1xk6j6heJNbW1TxRTMwkquu',
|
||||
'Am8zvPbAgk2ERqmhGzJZL1NCNkEUjF6enXCwczp4d97B'),
|
||||
('EhaLhpbbRCfCuLbq3rQS1d4PfE6rHgvYA9MpTGaxACgW',
|
||||
'EfeeApbq1jBChfhe13JkEPbUfm1JYYFCdKXdtue6YrH5'),
|
||||
('353aMTUrjH628XzVnKH2oyRmMaAdJ4antn5fGNAzfqMN',
|
||||
'AqustPmyDtVpFDiUEqWfFjDeVBQhvKYZFU4wjfpXRXee'),
|
||||
('7x8v2BEkdyDvzVzbRJR9AztZHLv8kUZfwRRmcPEpHEYj',
|
||||
'88MTxTfy7Btqxwdf5Xo7TmjzACeuNop8MeE63LikQn4k'),
|
||||
('2jnPZg4oeBzbqL6TdpyTdoxraqjWHqfSrzfmS5Qh8D4V',
|
||||
'3GSJUg4s6ydymn9obTxdSVBkxpmWZLCGuvBK9fMECefe'),
|
||||
('N8DS5DA18i2Bh7rEz7nJSSJZycz8qKaNkPP58BCh7Zr',
|
||||
'AKjy7whpaoUnbDJXzNHbtBHawWnS7tLha3nfMPXh4Qam'),
|
||||
('DUQ3pGX5XQtvucPDcNEQPMLrqCMxCbRBuWmHHddNg83Q',
|
||||
'F3vakqePy8xmpb23psZahDVEdu4dywCPQB7fCMsP5mp3'),
|
||||
('6ABw5HQZSWWJr2Ud6KmD73azu732iNTvEfWbCotCFLrn',
|
||||
'GW9eq8JgkHDLjtENDscTK5Bj9AAC3css7SPxLZCPcS2V'),
|
||||
('ByNJL8Eo8B6kKH5UuJxiXBRRrAKfALLvQmt2Rq5JgAA4',
|
||||
'GEtT15SrZUDxVpLjS4metu4BXYw4o1NmxzH5Wr2DcqAv'),
|
||||
('F9XaoqP4A4zZoPB6phfTP8i7CQsnSakh6bk8B1CTLwqy',
|
||||
'9XLZaFGco78AXQS9dmHZ6zypjtg1Z33pj4KoTtDmnLa6'),
|
||||
('ESamPv9kb87uEBZjfgarKTQwSTcEQMetBH44b8o3mPZC',
|
||||
'Nv7eXkgL84o8fQLjagN1bVj7bt5FKF6Ah1Md6WWwyLk'),
|
||||
('E43hqzYjZZ1XRSME6d37Q99UifT4d23piq1CN3fMp6cv',
|
||||
'HLMB1uPdRuYcQyM9UmY9zerxQa3cYqEaRUku3h9oRBQn'),
|
||||
('3qfPXUTeCsVRk9L68uyGF5u3XxFSVBtPkATtHayVgCGs',
|
||||
'ZEkiCeoj3FGfudrN4xgtbz1VihkKWm4cgHN9qJ4p4GH'),
|
||||
('7fxCmzKhvNGpbn9U2vih9N1aF5UXaVER6NSpwn3HPpoy',
|
||||
'CmhLU67kWqbL2kyj8bA5TNcg7HiQFJeamcJhe5BB1PAx'),
|
||||
('BhJsfuvhj9PqfvnvNGQX26fR5SXvcq7JdhhrWyZHoXT9',
|
||||
'CgMqrhrjr4mBMvTgiHLqgvf4tRzUpZuLtQnMSG1Jjgx2'),
|
||||
('GZbkL2W22Z2YwHf5SBwRfSEYQf1tquPkELDQjkwm2yU4',
|
||||
'E47ijUUheN1Zz8TWKmbcDDWz5nduBvZNtcgqbGRiiGv6'),
|
||||
('9Puc7H9PRHZ2oowzxiuGueZCzNY1X3aSuopy7k4w8TTo',
|
||||
'FTjTVxsPjiNw6TnbwBeE7WpZbvJuVEMwbdPCt1NppHhc'),
|
||||
('BczGQKaQNu8QkTc4PWmPdrbLfmXFzAqnoJ9YzHTU1vez',
|
||||
'4m4xe8fjWAFHyNYLMRYDXskG2d5o9xZxgzCzca23uBBH'),
|
||||
('BZwZrE1hNzKzfnbThE9MiB5Movox67c7uGJmi9Nhef1j',
|
||||
'5G6reNxH3e1gyMSgBRCYQJypFtTSBQ85r5fQGw6DfnpM'),
|
||||
('DFJxcvaR5Xk2bHiuxZzaqDxLDSq6fGSUdg54c5zAFLKz',
|
||||
'BRL9LWweehDAcEPc8MXjd3uQtAt4ZK1LY8J5KT3GeYKm'),
|
||||
('5wfyCc1mAhp2DCKMmEQG9nW4bKfaVkk8kpjuerApiFXv',
|
||||
'rdqo7bdePrF6wR8v8dzJopEHgqNgt2yNmMjxz6wMguQ'),
|
||||
('8S42sTQQqr5LJTa6jBjCfNg6xvjeL95btPJt2MPHBrDo',
|
||||
'7VJjwATaownwJyTWXJxKEtJk46eEXTm9UaioPvVFD2zD'),
|
||||
('57WwYQgHHSu7SYrXXmovjiPDmc2BB25itp6xSu5KrQQn',
|
||||
'FGW86z4ymEbtqiSpp6zzpDkzdPZv9xDMCGUdGVBz8KLU'),
|
||||
('CcxnCDQ4JgH2ceTEPW75GcfW8rP7aiAT8ZuEtYbqEa7w',
|
||||
'7kQdXRZNJaWo7Gj4XtT1fV4LD4ZtN8VmxdZFiJE8q8xF'),
|
||||
('8CYTgLp2kbVJKqnadQNGZorWcdWNpbaXrt6kvdzJnEjv',
|
||||
'57Zwyf4FUEWTxEWrmbSb6vrcZBukHmCs7TKzKoygV6cf'),
|
||||
('4buY9tDvVRpTjfAjM8Up4vWw5yh37xWteSMAerbxpKpv',
|
||||
'5FvFDCSZgtc57hSpvBqBd8VjhyAJ2a2vxTiHzg2nPyg9'),
|
||||
('5jJ8hry8Pu7rkgKkWcmZzfZ5FWk6rT3TnYGesEhfijvt',
|
||||
'7hmVhrQ8vmHmNhxyvyW1cHF5N6gzRoBy7kimfj4b2uZ5'),
|
||||
('6MUnCTEZFZvsKTCW4NKDiPv4a3SRWZLS7gUNP4nXsFBh',
|
||||
'5m2oXtepVwbKt9t5er72bFNioiHYMRtHcUu176DVFBQu'),
|
||||
('GXuU171dpf8JpBLiVgXksyXrdkqDqm6AWJ5A2JZLkkwV',
|
||||
'BF6xtHg3kcBKHCJ9Y6TTPyGYn3MDKLqxVDshVUbgaCAk'),
|
||||
('DoRUYrhULJbAnsGot4hYZeHUaFgXj4hwhHiGRUP3rZCj',
|
||||
'8i67E6uPyrRvAN5WqSX9V7xeSGr4nPXqAgnR2pPQj3ew'),
|
||||
('At4gvM1wZt6ACte2o26Yxbn5qaodMZBd7U1WsiBwB42x',
|
||||
'GBPGPkSkkcM4KmJRqKjCPiEygVLW8LmRRarYvj967fbV'),
|
||||
('48D3mw2apqVQa6KYCjGqDFiG5cbwqZEotL2w8aPWCxtE',
|
||||
'2Byvg9DGK7Axk9Bp6bmiUoBQkkLsRNrcoq2ZPZu5ZyGg'),
|
||||
('2YncoUMac2tNMcRFEGEgvuDXb58RdyqHMWmSN2QTMiCP',
|
||||
'BSNXYAX8Em2TjuCDvmLV3GgnxYT6vX68YFwoGPaPpsSa'),
|
||||
('7As7DVaC6FBqojvFoTo9jgZTcTGx9QYdVwUhNSNNvUsz',
|
||||
'E5cMypehm8j2Zxw3dCXKc1MGTCftJJm9FzqPNwnVEgQn'),
|
||||
('AAwj9V5iW88EwoZpLwuRvqwKn8c8rSgKAyZduFAfvqvV',
|
||||
'CkTks2ZGnQdM19wucQMehPe1imANjnRAAfLd1uewGUx8'),
|
||||
('axH9mijksto4vnoTjbbJJfY8iBFziJL2y39ago41WNM',
|
||||
'GJV8hxcjpieuXmVz9j5JjB2eFLXmRoBF7EYWpbMNKq7Q'),
|
||||
('6vv2FyJcTNJRnEmsRgHai5Bf7trJ8CsBMqbZckXWkbGk',
|
||||
'5YXtgt3ZVKKYM3qvHXXKKSpStfH38akRYvg9shNNChWS'),
|
||||
('DKK6kfAGnLV1mowm9m52yYFViVbQfVEtmRuveiXbnC93',
|
||||
'YvrVGNzxXSTLQ5QQJ3GHWHDQJnd3qJ5npGQQvZtb4m1'),
|
||||
('4QWSQeeu9oQA3ZQG7d6LKzZLR3YZ79q999Zzb7hb2cbh',
|
||||
'42ARr6nFsZXLAgGGwZ5p55kVSW5ETjrnJBUxaV6sFmzk'),
|
||||
('43oJ9CvF3Wsymj8zrkC19VfzjMiwntw3AXrTvc2UFuuf',
|
||||
'A661APGeLXuLgYUwmQjKWnuz1XmjuLNW8XVGuGjmEm76'),
|
||||
('3uN8UwhNcg219uX1GffC3a9tCZrVY327ZUk5rs3YfAR2',
|
||||
'Ca5B2Z9PAeBkEPuYeUyvs3dHhTqpAzFuXERfHZT3zxto'),
|
||||
('HuV5FPtboYQe2EEVFVhBkjRxbUBjeBCHRk2VuiNnBS7N',
|
||||
'5AJCbvgfLmdGdWKjLpDBZtrrJC6NNCQJK5P9NmpvbByy'),
|
||||
('2Rbr8Lasv1CDhL2Xxu5ZfLHf4fhCfxuTr25YDB2Q5VXN',
|
||||
'FQTbtsHjw1oYyKF3pUamwubB27UqG1ista1ezL2kgF3N'),
|
||||
('CLGF2xs7YyJrNZ8ertsPwofzqTBfQiJ5cMiRNcMjgEkh',
|
||||
'4uSue7UmSr1H8QCYrerRRyUh2BTqX5t5qPWRdVrcyL43'),
|
||||
('o6jUu8mqTQMaawxRBbvuWd3b7syXYEUPFWJGuNuoDs6',
|
||||
'7uJuBMMZD3d6mq2ihUtJQLWsAqACAkmQSJ3gUcEgW18W'),
|
||||
('2wo2o5rqEEyijwm4MuCXHNVp2oJPEYQBF2eU6CoXYuVy',
|
||||
'AZY2HCpLGjsUgKo7PZ6gdx6btReR6gRCeE9gmzebgGZ2'),
|
||||
('Eo1z9xyGbHZxH1ezG7iLxJFhuL8YWJ6NREu4T2VtRZky',
|
||||
'GbjDtbwPBf6pcczRbANBvHeBNb3obMtEMoQTxmmafq2g'),
|
||||
('8oPaUg1Wc7293c8HR7Phs4m4DvzDjYuzFUBqffJUhJKP',
|
||||
'9vJKX3jgc1K4sdhnVYLhU6iv7vf8mRygRDYr784mYUpp'),
|
||||
('K2BCZLghAwL4Y9eiPboQM2sz4GWYFM5WApZT6firnig',
|
||||
'7j9QMXcyqgeVFejyNMhXszKAbZuNdECFYwZFDNCwHN3V'),
|
||||
('Dz8Ft3YeeuMcsPKMWNqDDbdx6Qo2s2H2cZNUoX2uDwgY',
|
||||
'3HqEP9EvU9852orfSh9WZd54pDMJnT5nMnGkjhZibbZg'),
|
||||
('3cq9D9s9vZgyDertxiZr21etinCYKCMYcf3LXe3o8zT4',
|
||||
'5174KhHkMsti7XNSYh5j1jFEv22PHQQizTXxT7gT2ZPb'),
|
||||
('5uJwmzmoZDkADaeyenBvceP4mSzBgEgbqU5cc8JQpTDE',
|
||||
'HEYiTYWaTwjXkzfbE4eZ1RL78ciJkWqEio8tDTvCXzk8'),
|
||||
('BkHzLwC5bkLVB4b5KPAqbWc4ekhqmMtk34tfYpLQ53KR',
|
||||
'537uFsVdCU81kSG7eUZBFV5q3PvadsS4KgzaLuGWGzgG'),
|
||||
('3eQT6nC2BEqtXa5b5dn51cJEpj4eMHYsx7RkHXfwNEkq',
|
||||
'2NV1QhXppRfj19ZemqGUgxZ9Pd5yD13aQmrcNd6g25D4'),
|
||||
('GsBGHmKMiJoYDhoXJXwUnkbH4cVWWQ7emG1t7vTFDdS',
|
||||
'CsLyGG9J9E4ZLwhpTHRgp21tvGWyPj79SaLGEpqVhHKj'),
|
||||
('ALytZ6ygpy3hqHVXGHHdNuzuQh1hSoTVU8im5C6CgTR2',
|
||||
'5646BEZkpyoDWQHMscMav8bXoiAzf7giVmu8yepWsoMN'),
|
||||
('5XhJnzEfqVRM6trhL19K1AoGAQjbWC84Cv5XZ4nE9fF7',
|
||||
'BJdQwVTx2fuJWkStt3yPD2WUeopjV3yPQp1646Yi2pXL'),
|
||||
('7XLiDAjnggSU7PAvrTwsyPebC3bhuc5B2CMdiYAQBGWZ',
|
||||
'8xnXGiNp1ADNfuG6uLQ91h2h1ekjuiEC5SRdw19rbpnq'),
|
||||
('7kyFUtCcaiWKfGZmWfb9kvwcYLxxmocBC7qXYwNwotgV',
|
||||
'574EqNs3exLKJxgqFxKyLE5XQMBkadQf5MKQ8qpjsVJS'),
|
||||
('ESJSEPbWb13NaDkde8rEdcippc58AMCZodfmJP1SK16m',
|
||||
'5iwWfDDjgyFfeLpS9EYmwszScwtxTACcgAbinCjFLZTZ'),
|
||||
('AjnWLT2vZnEmLfioGeseLuxGQFFiyoqtFJj2oEUgzax5',
|
||||
'9JeUGkGHPyB7s7XVVik1aFyCxarH2tWhpSJapnRXveb8'),
|
||||
('32yM1jbRpZt7EjnH2UDimusAPfMQ83Wd1AULxLYMv2hq',
|
||||
'73v6uEUhL12MEwdfFFDmqbWmSQXoC8Y3VPB9vKUYEW5X'),
|
||||
('F5DjMdHvqqym53MtBG1v9shrza74EttHn1zPFL1ic1hT',
|
||||
'FpkXbvZsW4LbU4XZYvy6euR7F9SxDMPdyVVCfJFUaT2C'),
|
||||
('3EPdMUSAXFuQLaVwq1fPHNUPzvSHXqfNupgu6kGhdEVc',
|
||||
'28RxZbx71Y8ZaYt9f9D2HnAhkH2CvAPT4PXpDgCnXhVY'),
|
||||
('47YXW4Escn71q7xf6qip8NwdKTq2ScL1i4xmAnJ1RvDW',
|
||||
'3NQxT4ukLvPPZV3J6qDmx5PFPa7GvaiMBwc1r47SXdfj'),
|
||||
('AiCfcc6viFsxTxfEJxo82b3GWzim2nRXvBBfB14w4dMr',
|
||||
'FBCcBLpFUss64MWjf3nuSRrLNoqnWpJGfXKJVaduPezJ'),
|
||||
('CkeGi1XM3nquJcp3osb2EhTJ99gsisPfTpnsQdYViWWa',
|
||||
'4L12aHJtN96XGrYbhBFhmEQuPTnsHu95NATsz3X2Uo4Z'),
|
||||
('A78PS3MuQtWQ937ow5mzHhXUS1LNSzX2nMcmqLN57c3G',
|
||||
'87T6viSDWX7Rrw2VWsqEXhwVmrsrmf2rjDHRkeUGU4rX'),
|
||||
('2SzYHP21J4KXwVgSwtNfDQKUbyC7RE8feAwfVuW7PSmD',
|
||||
'4NCA5NxnhxPAAcWqyxtg4us7MJYSbn8g3Kw6v35Vmnm5'),
|
||||
('GxGuWY5A1ADiXFrdCiAcVJX4foveGxDfhcJd2Yirg3D8',
|
||||
'2Jjo3w5gQ7TsQaN2N7iNejfGLjzucaNg4hYZBcwT7AzC'),
|
||||
('5dYeKTvxfH6s9Esbys8TVMDTZMCzjFJAH4xe623ykmZ2',
|
||||
'5q7Le5Kcm1eBY1r8XwEseDXnEUKkZE5qtNb6p5BSSKwz'),
|
||||
('EkbeQ7eoiHxiTmq7ksw6FLvf59b3pGuoDR9LF29KYw4m',
|
||||
'CDpJ8VmgiBvYUcZMcPYr3B5UxSVEtLxRfq5dH3AxboNT'),
|
||||
('2zXT2EUMwWKPMWHK5rYvxgLNdmkoedXH754uzUBphaCE',
|
||||
'5oHnEFaUaM1QRZjV48K1DrqKeEdcbmb8uG2zucTYc5qH'),
|
||||
('H6c78e97srwPEg5PsW1uuKAovSxTvmNyFt9qJwoeJP4y',
|
||||
'inwncuMiPRuw6PEucVG2Kempk91yq3dT5kpuf3Umf4j'),
|
||||
('6yJDrenNeRBpdQxqxMY3C2V6cBrfvpzYpz6MbefxuxsZ',
|
||||
'CnCjmTECDrqJP5nTPSL2NWJ9LPyyFzLmrTYiRcSjwU7e'),
|
||||
('3YTX3ntzsjG9CxbkCayToGEzmn1Fgdvw1W8gefCUTa9L',
|
||||
'FkCbQBoKRZbndsNP44CWheEchwPC65UNdrZ8FntRTyvu'),
|
||||
('8Y7xgZ5M8qBYdX5iCHe7mPQ6ZcQNXDJd28ZVDdx7FSBa',
|
||||
'AYTdxj598H36RGmBzEnR4QK8pVF6k5YTRBypxWsDkXUB'),
|
||||
('AtzLLpKuPehdP4g6x4J4BH2RjNbvXewxf8ibSgKSiJtL',
|
||||
'vC8C3u71YueJcUhtyfn9Xx5PjpJuizDZNGW23tFb5VY'),
|
||||
]
|
@ -1,337 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""This script:
|
||||
0. allocates more elastic IP addresses if necessary,
|
||||
1. launches the specified number of nodes (instances) on Amazon EC2,
|
||||
2. tags them with the specified tag,
|
||||
3. waits until those instances exist and are running,
|
||||
4. for each instance, it associates an elastic IP address
|
||||
with that instance,
|
||||
5. writes the shellscript add2known_hosts.sh
|
||||
6. (over)writes a file named hostlist.py
|
||||
containing a list of all public DNS names.
|
||||
7. (over)writes a file named ssh_key.py
|
||||
containing the location of the private SSH key file.
|
||||
"""
|
||||
|
||||
from __future__ import unicode_literals
|
||||
from os.path import expanduser
|
||||
import sys
|
||||
import time
|
||||
import socket
|
||||
import argparse
|
||||
import importlib
|
||||
import botocore
|
||||
import boto3
|
||||
|
||||
from awscommon import get_naeips
|
||||
|
||||
|
||||
SETTINGS = ['NUM_NODES', 'BRANCH', 'SSH_KEY_NAME',
|
||||
'USE_KEYPAIRS_FILE', 'IMAGE_ID', 'INSTANCE_TYPE', 'SECURITY_GROUP',
|
||||
'USING_EBS', 'EBS_VOLUME_SIZE', 'EBS_OPTIMIZED',
|
||||
'ENABLE_WEB_ADMIN', 'BIND_HTTP_TO_LOCALHOST']
|
||||
|
||||
|
||||
class SettingsTypeError(TypeError):
|
||||
pass
|
||||
|
||||
|
||||
# Ensure they're using Python 2.5-2.7
|
||||
pyver = sys.version_info
|
||||
major = pyver[0]
|
||||
minor = pyver[1]
|
||||
print('You are in an environment where "python" is Python {}.{}'.
|
||||
format(major, minor))
|
||||
if not ((major == 2) and (minor >= 5) and (minor <= 7)):
|
||||
print('but Fabric only works with Python 2.5-2.7')
|
||||
sys.exit(1)
|
||||
|
||||
# Parse the command-line arguments
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--tag",
|
||||
help="tag to add to all launched instances on AWS",
|
||||
required=True)
|
||||
parser.add_argument("--deploy-conf-file",
|
||||
help="AWS deployment configuration file",
|
||||
required=True)
|
||||
args = parser.parse_args()
|
||||
tag = args.tag
|
||||
deploy_conf_file = args.deploy_conf_file
|
||||
|
||||
# Import all the variables set in the AWS deployment configuration file
|
||||
# (Remove the '.py' from the end of deploy_conf_file.)
|
||||
cf = importlib.import_module(deploy_conf_file[:-3])
|
||||
|
||||
dir_cf = dir(cf) # = a list of the attributes of cf
|
||||
for setting in SETTINGS:
|
||||
if setting not in dir_cf:
|
||||
sys.exit('{} was not set '.format(setting) +
|
||||
'in the specified AWS deployment '
|
||||
'configuration file {}'.format(deploy_conf_file))
|
||||
exec('{0} = cf.{0}'.format(setting))
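# (Illustrative alternative, not the original approach: the same copying could
# be done without exec(), e.g.
#     globals().update({s: getattr(cf, s) for s in SETTINGS})
# at the cost of being less explicit about which names get defined.)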
|
||||
|
||||
# Validate the variables set in the AWS deployment configuration file
|
||||
if not isinstance(NUM_NODES, int):
|
||||
raise SettingsTypeError('NUM_NODES should be an int')
|
||||
|
||||
if not isinstance(BRANCH, str):
|
||||
raise SettingsTypeError('BRANCH should be a string')
|
||||
|
||||
if not isinstance(SSH_KEY_NAME, str):
|
||||
raise SettingsTypeError('SSH_KEY_NAME should be a string')
|
||||
|
||||
if not isinstance(USE_KEYPAIRS_FILE, bool):
|
||||
msg = 'USE_KEYPAIRS_FILE should be a boolean (True or False)'
|
||||
raise SettingsTypeError(msg)
|
||||
|
||||
if not isinstance(IMAGE_ID, str):
|
||||
raise SettingsTypeError('IMAGE_ID should be a string')
|
||||
|
||||
if not isinstance(INSTANCE_TYPE, str):
|
||||
raise SettingsTypeError('INSTANCE_TYPE should be a string')
|
||||
|
||||
if not isinstance(SECURITY_GROUP, str):
|
||||
raise SettingsTypeError('SECURITY_GROUP should be a string')
|
||||
|
||||
if not isinstance(USING_EBS, bool):
|
||||
raise SettingsTypeError('USING_EBS should be a boolean (True or False)')
|
||||
|
||||
if not isinstance(EBS_VOLUME_SIZE, int):
|
||||
raise SettingsTypeError('EBS_VOLUME_SIZE should be an int')
|
||||
|
||||
if not isinstance(EBS_OPTIMIZED, bool):
|
||||
raise SettingsTypeError('EBS_OPTIMIZED should be a boolean (True or False)')
|
||||
|
||||
if not isinstance(ENABLE_WEB_ADMIN, bool):
|
||||
raise SettingsTypeError('ENABLE_WEB_ADMIN should be a boolean (True or False)')
|
||||
|
||||
if not isinstance(BIND_HTTP_TO_LOCALHOST, bool):
|
||||
raise SettingsTypeError('BIND_HTTP_TO_LOCALHOST should be a boolean '
|
||||
'(True or False)')
|
||||
|
||||
if NUM_NODES > 64:
|
||||
raise ValueError('NUM_NODES should be less than or equal to 64. '
|
||||
'The AWS deployment configuration file sets it to {}'.
|
||||
format(NUM_NODES))
|
||||
|
||||
if SSH_KEY_NAME in ['not-set-yet', '', None]:
|
||||
raise ValueError('SSH_KEY_NAME should be set. '
|
||||
'The AWS deployment configuration file sets it to {}'.
|
||||
format(SSH_KEY_NAME))
|
||||
|
||||
# Since we assume 'gp2' volumes (for now), the possible range is 1 to 16384
|
||||
if EBS_VOLUME_SIZE > 16384:
|
||||
raise ValueError('EBS_VOLUME_SIZE should be <= 16384. '
|
||||
'The AWS deployment configuration file sets it to {}'.
|
||||
format(EBS_VOLUME_SIZE))
|
||||
|
||||
# Get an AWS EC2 "resource"
|
||||
# See http://boto3.readthedocs.org/en/latest/guide/resources.html
|
||||
ec2 = boto3.resource(service_name='ec2')
|
||||
|
||||
# Create a client from the EC2 resource
|
||||
# See http://boto3.readthedocs.org/en/latest/guide/clients.html
|
||||
client = ec2.meta.client
|
||||
|
||||
# Ensure they don't already have some instances with the specified tag
|
||||
# Get a list of all instances with the specified tag.
|
||||
# (Technically, instances_with_tag is an ec2.instancesCollection.)
|
||||
filters = [{'Name': 'tag:Name', 'Values': [tag]}]
|
||||
instances_with_tag = ec2.instances.filter(Filters=filters)
|
||||
# len() doesn't work on instances_with_tag. This does:
|
||||
num_ins = 0
|
||||
for instance in instances_with_tag:
|
||||
num_ins += 1
|
||||
if num_ins != 0:
|
||||
print('You already have {} instances with the tag {} on EC2.'.
|
||||
format(num_ins, tag))
|
||||
print('You should either pick a different tag or '
|
||||
'terminate all those instances and '
|
||||
'wait until they vanish from your EC2 Console.')
|
||||
sys.exit(1)
|
||||
|
||||
# Before launching any instances, make sure they have sufficient
|
||||
# allocated-but-unassociated EC2 elastic IP addresses
|
||||
print('Checking if you have enough allocated-but-unassociated ' +
|
||||
'EC2 elastic IP addresses...')
|
||||
|
||||
non_associated_eips = get_naeips(client)
|
||||
|
||||
print('You have {} allocated elastic IPs which are '
|
||||
'not already associated with instances'.
|
||||
format(len(non_associated_eips)))
|
||||
|
||||
if NUM_NODES > len(non_associated_eips):
|
||||
num_eips_to_allocate = NUM_NODES - len(non_associated_eips)
|
||||
print('You want to launch {} instances'.
|
||||
format(NUM_NODES))
|
||||
print('so {} more elastic IPs must be allocated'.
|
||||
format(num_eips_to_allocate))
|
||||
for _ in range(num_eips_to_allocate):
|
||||
try:
|
||||
# Allocate an elastic IP address
|
||||
# response is a dict. See http://tinyurl.com/z2n7u9k
|
||||
response = client.allocate_address(DryRun=False, Domain='standard')
|
||||
except botocore.exceptions.ClientError:
|
||||
print('Something went wrong when allocating an '
|
||||
'EC2 elastic IP address on EC2. '
|
||||
'Maybe you are already at the maximum number allowed '
|
||||
'by your AWS account? More details:')
|
||||
raise
|
||||
except:
|
||||
print('Unexpected error:')
|
||||
raise
|
||||
|
||||
print('Commencing launch of {} instances on Amazon EC2...'.
|
||||
format(NUM_NODES))
|
||||
|
||||
sg_list = [SECURITY_GROUP]
|
||||
|
||||
for _ in range(NUM_NODES):
|
||||
# Request the launch of one instance at a time
|
||||
# (so list_of_instances should contain only one item)
|
||||
# See https://tinyurl.com/hbjewbb
|
||||
if USING_EBS:
|
||||
dm = {
|
||||
'DeviceName': '/dev/sdp',
|
||||
# Why /dev/sdp? See https://tinyurl.com/z2zqm6n
|
||||
'Ebs': {
|
||||
'VolumeSize': EBS_VOLUME_SIZE, # GiB
|
||||
'DeleteOnTermination': False,
|
||||
'VolumeType': 'gp2',
|
||||
'Encrypted': False
|
||||
},
|
||||
# 'NoDevice': 'device'
|
||||
# Suppresses the specified device included
|
||||
# in the block device mapping of the AMI.
|
||||
}
|
||||
list_of_instances = ec2.create_instances(
|
||||
ImageId=IMAGE_ID,
|
||||
MinCount=1,
|
||||
MaxCount=1,
|
||||
KeyName=SSH_KEY_NAME,
|
||||
InstanceType=INSTANCE_TYPE,
|
||||
SecurityGroupIds=sg_list,
|
||||
BlockDeviceMappings=[dm],
|
||||
EbsOptimized=EBS_OPTIMIZED
|
||||
)
|
||||
else: # not USING_EBS
|
||||
list_of_instances = ec2.create_instances(
|
||||
ImageId=IMAGE_ID,
|
||||
MinCount=1,
|
||||
MaxCount=1,
|
||||
KeyName=SSH_KEY_NAME,
|
||||
InstanceType=INSTANCE_TYPE,
|
||||
SecurityGroupIds=sg_list
|
||||
)
|
||||
|
||||
# Tag the just-launched instances (should be just one)
|
||||
for instance in list_of_instances:
|
||||
time.sleep(5)
|
||||
instance.create_tags(Tags=[{'Key': 'Name', 'Value': tag}])
|
||||
|
||||
# Get a list of all instances with the specified tag.
|
||||
# (Technically, instances_with_tag is an ec2.instancesCollection.)
|
||||
filters = [{'Name': 'tag:Name', 'Values': [tag]}]
|
||||
instances_with_tag = ec2.instances.filter(Filters=filters)
|
||||
print('The launched instances will have these ids:')
|
||||
for instance in instances_with_tag:
|
||||
print(instance.id)
|
||||
|
||||
print('Waiting until all those instances exist...')
|
||||
for instance in instances_with_tag:
|
||||
instance.wait_until_exists()
|
||||
|
||||
print('Waiting until all those instances are running...')
|
||||
for instance in instances_with_tag:
|
||||
instance.wait_until_running()
|
||||
|
||||
print('Associating allocated-but-unassociated elastic IPs ' +
|
||||
'with the instances...')
|
||||
|
||||
# Get a list of elastic IPs which are allocated but
|
||||
# not associated with any instances.
|
||||
# There should be enough because we checked earlier and
|
||||
# allocated more if necessary.
|
||||
non_associated_eips_2 = get_naeips(client)
|
||||
|
||||
for i, instance in enumerate(instances_with_tag):
|
||||
print('Grabbing an allocated but non-associated elastic IP...')
|
||||
eip = non_associated_eips_2[i]
|
||||
public_ip = eip['PublicIp']
|
||||
print('The public IP address {}'.format(public_ip))
|
||||
|
||||
# Associate that Elastic IP address with an instance
|
||||
response2 = client.associate_address(
|
||||
DryRun=False,
|
||||
InstanceId=instance.instance_id,
|
||||
PublicIp=public_ip
|
||||
)
|
||||
print('was associated with the instance with id {}'.
|
||||
format(instance.instance_id))
|
||||
|
||||
# Get a list of the public DNS names of the instances_with_tag
|
||||
public_dns_names = []
|
||||
for instance in instances_with_tag:
|
||||
public_dns_name = getattr(instance, 'public_dns_name', None)
|
||||
if public_dns_name is not None:
|
||||
public_dns_names.append(public_dns_name)
|
||||
|
||||
# Write a shellscript to add remote keys to ~/.ssh/known_hosts
|
||||
print('Preparing shellscript to add remote keys to known_hosts')
|
||||
with open('add2known_hosts.sh', 'w') as f:
|
||||
f.write('#!/bin/bash\n')
|
||||
for public_dns_name in public_dns_names:
|
||||
f.write('ssh-keyscan ' + public_dns_name + ' >> ~/.ssh/known_hosts\n')
|
||||
|
||||
# Create a file named hostlist.py containing public_dns_names.
|
||||
# If a hostlist.py already exists, it will be overwritten.
|
||||
print('Writing hostlist.py')
|
||||
with open('hostlist.py', 'w') as f:
|
||||
f.write('# -*- coding: utf-8 -*-\n')
|
||||
f.write('"""A list of the public DNS names of all the nodes in this\n')
|
||||
f.write('BigchainDB cluster.\n')
|
||||
f.write('"""\n')
|
||||
f.write('\n')
|
||||
f.write('from __future__ import unicode_literals\n')
|
||||
f.write('\n')
|
||||
f.write('public_dns_names = {}\n'.format(public_dns_names))
|
||||
|
||||
# Create a file named ssh_key.py
|
||||
# containing the location of the private SSH key file.
|
||||
# If a ssh_key.py already exists, it will be overwritten.
|
||||
print('Writing ssh_key.py')
|
||||
with open('ssh_key.py', 'w') as f:
|
||||
f.write('# -*- coding: utf-8 -*-\n')
|
||||
f.write('"""This file exists as a convenient way for Fabric to get\n')
|
||||
f.write('the location of the private SSH key file.\n')
|
||||
f.write('"""\n')
|
||||
f.write('\n')
|
||||
f.write('from __future__ import unicode_literals\n')
|
||||
f.write('\n')
|
||||
home = expanduser('~')
|
||||
f.write('ssh_key_path = "{}/.ssh/{}"\n'.format(home, SSH_KEY_NAME))
|
||||
|
||||
# For each node in the cluster, check port 22 (ssh) until it's reachable
|
||||
for instance in instances_with_tag:
|
||||
ip_address = instance.public_ip_address
|
||||
# Create a socket
|
||||
# Address Family: AF_INET (means IPv4)
|
||||
# Type: SOCK_STREAM (means connection-oriented TCP protocol)
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
print('Attempting to connect to {} on port 22 (ssh)...'.
|
||||
format(ip_address))
|
||||
unreachable = True
|
||||
while unreachable:
|
||||
try:
|
||||
# Open a connection to the remote node on port 22
|
||||
s.connect((ip_address, 22))
|
||||
except socket.error as e:
|
||||
print(' Socket error: {}'.format(e))
|
||||
print(' Trying again in 3 seconds')
|
||||
time.sleep(3.0)
|
||||
else:
|
||||
print(' Port 22 is reachable!')
|
||||
s.shutdown(socket.SHUT_WR)
|
||||
s.close()
|
||||
unreachable = False
|
@ -1,38 +0,0 @@
|
||||
#! /bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
function printErr()
|
||||
{
|
||||
echo "usage: ./make_confiles.sh <dir> <number_of_files>"
|
||||
echo "No argument $1 supplied"
|
||||
}
|
||||
|
||||
if [ -z "$1" ]; then
|
||||
printErr "<dir>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$2" ]; then
|
||||
printErr "<number_of_files>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
CONFDIR=$1
|
||||
NUMFILES=$2
|
||||
|
||||
# If $CONFDIR exists, remove it
|
||||
if [ -d "$CONFDIR" ]; then
|
||||
rm -rf $CONFDIR
|
||||
fi
|
||||
|
||||
# Create $CONFDIR
|
||||
mkdir $CONFDIR
|
||||
|
||||
# Use the bigchaindb configure command to create
|
||||
# $NUMFILES BigchainDB config files in $CONFDIR
|
||||
for (( i=0; i<$NUMFILES; i++ )); do
|
||||
CONPATH=$CONFDIR"/bcdb_conf"$i
|
||||
echo "Writing "$CONPATH
|
||||
bigchaindb -y -c $CONPATH configure rethinkdb
|
||||
done
|
@ -1,43 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Release all allocated but non-associated elastic IP addresses
|
||||
(EIPs). Why? From the AWS docs:
|
||||
|
||||
``To ensure efficient use of Elastic IP addresses, we impose a small
|
||||
hourly charge if an Elastic IP address is not associated with a
|
||||
running instance, or if it is associated with a stopped instance or
|
||||
an unattached network interface. While your instance is running,
|
||||
you are not charged for one Elastic IP address associated with the
|
||||
instance, but you are charged for any additional Elastic IP
|
||||
addresses associated with the instance. For more information, see
|
||||
Amazon EC2 Pricing.''
|
||||
|
||||
Source: http://tinyurl.com/ozhxatx
|
||||
"""
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import boto3
|
||||
from awscommon import get_naeips
|
||||
|
||||
# Get an AWS EC2 "resource"
|
||||
# See http://boto3.readthedocs.org/en/latest/guide/resources.html
|
||||
ec2 = boto3.resource(service_name='ec2')
|
||||
|
||||
# Create a client from the EC2 resource
|
||||
# See http://boto3.readthedocs.org/en/latest/guide/clients.html
|
||||
client = ec2.meta.client
|
||||
|
||||
non_associated_eips = get_naeips(client)
|
||||
|
||||
print('You have {} allocated elastic IPs which are '
|
||||
'not associated with instances'.
|
||||
format(len(non_associated_eips)))
|
||||
|
||||
for i, eip in enumerate(non_associated_eips):
|
||||
public_ip = eip['PublicIp']
|
||||
print('{}: Releasing {}'.format(i, public_ip))
|
||||
domain = eip['Domain']
|
||||
print('(It has Domain = {}.)'.format(domain))
|
||||
if domain == 'vpc':
|
||||
client.release_address(AllocationId=eip['AllocationId'])
|
||||
else:
|
||||
client.release_address(PublicIp=public_ip)
|
@ -1,49 +0,0 @@
|
||||
"""A Python 3 script to write a file with a specified number
|
||||
of keypairs, using bigchaindb.common.crypto.generate_key_pair()
|
||||
The written file is always named keypairs.py and it should be
|
||||
interpreted as a Python 2 script.
|
||||
|
||||
Usage:
|
||||
$ python3 write_keypairs_file.py num_pairs
|
||||
|
||||
Using the list in other Python scripts:
|
||||
# in a Python 2 script:
|
||||
from keypairs import keypairs_list
|
||||
# keypairs_list is a list of (sk, pk) tuples
|
||||
# sk = private key
|
||||
# pk = public key
|
||||
"""
|
||||
|
||||
import argparse
|
||||
|
||||
from bigchaindb.common import crypto
|
||||
|
||||
|
||||
# Parse the command-line arguments
|
||||
desc = 'Write a set of keypairs to keypairs.py'
|
||||
parser = argparse.ArgumentParser(description=desc)
|
||||
parser.add_argument('num_pairs',
|
||||
help='number of keypairs to write',
|
||||
type=int)
|
||||
args = parser.parse_args()
|
||||
num_pairs = int(args.num_pairs)
|
||||
|
||||
# Generate and write the keypairs to keypairs.py
|
||||
print('Writing {} keypairs to keypairs.py...'.format(num_pairs))
|
||||
with open('keypairs.py', 'w') as f:
|
||||
f.write('# -*- coding: utf-8 -*-\n')
|
||||
f.write('"""A set of keypairs for use in deploying\n')
|
||||
f.write('BigchainDB servers with a predictable set of keys.\n')
|
||||
f.write('"""\n')
|
||||
f.write('\n')
|
||||
f.write('from __future__ import unicode_literals\n')
|
||||
f.write('\n')
|
||||
f.write('keypairs_list = [')
|
||||
for pair_num in range(num_pairs):
|
||||
keypair = crypto.generate_key_pair()
|
||||
spacer = '' if pair_num == 0 else ' '
|
||||
f.write("{}('{}',\n '{}'),\n".format(
|
||||
spacer, keypair[0], keypair[1]))
|
||||
f.write(' ]\n')
|
||||
|
||||
print('Done.')
|
170
docker-compose.network.yml
Normal file
@ -0,0 +1,170 @@
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
|
||||
#############################################################################
|
||||
# #
|
||||
# NODE 1 #
|
||||
# #
|
||||
#############################################################################
|
||||
mdb-one:
|
||||
image: mongo:3.4.3
|
||||
ports:
|
||||
- "27017"
|
||||
command: mongod
|
||||
bdb-one:
|
||||
depends_on:
|
||||
- mdb-one
|
||||
- tendermint-one
|
||||
build:
|
||||
context: .
|
||||
dockerfile: ./compose/bigchaindb-server/Dockerfile
|
||||
args:
|
||||
backend: localmongodb
|
||||
volumes:
|
||||
- ./bigchaindb:/usr/src/app/bigchaindb
|
||||
- ./tests:/usr/src/app/tests
|
||||
environment:
|
||||
BIGCHAINDB_DATABASE_BACKEND: localmongodb
|
||||
BIGCHAINDB_DATABASE_HOST: mdb-one
|
||||
TENDERMINT_HOST: tendermint-one
|
||||
ports:
|
||||
- "9984"
|
||||
command: bigchaindb -l DEBUG start
|
||||
tendermint-one:
|
||||
image: tendermint/tendermint
|
||||
volumes:
|
||||
- ./network/node1:/tendermint
|
||||
entrypoint: ''
|
||||
command: bash -c "tendermint unsafe_reset_all && tendermint --log_level debug node"
|
||||
|
||||
|
||||
#############################################################################
|
||||
# #
|
||||
# NODE 2 #
|
||||
# #
|
||||
#############################################################################
|
||||
mdb-two:
|
||||
image: mongo:3.4.3
|
||||
ports:
|
||||
- "27017"
|
||||
command: mongod
|
||||
bdb-two:
|
||||
depends_on:
|
||||
- mdb-two
|
||||
- tendermint-two
|
||||
build:
|
||||
context: .
|
||||
dockerfile: ./compose/bigchaindb-server/Dockerfile
|
||||
args:
|
||||
backend: localmongodb
|
||||
volumes:
|
||||
- ./bigchaindb:/usr/src/app/bigchaindb
|
||||
- ./tests:/usr/src/app/tests
|
||||
environment:
|
||||
BIGCHAINDB_DATABASE_BACKEND: localmongodb
|
||||
BIGCHAINDB_DATABASE_HOST: mdb-two
|
||||
TENDERMINT_HOST: tendermint-two
|
||||
ports:
|
||||
- "9984"
|
||||
command: bigchaindb -l DEBUG start
|
||||
tendermint-two:
|
||||
image: tendermint/tendermint
|
||||
volumes:
|
||||
- ./network/node2:/tendermint
|
||||
entrypoint: ''
|
||||
command: bash -c "tendermint unsafe_reset_all && tendermint --log_level debug node"
|
||||
|
||||
|
||||
#############################################################################
|
||||
# #
|
||||
# NODE 3 #
|
||||
# #
|
||||
#############################################################################
|
||||
mdb-three:
|
||||
image: mongo:3.4.3
|
||||
ports:
|
||||
- "27017"
|
||||
command: mongod
|
||||
bdb-three:
|
||||
depends_on:
|
||||
- mdb-three
|
||||
- tendermint-three
|
||||
build:
|
||||
context: .
|
||||
dockerfile: ./compose/bigchaindb-server/Dockerfile
|
||||
args:
|
||||
backend: localmongodb
|
||||
volumes:
|
||||
- ./bigchaindb:/usr/src/app/bigchaindb
|
||||
- ./tests:/usr/src/app/tests
|
||||
environment:
|
||||
BIGCHAINDB_DATABASE_BACKEND: localmongodb
|
||||
BIGCHAINDB_DATABASE_HOST: mdb-three
|
||||
TENDERMINT_HOST: tendermint-three
|
||||
ports:
|
||||
- "9984"
|
||||
command: bigchaindb -l DEBUG start
|
||||
tendermint-three:
|
||||
image: tendermint/tendermint
|
||||
volumes:
|
||||
- ./network/node3:/tendermint
|
||||
entrypoint: ''
|
||||
command: bash -c "tendermint unsafe_reset_all && tendermint --log_level debug node"
|
||||
|
||||
|
||||
#############################################################################
|
||||
# #
|
||||
# NODE 4 #
|
||||
# #
|
||||
#############################################################################
|
||||
mdb-four:
|
||||
image: mongo:3.4.3
|
||||
ports:
|
||||
- "27017"
|
||||
command: mongod
|
||||
bdb-four:
|
||||
depends_on:
|
||||
- mdb-four
|
||||
- tendermint-four
|
||||
build:
|
||||
context: .
|
||||
dockerfile: ./compose/bigchaindb-server/Dockerfile
|
||||
args:
|
||||
backend: localmongodb
|
||||
volumes:
|
||||
- ./bigchaindb:/usr/src/app/bigchaindb
|
||||
- ./tests:/usr/src/app/tests
|
||||
environment:
|
||||
BIGCHAINDB_DATABASE_BACKEND: localmongodb
|
||||
BIGCHAINDB_DATABASE_HOST: mdb-four
|
||||
TENDERMINT_HOST: tendermint-four
|
||||
ports:
|
||||
- "9984"
|
||||
command: bigchaindb -l DEBUG start
|
||||
tendermint-four:
|
||||
image: tendermint/tendermint
|
||||
volumes:
|
||||
- ./network/node4:/tendermint
|
||||
entrypoint: ''
|
||||
command: bash -c "tendermint unsafe_reset_all && tendermint --log_level debug node"
|
||||
|
||||
|
||||
#############################################################################
|
||||
#############################################################################
|
||||
#############################################################################
|
||||
#
|
||||
# clients
|
||||
#
|
||||
#############################################################################
|
||||
#############################################################################
|
||||
#############################################################################
|
||||
curl-client:
|
||||
image: appropriate/curl
|
||||
volumes:
|
||||
- ./network/health-check.sh:/health-check.sh
|
||||
command: /bin/sh health-check.sh
|
||||
driver:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: ./compose/bigchaindb-driver/Dockerfile
|
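The compose file above defines four MongoDB + Tendermint + BigchainDB node stacks plus two helper clients (`curl-client` and `driver`). A minimal sketch of how one might bring the network up and watch one node, assuming Docker and docker-compose are installed and using the service names defined above:

```text
# Build the images and start every service in the file
docker-compose -f docker-compose.network.yml up -d --build

# Follow the logs of the first BigchainDB node
docker-compose -f docker-compose.network.yml logs -f bdb-one

# Run the health-check client once the nodes are up
docker-compose -f docker-compose.network.yml run --rm curl-client
```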
45
docker-compose.tendermint.yml
Normal file
@ -0,0 +1,45 @@
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
mdb:
|
||||
image: mongo:3.4.3
|
||||
ports:
|
||||
- "27017"
|
||||
command: mongod
|
||||
bdb:
|
||||
depends_on:
|
||||
- mdb
|
||||
- tendermint
|
||||
build:
|
||||
context: .
|
||||
dockerfile: ./compose/bigchaindb-server/Dockerfile
|
||||
args:
|
||||
backend: localmongodb
|
||||
volumes:
|
||||
- ./bigchaindb:/usr/src/app/bigchaindb
|
||||
- ./tests:/usr/src/app/tests
|
||||
environment:
|
||||
BIGCHAINDB_DATABASE_BACKEND: localmongodb
|
||||
BIGCHAINDB_DATABASE_HOST: mdb
|
||||
BIGCHAINDB_DATABASE_PORT: 27017
|
||||
BIGCHAINDB_SERVER_BIND: 0.0.0.0:9984
|
||||
BIGCHAINDB_WSSERVER_HOST: 0.0.0.0
|
||||
BIGCHAINDB_START_TENDERMINT: 0
|
||||
TENDERMINT_HOST: tendermint
|
||||
TENDERMINT_PORT: 46657
|
||||
ports:
|
||||
- "9984"
|
||||
command: bigchaindb -l DEBUG start
|
||||
tendermint:
|
||||
image: tendermint/tendermint
|
||||
volumes:
|
||||
- ./tmdata:/tendermint
|
||||
entrypoint: ''
|
||||
command: bash -c "tendermint unsafe_reset_all && tendermint node"
|
||||
curl-client:
|
||||
image: appropriate/curl
|
||||
command: /bin/sh -c "curl http://tendermint:46657/abci_query && curl http://bdb:9984/"
|
||||
driver:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: ./compose/bigchaindb-driver/Dockerfile
|
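The single-node compose file above can be exercised the same way; its `curl-client` service already queries Tendermint's RPC port and the BigchainDB HTTP API, so it doubles as a smoke test. A sketch, assuming Docker and docker-compose are installed:

```text
# Start MongoDB, Tendermint and BigchainDB Server in the background
docker-compose -f docker-compose.tendermint.yml up -d mdb tendermint bdb

# Smoke test: curls http://tendermint:46657/abci_query and http://bdb:9984/
docker-compose -f docker-compose.tendermint.yml run --rm curl-client
```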
@ -5,7 +5,7 @@ services:
|
||||
image: mongo:3.4.3
|
||||
ports:
|
||||
- "27017"
|
||||
command: mongod --replSet=bigchain-rs
|
||||
command: mongod
|
||||
|
||||
bdb:
|
||||
build:
|
||||
@ -23,7 +23,7 @@ services:
|
||||
- ./pytest.ini:/usr/src/app/pytest.ini
|
||||
- ./tox.ini:/usr/src/app/tox.ini
|
||||
environment:
|
||||
BIGCHAINDB_DATABASE_BACKEND: mongodb
|
||||
BIGCHAINDB_DATABASE_BACKEND: localmongodb
|
||||
BIGCHAINDB_DATABASE_HOST: mdb
|
||||
BIGCHAINDB_DATABASE_PORT: 27017
|
||||
BIGCHAINDB_SERVER_BIND: 0.0.0.0:9984
|
||||
|
@ -6,19 +6,11 @@ BigchainDB can store data of any kind (within reason), but it's designed to be p
|
||||
* The fundamental thing that one sends to a BigchainDB cluster, to be checked and stored (if valid), is a *transaction*, and there are two kinds: CREATE transactions and TRANSFER transactions.
|
||||
* A CREATE transaction can be used to register any kind of asset (divisible or indivisible), along with arbitrary metadata.
|
||||
* An asset can have zero, one, or several owners.
|
||||
* The owners of an asset can specify (crypto-)conditions which must be satisfied by anyone wishing to transfer the asset to new owners. For example, a condition might be that at least 3 of the 5 current owners must cryptographically sign a transfer transaction.
|
||||
* BigchainDB verifies that the conditions have been satisfied as part of checking the validity of transfer transactions. (Moreover, anyone can check that they were satisfied.)
|
||||
* The owners of an asset can specify (crypto-)conditions which must be satisfied by anyone wishing to transfer the asset to new owners. For example, a condition might be that at least 3 of the 5 current owners must cryptographically sign a TRANSFER transaction.
|
||||
* BigchainDB verifies that the conditions have been satisfied as part of checking the validity of TRANSFER transactions. (Moreover, anyone can check that they were satisfied.)
|
||||
* BigchainDB prevents double-spending of an asset.
|
||||
* Validated transactions are strongly tamper-resistant; see :doc:`the page about immutability <immutable>`.
|
||||
|
||||
|
||||
BigchainDB Integration with Other Blockchains
|
||||
---------------------------------------------
|
||||
|
||||
BigchainDB works with the `Interledger protocol <https://interledger.org/>`_, enabling the transfer of assets between BigchainDB and other blockchains, ledgers, and payment systems.
|
||||
|
||||
We’re actively exploring ways that BigchainDB can be used with other blockchains and platforms.
|
||||
* Validated transactions are :doc:`"immutable" <immutable>`.
|
||||
|
||||
.. note::
|
||||
|
||||
We used the word "owners" somewhat loosely above. A more accurate word might be fulfillers, signers, controllers, or transfer-enablers. See BigchainDB Server `issue #626 <https://github.com/bigchaindb/bigchaindb/issues/626>`_.
|
||||
We used the word "owners" somewhat loosely above. A more accurate word might be fulfillers, signers, controllers, or transfer-enablers. See the `note about "owners" in the IPDB Transaction Spec <https://the-ipdb-transaction-spec.readthedocs.io/en/latest/ownership.html>`_.
|
||||
|
@ -1,8 +1,6 @@
|
||||
# BigchainDB and Byzantine Fault Tolerance
|
||||
|
||||
While BigchainDB is not currently [Byzantine fault tolerant (BFT)](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance), we plan to offer it as an option.
|
||||
Update Nov 2017: we're actively working on this, the next release or two will likely have support. More details to come in blog form and github issues
|
||||
|
||||
Related issue: [Issue #293](https://github.com/bigchaindb/bigchaindb/issues/293). We anticipate that turning on BFT will cause a dropoff in performance (for a gain in security).
|
||||
|
||||
In the meantime, there are practical things that one can do to increase security (e.g. firewalls, key management, and access controls).
|
||||
[BigchainDB Server](https://docs.bigchaindb.com/projects/server/en/latest/index.html)
|
||||
uses [Tendermint](https://tendermint.com/)
|
||||
for consensus and transaction replication,
|
||||
and Tendermint is [Byzantine Fault Tolerant (BFT)](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance).
|
||||
|
@ -14,8 +14,6 @@ A consortium can increase its decentralization (and its resilience) by increasin
|
||||
|
||||
There’s no node that has a long-term special position in the cluster. All nodes run the same software and perform the same duties.
|
||||
|
||||
MongoDB and RethinkDB have an “admin” user which can’t be deleted and which can make big changes to the database, such as dropping a table. Right now, that’s a big security vulnerability, but we have plans to mitigate it by:
|
||||
1. Locking down the admin user as much as possible.
|
||||
2. Having all nodes inspect admin-type requests before acting on them. Requests can be checked against an evolving whitelist of allowed actions. Nodes requesting non-allowed requests can be removed from the list of cluster nodes.
|
||||
If someone has (or gets) admin access to a node, they can mess with that node (e.g. change or delete data stored on that node), but those changes should remain isolated to that node. The BigchainDB cluster can only be compromised if more than one third of the nodes get compromised. See the [Tendermint documentation](https://tendermint.readthedocs.io/projects/tools/en/master/introduction.html) for more details.
|
||||
|
||||
It’s worth noting that the admin user can’t transfer assets, even today. The only way to create a valid transfer transaction is to fulfill the current (crypto) conditions on the asset, and the admin user can’t do that because the admin user doesn’t have the necessary private keys (or preimages, in the case of hashlock conditions). They’re not stored in the database.
|
||||
It’s worth noting that not even the admin or superuser of a node can transfer assets. The only way to create a valid transfer transaction is to fulfill the current crypto-conditions on the asset, and the admin/superuser can’t do that because the admin user doesn’t have the necessary information (e.g. private keys).
|
||||
|
@ -1,11 +1,10 @@
|
||||
# Kinds of Node Diversity
|
||||
|
||||
Steps should be taken to make it difficult for any one actor or event to control or damage “enough” of the nodes. (“Enough” is usually a quorum.) There are many kinds of diversity to consider, listed below. It may be quite difficult to have high diversity of all kinds.
|
||||
Steps should be taken to make it difficult for any one actor or event to control or damage “enough” of the nodes. (Because BigchainDB Server uses Tendermint, "enough" is ⅓.) There are many kinds of diversity to consider, listed below. It may be quite difficult to have high diversity of all kinds.
|
||||
|
||||
1. **Jurisdictional diversity.** The nodes should be controlled by entities within multiple legal jurisdictions, so that it becomes difficult to use legal means to compel enough of them to do something.
|
||||
2. **Geographic diversity.** The servers should be physically located at multiple geographic locations, so that it becomes difficult for a natural disaster (such as a flood or earthquake) to damage enough of them to cause problems.
|
||||
3. **Hosting diversity.** The servers should be hosted by multiple hosting providers (e.g. Amazon Web Services, Microsoft Azure, Digital Ocean, Rackspace), so that it becomes difficult for one hosting provider to influence enough of the nodes.
|
||||
4. **Operating system diversity.** The servers should use a variety of operating systems, so that a security bug in one OS can’t be used to exploit enough of the nodes.
|
||||
5. **Diversity in general.** In general, membership diversity (of all kinds) confers many advantages on a consortium. For example, it provides the consortium with a source of various ideas for addressing challenges.
|
||||
1. **Geographic diversity.** The servers should be physically located at multiple geographic locations, so that it becomes difficult for a natural disaster (such as a flood or earthquake) to damage enough of them to cause problems.
|
||||
1. **Hosting diversity.** The servers should be hosted by multiple hosting providers (e.g. Amazon Web Services, Microsoft Azure, Digital Ocean, Rackspace), so that it becomes difficult for one hosting provider to influence enough of the nodes.
|
||||
1. **Diversity in general.** In general, membership diversity (of all kinds) confers many advantages on a consortium. For example, it provides the consortium with a source of various ideas for addressing challenges.
|
||||
|
||||
Note: If all the nodes are running the same code, i.e. the same implementation of BigchainDB, then a bug in that code could be used to compromise all of the nodes. Ideally, there would be several different, well-maintained implementations of BigchainDB Server (e.g. one in Python, one in Go, etc.), so that a consortium could also have a diversity of server implementations.
|
||||
Note: If all the nodes are running the same code, i.e. the same implementation of BigchainDB, then a bug in that code could be used to compromise all of the nodes. Ideally, there would be several different, well-maintained implementations of BigchainDB Server (e.g. one in Python, one in Go, etc.), so that a consortium could also have a diversity of server implementations. Similar remarks can be made about the operating system.
|
||||
|
@ -2,13 +2,12 @@ BigchainDB Documentation
|
||||
========================
|
||||
|
||||
`BigchainDB <https://www.bigchaindb.com/>`_ is a scalable blockchain database.
|
||||
That is, it's a "big data" database with some blockchain characteristics added, including `decentralization <decentralized.html>`_,
|
||||
It has some database characteristics and some blockchain characteristics,
|
||||
including `decentralization <decentralized.html>`_,
|
||||
`immutability <immutable.html>`_
|
||||
and
|
||||
`native support for assets <assets.html>`_.
|
||||
You can read about the motivations, goals and high-level architecture in the `BigchainDB whitepaper <https://www.bigchaindb.com/whitepaper/>`_.
|
||||
and `native support for assets <assets.html>`_.
|
||||
|
||||
At a high level, one can communicate with a BigchainDB cluster (set of nodes) using the BigchainDB Client-Server HTTP API, or a wrapper for that API, such as the BigchainDB Python Driver. Each BigchainDB node runs BigchainDB Server and various other software. The `terminology page <terminology.html>`_ explains some of those terms in more detail.
|
||||
At a high level, one can communicate with a BigchainDB cluster (set of nodes) using the BigchainDB HTTP API, or a wrapper for that API, such as the BigchainDB Python Driver. Each BigchainDB node runs BigchainDB Server and various other software. The `terminology page <terminology.html>`_ explains some of those terms in more detail.
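For example, one quick way to check that a node's HTTP API is reachable is to request the API root (the hostname and port below are placeholders for your node's address):

```text
curl http://localhost:9984/
```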
|
||||
|
||||
.. raw:: html
|
||||
|
||||
@ -89,5 +88,4 @@ More About BigchainDB
|
||||
smart-contracts
|
||||
transaction-concepts
|
||||
permissions
|
||||
timestamps
|
||||
Data Models <https://docs.bigchaindb.com/projects/server/en/latest/data-models/index.html>
|
||||
|
@ -15,7 +15,8 @@ To spend/transfer an unspent output, a user (or group of users) must fulfill the
|
||||
- "…three of these four people must sign."
|
||||
- "…either Bob must sign, or both Tom and Sylvia must sign."
|
||||
|
||||
For details, see `the documentation about conditions in BigchainDB <https://docs.bigchaindb.com/projects/server/en/latest/data-models/conditions.html>`_.
|
||||
For details, see
|
||||
`the documentation about conditions in the IPDB Transaction Spec <https://the-ipdb-transaction-spec.readthedocs.io/en/latest/transaction-components/conditions.html>`_.
|
||||
|
||||
Once an output has been spent, it can't be spent again: *nobody* has permission to do that. That is, BigchainDB doesn't permit anyone to "double spend" an output.
|
||||
|
||||
|
@ -1,14 +1,3 @@
|
||||
# Production-Ready?
|
||||
|
||||
BigchainDB is not production-ready. You can use it to build a prototype or proof-of-concept (POC); many people are already doing that.
|
||||
Once BigchainDB is production-ready, we'll make an announcement.
|
||||
|
||||
BigchainDB version numbers follow the conventions of *Semantic Versioning* as documented at [semver.org](http://semver.org/). (For Python stuff, we use [Python's version of Semantic Versioning](https://packaging.python.org/tutorials/distributing-packages/#choosing-a-versioning-scheme).) This means, among other things:
|
||||
|
||||
* Before version 1.0, breaking API changes could happen in any new version, even in a change from version 0.Y.4 to 0.Y.5.
|
||||
|
||||
* Starting with version 1.0.0, breaking API changes will only happen when the MAJOR version changes (e.g. from 1.7.4 to 2.0.0, or from 4.9.3 to 5.0.0).
|
||||
|
||||
To review the release history of some particular BigchainDB software, go to the GitHub repository of that software and click on "Releases". For example, the release history of BigchainDB Server can be found at [https://github.com/bigchaindb/bigchaindb/releases](https://github.com/bigchaindb/bigchaindb/releases).
|
||||
|
||||
[The BigchainDB Roadmap](https://github.com/bigchaindb/org/blob/master/ROADMAP.md) will give you a sense of the things we intend to do with BigchainDB in the near term and the long term.
|
||||
Depending on your use case, BigchainDB may or may not be production-ready. You should ask your service provider.
|
||||
|
@ -15,5 +15,4 @@ Crypto-conditions can be quite complex. They can't include loops or recursion an
|
||||
|
||||
.. note::
|
||||
|
||||
We used the word "owners" somewhat loosely above. A more accurate word might be fulfillers, signers, controllers, or transfer-enablers. See BigchainDB Server `issue #626 <https://github.com/bigchaindb/bigchaindb/issues/626>`_.
|
||||
|
||||
We used the word "owners" somewhat loosely above. A more accurate word might be fulfillers, signers, controllers, or transfer-enablers. See the `note about "owners" in the IPDB Transaction Spec <https://the-ipdb-transaction-spec.readthedocs.io/en/latest/ownership.html>`_.
|
||||
|
@ -2,16 +2,13 @@
|
||||
|
||||
There is some specialized terminology associated with BigchainDB. To get started, you should at least know the following:
|
||||
|
||||
|
||||
## BigchainDB Node
|
||||
|
||||
A **BigchainDB node** is a machine or set of closely-linked machines running MongoDB Server (or RethinkDB Server), BigchainDB Server, and related software. Each node is controlled by one person or organization.
|
||||
|
||||
A **BigchainDB node** is a machine (or logical machine) running [BigchainDB Server](https://docs.bigchaindb.com/projects/server/en/latest/introduction.html) and related software. Each node is controlled by one person or organization.
|
||||
|
||||
## BigchainDB Cluster
|
||||
|
||||
A set of BigchainDB nodes can connect to each other to form a **BigchainDB cluster**. Each node in the cluster runs the same software. A cluster contains one logical MongoDB/RethinkDB datastore. A cluster may have additional machines to do things such as cluster monitoring.
|
||||
|
||||
A set of BigchainDB nodes can connect to each other to form a **BigchainDB cluster**. Each node in the cluster runs the same software. A cluster may have additional machines to do things such as cluster monitoring.
|
||||
|
||||
## BigchainDB Consortium
|
||||
|
||||
|
@ -1,84 +0,0 @@
|
||||
# Timestamps in BigchainDB
|
||||
|
||||
Each block and vote has an associated timestamp. Interpreting those timestamps is tricky, hence the need for this section.
|
||||
|
||||
|
||||
## Timestamp Sources & Accuracy
|
||||
|
||||
Timestamps in BigchainDB are provided by the node which created the block and the node that created the vote.
|
||||
|
||||
When a BigchainDB node needs a timestamp, it calls a BigchainDB utility function named `timestamp()`. There's a detailed explanation of how that function works below, but the short version is that it gets the [Unix time](https://en.wikipedia.org/wiki/Unix_time) from its system clock, rounded to the nearest second.
|
||||
|
||||
We advise BigchainDB nodes to run special software (an "NTP daemon") to keep their system clock in sync with standard time servers. (NTP stands for [Network Time Protocol](https://en.wikipedia.org/wiki/Network_Time_Protocol).)
|
||||
|
||||
|
||||
## Converting Timestamps to UTC
|
||||
|
||||
To convert a BigchainDB timestamp (a Unix time) to UTC, you need to know how the node providing the timestamp was set up. That's because different setups will report a different "Unix time" value around leap seconds! There's [a nice Red Hat Developer Blog post about the various setup options](https://developers.redhat.com/blog/2015/06/01/five-different-ways-handle-leap-seconds-ntp/). If you want more details, see [David Mills' pages about leap seconds, NTP, etc.](https://www.eecis.udel.edu/~mills/leap.html) (David Mills designed NTP.)
|
||||
|
||||
We advise BigchainDB nodes to run an NTP daemon with particular settings so that their timestamps are consistent.
|
||||
|
||||
If a timestamp comes from a node that's set up as we advise, it can be converted to UTC as follows:
|
||||
|
||||
1. Use a standard "Unix time to UTC" converter to get a UTC timestamp.
|
||||
2. Is the UTC timestamp a leap second, or the second before/after a leap second? There's [a list of all the leap seconds on Wikipedia](https://en.wikipedia.org/wiki/Leap_second).
|
||||
3. If no, then you are done.
|
||||
4. If yes, then it might not be possible to convert it to a single UTC timestamp. Even if it can't be converted to a single UTC timestamp, it _can_ be converted to a list of two possible UTC timestamps.
|
||||
Showing how to do that is beyond the scope of this documentation.
|
||||
In all likelihood, you will never have to worry about leap seconds because they are very rare.
|
||||
(There were only 26 between 1972 and the end of 2015.)
|
||||
|
||||
|
||||
## Calculating Elapsed Time Between Two Timestamps
|
||||
|
||||
There's another gotcha with (Unix time) timestamps: you can't calculate the real-world elapsed time between two timestamps (correctly) by subtracting the smaller timestamp from the larger one. The result won't include any of the leap seconds that occurred between the two timestamps. You could look up how many leap seconds happened between the two timestamps and add that to the result. There are many library functions for working with timestamps; those are beyond the scope of this documentation.
|
||||
|
||||
|
||||
## Interpreting Sets of Timestamps
|
||||
|
||||
You can look at many timestamps to get a statistical sense of when something happened. For example, a transaction in a decided-valid block has many associated timestamps:
|
||||
|
||||
* the timestamp of the block
|
||||
* the timestamps of all the votes on the block
|
||||
|
||||
|
||||
## How BigchainDB Uses Timestamps
|
||||
|
||||
BigchainDB _doesn't_ use timestamps to determine the order of transactions or blocks. In particular, the order of blocks is determined by MongoDB's oplog (or RethinkDB's changefeed) on the bigchain table.
|
||||
|
||||
BigchainDB does use timestamps for some things. When a Transaction is written to the backlog, a timestamp is assigned called the `assignment_timestamp`, to determine if it has been waiting in the backlog for too long (i.e. because the node assigned to it hasn't handled it yet).
|
||||
|
||||
|
||||
## Including Trusted Timestamps
|
||||
|
||||
If you want to create a transaction payload with a trusted timestamp, you can.
|
||||
|
||||
One way to do that would be to send a payload to a trusted timestamping service. They will send back a timestamp, a signature, and their public key. They should also explain how you can verify the signature. You can then include the original payload, the timestamp, the signature, and the service's public key in your transaction metadata. That way, anyone with the verification instructions can verify that the original payload was signed by the trusted timestamping service.
|
||||
|
||||
|
||||
## How the timestamp() Function Works
|
||||
|
||||
BigchainDB has a utility function named `timestamp()` which amounts to:
|
||||
```python
|
||||
timestamp() = str(round(time.time()))
|
||||
```
|
||||
|
||||
In other words, it calls the `time()` function in Python's `time` module, [rounds](https://docs.python.org/3/library/functions.html#round) that to the nearest integer, and converts the result to a string.
|
||||
|
||||
It rounds the output of `time.time()` to the nearest second because, according to [the Python documentation for `time.time()`](https://docs.python.org/3.4/library/time.html#time.time), "...not all systems provide time with a better precision than 1 second."
|
||||
|
||||
How does `time.time()` work? If you look in the C source code, it calls `floattime()` and `floattime()` calls [clock_gettime()](https://www.cs.rutgers.edu/~pxk/416/notes/c-tutorials/gettime.html), if it's available.
|
||||
```text
|
||||
ret = clock_gettime(CLOCK_REALTIME, &tp);
|
||||
```
|
||||
|
||||
With `CLOCK_REALTIME` as the first argument, it returns the "Unix time." ("Unix time" is in quotes because its value around leap seconds depends on how the system is set up; see above.)
|
||||
|
||||
|
||||
## Why Not Use UTC, TAI or Some Other Time that Has Unambiguous Timestamps for Leap Seconds?
|
||||
|
||||
It would be nice to use UTC or TAI timestamps, but unfortunately there's no commonly-available, standard way to get always-accurate UTC or TAI timestamps from the operating system on typical computers today (i.e. accurate around leap seconds).
|
||||
|
||||
There _are_ commonly-available, standard ways to get the "Unix time," such as clock_gettime() function available in C. That's what we use (indirectly via Python). ("Unix time" is in quotes because its value around leap seconds depends on how the system is set up; see above.)
|
||||
|
||||
The Unix-time-based timestamps we use are only ambiguous circa leap seconds, and those are very rare. Even for those timestamps, the extra uncertainty is only one second, and that's not bad considering that we only report timestamps to a precision of one second in the first place. All other timestamps can be converted to UTC with no ambiguity.
|
@ -6,7 +6,6 @@ things (e.g. assets).
|
||||
Transactions are the most basic kind of record stored by BigchainDB. There are
|
||||
two kinds: CREATE transactions and TRANSFER transactions.
|
||||
|
||||
|
||||
## CREATE Transactions
|
||||
|
||||
A CREATE transaction can be used to register, issue, create or otherwise
|
||||
@ -31,20 +30,19 @@ Each output also has an associated condition: the condition that must be met
|
||||
BigchainDB supports a variety of conditions,
|
||||
a subset of the [Interledger Protocol (ILP)](https://interledger.org/)
|
||||
crypto-conditions. For details, see
|
||||
[the documentation about Inputs and Outputs](https://docs.bigchaindb.com/projects/server/en/latest/data-models/inputs-outputs.html).
|
||||
[the documentation about conditions in the IPDB Transaction Spec](https://the-ipdb-transaction-spec.readthedocs.io/en/latest/transaction-components/conditions.html).
|
||||
|
||||
Each output also has a list of all the public keys associated
|
||||
with the conditions on that output.
|
||||
Loosely speaking, that list might be interpreted as the list of "owners."
|
||||
A more accurate word might be fulfillers, signers, controllers,
|
||||
or transfer-enablers.
|
||||
See BigchainDB Server [issue #626](https://github.com/bigchaindb/bigchaindb/issues/626).
|
||||
See the [note about "owners" in the IPDB Transaction Spec](https://the-ipdb-transaction-spec.readthedocs.io/en/latest/ownership.html).
|
||||
|
||||
A CREATE transaction must be signed by all the owners.
|
||||
(If you're looking for that signature,
|
||||
it's in the one "fulfillment" of the one input, albeit encoded.)
|
||||
|
||||
|
||||
## TRANSFER Transactions
|
||||
|
||||
A TRANSFER transaction can transfer/spend one or more outputs
|
||||
@ -82,7 +80,6 @@ transferred if both Jack and Kelly sign.
|
||||
Note how the sum of the incoming paperclips must equal the sum
|
||||
of the outgoing paperclips (100).
|
||||
|
||||
|
||||
## Transaction Validity
|
||||
|
||||
When a node is asked to check if a transaction is valid, it checks several
|
||||
@ -90,6 +87,7 @@ things. We documented those things in a post on *The BigchainDB Blog*:
|
||||
["What is a Valid Transaction in BigchainDB?"](https://blog.bigchaindb.com/what-is-a-valid-transaction-in-bigchaindb-9a1a075a9598)
|
||||
(Note: That post was about BigchainDB Server v1.0.0.)
|
||||
|
||||
The [IPDB Transaction Spec documents the conditions for a transaction to be valid](https://the-ipdb-transaction-spec.readthedocs.io/en/latest/transaction-validation.html).
|
||||
|
||||
## Example Transactions
|
||||
|
||||
|
@ -1,207 +0,0 @@
|
||||
# Deploy a RethinkDB-Based Testing Cluster on AWS
|
||||
|
||||
This section explains a way to deploy a _RethinkDB-based_ cluster of BigchainDB nodes on Amazon Web Services (AWS) for testing purposes.
|
||||
|
||||
## Why?
|
||||
|
||||
Why would anyone want to deploy a centrally-controlled BigchainDB cluster? Isn't BigchainDB supposed to be decentralized, where each node is controlled by a different person or organization?
|
||||
|
||||
Yes! These scripts are for deploying a testing cluster, not a production cluster.
|
||||
|
||||
## How?
|
||||
|
||||
We use some Bash and Python scripts to launch several instances (virtual servers) on Amazon Elastic Compute Cloud (EC2). Then we use Fabric to install RethinkDB and BigchainDB on all those instances.
|
||||
|
||||
## Python Setup
|
||||
|
||||
The instructions that follow have been tested on Ubuntu 16.04. Similar instructions should work on similar Linux distros.
|
||||
|
||||
**Note: Our Python scripts for deploying to AWS use Python 2 because Fabric doesn't work with Python 3.**
|
||||
|
||||
You must install the Python package named `fabric`, but it depends on the `cryptography` package, and that depends on some OS-level packages. On Ubuntu 16.04, you can install those OS-level packages using:
|
||||
```text
|
||||
sudo apt-get install build-essential libssl-dev libffi-dev python-dev
|
||||
```
|
||||
|
||||
For other operating systems, see [the installation instructions for the `cryptography` package](https://cryptography.io/en/latest/installation/).
|
||||
|
||||
Maybe create a Python 2 virtual environment and activate it. Then install the following Python packages (in that virtual environment):
|
||||
```text
|
||||
pip install fabric fabtools requests boto3 awscli
|
||||
```
|
||||
|
||||
What did you just install?
|
||||
|
||||
* "[Fabric](http://www.fabfile.org/) is a Python (2.5-2.7) library and command-line tool for streamlining the use of SSH for application deployment or systems administration tasks."
|
||||
* [fabtools](https://github.com/fabtools/fabtools) are "tools for writing awesome Fabric files"
|
||||
* [requests](http://docs.python-requests.org/en/master/) is a Python package/library for sending HTTP requests
|
||||
* "[Boto](https://boto3.readthedocs.io/en/latest/) is the Amazon Web Services (AWS) SDK for Python, which allows Python developers to write software that makes use of Amazon services like S3 and EC2." (`boto3` is the name of the latest Boto package.)
|
||||
* [The aws-cli package](https://pypi.python.org/pypi/awscli), which is an AWS Command Line Interface (CLI).
|
||||
|
||||
|
||||
## Setting up in AWS
|
||||
|
||||
See the page about [basic AWS Setup](../appendices/aws-setup.html) in the Appendices.
|
||||
|
||||
|
||||
## Get Enough Amazon Elastic IP Addresses
|
||||
|
||||
The AWS cluster deployment scripts use elastic IP addresses (although that may change in the future). By default, AWS accounts get five elastic IP addresses. If you want to deploy a cluster with more than five nodes, then you will need more than five elastic IP addresses; you may have to apply for those; see [the AWS documentation on elastic IP addresses](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html).
|
||||
|
||||
## Create an Amazon EC2 Security Group
|
||||
|
||||
Go to the AWS EC2 Console and select "Security Groups" in the left sidebar. Click the "Create Security Group" button. You can name it whatever you like. (Notes: The default name in the example AWS deployment configuration file is `bigchaindb`. We had problems with names containing dashes.) The description should be something to help you remember what the security group is for.
|
||||
|
||||
For a super lax, somewhat risky, anything-can-enter security group, add these rules for Inbound traffic:
|
||||
|
||||
* Type = All TCP, Protocol = TCP, Port Range = 0-65535, Source = 0.0.0.0/0
|
||||
* Type = SSH, Protocol = SSH, Port Range = 22, Source = 0.0.0.0/0
|
||||
* Type = All UDP, Protocol = UDP, Port Range = 0-65535, Source = 0.0.0.0/0
|
||||
* Type = All ICMP, Protocol = ICMP, Port Range = 0-65535, Source = 0.0.0.0/0
|
||||
|
||||
(Note: Source = 0.0.0.0/0 is [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) for "allow this traffic to come from _any_ IP address.")
|
||||
|
||||
If you want to set up a more secure security group, see the [Notes for Firewall Setup](../appendices/firewall-notes.html).
|
||||
|
||||
|
||||
## Deploy a BigchainDB Cluster
|
||||
|
||||
### Step 1
|
||||
|
||||
Suppose _N_ is the number of nodes you want in your BigchainDB cluster. If you already have a set of _N_ BigchainDB configuration files in the `deploy-cluster-aws/confiles` directory, then you can jump to the next step. To create such a set, you can do something like:
|
||||
```text
|
||||
# in a Python 3 virtual environment where bigchaindb is installed
|
||||
cd bigchaindb
|
||||
cd deploy-cluster-aws
|
||||
./make_confiles.sh confiles 3
|
||||
```
|
||||
|
||||
That will create three (3) _default_ BigchainDB configuration files in the `deploy-cluster-aws/confiles` directory (which will be created if it doesn't already exist). The three files will be named `bcdb_conf0`, `bcdb_conf1`, and `bcdb_conf2`.
|
||||
|
||||
You can look inside those files if you're curious. For example, the default keyring is an empty list. Later, the deployment script automatically changes the keyring of each node to be a list of the public keys of all other nodes. Other changes are also made. That is, the configuration files generated in this step are _not_ what will be sent to the deployed nodes; they're just a starting point.
|
||||
|
||||
### Step 2
|
||||
|
||||
Step 2 is to make an AWS deployment configuration file, if necessary. There's an example AWS configuration file named `example_deploy_conf.py`. It has many comments explaining each setting. The settings in that file are (or should be):
|
||||
```text
|
||||
NUM_NODES=3
|
||||
BRANCH="master"
|
||||
SSH_KEY_NAME="not-set-yet"
|
||||
USE_KEYPAIRS_FILE=False
|
||||
IMAGE_ID="ami-8504fdea"
|
||||
INSTANCE_TYPE="t2.medium"
|
||||
SECURITY_GROUP="bigchaindb"
|
||||
USING_EBS=True
|
||||
EBS_VOLUME_SIZE=30
|
||||
EBS_OPTIMIZED=False
|
||||
ENABLE_WEB_ADMIN=True
|
||||
BIND_HTTP_TO_LOCALHOST=True
|
||||
```
|
||||
|
||||
Make a copy of that file and call it whatever you like (e.g. `cp example_deploy_conf.py my_deploy_conf.py`). You can leave most of the settings at their default values, but you must change the value of `SSH_KEY_NAME` to the name of your private SSH key. You can do that with a text editor. Set `SSH_KEY_NAME` to the name you used for `<key-name>` when you generated an RSA key pair for SSH (in basic AWS setup).
|
||||
|
||||
You'll also want to change the `IMAGE_ID` to one that's up-to-date and available in your AWS region. If you don't remember your AWS region, then look in your `$HOME/.aws/config` file. You can find an up-to-date Ubuntu image ID for your region at [https://cloud-images.ubuntu.com/locator/ec2/](https://cloud-images.ubuntu.com/locator/ec2/). An example search string is "eu-central-1 16.04 LTS amd64 hvm:ebs-ssd". You should replace "eu-central-1" with your region name.
|
||||
|
||||
If you want your nodes to have a predictable set of pre-generated keypairs, then you should 1) set `USE_KEYPAIRS_FILE=True` in the AWS deployment configuration file, and 2) provide a `keypairs.py` file containing enough keypairs for all of your nodes. You can generate a `keypairs.py` file using the `write_keypairs_file.py` script. For example:
|
||||
```text
|
||||
# in a Python 3 virtual environment where bigchaindb is installed
|
||||
cd bigchaindb
|
||||
cd deploy-cluster-aws
|
||||
python3 write_keypairs_file.py 100
|
||||
```
|
||||
|
||||
The above command generates a `keypairs.py` file with 100 keypairs. You can generate more keypairs than you need, so you can use the same list over and over again, for different numbers of servers. The deployment scripts will only use the first NUM_NODES keypairs.
|
||||
|
||||
### Step 3
|
||||
|
||||
Step 3 is to launch the nodes ("instances") on AWS, to install all the necessary software on them, configure the software, run the software, and more. Here's how you'd do that:
|
||||
|
||||
```text
|
||||
# in a Python 2.5-2.7 virtual environment where fabric, boto3, etc. are installed
|
||||
cd bigchaindb
|
||||
cd deploy-cluster-aws
|
||||
./awsdeploy.sh my_deploy_conf.py
|
||||
# Only if you want to set the replication factor to 3
|
||||
fab set_replicas:3
|
||||
# Only if you want to start BigchainDB on all the nodes:
|
||||
fab start_bigchaindb
|
||||
```
|
||||
|
||||
`awsdeploy.sh` is a Bash script which calls some Python and Fabric scripts. If you're curious what it does, [the source code](https://github.com/bigchaindb/bigchaindb/blob/master/deploy-cluster-aws/awsdeploy.sh) has many explanatory comments.
|
||||
|
||||
It should take a few minutes for the deployment to finish. If you run into problems, see the section on **Known Deployment Issues** below.
|
||||
|
||||
The EC2 Console has a section where you can see all the instances you have running on EC2. You can `ssh` into a running instance using a command like:
|
||||
```text
|
||||
ssh -i pem/bigchaindb.pem ubuntu@ec2-52-29-197-211.eu-central-1.compute.amazonaws.com
|
||||
```
|
||||
|
||||
except you'd replace the `ec2-52-29-197-211.eu-central-1.compute.amazonaws.com` with the public DNS name of the instance you want to `ssh` into. You can get that from the EC2 Console: just click on an instance and look in its details pane at the bottom of the screen. Some commands you might try:
|
||||
```text
|
||||
ip addr show
|
||||
sudo service rethinkdb status
|
||||
bigchaindb --help
|
||||
bigchaindb show-config
|
||||
```
|
||||
|
||||
If you enabled the RethinkDB web interface (by setting `ENABLE_WEB_ADMIN=True` in your AWS configuration file), then you can also check that. The way to do that depends on how `BIND_HTTP_TO_LOCALHOST` was set (in your AWS deployment configuration file):
|
||||
|
||||
* If it was set to `False`, then just go to your web browser and visit a web address like `http://ec2-52-29-197-211.eu-central-1.compute.amazonaws.com:8080/`. (Replace `ec2-...aws.com` with the hostname of one of your instances.)
|
||||
* If it was set to `True` (the default in the example config file), then follow the instructions in the "Via a SOCKS proxy" section of [the "Secure your cluster" page of the RethinkDB documentation](https://www.rethinkdb.com/docs/security/).
|
||||
|
||||
|
||||
## Server Monitoring with New Relic
|
||||
|
||||
[New Relic](https://newrelic.com/) is a business that provides several monitoring services. One of those services, called Server Monitoring, can be used to monitor things like CPU usage and Network I/O on BigchainDB instances. To do that:
|
||||
|
||||
1. Sign up for a New Relic account
|
||||
2. Get your New Relic license key
|
||||
3. Put that key in an environment variable named `NEWRELIC_KEY`. For example, you might add a line like the following to your `~/.bashrc` file (if you use Bash): `export NEWRELIC_KEY=<insert your key here>`
|
||||
4. Once you've deployed a BigchainDB cluster on AWS as above, you can install a New Relic system monitor (agent) on all the instances using:
|
||||
|
||||
```text
|
||||
# in a Python 2.5-2.7 virtual environment where fabric, boto3, etc. are installed
|
||||
fab install_newrelic
|
||||
```
|
||||
|
||||
Once the New Relic system monitor (agent) is installed on the instances, it will start sending server stats to New Relic on a regular basis. It may take a few minutes for data to show up in your New Relic dashboard (under New Relic Servers).
|
||||
|
||||
## Shutting Down a Cluster
|
||||
|
||||
There are fees associated with running instances on EC2, so if you're not using them, you should terminate them. You can do that using the AWS EC2 Console.
|
||||
|
||||
The same is true of your allocated elastic IP addresses. There's a small fee to keep them allocated if they're not associated with a running instance. You can release them using the AWS EC2 Console, or by using a handy little script named `release_eips.py`. For example:
|
||||
```text
|
||||
$ python release_eips.py
|
||||
You have 2 allocated elastic IPs which are not associated with instances
|
||||
0: Releasing 52.58.110.110
|
||||
(It has Domain = vpc.)
|
||||
1: Releasing 52.58.107.211
|
||||
(It has Domain = vpc.)
|
||||
```
|
||||
|
||||
## Known Deployment Issues
|
||||
|
||||
### NetworkError
|
||||
|
||||
If you have deployed a cluster several times before (e.g. while testing), you might run into an error message like this:
|
||||
```text
|
||||
NetworkError: Host key for ec2-xx-xx-xx-xx.eu-central-1.compute.amazonaws.com
|
||||
did not match pre-existing key! Server's key was changed recently, or possible
|
||||
man-in-the-middle attack.
|
||||
```
|
||||
|
||||
If so, just clean up your `known_hosts` file and start again. For example, you might copy your current `known_hosts` file to `old_known_hosts` like so:
|
||||
```text
|
||||
mv ~/.ssh/known_hosts ~/.ssh/old_known_hosts
|
||||
```
|
||||
|
||||
Then terminate your instances and try deploying again with a different tag.
|
||||
|
||||
### Failure of sudo apt-get update
|
||||
|
||||
The first thing that's done on all the instances, once they're running, is basically [`sudo apt-get update`](http://askubuntu.com/questions/222348/what-does-sudo-apt-get-update-do). Sometimes that fails. If so, just terminate your instances and try deploying again with a different tag. (These problems seem to be time-bounded, so maybe wait a couple of hours before retrying.)
|
||||
|
||||
### Failure when Installing Base Software
|
||||
|
||||
If you get an error with installing the base software on the instances, then just terminate your instances and try deploying again with a different tag.
|
@ -1,55 +1,41 @@
|
||||
# Notes for Firewall Setup
|
||||
|
||||
This is a page of notes on the ports potentially used by BigchainDB nodes and the traffic they should expect, to help with firewall setup (and security group setup on AWS). This page is _not_ a firewall tutorial or step-by-step guide.
|
||||
|
||||
This is a page of notes on the ports potentially used by BigchainDB nodes and the traffic they should expect, to help with firewall setup (or security group setup on cloud providers). This page is _not_ a firewall tutorial or step-by-step guide.
|
||||
|
||||
## Expected Unsolicited Inbound Traffic
|
||||
|
||||
Assuming you aren't exposing the RethinkDB web interface on port 8080 (or any other port, because [there are more secure ways to access it](https://www.rethinkdb.com/docs/security/#binding-the-web-interface-port)), there are only three ports that should expect unsolicited inbound traffic:
|
||||
The following ports should expect unsolicited inbound traffic:
|
||||
|
||||
1. **Port 22** can expect inbound SSH (TCP) traffic from the node administrator (i.e. a small set of IP addresses).
|
||||
1. **Port 9984** can expect inbound HTTP (TCP) traffic from BigchainDB clients sending transactions to the BigchainDB HTTP API.
|
||||
1. **Port 9985** can expect inbount WebSocket traffic from BigchainDB clients.
|
||||
1. If you're using RethinkDB, **Port 29015** can expect inbound TCP traffic from other RethinkDB nodes in the RethinkDB cluster (for RethinkDB intracluster communications).
|
||||
1. If you're using MongoDB, **Port 27017** can expect inbound TCP traffic from other nodes.
|
||||
1. **Port 9985** can expect inbound WebSocket traffic from BigchainDB clients.
|
||||
1. **Port 46656** can expect inbound Tendermint P2P traffic from other Tendermint peers.
|
||||
|
||||
All other ports should only get inbound traffic in response to specific requests from inside the node.
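For example, on a node that only needs those four ports open and whose firewall is managed with `ufw` (an assumption; adapt this to your firewall of choice), the rules might look roughly like the sketch below, where ADMIN_IP is a placeholder for the node administrator's address:

```text
# SSH only from the node administrator's address
sudo ufw allow from ADMIN_IP to any port 22 proto tcp

# BigchainDB HTTP API and WebSocket Event Stream API
sudo ufw allow 9984/tcp
sudo ufw allow 9985/tcp

# Tendermint peer-to-peer traffic
sudo ufw allow 46656/tcp

sudo ufw enable
```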
|
||||
|
||||
|
||||
## Port 22
|
||||
|
||||
Port 22 is the default SSH port (TCP) so you'll at least want to make it possible to SSH in from your remote machine(s).
|
||||
|
||||
|
||||
## Port 53
|
||||
|
||||
Port 53 is the default DNS port (UDP). It may be used, for example, by some package managers when looking up the IP address associated with certain package sources.
|
||||
|
||||
|
||||
## Port 80
|
||||
|
||||
Port 80 is the default HTTP port (TCP). It's used by some package managers to get packages. It's _not_ used by the RethinkDB web interface (see Port 8080 below) or the BigchainDB client-server HTTP API (Port 9984).
|
||||
|
||||
Port 80 is the default HTTP port (TCP). It's used by some package managers to get packages. It's _not_ the default port for the BigchainDB client-server HTTP API.
|
||||
|
||||
## Port 123
|
||||
|
||||
Port 123 is the default NTP port (UDP). You should be running an NTP daemon on production BigchainDB nodes. NTP daemons must be able to send requests to external NTP servers and accept the responses.
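For example, on an Ubuntu node, one possible way to get an NTP daemon running is shown below (a sketch only; package and service names differ between distributions):

```text
sudo apt-get update
sudo apt-get install ntp
sudo systemctl enable ntp
sudo systemctl start ntp    # check later with: systemctl status ntp
```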
|
||||
|
||||
|
||||
## Port 161
|
||||
|
||||
Port 161 is the default SNMP port (usually UDP, sometimes TCP). SNMP is used, for example, by some server monitoring systems.
|
||||
|
||||
|
||||
## Port 443
|
||||
|
||||
Port 443 is the default HTTPS port (TCP). You may need to open it up for outbound requests (and inbound responses) temporarily because some RethinkDB installation instructions use wget over HTTPS to get the RethinkDB GPG key. Package managers might also get some packages using HTTPS.
|
||||
|
||||
|
||||
## Port 8080
|
||||
|
||||
Port 8080 is the default port used by RethinkDB for its administrative web (HTTP) interface (TCP). While you _can_, you shouldn't allow traffic from arbitrary external sources. You can still use the RethinkDB web interface by binding it to localhost and then accessing it via a SOCKS proxy or reverse proxy; see "Binding the web interface port" on [the RethinkDB page about securing your cluster](https://rethinkdb.com/docs/security/).
|
||||
|
||||
Port 443 is the default HTTPS port (TCP). Package managers might also get some packages using HTTPS.
|
||||
|
||||
## Port 9984
|
||||
|
||||
@ -59,21 +45,21 @@ If Gunicorn and the reverse proxy are running on the same server, then you'll ha
|
||||
|
||||
You may want to have Gunicorn and the reverse proxy running on different servers, so that both can listen on port 9984. That would also help isolate the effects of a denial-of-service attack.
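If you're unsure what is currently bound to port 9984 on a node (Gunicorn directly, or a reverse proxy in front of it), you can check from the node itself; a minimal sketch:
```text
# Show the process listening on TCP port 9984 (requires iproute2)
sudo ss -tlnp | grep 9984
```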
|
||||
|
||||
|
||||
## Port 9985
|
||||
|
||||
Port 9985 is the default port for the [BigchainDB WebSocket Event Stream API](../websocket-event-stream-api.html).
|
||||
|
||||
## Port 46656
|
||||
|
||||
## Port 28015
|
||||
Port 46656 is the default port used by Tendermint Core to communicate with other instances of Tendermint Core (peers).
|
||||
|
||||
Port 28015 is the default port used by RethinkDB client driver connections (TCP). If your BigchainDB node is just one server, then Port 28015 only needs to listen on localhost, because all the client drivers will be running on localhost. Port 28015 doesn't need to accept inbound traffic from the outside world.
|
||||
## Port 46657
|
||||
|
||||
Port 46657 is the default port used by Tendermint Core for RPC traffic. BigchainDB nodes use that internally; they don't expect incoming traffic from the outside world on port 46657.
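For example, the repository's `network/health-check.sh` script checks this endpoint with curl; from inside a node you can do something similar (a sketch, assuming Tendermint's RPC is on its default port):
```text
curl http://localhost:46657/abci_query
```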
|
||||
|
||||
## Port 29015
|
||||
|
||||
Port 29015 is the default port for RethinkDB intracluster connections (TCP). It should only accept incoming traffic from other RethinkDB servers in the cluster (a list of IP addresses that you should be able to find out).
|
||||
## Port 46658
|
||||
|
||||
Port 46658 is the default port used by Tendermint Core for ABCI traffic. BigchainDB nodes use that internally; they don't expect incoming traffic from the outside world on port 46658.
|
||||
|
||||
## Other Ports
|
||||
|
||||
|
@ -1,6 +1,3 @@
|
||||
.. You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Appendices
|
||||
==========
|
||||
|
||||
@ -8,23 +5,17 @@ Appendices
|
||||
:maxdepth: 1
|
||||
|
||||
install-os-level-deps
|
||||
install-latest-pip
|
||||
run-with-docker
|
||||
json-serialization
|
||||
cryptography
|
||||
the-Bigchain-class
|
||||
pipelines
|
||||
backend
|
||||
commands
|
||||
aws-setup
|
||||
aws-testing-cluster
|
||||
generate-key-pair-for-ssh
|
||||
firewall-notes
|
||||
ntp-notes
|
||||
rethinkdb-reqs
|
||||
rethinkdb-backup
|
||||
licenses
|
||||
install-with-lxd
|
||||
run-with-vagrant
|
||||
run-with-ansible
|
||||
vote-yaml
|
||||
|
@ -1,20 +0,0 @@
|
||||
# How to Install the Latest pip and setuptools
|
||||
|
||||
You can check the version of `pip` you're using (in your current virtualenv) by doing:
|
||||
```text
|
||||
pip -V
|
||||
```
|
||||
|
||||
If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.5, then you must install a `pip` version associated with Python 3.5+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).
|
||||
|
||||
On Ubuntu 16.04, we found that this works:
|
||||
```text
|
||||
sudo apt-get install python3-pip
|
||||
```
|
||||
|
||||
That should install a Python 3 version of `pip` named `pip3`. If that didn't work, then another way to get `pip3` is to do `sudo apt-get install python3-setuptools` followed by `sudo easy_install3 pip`.
|
||||
|
||||
You can upgrade `pip` (`pip3`) and `setuptools` to the latest versions using:
|
||||
```text
|
||||
pip3 install --upgrade pip setuptools
|
||||
```
|
@ -1,43 +0,0 @@
|
||||
# Installing BigchainDB on LXC containers using LXD
|
||||
|
||||
**Note: This page was contributed by an external contributor and is not actively maintained. We include it in case someone is interested.**
|
||||
|
||||
You can visit this link to install LXD (instructions here): [LXD Install](https://linuxcontainers.org/lxd/getting-started-cli/)
|
||||
|
||||
(The assumption is that you are using Ubuntu 14.04 for both the host and the container.)
|
||||
|
||||
Let us create an LXC container (via LXD) with the following command:
|
||||
|
||||
`lxc launch ubuntu:14.04 bigchaindb`
|
||||
|
||||
(ubuntu:14.04 - this is the remote server the command fetches the image from)
|
||||
(bigchaindb - is the name of the container)
|
||||
|
||||
Below is the `install.sh` script you will need to install BigchainDB within your container.
|
||||
|
||||
Here is my `install.sh`:
|
||||
|
||||
```
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get install -y wget
|
||||
source /etc/lsb-release && echo "deb http://download.rethinkdb.com/apt $DISTRIB_CODENAME main" | sudo tee /etc/apt/sources.list.d/rethinkdb.list
|
||||
wget -qO- https://download.rethinkdb.com/apt/pubkey.gpg | sudo apt-key add -
|
||||
apt-get update
|
||||
apt-get install -y rethinkdb python3-pip
|
||||
pip3 install --upgrade pip wheel setuptools
|
||||
pip install ptpython bigchaindb
|
||||
```
|
||||
|
||||
Copy/paste the above `install.sh` into the directory/path you are going to execute your LXD commands from (i.e. the host).
|
||||
|
||||
Make sure your container is running by typing:
|
||||
|
||||
`lxc list`
|
||||
|
||||
Now, from the host (and the correct directory) where you saved `install.sh`, run this command:
|
||||
|
||||
`cat install.sh | lxc exec bigchaindb /bin/bash`
|
||||
|
||||
If you followed the commands correctly, you will have successfully created an LXC container (using LXD) that can get you up and running with BigchainDB in <5 minutes (depending on how long it takes to download all the packages).
|
@ -1,26 +0,0 @@
|
||||
#########
|
||||
Pipelines
|
||||
#########
|
||||
|
||||
Block Creation
|
||||
==============
|
||||
|
||||
.. automodule:: bigchaindb.pipelines.block
|
||||
|
||||
|
||||
Block Voting
|
||||
============
|
||||
|
||||
.. automodule:: bigchaindb.pipelines.vote
|
||||
|
||||
|
||||
Block Status
|
||||
============
|
||||
|
||||
.. automodule:: bigchaindb.pipelines.election
|
||||
|
||||
|
||||
Stale Transaction Monitoring
|
||||
============================
|
||||
|
||||
.. automodule:: bigchaindb.pipelines.stale
|
@ -1,124 +0,0 @@
|
||||
# Backing Up and Restoring Data
|
||||
|
||||
This page was written when BigchainDB only worked with RethinkDB, so its focus is on RethinkDB-based backup. BigchainDB now supports MongoDB as a backend database and we recommend that you use MongoDB in production. Nevertheless, some of the following backup ideas are still relevant regardless of the backend database being used, so we moved this page to the Appendices.
|
||||
|
||||
|
||||
## RethinkDB's Replication as a form of Backup
|
||||
|
||||
RethinkDB already has internal replication: every document is stored on _R_ different nodes, where _R_ is the replication factor (set using `bigchaindb set-replicas R`). Those replicas can be thought of as "live backups" because if one node goes down, the cluster will continue to work and no data will be lost.
|
||||
|
||||
At this point, there should be someone saying, "But replication isn't backup!"
|
||||
|
||||
It's true. Replication alone isn't enough, because something bad might happen _inside_ the database, and that could affect the replicas. For example, what if someone logged in as a RethinkDB admin and did a "drop table"? We currently plan for each node to be protected by a next-generation firewall (or something similar) to prevent such things from getting very far. For example, see [issue #240](https://github.com/bigchaindb/bigchaindb/issues/240).
|
||||
|
||||
Nevertheless, you should still consider having normal, "cold" backups, because bad things can still happen.
|
||||
|
||||
|
||||
## Live Replication of RethinkDB Data Files
|
||||
|
||||
Each BigchainDB node stores its subset of the RethinkDB data in one directory. You could set up the node's file system so that directory lives on its own hard drive. Furthermore, you could make that hard drive part of a [RAID](https://en.wikipedia.org/wiki/RAID) array, so that a second hard drive would always have a copy of the original. If the original hard drive fails, then the second hard drive could take its place and the node would continue to function. Meanwhile, the original hard drive could be replaced.
|
||||
|
||||
That's just one possible way of setting up the file system so as to provide extra reliability.
|
||||
|
||||
Another way to get similar reliability would be to mount the RethinkDB data directory on an [Amazon EBS](https://aws.amazon.com/ebs/) volume. Each Amazon EBS volume is, "automatically replicated within its Availability Zone to protect you from component failure, offering high availability and durability."
|
||||
|
||||
As with shard replication, live file-system replication protects against many failure modes, but it doesn't protect against them all. You should still consider having normal, "cold" backups.
|
||||
|
||||
|
||||
## rethinkdb dump (to a File)
|
||||
|
||||
RethinkDB can create an archive of all data in the cluster (or all data in specified tables), as a compressed file. According to [the RethinkDB blog post when that functionality became available](https://rethinkdb.com/blog/1.7-release/):
|
||||
|
||||
> Since the backup process is using client drivers, it automatically takes advantage of the MVCC [multiversion concurrency control] functionality built into RethinkDB. It will use some cluster resources, but will not lock out any of the clients, so you can safely run it on a live cluster.
|
||||
|
||||
To back up all the data in a BigchainDB cluster, the RethinkDB admin user must run a command like the following on one of the nodes:
|
||||
```text
|
||||
rethinkdb dump -e bigchain.bigchain -e bigchain.votes
|
||||
```
|
||||
|
||||
That should write a file named `rethinkdb_dump_<date>_<time>.tar.gz`. The `-e` option is used to specify which tables should be exported. You probably don't need to export the backlog table, but you definitely need to export the bigchain and votes tables.
|
||||
`bigchain.votes` means the `votes` table in the RethinkDB database named `bigchain`. It's possible that your database has a different name: [the database name is a BigchainDB configuration setting](../server-reference/configuration.html#database-host-database-port-database-name). The default name is `bigchain`. (Tip: you can see the values of all configuration settings using the `bigchaindb show-config` command.)
|
||||
|
||||
There's [more information about the `rethinkdb dump` command in the RethinkDB documentation](https://www.rethinkdb.com/docs/backup/). It also explains how to restore data to a cluster from an archive file.
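For example, restoring from such an archive on one of the nodes might look like the following (a sketch only; see the RethinkDB backup docs for the options relevant to your cluster):
```text
rethinkdb restore rethinkdb_dump_<date>_<time>.tar.gz
```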
|
||||
|
||||
**Notes**
|
||||
|
||||
* If the `rethinkdb dump` subcommand fails and the last line of the Traceback says "NameError: name 'file' is not defined", then you need to update your RethinkDB Python driver; do a `pip install --upgrade rethinkdb`
|
||||
|
||||
* It might take a long time to backup data this way. The more data, the longer it will take.
|
||||
|
||||
* You need enough free disk space to store the backup file.
|
||||
|
||||
* If a document changes after the backup starts but before it ends, then the changed document may not be in the final backup. This shouldn't be a problem for BigchainDB, because blocks and votes can't change anyway.
|
||||
|
||||
* `rethinkdb dump` saves data and secondary indexes, but does *not* save cluster metadata. You will need to recreate your cluster setup yourself after you run `rethinkdb restore`.
|
||||
|
||||
* RethinkDB also has [subcommands to import/export](https://gist.github.com/coffeemug/5894257) collections of JSON or CSV files. While one could use those for backup/restore, it wouldn't be very practical.
|
||||
|
||||
|
||||
## Client-Side Backup
|
||||
|
||||
In the future, it will be possible for clients to query for the blocks containing the transactions they care about, and for the votes on those blocks. They could save a local copy of those blocks and votes.
|
||||
|
||||
**How could we be sure blocks and votes from a client are valid?**
|
||||
|
||||
All blocks and votes are signed by cluster nodes (owned and operated by consortium members). Only cluster nodes can produce valid signatures because only cluster nodes have the necessary private keys. A client can't produce a valid signature for a block or vote.
|
||||
|
||||
**Could we restore an entire BigchainDB database using client-saved blocks and votes?**
|
||||
|
||||
Yes, in principle, but it would be difficult to know if you've recovered every block and vote. Votes link to the block they're voting on and to the previous block, so one could detect some missing blocks. It would be difficult to know if you've recovered all the votes.
|
||||
|
||||
|
||||
## Backup by Copying RethinkDB Data Files
|
||||
|
||||
It's _possible_ to back up a BigchainDB database by creating a point-in-time copy of the RethinkDB data files (on all nodes, at roughly the same time). It's not a very practical approach to backup: the resulting set of files will be much larger (collectively) than what one would get using `rethinkdb dump`, and there are no guarantees on how consistent that data will be, especially for recently-written data.
|
||||
|
||||
If you're curious about what's involved, see the [MongoDB documentation about "Backup by Copying Underlying Data Files"](https://docs.mongodb.com/manual/core/backups/#backup-with-file-copies). (Yes, that's documentation for MongoDB, but the principles are the same.)
|
||||
|
||||
See the last subsection of this page for a better way to use this idea.
|
||||
|
||||
|
||||
## Incremental or Continuous Backup
|
||||
|
||||
**Incremental backup** is where backup happens on a regular basis (e.g. daily), and each one only records the changes since the last backup.
|
||||
|
||||
**Continuous backup** might mean incremental backup on a very regular basis (e.g. every ten minutes), or it might mean backup of every database operation as it happens. The latter is also called transaction logging or continuous archiving.
|
||||
|
||||
At the time of writing, RethinkDB didn't have a built-in incremental or continuous backup capability, but the idea was raised in RethinkDB issues [#89](https://github.com/rethinkdb/rethinkdb/issues/89) and [#5890](https://github.com/rethinkdb/rethinkdb/issues/5890). On July 5, 2016, Daniel Mewes (of RethinkDB) wrote the following comment on issue #5890: "We would like to add this feature [continuous backup], but haven't started working on it yet."
|
||||
|
||||
To get a sense of what continuous backup might look like for RethinkDB, one can look at the continuous backup options available for MongoDB. MongoDB, the company, offers continuous backup with [Ops Manager](https://www.mongodb.com/products/ops-manager) (self-hosted) or [Cloud Manager](https://www.mongodb.com/cloud) (fully managed). Features include:
|
||||
|
||||
* It "continuously maintains backups, so if your MongoDB deployment experiences a failure, the most recent backup is only moments behind..."
|
||||
* It "offers point-in-time backups of replica sets and cluster-wide snapshots of sharded clusters. You can restore to precisely the moment you need, quickly and safely."
|
||||
* "You can rebuild entire running clusters, just from your backups."
|
||||
* It enables, "fast and seamless provisioning of new dev and test environments."
|
||||
|
||||
The MongoDB documentation has more [details about how Ops Manager Backup works](https://docs.opsmanager.mongodb.com/current/application/#backup).
|
||||
|
||||
Considerations for BigchainDB:
|
||||
|
||||
* We'd like the cost of backup to be low. To get a sense of the cost, MongoDB Cloud Manager backup [cost $30 / GB / year prepaid](https://www.mongodb.com/blog/post/lower-mms-backup-prices-backing-mongodb-now-easier-and-more-affordable). One thousand gigabytes backed up (i.e. about a terabyte) would cost 30 thousand US dollars per year. (That's just for the backup; there's also a cost per server per year.)
|
||||
* We'd like the backup to be decentralized, with no single point of control or single point of failure. (Note: some file systems have a single point of failure. For example, HDFS has one Namenode.)
|
||||
* We only care to back up blocks and votes, and once written, those never change. There are no updates or deletes, just new blocks and votes.
|
||||
|
||||
|
||||
## Combining RethinkDB Replication with Storage Snapshots
|
||||
|
||||
Although it's not advertised as such, RethinkDB's built-in replication feature is similar to continuous backup, except the "backup" (i.e. the set of replica shards) is spread across all the nodes. One could take that idea a bit further by creating a set of backup-only servers with one full backup:
|
||||
|
||||
* Give all the original BigchainDB nodes (RethinkDB nodes) the server tag `original`.
|
||||
* Set up a group of servers running RethinkDB only, and give them the server tag `backup`. The `backup` servers could be geographically separated from all the `original` nodes (or not; it's up to the consortium to decide).
|
||||
* Clients shouldn't be able to read from or write to servers in the `backup` set.
|
||||
* Send a RethinkDB reconfigure command to the RethinkDB cluster to make it so that the `original` set has the same number of replicas as before (or maybe one less), and the `backup` set has one replica. Also, make sure the `primary_replica_tag='original'` so that all primary shards live on the `original` nodes.
|
||||
|
||||
The [RethinkDB documentation on sharding and replication](https://www.rethinkdb.com/docs/sharding-and-replication/) has the details of how to set server tags and do RethinkDB reconfiguration.
|
||||
|
||||
Once you've set up a set of backup-only RethinkDB servers, you could make a point-in-time snapshot of their storage devices, as a form of backup.
|
||||
|
||||
You might want to disconnect the `backup` set from the `original` set first, and then wait for reads and writes in the `backup` set to stop. (The `backup` set should have only one copy of each shard, so there's no opportunity for inconsistency between shards of the `backup` set.)
|
||||
|
||||
You will want to re-connect the `backup` set to the `original` set as soon as possible, so it's able to catch up.
|
||||
|
||||
If something bad happens to the entire original BigchainDB cluster (including the `backup` set) and you need to restore it from a snapshot, you can, but before you make BigchainDB live, you should 1) delete all entries in the backlog table, 2) delete all blocks after the last voted-valid block, 3) delete all votes on the blocks deleted in part 2, and 4) rebuild the RethinkDB indexes.
|
||||
|
||||
**NOTE:** Sometimes snapshots are _incremental_. For example, [Amazon EBS snapshots](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html) are incremental, meaning "only the blocks on the device that have changed after your most recent snapshot are saved. **This minimizes the time required to create the snapshot and saves on storage costs.**" [Emphasis added]
|
@ -1,61 +0,0 @@
|
||||
# RethinkDB Requirements
|
||||
|
||||
[The RethinkDB documentation](https://rethinkdb.com/docs/) should be your first source of information about its requirements. This page serves mostly to document some of its more obscure requirements.
|
||||
|
||||
RethinkDB Server [will run on any modern OS](https://www.rethinkdb.com/docs/install/). Note that the Fedora package isn't officially supported. Also, official support for Windows is fairly recent ([April 2016](https://rethinkdb.com/blog/2.3-release/)).
|
||||
|
||||
|
||||
## Storage Requirements
|
||||
|
||||
When it comes to storage for RethinkDB, there are many things that are nice to have (e.g. SSDs, high-speed input/output [IOPS], replication, reliability, scalability, pay-for-what-you-use), but there are few _requirements_ other than:
|
||||
|
||||
1. have enough storage to store all your data (and its replicas), and
|
||||
2. make sure your storage solution (hardware and interconnects) can handle your expected read & write rates.
|
||||
|
||||
For RethinkDB's failover mechanisms to work, [every RethinkDB table must have at least three replicas](https://rethinkdb.com/docs/failover/) (i.e. a primary replica and two others). For example, if you want to store 10 GB of unique data, then you need at least 30 GB of storage. (Indexes and internal metadata are stored in RAM.)
|
||||
|
||||
As for the read & write rates, what do you expect those to be for your situation? It's not enough for the storage system alone to handle those rates: the interconnects between the nodes must also be able to handle them.
|
||||
|
||||
**Storage Notes Specific to RethinkDB**
|
||||
|
||||
* The RethinkDB storage engine has a number of SSD optimizations, so you _can_ benefit from using SSDs. ([source](https://www.rethinkdb.com/docs/architecture/))
|
||||
|
||||
* If you have an N-node RethinkDB cluster and 1) you want to use it to store an amount of data D (unique records, before replication), 2) you want the replication factor to be R (all tables), and 3) you want N shards (all tables), then each BigchainDB node must have storage space of at least R×D/N. (A short worked example follows this list.)
|
||||
|
||||
* RethinkDB tables can have [at most 64 shards](https://rethinkdb.com/limitations/). What does that imply? Suppose you only have one table, with 64 shards. How big could that table be? It depends on how much data can be stored in each node. If the maximum amount of data that a node can store is d, then the biggest-possible shard is d, and the biggest-possible table size is 64 times that. (All shard replicas would have to be stored on other nodes beyond the initial 64.) If there are two tables, the second table could also have 64 shards, stored on 64 other maxed-out nodes, so the total amount of unique data in the database would be (64 shards/table)×(2 tables)×d. In general, if you have T tables, the maximum amount of unique data that can be stored in the database (i.e. the amount of data before replication) is 64×T×d.
|
||||
|
||||
* When you set up storage for your RethinkDB data, you may have to select a filesystem. (Sometimes, the filesystem is already decided by the choice of storage.) We recommend using a filesystem that supports direct I/O (Input/Output). Many compressed or encrypted file systems don't support direct I/O. The ext4 filesystem supports direct I/O (but be careful: if you enable the data=journal mode, then direct I/O support will be disabled; the default is data=ordered). If your chosen filesystem supports direct I/O and you're using Linux, then you don't need to do anything to request or enable direct I/O. RethinkDB does that.
|
||||
|
||||
<p style="background-color: lightgrey;">What is direct I/O? It allows RethinkDB to write directly to the storage device (or use its own in-memory caching mechanisms), rather than relying on the operating system's file read and write caching mechanisms. (If you're using Linux, a write-to-file normally writes to the in-memory Page Cache first; only later does that Page Cache get flushed to disk. The Page Cache is also used when reading files.)</p>
|
||||
|
||||
* RethinkDB stores its data in a specific directory. You can tell RethinkDB _which_ directory using the RethinkDB config file, as explained below. In this documentation, we assume the directory is `/data`. If you set up a separate device (partition, RAID array, or logical volume) to store the RethinkDB data, then mount that device on `/data`.
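As a worked example of the R×D/N estimate above (illustrative numbers only): with D = 1 TB of unique data, a replication factor R = 3, and N = 6 nodes, each node needs at least 3 × 1 TB / 6 = 0.5 TB of storage for RethinkDB data, plus headroom for growth.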
|
||||
|
||||
|
||||
## Memory (RAM) Requirements
|
||||
|
||||
In their [FAQ](https://rethinkdb.com/faq/), RethinkDB recommends that, "RethinkDB servers have at least 2GB of RAM..." ([source](https://rethinkdb.com/faq/))
|
||||
|
||||
In particular: "RethinkDB requires data structures in RAM on each server proportional to the size of the data on that server’s disk, usually around 1% of the size of the total data set." ([source](https://rethinkdb.com/limitations/)) We asked what they meant by "total data set" and [they said](https://github.com/rethinkdb/rethinkdb/issues/5902#issuecomment-230860607) it's "referring to only the data stored on the particular server."
|
||||
|
||||
Also, "The storage engine is used in conjunction with a custom, B-Tree-aware caching engine which allows file sizes many orders of magnitude greater than the amount of available memory. RethinkDB can operate on a terabyte of data with about ten gigabytes of free RAM." ([source](https://www.rethinkdb.com/docs/architecture/)) (In this case, it's the _cluster_ which has a total of one terabyte of data, and it's the _cluster_ which has a total of ten gigabytes of RAM. That is, if you add up the RethinkDB RAM on all the servers, it's ten gigabytes.)
|
||||
|
||||
In response to our questions about RAM requirements, @danielmewes (of RethinkDB) [wrote](https://github.com/rethinkdb/rethinkdb/issues/5902#issuecomment-230860607):
|
||||
|
||||
> ... If you replicate the data, the amount of data per server increases accordingly, because multiple copies of the same data will be held by different servers in the cluster.
|
||||
|
||||
For example, if you increase the data replication factor from 1 to 2 (i.e. the primary plus one copy), then that will double the RAM needed for metadata. Also from @danielmewes:
|
||||
|
||||
> **For reasonable performance, you should probably aim at something closer to 5-10% of the data size.** [Emphasis added] The 1% is the bare minimum and doesn't include any caching. If you want to run near the minimum, you'll also need to manually lower RethinkDB's cache size through the `--cache-size` parameter to free up enough RAM for the metadata overhead...
|
||||
|
||||
RethinkDB has [documentation about its memory requirements](https://rethinkdb.com/docs/memory-usage/). You can use that page to get a better estimate of how much memory you'll need. In particular, note that RethinkDB automatically configures the cache size limit to be about half the available memory, but it can be no lower than 100 MB. As @danielmewes noted, you can manually change the cache size limit (e.g. to free up RAM for queries, metadata, or other things).
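For instance, a node operator could start RethinkDB with an explicit cache size. This is a sketch only, assuming the data directory is `/data`; the `--cache-size` value is in MB:
```text
rethinkdb --cache-size 2048 --directory /data
```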
|
||||
|
||||
If a RethinkDB process (on a server) runs out of RAM, the operating system will start swapping RAM out to disk, slowing everything down. According to @danielmewes:
|
||||
|
||||
> Going into swap is usually pretty bad for RethinkDB, and RethinkDB servers that have gone into swap often become so slow that other nodes in the cluster consider them unavailable and terminate the connection to them. I recommend adjusting RethinkDB's cache size conservatively to avoid this scenario. RethinkDB will still make use of additional RAM through the operating system's block cache (though less efficiently than when it can keep data in its own cache).
|
||||
|
||||
|
||||
## Filesystem Requirements
|
||||
|
||||
RethinkDB "supports most commonly used file systems" ([source](https://www.rethinkdb.com/docs/architecture/)) but it has [issues with BTRFS](https://github.com/rethinkdb/rethinkdb/issues/2781) (B-tree file system).
|
||||
|
||||
It's best to use a filesystem that supports direct I/O, because that will improve RethinkDB performance (if you tell RethinkDB to use direct I/O). Many compressed or encrypted filesystems don't support direct I/O.
|
@ -18,9 +18,8 @@ to some extent, on the decentralization of the associated consortium. See the pa
|
||||
|
||||
There are some pages and sections that will be of particular interest to anyone building or managing a BigchainDB cluster. In particular:
|
||||
|
||||
* [the page about how to set up and run a cluster node](production-nodes/setup-run-node.html),
|
||||
* [our production deployment template](production-deployment-template/index.html), and
|
||||
* [our old RethinkDB-based AWS deployment template](appendices/aws-testing-cluster.html).
|
||||
* [the page about how to set up and run a cluster node](production-nodes/setup-run-node.html) and
|
||||
* [our production deployment template](production-deployment-template/index.html).
|
||||
|
||||
|
||||
## Cluster DNS Records and SSL Certificates
|
||||
|
@ -32,8 +32,8 @@ To compute it, 1) construct an :term:`associative array` ``d`` containing
|
||||
``block.timestamp``, ``block.transactions``, ``block.node_pubkey``,
|
||||
``block.voters``, and their values. 2) compute ``id = hash_of_aa(d)``.
|
||||
There's pseudocode for the ``hash_of_aa()`` function
|
||||
in the `IPDB Protocol documentation page about cryptographic hashes
|
||||
<https://the-ipdb-protocol.readthedocs.io/en/latest/crypto-hashes.html#computing-the-hash-of-an-associative-array>`_.
|
||||
in the `IPDB Transaction Spec page about cryptographic hashes
|
||||
<https://the-ipdb-transaction-spec.readthedocs.io/en/latest/common-operations/crypto-hashes.html#computing-the-hash-of-an-associative-array>`_.
|
||||
The result (``id``) is a string: the block ID.
|
||||
An example is ``"b60adf655932bf47ef58c0bfb2dd276d4795b94346b36cbb477e10d7eb02cea8"``
|
||||
|
||||
@ -56,8 +56,8 @@ A list of the :ref:`transactions <The Transaction Model>` included in the block.
|
||||
|
||||
The public key of the node that created the block.
|
||||
It's a string.
|
||||
See the `IPDB Protocol documentation page about cryptographic keys & signatures
|
||||
<https://the-ipdb-protocol.readthedocs.io/en/latest/crypto-keys-and-sigs.html>`_.
|
||||
See the `IPDB Transaction Spec page about cryptographic keys & signatures
|
||||
<https://the-ipdb-transaction-spec.readthedocs.io/en/latest/common-operations/crypto-keys-and-sigs.html>`_.
|
||||
|
||||
|
||||
**block.voters**
|
||||
@ -82,8 +82,8 @@ To compute that:
|
||||
where ``private_key`` is the node's private key
|
||||
(i.e. ``node_pubkey`` and ``private_key`` are a key pair). There's pseudocode
|
||||
for the ``sig_of_aa()`` function
|
||||
on `the IPDB Protocol documentation page about cryptographic keys and signatures
|
||||
<https://the-ipdb-protocol.readthedocs.io/en/latest/crypto-keys-and-sigs.html#computing-the-signature-of-an-associative-array>`_.
|
||||
on `the IPDB Transaction Spec page about cryptographic keys and signatures
|
||||
<https://the-ipdb-transaction-spec.readthedocs.io/en/latest/common-operations/crypto-keys-and-sigs.html#computing-the-signature-of-an-associative-array>`_.
|
||||
|
||||
.. note::
|
||||
|
||||
|
@ -35,8 +35,8 @@ The JSON Keys in a Vote
|
||||
The public key of the node which cast this vote.
|
||||
It's a string.
|
||||
For more information about public keys,
|
||||
see the `IPDB Protocol documentation page about cryptographic keys and signatures
|
||||
<https://the-ipdb-protocol.readthedocs.io/en/latest/crypto-keys-and-sigs.html>`_.
|
||||
see the `IPDB Transaction Spec page about cryptographic keys and signatures
|
||||
<https://the-ipdb-transaction-spec.readthedocs.io/en/latest/common-operations/crypto-keys-and-sigs.html>`_.
|
||||
|
||||
|
||||
**vote.voting_for_block**
|
||||
@ -92,8 +92,8 @@ To compute that:
|
||||
#. Compute ``signature = sig_of_aa(d, private_key)``, where ``private_key``
|
||||
is the node's private key (i.e. ``node_pubkey`` and ``private_key`` are a key pair).
|
||||
There's pseudocode for the ``sig_of_aa()`` function
|
||||
on `the IPDB Protocol documentation page about cryptographic keys and signatures
|
||||
<https://the-ipdb-protocol.readthedocs.io/en/latest/crypto-keys-and-sigs.html#computing-the-signature-of-an-associative-array>`_.
|
||||
on `the IPDB Transaction Spec page about cryptographic keys and signatures
|
||||
<https://the-ipdb-transaction-spec.readthedocs.io/en/latest/common-operations/crypto-keys-and-sigs.html#computing-the-signature-of-an-associative-array>`_.
|
||||
|
||||
|
||||
The Vote Schema
|
||||
|
@ -19,12 +19,6 @@ Note that there are a few kinds of nodes:
|
||||
* [Set up a local BigchainDB node for development, experimenting and testing](dev-and-test/index.html)
|
||||
* [Set up and run a BigchainDB cluster](clusters.html)
|
||||
|
||||
There are some old RethinkDB-based deployment instructions as well:
|
||||
|
||||
* [Deploy a RethinkDB-based testing cluster on AWS](appendices/aws-testing-cluster.html)
|
||||
|
||||
Instructions for setting up a client will be provided once there's a public test net.
|
||||
|
||||
|
||||
## Can I Help?
|
||||
|
||||
|
@ -1,72 +1,70 @@
|
||||
# Quickstart
|
||||
|
||||
This page has instructions to set up a single stand-alone BigchainDB node for learning or experimenting. Instructions for other cases are [elsewhere](introduction.html). We will assume you're using Ubuntu 16.04 or similar. You can also try [running BigchainDB with Docker](appendices/run-with-docker.html).
|
||||
<style media="screen" type="text/css">
|
||||
.button {
|
||||
border-top: 1px solid #96d1f8;
|
||||
background: #65a9d7;
|
||||
background: -webkit-gradient(linear, left top, left bottom, from(#3e779d), to(#65a9d7));
|
||||
background: -webkit-linear-gradient(top, #3e779d, #65a9d7);
|
||||
background: -moz-linear-gradient(top, #3e779d, #65a9d7);
|
||||
background: -ms-linear-gradient(top, #3e779d, #65a9d7);
|
||||
background: -o-linear-gradient(top, #3e779d, #65a9d7);
|
||||
padding: 8.5px 17px;
|
||||
-webkit-border-radius: 3px;
|
||||
-moz-border-radius: 3px;
|
||||
border-radius: 3px;
|
||||
-webkit-box-shadow: rgba(0,0,0,1) 0 1px 0;
|
||||
-moz-box-shadow: rgba(0,0,0,1) 0 1px 0;
|
||||
box-shadow: rgba(0,0,0,1) 0 1px 0;
|
||||
text-shadow: rgba(0,0,0,.4) 0 1px 0;
|
||||
color: white;
|
||||
font-size: 16px;
|
||||
font-family: Arial, Sans-Serif;
|
||||
text-decoration: none;
|
||||
vertical-align: middle;
|
||||
}
|
||||
.button:hover {
|
||||
border-top-color: #28597a;
|
||||
background: #28597a;
|
||||
color: #ccc;
|
||||
}
|
||||
.button:active {
|
||||
border-top-color: #1b435e;
|
||||
background: #1b435e;
|
||||
}
|
||||
a.button:visited {
|
||||
color: white
|
||||
}
|
||||
.buttondiv {
|
||||
margin-bottom: 1.5em;
|
||||
}
|
||||
</style>
|
||||
|
||||
A. Install MongoDB as the database backend. (There are other options but you can ignore them for now.)
|
||||
## Try BigchainDB
|
||||
|
||||
[Install MongoDB Server 3.4+](https://docs.mongodb.com/manual/tutorial/install-mongodb-on-ubuntu/)
|
||||
Create a BigchainDB transaction and post it to a BigchainDB network in 20 seconds or less:
|
||||
|
||||
B. To run MongoDB with default database path i.e. /data/db, open a Terminal and run the following command:
|
||||
```text
|
||||
$ sudo mkdir -p /data/db
|
||||
```
|
||||
<div class="buttondiv">
|
||||
<a class="button" href="https://www.bigchaindb.com/getstarted/">Try BigchainDB Now</a>
|
||||
</div>
|
||||
|
||||
C. Assign rwx (read/write/execute) permissions to the user for the default database directory:
|
||||
```text
|
||||
$ sudo chmod -R 700 /data/db
|
||||
```
|
||||
## Develop an App
|
||||
|
||||
D. Run MongoDB (but do not close this terminal):
|
||||
```text
|
||||
$ sudo mongod --replSet=bigchain-rs
|
||||
```
|
||||
To develop an app that talks to a BigchainDB network, you'll want a test network to develop and test against. IPDB is the Interplanetary Database. The IPDB Test Network is a free-to-use, publicly available BigchainDB network that you can test against.
|
||||
|
||||
E. Ubuntu 16.04 already has Python 3.5, so you don't need to install it, but you do need to install some other things within a new terminal:
|
||||
```text
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install libffi-dev libssl-dev
|
||||
```
|
||||
<div class="buttondiv">
|
||||
<a class="button" href="https://ipdb.io/#getstarted">Get started with IPDB</a>
|
||||
</div>
|
||||
|
||||
F. Get the latest version of pip and setuptools:
|
||||
```text
|
||||
$ sudo apt-get install python3-pip
|
||||
$ sudo pip3 install --upgrade pip setuptools
|
||||
```
|
||||
Regardless of which BigchainDB network you use, you'll probably use one of the [BigchainDB drivers or tools](https://www.bigchaindb.com/getstarted/#drivers).
|
||||
|
||||
G. Install the `bigchaindb` Python package from PyPI:
|
||||
```text
|
||||
$ sudo pip3 install bigchaindb
|
||||
```
|
||||
## Help Develop BigchainDB Server
|
||||
|
||||
In case you are having problems with installation or package/module versioning, please upgrade the relevant packages on your host by running one of the following commands:
|
||||
```text
|
||||
$ sudo pip3 install [packageName]==[packageVersion]
|
||||
To help develop BigchainDB Server (the core software in each BigchainDB node), read the [CONTRIBUTING.md file](https://github.com/bigchaindb/bigchaindb/blob/master/CONTRIBUTING.md). It includes instructions for deploying and testing a single BigchainDB node, and multi-node clusters.
|
||||
|
||||
OR
|
||||
## Old Quickstart
|
||||
|
||||
$ sudo pip3 install [packageName] --upgrade
|
||||
```
|
||||
|
||||
H. Configure BigchainDB Server:
|
||||
```text
|
||||
$ bigchaindb -y configure mongodb
|
||||
```
|
||||
|
||||
I. Run BigchainDB Server:
|
||||
```text
|
||||
$ bigchaindb start
|
||||
```
|
||||
|
||||
J. Verify BigchainDB Server setup by visiting the BigchainDB Root URL in your browser:
|
||||
|
||||
[http://127.0.0.1:9984/](http://127.0.0.1:9984/)
|
||||
|
||||
A correct installation will show you a JSON object with information about the API, docs, version, and your public key.
|
||||
|
||||
You now have a running BigchainDB Server and can post transactions to it.
|
||||
One way to do that is to use the BigchainDB Python Driver.
|
||||
|
||||
[Install the BigchainDB Python Driver (link)](https://docs.bigchaindb.com/projects/py-driver/en/latest/quickstart.html)
|
||||
If you want something like the old Quickstart page, i.e. some command-line instructions to set up and run a BigchainDB node, then go to the [bigchaindb/bigchaindb repository on GitHub](https://github.com/bigchaindb/bigchaindb) and check out the stack.sh file in the pkg/scripts/ directory. Note that it uses functions defined in the functions-common file in that directory.
|
||||
|
||||
<hr>
|
||||
|
||||
|
27
network/health-check.sh
Normal file
27
network/health-check.sh
Normal file
@ -0,0 +1,27 @@
|
||||
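#!/bin/bash
# Hit each node's Tendermint RPC endpoint (port 46657) and BigchainDB HTTP API
# (port 9984) to confirm that all four nodes in the test network respond.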
echo "########################################################################"
|
||||
echo "# #"
|
||||
echo "# NODE ONE #"
|
||||
echo "# #"
|
||||
echo "########################################################################"
|
||||
curl http://tendermint-one:46657/abci_query && curl http://bdb-one:9984/
|
||||
|
||||
echo "########################################################################"
|
||||
echo "# #"
|
||||
echo "# NODE TWO #"
|
||||
echo "# #"
|
||||
echo "########################################################################"
|
||||
curl http://tendermint-two:46657/abci_query && curl http://bdb-two:9984/
|
||||
|
||||
echo "########################################################################"
|
||||
echo "# #"
|
||||
echo "# NODE THREE #"
|
||||
echo "# #"
|
||||
echo "########################################################################"
|
||||
curl http://tendermint-three:46657/abci_query && curl http://bdb-three:9984/
|
||||
|
||||
echo "########################################################################"
|
||||
echo "# #"
|
||||
echo "# NODE FOUR #"
|
||||
echo "# #"
|
||||
echo "########################################################################"
|
||||
curl http://tendermint-four:46657/abci_query && curl http://bdb-four:9984/
|
18
network/node1/config.toml
Normal file
18
network/node1/config.toml
Normal file
@ -0,0 +1,18 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://bdb-one:46658"
|
||||
moniker = "anonymous"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[consensus]
|
||||
create_empty_blocks = false
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = ""
|
39
network/node1/genesis.json
Normal file
39
network/node1/genesis.json
Normal file
@ -0,0 +1,39 @@
|
||||
{
|
||||
"genesis_time": "0001-01-01T00:00:00Z",
|
||||
"chain_id": "test-chain-KPI1Ud",
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": {
|
||||
"type":"ed25519",
|
||||
"data":"60C55D531F87D9AA0DFA3AE2DC1842FBFB531DA2161BE3B8D6CC03948CFC39C8"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node1"
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "981B3879F24CAC4833AFC7EBB71AAEBE79C61E717D7791295B253E7A8E454518"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node2"
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "E8C4FEE10BA60982CBD9C41614CF42E31E3DF9FE50124BA4C0A5E3CEA897DA9F"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node3"
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "531248E1D7E35EDB26F0C19F3211FA5CFDA818B226F1F9206D0D2047B48B20B9"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
1
network/node1/priv_validator.json
Normal file
1
network/node1/priv_validator.json
Normal file
@ -0,0 +1 @@
|
||||
{"address":"F81B79DE3F8D8455F76BA1D2FCB62726D69B1253","pub_key":{"type":"ed25519","data":"60C55D531F87D9AA0DFA3AE2DC1842FBFB531DA2161BE3B8D6CC03948CFC39C8"},"last_height":0,"last_round":0,"last_step":0,"last_signature":null,"priv_key":{"type":"ed25519","data":"CD6BAD1433AA62AD9B384C49FD83507829FE39306FC0475903CCF0BA114FCCCB60C55D531F87D9AA0DFA3AE2DC1842FBFB531DA2161BE3B8D6CC03948CFC39C8"}}
|
19
network/node2/config.toml
Normal file
19
network/node2/config.toml
Normal file
@ -0,0 +1,19 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://bdb-two:46658"
|
||||
moniker = "anonymous"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[consensus]
|
||||
create_empty_blocks = false
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
# TODO peers
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = "tendermint-one:46656"
|
39
network/node2/genesis.json
Normal file
39
network/node2/genesis.json
Normal file
@ -0,0 +1,39 @@
|
||||
{
|
||||
"genesis_time": "0001-01-01T00:00:00Z",
|
||||
"chain_id": "test-chain-KPI1Ud",
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": {
|
||||
"type":"ed25519",
|
||||
"data":"60C55D531F87D9AA0DFA3AE2DC1842FBFB531DA2161BE3B8D6CC03948CFC39C8"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node1"
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "981B3879F24CAC4833AFC7EBB71AAEBE79C61E717D7791295B253E7A8E454518"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node2"
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "E8C4FEE10BA60982CBD9C41614CF42E31E3DF9FE50124BA4C0A5E3CEA897DA9F"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node3"
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "531248E1D7E35EDB26F0C19F3211FA5CFDA818B226F1F9206D0D2047B48B20B9"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
1
network/node2/priv_validator.json
Normal file
1
network/node2/priv_validator.json
Normal file
@ -0,0 +1 @@
|
||||
{"address":"0DDAB4527921A5E36C099A3260900E8B49F55096","pub_key":{"type":"ed25519","data":"981B3879F24CAC4833AFC7EBB71AAEBE79C61E717D7791295B253E7A8E454518"},"last_height":0,"last_round":0,"last_step":0,"last_signature":null,"priv_key":{"type":"ed25519","data":"BE14C56DA02B1FEBD101EF6EF986F3F2984C0ABE96FEE9FEC9616384CC4F71FF981B3879F24CAC4833AFC7EBB71AAEBE79C61E717D7791295B253E7A8E454518"}}
|
18
network/node3/config.toml
Normal file
18
network/node3/config.toml
Normal file
@ -0,0 +1,18 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://bdb-three:46658"
|
||||
moniker = "anonymous"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[consensus]
|
||||
create_empty_blocks = false
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = "tendermint-two:46656"
|
39
network/node3/genesis.json
Normal file
39
network/node3/genesis.json
Normal file
@ -0,0 +1,39 @@
|
||||
{
|
||||
"genesis_time": "0001-01-01T00:00:00Z",
|
||||
"chain_id": "test-chain-KPI1Ud",
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": {
|
||||
"type":"ed25519",
|
||||
"data":"60C55D531F87D9AA0DFA3AE2DC1842FBFB531DA2161BE3B8D6CC03948CFC39C8"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node1"
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "981B3879F24CAC4833AFC7EBB71AAEBE79C61E717D7791295B253E7A8E454518"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node2"
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "E8C4FEE10BA60982CBD9C41614CF42E31E3DF9FE50124BA4C0A5E3CEA897DA9F"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node3"
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "531248E1D7E35EDB26F0C19F3211FA5CFDA818B226F1F9206D0D2047B48B20B9"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
1
network/node3/priv_validator.json
Normal file
1
network/node3/priv_validator.json
Normal file
@ -0,0 +1 @@
|
||||
{"address":"F51FD23581F3FA49C68BBDC229461F24EBE202BE","pub_key":{"type":"ed25519","data":"E8C4FEE10BA60982CBD9C41614CF42E31E3DF9FE50124BA4C0A5E3CEA897DA9F"},"last_height":0,"last_round":0,"last_step":0,"last_signature":null,"priv_key":{"type":"ed25519","data":"E9849DEE8A524F00530EF1A486AD8E93A3AD2FCE4BD59F2F08282AD2BE3AE183E8C4FEE10BA60982CBD9C41614CF42E31E3DF9FE50124BA4C0A5E3CEA897DA9F"}}
|
20
network/node4/config.toml
Normal file
20
network/node4/config.toml
Normal file
@ -0,0 +1,20 @@
|
||||
# This is a TOML config file.
|
||||
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://bdb-four:46658"
|
||||
moniker = "anonymous"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[consensus]
|
||||
create_empty_blocks = false
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = "tendermint-one:46656,tendermint-three:46656"
|
39
network/node4/genesis.json
Normal file
39
network/node4/genesis.json
Normal file
@ -0,0 +1,39 @@
|
||||
{
|
||||
"genesis_time": "0001-01-01T00:00:00Z",
|
||||
"chain_id": "test-chain-KPI1Ud",
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": {
|
||||
"type":"ed25519",
|
||||
"data":"60C55D531F87D9AA0DFA3AE2DC1842FBFB531DA2161BE3B8D6CC03948CFC39C8"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node1"
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "981B3879F24CAC4833AFC7EBB71AAEBE79C61E717D7791295B253E7A8E454518"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node2"
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "E8C4FEE10BA60982CBD9C41614CF42E31E3DF9FE50124BA4C0A5E3CEA897DA9F"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node3"
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "531248E1D7E35EDB26F0C19F3211FA5CFDA818B226F1F9206D0D2047B48B20B9"
|
||||
},
|
||||
"power": 10,
|
||||
"name": "node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
1
network/node4/priv_validator.json
Normal file
1
network/node4/priv_validator.json
Normal file
@ -0,0 +1 @@
|
||||
{"address":"EB1E972D69E212E928CD98B581E50233E9C43F51","pub_key":{"type":"ed25519","data":"531248E1D7E35EDB26F0C19F3211FA5CFDA818B226F1F9206D0D2047B48B20B9"},"last_height":0,"last_round":0,"last_step":0,"last_signature":null,"priv_key":{"type":"ed25519","data":"864CA8AB5B2F3CBD42994381648D6F75E827E0D53C3B28872E03D7AEB2E10F14531248E1D7E35EDB26F0C19F3211FA5CFDA818B226F1F9206D0D2047B48B20B9"}}
|
61
pkg/scripts/Vagrantfile
vendored
Normal file
61
pkg/scripts/Vagrantfile
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
Vagrant.require_version ">= 1.8.7"
|
||||
unless Vagrant.has_plugin?("vagrant-vbguest")
|
||||
raise "Please install the vagrant-vbguest plugin by running `vagrant plugin install vagrant-vbguest`"
|
||||
end
|
||||
|
||||
unless Vagrant.has_plugin?("vagrant-cachier")
|
||||
raise "Please install the vagrant-cachier plugin by running `vagrant plugin install vagrant-cachier`"
|
||||
end
|
||||
|
||||
VAGRANTFILE_API_VERSION = "2"
|
||||
|
||||
MEMORY = 4096
|
||||
CPU_COUNT = 2
|
||||
|
||||
MOUNT_DIRS = {
|
||||
:bigchaindb => {:repo => "bigchaindb", :local => "/opt/stack/bigchaindb", :owner => "edxapp"},
|
||||
}
|
||||
|
||||
boxname = ENV['BOXNAME'] || "ubuntu/xenial64"
|
||||
tm_version = ENV['TM_VERSION']
|
||||
|
||||
$script = <<SCRIPT
|
||||
if [ ! -d /opt/stack/bigchaindb/pkg/scripts ]; then
|
||||
echo "Error: Base box is missing provisioning scripts." 1>&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
bash /opt/stack/bigchaindb/pkg/scripts/stack.sh
|
||||
|
||||
SCRIPT
|
||||
|
||||
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
|
||||
config.vm.box = boxname
|
||||
config.vm.box_check_update = false
|
||||
|
||||
config.vm.network :private_network, ip: "192.168.33.10"
|
||||
|
||||
|
||||
config.vm.network :forwarded_port, guest: 9984, host: 9984 # BDB
|
||||
|
||||
config.ssh.insert_key = true
|
||||
|
||||
config.vm.synced_folder "bigchaindb", "/opt/stack/bigchaindb"
|
||||
|
||||
|
||||
config.vm.provider :virtualbox do |vb|
|
||||
vb.customize ["modifyvm", :id, "--memory", MEMORY.to_s]
|
||||
vb.customize ["modifyvm", :id, "--cpus", CPU_COUNT.to_s]
|
||||
end
|
||||
|
||||
# Use vagrant-vbguest plugin to make sure Guest Additions are in sync
|
||||
config.vbguest.auto_reboot = true
|
||||
config.vbguest.auto_update = true
|
||||
|
||||
config.vm.provision "shell", inline: $script,
|
||||
privileged: false,
|
||||
env: {
|
||||
:TM_VERSION => ENV['TM_VERSION'],
|
||||
:MONGO_VERSION => ENV['MONGO_VERSION']
|
||||
}
|
||||
end
|
390
pkg/scripts/functions-common
Normal file
390
pkg/scripts/functions-common
Normal file
@ -0,0 +1,390 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Save trace setting
|
||||
_XTRACE_FUNCTIONS_COMMON=$(set +o | grep xtrace)
|
||||
set +o xtrace
|
||||
|
||||
# Distro Functions
|
||||
# ================
|
||||
|
||||
# Determine OS Vendor, Release and Update
|
||||
|
||||
#
|
||||
# NOTE : For portability, you almost certainly do not want to use
|
||||
# these variables directly! The "is_*" functions defined below this
|
||||
# bundle up compatible platforms under larger umbrellas that we have
|
||||
# determined are compatible enough (e.g. is_ubuntu covers Ubuntu &
|
||||
# Debian, is_fedora covers RPM-based distros). Higher-level functions
|
||||
# such as "install_package" further abstract things in better ways.
|
||||
#
|
||||
# ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc
|
||||
# ``os_RELEASE`` - major release: ``16.04`` (Ubuntu), ``23`` (Fedora)
|
||||
# ``os_PACKAGE`` - package type: ``deb`` or ``rpm``
|
||||
# ``os_CODENAME`` - vendor's codename for release: ``xenial``
|
||||
|
||||
declare -g os_VENDOR os_RELEASE os_PACKAGE os_CODENAME
|
||||
|
||||
# Make a *best effort* attempt to install lsb_release packages for the
|
||||
# user if not available. Note can't use generic install_package*
|
||||
# because they depend on this!
|
||||
function _ensure_lsb_release {
|
||||
if [[ -x $(command -v lsb_release 2>/dev/null) ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
if [[ -x $(command -v apt-get 2>/dev/null) ]]; then
|
||||
sudo apt-get install -y lsb-release
|
||||
elif [[ -x $(command -v zypper 2>/dev/null) ]]; then
|
||||
sudo zypper -n install lsb-release
|
||||
elif [[ -x $(command -v dnf 2>/dev/null) ]]; then
|
||||
sudo dnf install -y redhat-lsb-core
|
||||
elif [[ -x $(command -v yum 2>/dev/null) ]]; then
|
||||
# all RH platforms (fedora, centos, rhel) have this pkg
|
||||
sudo yum install -y redhat-lsb-core
|
||||
else
|
||||
die $LINENO "Unable to find or auto-install lsb_release"
|
||||
fi
|
||||
}
|
||||
|
||||
# GetOSVersion
|
||||
# Set the following variables:
|
||||
# - os_RELEASE
|
||||
# - os_CODENAME
|
||||
# - os_VENDOR
|
||||
# - os_PACKAGE
|
||||
function GetOSVersion {
|
||||
# We only support distros that provide a sane lsb_release
|
||||
_ensure_lsb_release
|
||||
|
||||
os_RELEASE=$(lsb_release -r -s)
|
||||
os_CODENAME=$(lsb_release -c -s)
|
||||
os_VENDOR=$(lsb_release -i -s)
|
||||
|
||||
if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then
|
||||
os_PACKAGE="deb"
|
||||
else
|
||||
os_PACKAGE="rpm"
|
||||
fi
|
||||
|
||||
typeset -xr os_VENDOR
|
||||
typeset -xr os_RELEASE
|
||||
typeset -xr os_PACKAGE
|
||||
typeset -xr os_CODENAME
|
||||
}
|
||||
|
||||
# Translate the OS version values into common nomenclature
|
||||
# Sets global ``DISTRO`` from the ``os_*`` values
|
||||
declare -g DISTRO
|
||||
|
||||
function GetDistro {
|
||||
GetOSVersion
|
||||
if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) || \
|
||||
"$os_VENDOR" =~ (LinuxMint) ]]; then
|
||||
# 'Everyone' refers to Ubuntu / Debian / Mint releases by
|
||||
# the code name adjective
|
||||
DISTRO=$os_CODENAME
|
||||
elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
|
||||
# For Fedora, just use 'f' and the release
|
||||
DISTRO="f$os_RELEASE"
|
||||
elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
|
||||
DISTRO="opensuse-$os_RELEASE"
|
||||
elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
|
||||
# just use major release
|
||||
DISTRO="sle${os_RELEASE%.*}"
|
||||
elif [[ "$os_VENDOR" =~ (Red.*Hat) || \
|
||||
"$os_VENDOR" =~ (CentOS) || \
|
||||
"$os_VENDOR" =~ (Scientific) || \
|
||||
"$os_VENDOR" =~ (OracleServer) || \
|
||||
"$os_VENDOR" =~ (Virtuozzo) ]]; then
|
||||
# Drop the . release as we assume it's compatible
|
||||
# XXX re-evaluate when we get RHEL10
|
||||
DISTRO="rhel${os_RELEASE::1}"
|
||||
elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
|
||||
DISTRO="xs${os_RELEASE%.*}"
|
||||
elif [[ "$os_VENDOR" =~ (kvmibm) ]]; then
|
||||
DISTRO="${os_VENDOR}${os_RELEASE::1}"
|
||||
else
|
||||
# We can't make a good choice here. Setting a sensible DISTRO
|
||||
# is part of the problem, but not the major issue -- we really
|
||||
# only use DISTRO in the code as a fine-filter.
|
||||
#
|
||||
# The bigger problem is categorising the system into one of
|
||||
# our two big categories as Ubuntu/Debian-ish or
|
||||
# Fedora/CentOS-ish.
|
||||
#
|
||||
# The setting of os_PACKAGE above is only set to "deb" based
|
||||
# on a hard-coded list of vendor names ... thus we will
|
||||
# default to thinking unknown distros are RPM based
|
||||
# (ie. is_ubuntu does not match). But the platform will then
|
||||
# also not match in is_fedora, because that also has a list of
|
||||
# names.
|
||||
#
|
||||
# So, if you are reading this, getting your distro supported
|
||||
# is really about making sure it matches correctly in these
|
||||
# functions. Then you can choose a sensible way to construct
|
||||
# DISTRO based on your distros release approach.
|
||||
die $LINENO "Unable to determine DISTRO, can not continue."
|
||||
fi
|
||||
typeset -xr DISTRO
|
||||
}
|
||||
|
||||
# Utility function for checking machine architecture
|
||||
# is_arch arch-type
|
||||
function is_arch {
|
||||
[[ "$(uname -m)" == "$1" ]]
|
||||
}
|
||||
|
||||
# Determine if current distribution is an Oracle distribution
|
||||
# is_oraclelinux
|
||||
function is_oraclelinux {
|
||||
if [[ -z "$os_VENDOR" ]]; then
|
||||
GetOSVersion
|
||||
fi
|
||||
|
||||
[ "$os_VENDOR" = "OracleServer" ]
|
||||
}
|
||||
|
||||
|
||||
# Determine if current distribution is a Fedora-based distribution
|
||||
# (Fedora, RHEL, CentOS, etc).
|
||||
# is_fedora
|
||||
function is_fedora {
|
||||
if [[ -z "$os_VENDOR" ]]; then
|
||||
GetOSVersion
|
||||
fi
|
||||
|
||||
[ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
|
||||
[ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \
|
||||
[ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleServer" ] || \
|
||||
[ "$os_VENDOR" = "Virtuozzo" ] || [ "$os_VENDOR" = "kvmibm" ]
|
||||
}
|
||||
|
||||
|
||||
# Determine if current distribution is a SUSE-based distribution
|
||||
# (openSUSE, SLE).
|
||||
# is_suse
|
||||
function is_suse {
|
||||
if [[ -z "$os_VENDOR" ]]; then
|
||||
GetOSVersion
|
||||
fi
|
||||
|
||||
[[ "$os_VENDOR" =~ (openSUSE) || "$os_VENDOR" == "SUSE LINUX" ]]
|
||||
}
|
||||
|
||||
|
||||
# Determine if current distribution is an Ubuntu-based distribution
|
||||
# It will also detect non-Ubuntu but Debian-based distros
|
||||
# is_ubuntu
|
||||
function is_ubuntu {
|
||||
if [[ -z "$os_PACKAGE" ]]; then
|
||||
GetOSVersion
|
||||
fi
|
||||
[ "$os_PACKAGE" = "deb" ]
|
||||
}
|
||||
|
||||
# Package Functions
|
||||
# =================
|
||||
|
||||
# Wrapper for ``apt-get update`` to try multiple times on the update
|
||||
# to address bad package mirrors (which happen all the time).
|
||||
function apt_get_update {
|
||||
# only do this once per run
|
||||
if [[ "$REPOS_UPDATED" == "True" && "$RETRY_UPDATE" != "True" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
# bail if we are offline
|
||||
[[ "$OFFLINE" = "True" ]] && return
|
||||
|
||||
local sudo="sudo"
|
||||
[[ "$(id -u)" = "0" ]] && sudo="env"
|
||||
|
||||
# time all the apt operations
|
||||
time_start "apt-get-update"
|
||||
|
||||
local proxies="http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} no_proxy=${no_proxy:-} "
|
||||
local update_cmd="$sudo $proxies apt-get update"
|
||||
if ! timeout 300 sh -c "while ! $update_cmd; do sleep 30; done"; then
|
||||
die $LINENO "Failed to update apt repos, we're dead now"
|
||||
fi
|
||||
|
||||
REPOS_UPDATED=True
|
||||
# stop the clock
|
||||
time_stop "apt-get-update"
|
||||
}
|
||||
|
||||
# Wrapper for ``apt-get`` to set cache and proxy environment variables
|
||||
# Uses globals ``OFFLINE``, ``*_proxy``
|
||||
# apt_get operation package [package ...]
|
||||
function apt_get {
|
||||
local xtrace result
|
||||
xtrace=$(set +o | grep xtrace)
|
||||
set +o xtrace
|
||||
|
||||
[[ "$OFFLINE" = "True" || -z "$@" ]] && return
|
||||
local sudo="sudo"
|
||||
[[ "$(id -u)" = "0" ]] && sudo="env"
|
||||
|
||||
# time all the apt operations
|
||||
time_start "apt-get"
|
||||
|
||||
$xtrace
|
||||
|
||||
$sudo DEBIAN_FRONTEND=noninteractive \
|
||||
http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \
|
||||
no_proxy=${no_proxy:-} \
|
||||
apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" < /dev/null
|
||||
result=$?
|
||||
|
||||
# stop the clock
|
||||
time_stop "apt-get"
|
||||
return $result
|
||||
}
|
||||
|
||||
|
||||
# Distro-agnostic package installer
|
||||
# Uses globals ``NO_UPDATE_REPOS``, ``REPOS_UPDATED``, ``RETRY_UPDATE``
|
||||
# install_package package [package ...]
|
||||
function update_package_repo {
|
||||
NO_UPDATE_REPOS=${NO_UPDATE_REPOS:-False}
|
||||
REPOS_UPDATED=${REPOS_UPDATED:-False}
|
||||
RETRY_UPDATE=${RETRY_UPDATE:-False}
|
||||
|
||||
if [[ "$NO_UPDATE_REPOS" = "True" ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if is_ubuntu; then
|
||||
apt_get_update
|
||||
fi
|
||||
}
|
||||
|
||||
function real_install_package {
|
||||
if is_ubuntu; then
|
||||
apt_get install "$@"
|
||||
elif is_fedora; then
|
||||
yum_install "$@"
|
||||
elif is_suse; then
|
||||
zypper_install "$@"
|
||||
else
|
||||
exit_distro_not_supported "installing packages"
|
||||
fi
|
||||
}
|
||||
|
||||
# Distro-agnostic package installer
|
||||
# install_package package [package ...]
|
||||
function install_package {
|
||||
update_package_repo
|
||||
if ! real_install_package "$@"; then
|
||||
RETRY_UPDATE=True update_package_repo && real_install_package "$@"
|
||||
fi
|
||||
}
|
||||
|
||||
# Distro-agnostic function to tell if a package is installed
|
||||
# is_package_installed package [package ...]
|
||||
function is_package_installed {
|
||||
if [[ -z "$@" ]]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [[ -z "$os_PACKAGE" ]]; then
|
||||
GetOSVersion
|
||||
fi
|
||||
|
||||
if [[ "$os_PACKAGE" = "deb" ]]; then
|
||||
dpkg -s "$@" > /dev/null 2> /dev/null
|
||||
elif [[ "$os_PACKAGE" = "rpm" ]]; then
|
||||
rpm --quiet -q "$@"
|
||||
else
|
||||
exit_distro_not_supported "finding if a package is installed"
|
||||
fi
|
||||
}
|
||||
|
||||
# Distro-agnostic package uninstaller
|
||||
# uninstall_package package [package ...]
|
||||
function uninstall_package {
|
||||
if is_ubuntu; then
|
||||
apt_get purge "$@"
|
||||
elif is_fedora; then
|
||||
sudo ${YUM:-yum} remove -y "$@" ||:
|
||||
elif is_suse; then
|
||||
sudo zypper remove -y "$@" ||:
|
||||
else
|
||||
exit_distro_not_supported "uninstalling packages"
|
||||
fi
|
||||
}
|
||||
|
||||
# Wrapper for ``yum`` to set proxy environment variables
|
||||
# Uses globals ``OFFLINE``, ``*_proxy``, ``YUM``
|
||||
# yum_install package [package ...]
|
||||
function yum_install {
|
||||
local result parse_yum_result
|
||||
time_start "yum_install"
|
||||
|
||||
# This is a bit tricky, because yum -y assumes missing or failed
|
||||
# packages are OK (see [1]). We want devstack to stop if we are
|
||||
# installing missing packages.
|
||||
#
|
||||
# Thus we manually match on the output (stack.sh runs in a fixed
|
||||
# locale, so lang shouldn't change).
|
||||
#
|
||||
# If yum returns !0, we echo the result as "YUM_FAILED" and return
|
||||
# that from the awk (we're subverting -e with this trick).
|
||||
# Otherwise we use awk to look for failure strings and return "2"
|
||||
# to indicate a terminal failure.
|
||||
#
|
||||
# [1] https://bugzilla.redhat.com/show_bug.cgi?id=965567
|
||||
parse_yum_result=' \
|
||||
BEGIN { result=0 } \
|
||||
/^YUM_FAILED/ { result=$2 } \
|
||||
/^No package/ { result=2 } \
|
||||
/^Failed:/ { result=2 } \
|
||||
//{ print } \
|
||||
END { exit result }'
|
||||
(sudo_with_proxies "${YUM:-yum}" install -y "$@" 2>&1 || echo YUM_FAILED $?) \
|
||||
| awk "$parse_yum_result" && result=$? || result=$?
|
||||
|
||||
time_stop "yum_install"
|
||||
|
||||
# if we return 1, then the wrapper functions will run an update
|
||||
# and try installing the package again as a defense against bad
|
||||
# mirrors. This can hide failures, especially when we have
|
||||
# packages that are in the "Failed:" section because their rpm
|
||||
# install scripts failed to run correctly (in this case, the
|
||||
# package looks installed, so when the retry happens we just think
|
||||
# the package is OK, and incorrectly continue on).
|
||||
if [ "$result" == 2 ]; then
|
||||
die "Detected fatal package install failure"
|
||||
fi
|
||||
|
||||
return "$result"
|
||||
}
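# NOTE: illustrative sketch only, not part of this diff. The same
# "subvert errexit" idiom in isolation: the real command's failure becomes a
# line of output, and awk decides the final exit status of the pipeline.
#
#   parse='/^CMD_FAILED/ { rc=$2 } { print } END { exit rc }'
#   (some_command || echo CMD_FAILED $?) | awk "BEGIN { rc=0 } $parse"
#   echo "exit status seen by the caller: $?"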
|
||||
|
||||
# zypper wrapper to set arguments correctly
|
||||
# Uses globals ``OFFLINE``, ``*_proxy``
|
||||
# zypper_install package [package ...]
|
||||
function zypper_install {
|
||||
local sudo="sudo"
|
||||
[[ "$(id -u)" = "0" ]] && sudo="env"
|
||||
$sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \
|
||||
no_proxy="${no_proxy:-}" \
|
||||
zypper --non-interactive install --auto-agree-with-licenses "$@"
|
||||
}
|
||||
|
||||
function install_tendermint_bin {
|
||||
wget https://s3-us-west-2.amazonaws.com/tendermint/binaries/tendermint/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip
|
||||
unzip tendermint_${TM_VERSION}_linux_amd64.zip
|
||||
sudo mv tendermint /usr/local/bin
|
||||
}
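# NOTE: illustrative usage sketch, not part of this diff. TM_VERSION must be
# set by the caller before this runs (stack.sh below defaults it to 0.12.1):
#
#   TM_VERSION=0.12.1 install_tendermint_bin
#   tendermint version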
|
||||
|
||||
# Find out if a process exists by partial name.
|
||||
# is_running name
|
||||
function is_running {
|
||||
local name=$1
|
||||
ps auxw | grep -v grep | grep ${name} > /dev/null
|
||||
local exitcode=$?
|
||||
return $exitcode
|
||||
}
|
||||
|
||||
# Restore xtrace
|
||||
$_XTRACE_FUNCTIONS_COMMON
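# NOTE: illustrative usage sketch, not part of this diff. A caller such as
# stack.sh composes these helpers roughly like this (the package name is only
# an example):
#
#   source functions-common
#   is_package_installed unzip || install_package unzip
#   is_running "mongod" && echo "mongod is already running"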
|
81
pkg/scripts/install_stack.sh
Normal file
@ -0,0 +1,81 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
function usage
|
||||
{
|
||||
cat << EOM
|
||||
|
||||
Usage: $ bash ${0##*/} [-v] [-h]
|
||||
|
||||
Installs the BigchainDB devstack or network.
|
||||
|
||||
ENV[STACK]
|
||||
Set the STACK environment variable to either 'devstack' or 'network'.
|
||||
Network mimics a production network environment with multiple BDB
|
||||
nodes, whereas devstack is useful if you plan on modifying the
|
||||
bigchaindb code.
|
||||
|
||||
ENV[GIT_BRANCH]
|
||||
Set the GIT_BRANCH environment variable to the bigchaindb repo branch to use.
|
||||
|
||||
ENV[TM_VERSION]
|
||||
Tendermint version to use for the devstack setup
|
||||
|
||||
ENV[MONGO_VERSION]
|
||||
MongoDB version to use with the devstack setup
|
||||
|
||||
-v
|
||||
Verbose output from ansible playbooks.
|
||||
|
||||
-h
|
||||
Show this help and exit.
|
||||
|
||||
EOM
|
||||
}
|
||||
|
||||
# GIT_BRANCH
|
||||
git_branch=${GIT_BRANCH:-}
|
||||
|
||||
while getopts "h" opt; do
|
||||
case "$opt" in
|
||||
h)
|
||||
usage
|
||||
exit
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ ! $git_branch ]]; then
|
||||
echo "You must specify GIT_BRANCH before running."
|
||||
echo
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkdir -p logs
|
||||
log_file=logs/install-$(date +%Y%m%d-%H%M%S).log
|
||||
exec > >(tee $log_file) 2>&1
|
||||
echo "Capturing output to $log_file"
|
||||
echo "Installation started at $(date '+%Y-%m-%d %H:%M:%S')"
|
||||
|
||||
function finish {
|
||||
echo "Installation finished at $(date '+%Y-%m-%d %H:%M:%S')"
|
||||
}
|
||||
trap finish EXIT
|
||||
|
||||
export GIT_BRANCH=$git_branch
|
||||
echo "Using bigchaindb branch '$GIT_BRANCH'"
|
||||
|
||||
git clone https://github.com/bigchaindb/bigchaindb.git -b $GIT_BRANCH || true
|
||||
curl -fOL# https://raw.githubusercontent.com/bigchaindb/bigchaindb/${GIT_BRANCH}/pkg/scripts/Vagrantfile
|
||||
vagrant up --provider virtualbox
|
||||
|
||||
echo -e "Finished installing! You may now log in using 'vagrant ssh'"
|
||||
echo -e "Once inside the VM do 'tmux attach' to attach to tmux session running all services"
|
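# NOTE: illustrative usage sketch, not part of this diff. The installer is
# driven by the environment variables described in its usage text; the values
# below are only examples (TM_VERSION/MONGO_VERSION match the stack.sh
# defaults):
#
#   export STACK=devstack
#   export GIT_BRANCH=master
#   export TM_VERSION=0.12.1
#   export MONGO_VERSION=3.4
#   bash install_stack.sh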
138
pkg/scripts/stack.sh
Normal file
@ -0,0 +1,138 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# ``stack.sh`` is an opinionated BigchainDB developer installation. It
|
||||
# installs and configures **BigchainDB Server**, **Tendermint Server**,
|
||||
# and **MongoDB**.
|
||||
|
||||
# Print the commands being run so that we can see the command that triggers
|
||||
# an error. It is also useful for following along as the install occurs.
|
||||
set -o xtrace
|
||||
|
||||
# Make sure umask is sane
|
||||
umask 022
|
||||
|
||||
# Keep track of the stack.sh directory
|
||||
TOP_DIR=$(cd $(dirname "$0") && pwd)
|
||||
BASE_DIR=${TOP_DIR}/../..
|
||||
|
||||
# Check for uninitialized variables, a big cause of bugs
|
||||
NOUNSET=${NOUNSET:-}
|
||||
if [[ -n "$NOUNSET" ]]; then
|
||||
set -o nounset
|
||||
fi
|
||||
|
||||
# Set default MongoDB version
|
||||
if [[ "$MONGO_VERSION" = "" ]]; then
|
||||
MONGO_VERSION="3.4"
|
||||
fi
|
||||
|
||||
# Set default tendermint version
|
||||
if [[ "$TM_VERSION" = "" ]]; then
|
||||
TM_VERSION="0.12.1"
|
||||
fi
|
||||
|
||||
# Configuration
|
||||
# =============
|
||||
|
||||
# Source utility functions
|
||||
source ${TOP_DIR}/functions-common
|
||||
|
||||
# Configure Distro Repositories
|
||||
# -----------------------------
|
||||
|
||||
# For Debian/Ubuntu, make apt retry network ops on its own, and add the
# MongoDB public key and package source repo.
|
||||
if is_ubuntu; then
|
||||
echo 'APT::Acquire::Retries "20";' | sudo tee /etc/apt/apt.conf.d/80retry >/dev/null
|
||||
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 0C49F3730359A14518585931BC711F9BA15703C6
|
||||
echo "deb [ arch=amd64,arm64 ] http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/${MONGO_VERSION} multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-${MONGO_VERSION}.list
|
||||
fi
|
||||
|
||||
# Ensure required packages are installed
|
||||
# --------------------------------------
|
||||
|
||||
is_package_installed python3 || install_package python3
|
||||
is_package_installed python3-pip || install_package python3-pip
|
||||
is_package_installed libffi-dev || install_package libffi-dev
|
||||
is_package_installed libssl-dev || install_package libssl-dev
|
||||
is_package_installed tmux || install_package tmux
|
||||
is_package_installed mongodb-org || install_package mongodb-org
|
||||
is_package_installed unzip || install_package unzip
|
||||
install_tendermint_bin
|
||||
|
||||
# Clean system if re-running the script
|
||||
OIFS=$IFS
|
||||
IFS=':'
|
||||
session_str=$(tmux ls | grep -w bdb-dev || true)
if [[ $session_str != "" ]]; then
session=($session_str)
tmux kill-session -t ${session[0]}
fi
IFS=$OIFS
|
||||
|
||||
# Stop bigchaindb service
|
||||
if is_running "bigchaindb"; then
|
||||
sudo pkill bigchaindb
|
||||
fi
|
||||
|
||||
# Stop tendermint service
|
||||
if is_running "tendermint"; then
|
||||
sudo pkill tendermint
|
||||
fi
|
||||
|
||||
# Stop mongodb service
|
||||
if is_running "monogod"; then
|
||||
sudo pkill mongod
|
||||
fi
|
||||
|
||||
sleep 5
|
||||
|
||||
# Create data dir for mongod
|
||||
if [[ ! -d /data/db ]]; then
|
||||
sudo mkdir -p /data/db
|
||||
fi
|
||||
sudo chmod -R 700 /data/db
|
||||
|
||||
# Configure tendermint
|
||||
tendermint init
|
||||
|
||||
# Configure tmux
|
||||
cd ${BASE_DIR}
|
||||
tmux new-session -s bdb-dev -n bdb -d
|
||||
tmux new-window -n mdb
|
||||
tmux new-window -n tendermint
|
||||
|
||||
# Start MongoDB
|
||||
tmux send-keys -t bdb-dev:mdb 'sudo mongod --replSet=bigchain-rs' C-m
|
||||
|
||||
# Start BigchainDB
|
||||
tmux send-keys -t bdb-dev:bdb 'sudo python3 setup.py install && bigchaindb -y configure mongodb && bigchaindb -l DEBUG start' C-m
|
||||
|
||||
while ! is_running "bigchaindb"; do
|
||||
echo "Waiting bigchaindb service to start"
|
||||
sleep 5
|
||||
done
|
||||
|
||||
# Start tendermint service
|
||||
tmux send-keys -t bdb-dev:tendermint 'tendermint init && tendermint unsafe_reset_all && tendermint node' C-m
|
||||
|
||||
# Configure Error Traps
|
||||
# ---------------------
|
||||
|
||||
# Kill background processes on exit
|
||||
trap exit_trap EXIT
|
||||
function exit_trap {
|
||||
exit $?
|
||||
}
|
||||
# Exit on any errors so that errors don't compound, and kill any services that were already started
|
||||
trap err_trap ERR
|
||||
function err_trap {
|
||||
local r=$?
|
||||
tmux kill-session -t bdb-dev
|
||||
set +o xtrace
|
||||
exit $r
|
||||
}
|
||||
|
||||
# Begin trapping error exit codes
|
||||
set -o errexit
|
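# NOTE: illustrative usage sketch, not part of this diff. After stack.sh has
# run, the services live in the tmux session it created:
#
#   tmux attach -t bdb-dev          # attach to the session
#   tmux list-windows -t bdb-dev    # shows the bdb, mdb and tendermint windows
#   tmux send-keys -t bdb-dev:bdb 'bigchaindb --version' C-m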
@ -1,3 +1,3 @@
|
||||
[pytest]
|
||||
testpaths = tests
|
||||
testpaths = tests/
|
||||
norecursedirs = .* *.egg *.egg-info env* devenv* docs
|
||||
|
1
setup.py
@ -83,6 +83,7 @@ install_requires = [
|
||||
'aiohttp~=2.0',
|
||||
'python-rapidjson-schema==0.1.1',
|
||||
'statsd==3.2.1',
|
||||
'abci~=0.3.0',
|
||||
]
|
||||
|
||||
setup(
|
||||
|
0
tests/backend/localmongodb/__init__.py
Normal file
111
tests/backend/localmongodb/test_queries.py
Normal file
@ -0,0 +1,111 @@
|
||||
from copy import deepcopy
|
||||
|
||||
import pytest
|
||||
import pymongo
|
||||
|
||||
pytestmark = [pytest.mark.tendermint, pytest.mark.localmongodb, pytest.mark.bdb]
|
||||
|
||||
|
||||
def test_get_txids_filtered(signed_create_tx, signed_transfer_tx):
|
||||
from bigchaindb.backend import connect, query
|
||||
from bigchaindb.models import Transaction
|
||||
conn = connect()
|
||||
|
||||
# insert two transactions: one CREATE and one TRANSFER
|
||||
conn.db.transactions.insert_one(signed_create_tx.to_dict())
|
||||
conn.db.transactions.insert_one(signed_transfer_tx.to_dict())
|
||||
|
||||
asset_id = Transaction.get_asset_id([signed_create_tx, signed_transfer_tx])
|
||||
|
||||
# Test get by just asset id
|
||||
txids = set(query.get_txids_filtered(conn, asset_id))
|
||||
assert txids == {signed_create_tx.id, signed_transfer_tx.id}
|
||||
|
||||
# Test get by asset and CREATE
|
||||
txids = set(query.get_txids_filtered(conn, asset_id, Transaction.CREATE))
|
||||
assert txids == {signed_create_tx.id}
|
||||
|
||||
# Test get by asset and TRANSFER
|
||||
txids = set(query.get_txids_filtered(conn, asset_id, Transaction.TRANSFER))
|
||||
assert txids == {signed_transfer_tx.id}
|
||||
|
||||
|
||||
def test_write_assets():
|
||||
from bigchaindb.backend import connect, query
|
||||
conn = connect()
|
||||
|
||||
assets = [
|
||||
{'id': 1, 'data': '1'},
|
||||
{'id': 2, 'data': '2'},
|
||||
{'id': 3, 'data': '3'},
|
||||
# Duplicated id. Should not be written to the database
|
||||
{'id': 1, 'data': '1'},
|
||||
]
|
||||
|
||||
# write the assets
|
||||
for asset in assets:
|
||||
query.store_asset(conn, deepcopy(asset))
|
||||
|
||||
# check that 3 assets were written to the database
|
||||
cursor = conn.db.assets.find({}, projection={'_id': False})\
|
||||
.sort('id', pymongo.ASCENDING)
|
||||
|
||||
assert cursor.count() == 3
|
||||
assert list(cursor) == assets[:-1]
|
||||
|
||||
|
||||
def test_get_assets():
|
||||
from bigchaindb.backend import connect, query
|
||||
conn = connect()
|
||||
|
||||
assets = [
|
||||
{'id': 1, 'data': '1'},
|
||||
{'id': 2, 'data': '2'},
|
||||
{'id': 3, 'data': '3'},
|
||||
]
|
||||
|
||||
conn.db.assets.insert_many(deepcopy(assets), ordered=False)
|
||||
|
||||
for asset in assets:
|
||||
assert query.get_asset(conn, asset['id'])
|
||||
|
||||
|
||||
def test_text_search():
|
||||
from ..mongodb.test_queries import test_text_search
|
||||
|
||||
test_text_search('assets')
|
||||
|
||||
|
||||
def test_get_owned_ids(signed_create_tx, user_pk):
|
||||
from bigchaindb.backend import connect, query
|
||||
conn = connect()
|
||||
|
||||
# insert a transaction
|
||||
conn.db.transactions.insert_one(signed_create_tx.to_dict())
|
||||
|
||||
txns = list(query.get_owned_ids(conn, user_pk))
|
||||
|
||||
assert txns[0] == signed_create_tx.to_dict()
|
||||
|
||||
|
||||
def test_get_spending_transactions(user_pk, user_sk):
|
||||
from bigchaindb.backend import connect, query
|
||||
from bigchaindb.models import Transaction
|
||||
conn = connect()
|
||||
|
||||
out = [([user_pk], 1)]
|
||||
tx1 = Transaction.create([user_pk], out * 3)
|
||||
tx1.sign([user_sk])
|
||||
inputs = tx1.to_inputs()
|
||||
tx2 = Transaction.transfer([inputs[0]], out, tx1.id)
|
||||
tx3 = Transaction.transfer([inputs[1]], out, tx1.id)
|
||||
tx4 = Transaction.transfer([inputs[2]], out, tx1.id)
|
||||
txns = [tx.to_dict() for tx in [tx1, tx2, tx3, tx4]]
|
||||
conn.db.transactions.insert_many(txns)
|
||||
|
||||
links = [inputs[0].fulfills.to_dict(), inputs[2].fulfills.to_dict()]
|
||||
txns = list(query.get_spending_transactions(conn, links))
|
||||
|
||||
# tx3 not a member because input 1 not asked for
|
||||
assert txns == [tx2.to_dict(), tx4.to_dict()]
|
@ -26,11 +26,9 @@ USER_PUBLIC_KEY = 'JEAkEJqLbbgDRAtMm8YAjGp759Aq2qTn9eaEHUj2XePE'
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
if isinstance(item, item.Function):
|
||||
if item.get_marker('skip_travis_rdb'):
|
||||
if (os.getenv('TRAVIS_CI') == 'true' and
|
||||
os.getenv('BIGCHAINDB_DATABASE_BACKEND') == 'rethinkdb'):
|
||||
pytest.skip(
|
||||
'Skip test during Travis CI build when using rethinkdb')
|
||||
backend = item.session.config.getoption('--database-backend')
|
||||
if (item.get_marker('localmongodb') and backend != 'localmongodb'):
|
||||
pytest.skip('Skip tendermint specific tests if not using localmongodb')
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
@ -316,6 +314,12 @@ def b():
|
||||
return Bigchain()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def tb():
|
||||
from bigchaindb.tendermint import BigchainDB
|
||||
return BigchainDB()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def create_tx(b, user_pk):
|
||||
from bigchaindb.models import Transaction
|
||||
|
0
tests/tendermint/__init__.py
Normal file
7
tests/tendermint/conftest.py
Normal file
@ -0,0 +1,7 @@
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def b():
|
||||
from bigchaindb.tendermint import BigchainDB
|
||||
return BigchainDB()
|
120
tests/tendermint/test_core.py
Normal file
@ -0,0 +1,120 @@
|
||||
import json
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
pytestmark = pytest.mark.tendermint
|
||||
|
||||
|
||||
def encode_tx_to_bytes(transaction):
|
||||
return json.dumps(transaction.to_dict()).encode('utf8')
|
||||
|
||||
|
||||
def test_check_tx__signed_create_is_ok(b):
|
||||
from bigchaindb.tendermint import App
|
||||
from bigchaindb.models import Transaction
|
||||
from bigchaindb.common.crypto import generate_key_pair
|
||||
|
||||
alice = generate_key_pair()
|
||||
bob = generate_key_pair()
|
||||
|
||||
tx = Transaction.create([alice.public_key],
|
||||
[([bob.public_key], 1)])\
|
||||
.sign([alice.private_key])
|
||||
|
||||
app = App(b)
|
||||
result = app.check_tx(encode_tx_to_bytes(tx))
|
||||
assert result.is_ok()
|
||||
|
||||
|
||||
def test_check_tx__unsigned_create_is_error(b):
|
||||
from bigchaindb.tendermint import App
|
||||
from bigchaindb.models import Transaction
|
||||
from bigchaindb.common.crypto import generate_key_pair
|
||||
|
||||
alice = generate_key_pair()
|
||||
bob = generate_key_pair()
|
||||
|
||||
tx = Transaction.create([alice.public_key],
|
||||
[([bob.public_key], 1)])
|
||||
|
||||
app = App(b)
|
||||
result = app.check_tx(encode_tx_to_bytes(tx))
|
||||
assert result.is_error()
|
||||
|
||||
|
||||
def test_deliver_tx__valid_create_updates_db(b):
|
||||
from bigchaindb.tendermint import App
|
||||
from bigchaindb.models import Transaction
|
||||
from bigchaindb.common.crypto import generate_key_pair
|
||||
|
||||
alice = generate_key_pair()
|
||||
bob = generate_key_pair()
|
||||
|
||||
tx = Transaction.create([alice.public_key],
|
||||
[([bob.public_key], 1)])\
|
||||
.sign([alice.private_key])
|
||||
|
||||
app = App(b)
|
||||
result = app.deliver_tx(encode_tx_to_bytes(tx))
|
||||
assert result.is_ok()
|
||||
assert b.get_transaction(tx.id).id == tx.id
|
||||
|
||||
|
||||
def test_deliver_tx__double_spend_fails(b):
|
||||
from bigchaindb.tendermint import App
|
||||
from bigchaindb.models import Transaction
|
||||
from bigchaindb.common.crypto import generate_key_pair
|
||||
|
||||
alice = generate_key_pair()
|
||||
bob = generate_key_pair()
|
||||
|
||||
tx = Transaction.create([alice.public_key],
|
||||
[([bob.public_key], 1)])\
|
||||
.sign([alice.private_key])
|
||||
|
||||
app = App(b)
|
||||
result = app.deliver_tx(encode_tx_to_bytes(tx))
|
||||
assert result.is_ok()
|
||||
assert b.get_transaction(tx.id).id == tx.id
|
||||
result = app.deliver_tx(encode_tx_to_bytes(tx))
|
||||
assert result.is_error()
|
||||
|
||||
|
||||
def test_deliver_transfer_tx__double_spend_fails(b):
|
||||
from bigchaindb.tendermint import App
|
||||
from bigchaindb.models import Transaction
|
||||
from bigchaindb.common.crypto import generate_key_pair
|
||||
|
||||
app = App(b)
|
||||
alice = generate_key_pair()
|
||||
bob = generate_key_pair()
|
||||
carly = generate_key_pair()
|
||||
|
||||
asset = {
|
||||
'msg': 'live long and prosper'
|
||||
}
|
||||
|
||||
tx = Transaction.create([alice.public_key],
|
||||
[([alice.public_key], 1)],
|
||||
asset=asset)\
|
||||
.sign([alice.private_key])
|
||||
|
||||
result = app.deliver_tx(encode_tx_to_bytes(tx))
|
||||
assert result.is_ok()
|
||||
|
||||
tx_transfer = Transaction.transfer(tx.to_inputs(),
|
||||
[([bob.public_key], 1)],
|
||||
asset_id=tx.id)\
|
||||
.sign([alice.private_key])
|
||||
|
||||
result = app.deliver_tx(encode_tx_to_bytes(tx_transfer))
|
||||
assert result.is_ok()
|
||||
|
||||
double_spend = Transaction.transfer(tx.to_inputs(),
|
||||
[([carly.public_key], 1)],
|
||||
asset_id=tx.id)\
|
||||
.sign([alice.private_key])
|
||||
|
||||
result = app.deliver_tx(encode_tx_to_bytes(double_spend))
|
||||
assert result.is_error()
|
100
tests/tendermint/test_integration.py
Normal file
@ -0,0 +1,100 @@
|
||||
import json
|
||||
|
||||
import pytest
|
||||
from abci.server import ProtocolHandler
|
||||
from io import BytesIO
|
||||
import abci.types_pb2 as types
|
||||
from abci.wire import read_message
|
||||
from abci.messages import to_request_deliver_tx, to_request_check_tx
|
||||
|
||||
|
||||
pytestmark = pytest.mark.tendermint
|
||||
|
||||
|
||||
@pytest.mark.bdb
|
||||
def test_app(b):
|
||||
from bigchaindb.tendermint import App
|
||||
from bigchaindb.tendermint.utils import calculate_hash
|
||||
from bigchaindb.common.crypto import generate_key_pair
|
||||
from bigchaindb.models import Transaction
|
||||
|
||||
app = App(b)
|
||||
p = ProtocolHandler(app)
|
||||
|
||||
data = p.process('info', None)
|
||||
res, err = read_message(BytesIO(data), types.Response)
|
||||
assert res
|
||||
assert res.info.last_block_app_hash == b''
|
||||
assert res.info.last_block_height == 0
|
||||
assert not b.get_latest_block()
|
||||
|
||||
p.process('init_chain', None)
|
||||
block0 = b.get_latest_block()
|
||||
assert block0
|
||||
assert block0['height'] == 0
|
||||
assert block0['app_hash'] == ''
|
||||
|
||||
alice = generate_key_pair()
|
||||
bob = generate_key_pair()
|
||||
tx = Transaction.create([alice.public_key],
|
||||
[([bob.public_key], 1)])\
|
||||
.sign([alice.private_key])
|
||||
etxn = json.dumps(tx.to_dict()).encode('utf8')
|
||||
|
||||
r = to_request_check_tx(etxn)
|
||||
data = p.process('check_tx', r)
|
||||
res, err = read_message(BytesIO(data), types.Response)
|
||||
assert res
|
||||
assert res.check_tx.code == 0
|
||||
|
||||
r = types.Request()
|
||||
r.begin_block.hash = b''
|
||||
p.process('begin_block', r)
|
||||
|
||||
r = to_request_deliver_tx(etxn)
|
||||
data = p.process('deliver_tx', r)
|
||||
res, err = read_message(BytesIO(data), types.Response)
|
||||
assert res
|
||||
assert res.deliver_tx.code == 0
|
||||
assert b.get_transaction(tx.id).id == tx.id
|
||||
|
||||
new_block_txn_hash = calculate_hash([tx.id])
|
||||
|
||||
r = types.Request()
|
||||
r.end_block.height = 1
|
||||
data = p.process('end_block', r)
|
||||
res, err = read_message(BytesIO(data), types.Response)
|
||||
assert res
|
||||
assert 'end_block' == res.WhichOneof('value')
|
||||
|
||||
new_block_hash = calculate_hash([block0['app_hash'], new_block_txn_hash])
|
||||
|
||||
data = p.process('commit', None)
|
||||
res, err = read_message(BytesIO(data), types.Response)
|
||||
assert res
|
||||
assert res.commit.code == 0
|
||||
assert res.commit.data == new_block_hash.encode('utf-8')
|
||||
|
||||
block0 = b.get_latest_block()
|
||||
assert block0
|
||||
assert block0['height'] == 1
|
||||
assert block0['app_hash'] == new_block_hash
|
||||
|
||||
# empty block should not update height
|
||||
r = types.Request()
|
||||
r.begin_block.hash = new_block_hash.encode('utf-8')
|
||||
p.process('begin_block', r)
|
||||
|
||||
r = types.Request()
|
||||
r.end_block.height = 2
|
||||
p.process('end_block', r)
|
||||
|
||||
data = p.process('commit', None)
|
||||
assert res.commit.data == new_block_hash.encode('utf-8')
|
||||
|
||||
block0 = b.get_latest_block()
|
||||
assert block0
|
||||
assert block0['height'] == 1
|
||||
|
||||
# when empty block is generated hash of previous block should be returned
|
||||
assert block0['app_hash'] == new_block_hash
|
85
tests/tendermint/test_lib.py
Normal file
@ -0,0 +1,85 @@
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
from bigchaindb import backend
|
||||
from unittest.mock import patch
|
||||
|
||||
|
||||
pytestmark = pytest.mark.tendermint
|
||||
|
||||
|
||||
def test_asset_is_separated_from_transaction(b):
|
||||
from bigchaindb.models import Transaction
|
||||
from bigchaindb.common.crypto import generate_key_pair
|
||||
|
||||
alice = generate_key_pair()
|
||||
bob = generate_key_pair()
|
||||
|
||||
asset = {'Never gonna': ['give you up',
|
||||
'let you down',
|
||||
'run around',
|
||||
'desert you',
|
||||
'make you cry',
|
||||
'say goodbye',
|
||||
'tell a lie',
|
||||
'hurt you']}
|
||||
|
||||
tx = Transaction.create([alice.public_key],
|
||||
[([bob.public_key], 1)],
|
||||
metadata=None,
|
||||
asset=asset)\
|
||||
.sign([alice.private_key])
|
||||
|
||||
b.store_transaction(tx)
|
||||
assert 'asset' not in backend.query.get_transaction(b.connection, tx.id)
|
||||
assert backend.query.get_asset(b.connection, tx.id)['data'] == asset
|
||||
assert b.get_transaction(tx.id) == tx
|
||||
|
||||
|
||||
def test_get_latest_block(b):
|
||||
from bigchaindb.tendermint.lib import Block
|
||||
|
||||
for i in range(10):
|
||||
app_hash = os.urandom(16).hex()
|
||||
block = Block(app_hash=app_hash, height=i)._asdict()
|
||||
b.store_block(block)
|
||||
|
||||
block = b.get_latest_block()
|
||||
assert block['height'] == 9
|
||||
|
||||
|
||||
def test_validation_error(b):
|
||||
from bigchaindb.models import Transaction
|
||||
from bigchaindb.common.crypto import generate_key_pair
|
||||
|
||||
alice = generate_key_pair()
|
||||
tx = Transaction.create([alice.public_key],
|
||||
[([alice.public_key], 1)],
|
||||
asset=None)\
|
||||
.sign([alice.private_key]).to_dict()
|
||||
|
||||
tx['metadata'] = ''
|
||||
assert not b.validate_transaction(tx)
|
||||
|
||||
|
||||
@patch('requests.post')
|
||||
def test_write_and_post_transaction(mock_post, b):
|
||||
from bigchaindb.models import Transaction
|
||||
from bigchaindb.common.crypto import generate_key_pair
|
||||
from bigchaindb.tendermint.utils import encode_transaction
|
||||
|
||||
alice = generate_key_pair()
|
||||
tx = Transaction.create([alice.public_key],
|
||||
[([alice.public_key], 1)],
|
||||
asset=None)\
|
||||
.sign([alice.private_key]).to_dict()
|
||||
|
||||
tx = b.validate_transaction(tx)
|
||||
b.write_transaction(tx)
|
||||
|
||||
assert mock_post.called
|
||||
args, kwargs = mock_post.call_args
|
||||
assert 'broadcast_tx_async' == kwargs['json']['method']
|
||||
encoded_tx = [encode_transaction(tx.to_dict())]
|
||||
assert encoded_tx == kwargs['json']['params']
|
27
tests/tendermint/test_utils.py
Normal file
@ -0,0 +1,27 @@
|
||||
import base64
|
||||
import json
|
||||
|
||||
|
||||
def test_encode_decode_transaction(b):
|
||||
from bigchaindb.tendermint.utils import (encode_transaction,
|
||||
decode_transaction)
|
||||
|
||||
asset = {
|
||||
'value': 'key'
|
||||
}
|
||||
|
||||
encode_tx = encode_transaction(asset)
|
||||
new_encode_tx = base64.b64encode(json.dumps(asset).
|
||||
encode('utf8')).decode('utf8')
|
||||
|
||||
assert encode_tx == new_encode_tx
|
||||
|
||||
de64 = base64.b64decode(encode_tx)
|
||||
assert asset == decode_transaction(de64)
|
||||
|
||||
|
||||
def test_calculate_hash_no_key(b):
|
||||
from bigchaindb.tendermint.utils import calculate_hash
|
||||
|
||||
# pass an empty list
|
||||
assert calculate_hash([]) == ''
|
@ -4,5 +4,11 @@ import pytest
|
||||
@pytest.fixture
|
||||
def app(request):
|
||||
from bigchaindb.web import server
|
||||
app = server.create_app(debug=True)
|
||||
from bigchaindb.tendermint.lib import BigchainDB
|
||||
|
||||
if request.config.getoption('--database-backend') == 'localmongodb':
|
||||
app = server.create_app(debug=True, bigchaindb_factory=BigchainDB)
|
||||
else:
|
||||
app = server.create_app(debug=True)
|
||||
|
||||
return app
|
||||
|
@ -3,6 +3,7 @@ import pytest
|
||||
ASSETS_ENDPOINT = '/api/v1/assets/'
|
||||
|
||||
|
||||
@pytest.mark.tendermint
|
||||
def test_get_assets_with_empty_text_search(client):
|
||||
res = client.get(ASSETS_ENDPOINT + '?search=')
|
||||
assert res.json == {'status': 400,
|
||||
@ -10,6 +11,7 @@ def test_get_assets_with_empty_text_search(client):
|
||||
assert res.status_code == 400
|
||||
|
||||
|
||||
@pytest.mark.tendermint
|
||||
def test_get_assets_with_missing_text_search(client):
|
||||
res = client.get(ASSETS_ENDPOINT)
|
||||
assert res.status_code == 400
|
||||
@ -81,3 +83,60 @@ def test_get_assets_limit(client, b):
|
||||
res = client.get(ASSETS_ENDPOINT + '?search=abc&limit=1')
|
||||
assert res.status_code == 200
|
||||
assert len(res.json) == 1
|
||||
|
||||
|
||||
@pytest.mark.bdb
|
||||
@pytest.mark.tendermint
|
||||
@pytest.mark.localmongodb
|
||||
def test_get_assets_tendermint(client, tb):
|
||||
from bigchaindb.models import Transaction
|
||||
|
||||
# test returns empty list when no assets are found
|
||||
res = client.get(ASSETS_ENDPOINT + '?search=abc')
|
||||
assert res.json == []
|
||||
assert res.status_code == 200
|
||||
|
||||
# create asset
|
||||
asset = {'msg': 'abc'}
|
||||
tx = Transaction.create([tb.me], [([tb.me], 1)],
|
||||
asset=asset).sign([tb.me_private])
|
||||
|
||||
tb.store_transaction(tx)
|
||||
|
||||
# test that asset is returned
|
||||
res = client.get(ASSETS_ENDPOINT + '?search=abc')
|
||||
assert res.status_code == 200
|
||||
assert len(res.json) == 1
|
||||
assert res.json[0] == {
|
||||
'data': {'msg': 'abc'},
|
||||
'id': tx.id
|
||||
}
|
||||
|
||||
|
||||
@pytest.mark.bdb
|
||||
@pytest.mark.tendermint
|
||||
@pytest.mark.localmongodb
|
||||
def test_get_assets_limit_tendermint(client, tb):
|
||||
from bigchaindb.models import Transaction
|
||||
|
||||
b = tb
|
||||
# create two assets
|
||||
asset1 = {'msg': 'abc 1'}
|
||||
asset2 = {'msg': 'abc 2'}
|
||||
tx1 = Transaction.create([b.me], [([b.me], 1)],
|
||||
asset=asset1).sign([b.me_private])
|
||||
tx2 = Transaction.create([b.me], [([b.me], 1)],
|
||||
asset=asset2).sign([b.me_private])
|
||||
|
||||
b.store_transaction(tx1)
|
||||
b.store_transaction(tx2)
|
||||
|
||||
# test that both assets are returned without limit
|
||||
res = client.get(ASSETS_ENDPOINT + '?search=abc')
|
||||
assert res.status_code == 200
|
||||
assert len(res.json) == 2
|
||||
|
||||
# test that only one asset is returned when using limit=1
|
||||
res = client.get(ASSETS_ENDPOINT + '?search=abc&limit=1')
|
||||
assert res.status_code == 200
|
||||
assert len(res.json) == 1
|
||||
|
@ -6,11 +6,12 @@ pytestmark = [pytest.mark.bdb, pytest.mark.usefixtures('inputs')]
|
||||
OUTPUTS_ENDPOINT = '/api/v1/outputs/'
|
||||
|
||||
|
||||
@pytest.mark.tendermint
|
||||
def test_get_outputs_endpoint(client, user_pk):
|
||||
m = MagicMock()
|
||||
m.txid = 'a'
|
||||
m.output = 0
|
||||
with patch('bigchaindb.core.Bigchain.get_outputs_filtered') as gof:
|
||||
with patch('bigchaindb.tendermint.lib.BigchainDB.get_outputs_filtered') as gof:
|
||||
gof.return_value = [m, m]
|
||||
res = client.get(OUTPUTS_ENDPOINT + '?public_key={}'.format(user_pk))
|
||||
assert res.json == [
|
||||
@ -21,11 +22,12 @@ def test_get_outputs_endpoint(client, user_pk):
|
||||
gof.assert_called_once_with(user_pk, None)
|
||||
|
||||
|
||||
@pytest.mark.tendermint
|
||||
def test_get_outputs_endpoint_unspent(client, user_pk):
|
||||
m = MagicMock()
|
||||
m.txid = 'a'
|
||||
m.output = 0
|
||||
with patch('bigchaindb.core.Bigchain.get_outputs_filtered') as gof:
|
||||
with patch('bigchaindb.tendermint.lib.BigchainDB.get_outputs_filtered') as gof:
|
||||
gof.return_value = [m]
|
||||
params = '?spent=False&public_key={}'.format(user_pk)
|
||||
res = client.get(OUTPUTS_ENDPOINT + params)
|
||||
@ -34,11 +36,12 @@ def test_get_outputs_endpoint_unspent(client, user_pk):
|
||||
gof.assert_called_once_with(user_pk, False)
|
||||
|
||||
|
||||
@pytest.mark.tendermint
|
||||
def test_get_outputs_endpoint_spent(client, user_pk):
|
||||
m = MagicMock()
|
||||
m.txid = 'a'
|
||||
m.output = 0
|
||||
with patch('bigchaindb.core.Bigchain.get_outputs_filtered') as gof:
|
||||
with patch('bigchaindb.tendermint.lib.BigchainDB.get_outputs_filtered') as gof:
|
||||
gof.return_value = [m]
|
||||
params = '?spent=true&public_key={}'.format(user_pk)
|
||||
res = client.get(OUTPUTS_ENDPOINT + params)
|
||||
@ -47,11 +50,13 @@ def test_get_outputs_endpoint_spent(client, user_pk):
|
||||
gof.assert_called_once_with(user_pk, True)
|
||||
|
||||
|
||||
@pytest.mark.tendermint
|
||||
def test_get_outputs_endpoint_without_public_key(client):
|
||||
res = client.get(OUTPUTS_ENDPOINT)
|
||||
assert res.status_code == 400
|
||||
|
||||
|
||||
@pytest.mark.tendermint
|
||||
def test_get_outputs_endpoint_with_invalid_public_key(client):
|
||||
expected = {'message': {'public_key': 'Invalid base58 ed25519 key'}}
|
||||
res = client.get(OUTPUTS_ENDPOINT + '?public_key=abc')
|
||||
@ -59,6 +64,7 @@ def test_get_outputs_endpoint_with_invalid_public_key(client):
|
||||
assert res.status_code == 400
|
||||
|
||||
|
||||
@pytest.mark.tendermint
|
||||
def test_get_outputs_endpoint_with_invalid_spent(client, user_pk):
|
||||
expected = {'message': {'spent': 'Boolean value must be "true" or "false" (lowercase)'}}
|
||||
params = '?spent=tru&public_key={}'.format(user_pk)
|
||||
|
@ -345,6 +345,7 @@ def test_post_invalid_transfer_transaction_returns_400(b, client, user_pk):
|
||||
assert res.json['message'] == expected_error_message
|
||||
|
||||
|
||||
@pytest.mark.tendermint
|
||||
def test_transactions_get_list_good(client):
|
||||
from functools import partial
|
||||
|
||||
@ -371,6 +372,7 @@ def test_transactions_get_list_good(client):
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.tendermint
|
||||
def test_transactions_get_list_bad(client):
|
||||
def should_not_be_called():
|
||||
assert False
|
||||
|
18
tmdata/config.toml
Normal file
@ -0,0 +1,18 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://bdb:46658"
|
||||
moniker = "anonymous"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:debug,*:error"
|
||||
|
||||
[consensus]
|
||||
create_empty_blocks = false
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = ""
|
Some files were not shown because too many files have changed in this diff.