Merge remote-tracking branch 'origin/master' into fix/outputs-public-keys-validate
Commit aaf4fcb91e
@@ -4,9 +4,31 @@ set -e -x

if [[ "${TOXENV}" == *-rdb ]]; then
    rethinkdb --daemon
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb ]]; then
    wget http://downloads.mongodb.org/linux/mongodb-linux-x86_64-3.4.1.tgz -O /tmp/mongodb.tgz
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb && \
        -z "${BIGCHAINDB_DATABASE_SSL}" ]]; then
    # Connect to MongoDB on port 27017 via a normal, unsecure connection if
    # BIGCHAINDB_DATABASE_SSL is unset.
    # It is unset in this case in .travis.yml.
    wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1404-3.4.4.tgz -O /tmp/mongodb.tgz
    tar -xvf /tmp/mongodb.tgz
    mkdir /tmp/mongodb-data
    ${PWD}/mongodb-linux-x86_64-3.4.1/bin/mongod --dbpath=/tmp/mongodb-data --replSet=bigchain-rs &> /dev/null &
    ${PWD}/mongodb-linux-x86_64-ubuntu1404-3.4.4/bin/mongod \
        --dbpath=/tmp/mongodb-data --replSet=bigchain-rs &> /dev/null &
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb && \
        "${BIGCHAINDB_DATABASE_SSL}" == true ]]; then
    # Connect to MongoDB on port 27017 via TLS/SSL connection if
    # BIGCHAINDB_DATABASE_SSL is set.
    # It is set to 'true' here in .travis.yml. Dummy certificates for testing
    # are stored under bigchaindb/tests/backend/mongodb-ssl/certs/ directory.
    wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1404-3.4.4.tgz -O /tmp/mongodb-ssl.tgz
    tar -xvf /tmp/mongodb-ssl.tgz
    mkdir /tmp/mongodb-ssl-data
    ${PWD}/mongodb-linux-x86_64-ubuntu1404-3.4.4/bin/mongod \
        --dbpath=/tmp/mongodb-ssl-data \
        --replSet=bigchain-rs \
        --sslAllowInvalidHostnames \
        --sslMode=requireSSL \
        --sslCAFile=$TRAVIS_BUILD_DIR/tests/backend/mongodb-ssl/certs/ca.crt \
        --sslCRLFile=$TRAVIS_BUILD_DIR/tests/backend/mongodb-ssl/certs/crl.pem \
        --sslPEMKeyFile=$TRAVIS_BUILD_DIR/tests/backend/mongodb-ssl/certs/test_mdb_ssl_cert_and_key.pem &> /dev/null &
fi
@@ -7,6 +7,6 @@ pip install --upgrade pip

if [[ -n ${TOXENV} ]]; then
    pip install --upgrade tox
else
    pip install -e .[test]
    pip install .[test]
    pip install --upgrade codecov
fi
@@ -4,8 +4,14 @@ set -e -x

if [[ -n ${TOXENV} ]]; then
    tox -e ${TOXENV}
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb ]]; then
    pytest -v --database-backend=mongodb --cov=bigchaindb
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb && \
        -z "${BIGCHAINDB_DATABASE_SSL}" ]]; then
    # Run the full suite of tests for MongoDB over an unsecure connection
    pytest -sv --database-backend=mongodb --cov=bigchaindb
elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb && \
        "${BIGCHAINDB_DATABASE_SSL}" == true ]]; then
    # Run a sub-set of tests over SSL; those marked as 'pytest.mark.bdb_ssl'.
    pytest -sv --database-backend=mongodb-ssl --cov=bigchaindb -m bdb_ssl
else
    pytest -v -n auto --cov=bigchaindb
    pytest -sv -n auto --cov=bigchaindb
fi
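The `-m bdb_ssl` selection above relies on a standard pytest marker. A hedged sketch of what such a test could look like (only the marker name comes from the script above; the test body and the `b` fixture are hypothetical):

```python
import pytest


@pytest.mark.bdb_ssl
def test_write_over_tls(b):
    # `pytest -m bdb_ssl` collects only tests carrying this marker.
    # `b` stands in for a fixture backed by the SSL-enabled MongoDB
    # started in the before-script above.
    assert b is not None
```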
27 .travis.yml

@@ -2,7 +2,6 @@ language: python
cache: pip

python:
  - 3.4
  - 3.5
  - 3.6

@@ -14,12 +13,6 @@ env:
matrix:
  fast_finish: true
  exclude:
    - python: 3.4
      env: TOXENV=flake8
    - python: 3.4
      env: TOXENV=docsroot
    - python: 3.4
      env: TOXENV=docsserver
    - python: 3.5
      env: TOXENV=flake8
    - python: 3.5
@@ -27,22 +20,30 @@ matrix:
    - python: 3.5
      env: TOXENV=docsserver
  include:
    - python: 3.4
      addons:
        rethinkdb: '2.3.5'
      env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb
    - python: 3.5
      addons:
        rethinkdb: '2.3.5'
      env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb
    - python: 3.5
      env: BIGCHAINDB_DATABASE_BACKEND=mongodb
      env:
        - BIGCHAINDB_DATABASE_BACKEND=mongodb
        - BIGCHAINDB_DATABASE_SSL=
    - python: 3.6
      addons:
        rethinkdb: '2.3.5'
      env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb
    - python: 3.6
      env: BIGCHAINDB_DATABASE_BACKEND=mongodb
      env:
        - BIGCHAINDB_DATABASE_BACKEND=mongodb
        - BIGCHAINDB_DATABASE_SSL=
    - python: 3.5
      env:
        - BIGCHAINDB_DATABASE_BACKEND=mongodb
        - BIGCHAINDB_DATABASE_SSL=true
    - python: 3.6
      env:
        - BIGCHAINDB_DATABASE_BACKEND=mongodb
        - BIGCHAINDB_DATABASE_SSL=true

before_install: sudo .ci/travis-before-install.sh
73 CHANGELOG.md

@@ -1,7 +1,7 @@
# Change Log (Release Notes)

All _notable_ changes to this project will be documented in this file (`CHANGELOG.md`).
This project adheres to [Semantic Versioning](http://semver.org/) (or at least we try).
This project adheres to [the Python form of Semantic Versioning](https://packaging.python.org/tutorials/distributing-packages/#choosing-a-versioning-scheme) (or at least we try).
Contributors to this file, please follow the guidelines on [keepachangelog.com](http://keepachangelog.com/).
Note that each version (or "release") is the name of a [Git _tag_](https://git-scm.com/book/en/v2/Git-Basics-Tagging) of a particular commit, so the associated date and time are the date and time of that commit (as reported by GitHub), _not_ the "Uploaded on" date listed on PyPI (which may differ).
For reference, the possible headings are:
@@ -15,14 +15,77 @@ For reference, the possible headings are:
* **External Contributors** to list contributors outside of BigchainDB GmbH.
* **Notes**

## [1.0.0rc1] - 2017-06-23
Tag name: v1.0.0rc1

### Added
* Support for secure TLS/SSL communication between MongoDB and {BigchainDB, MongoDB Backup Agent, MongoDB Monitoring Agent}. Pull Requests
[#1456](https://github.com/bigchaindb/bigchaindb/pull/1456),
[#1497](https://github.com/bigchaindb/bigchaindb/pull/1497),
[#1510](https://github.com/bigchaindb/bigchaindb/pull/1510),
[#1536](https://github.com/bigchaindb/bigchaindb/pull/1536),
[#1551](https://github.com/bigchaindb/bigchaindb/pull/1551) and
[#1552](https://github.com/bigchaindb/bigchaindb/pull/1552).
* Text search support (only if using MongoDB). Pull Requests [#1469](https://github.com/bigchaindb/bigchaindb/pull/1469) and [#1471](https://github.com/bigchaindb/bigchaindb/pull/1471)
* The `database.connection_timeout` configuration setting now works with RethinkDB too. [#1512](https://github.com/bigchaindb/bigchaindb/pull/1512)
* New code and tools for benchmarking CREATE transactions. [Pull Request #1511](https://github.com/bigchaindb/bigchaindb/pull/1511)

### Changed
* There's an upgrade guide in `docs/upgrade-guides/v0.10-->v1.0.md`. It only covers changes to the transaction model and HTTP API. If that file hasn't been merged yet, see [Pull Request #1547](https://github.com/bigchaindb/bigchaindb/pull/1547)
* Cryptographic signatures now sign the whole (serialized) transaction body, including the transaction ID, but with all `"fulfillment"` values changed to `None`. [Pull Request #1225](https://github.com/bigchaindb/bigchaindb/pull/1225)
* In transactions, the value of `"amount"` must be a string. (Before, it was supposed to be a number.) [Pull Request #1286](https://github.com/bigchaindb/bigchaindb/pull/1286)
* In `setup.py`, the "Development Status" (as reported on PyPI) was changed from Alpha to Beta. [Pull Request #1437](https://github.com/bigchaindb/bigchaindb/pull/1437)
* If you explicitly specify a config file, e.g. `bigchaindb -c path/to/config start` and that file can't be found, then BigchainDB Server will fail with a helpful error message. [Pull Request #1486](https://github.com/bigchaindb/bigchaindb/pull/1486)
* Reduced the response time on the HTTP API endpoint to get all the unspent outputs associated with a given public key (a.k.a. "fast unspents"). [Pull Request #1411](https://github.com/bigchaindb/bigchaindb/pull/1411)
* Internally, the value of an asset's `"data"` is now stored in a separate assets table. This enabled the new text search. Each asset data is stored along with the associated CREATE transaction ID (asset ID). That data gets written when the containing block gets written to the bigchain table. [Pull Request #1460](https://github.com/bigchaindb/bigchaindb/pull/1460)
* Schema validation was sped up by switching to `rapidjson-schema`. [Pull Request #1494](https://github.com/bigchaindb/bigchaindb/pull/1494)
* If a node comes back from being down for a while, it will resume voting on blocks in the order determined by the MongoDB oplog, in the case of MongoDB. (In the case of RethinkDB, blocks missed in the changefeed will not be voted on.) [Pull Request #1389](https://github.com/bigchaindb/bigchaindb/pull/1389)
* Parallelized transaction schema validation in the vote pipeline. [Pull Request #1492](https://github.com/bigchaindb/bigchaindb/pull/1492)
* `asset.data` or `asset.id` are now *required* in a CREATE or TRANSFER transaction, respectively. [Pull Request #1518](https://github.com/bigchaindb/bigchaindb/pull/1518)
* The HTTP response body, in the response to the `GET /` and the `GET /api/v1` endpoints, was changed substantially. [Pull Request #1529](https://github.com/bigchaindb/bigchaindb/pull/1529)
* Changed the HTTP `GET /api/v1/transactions/{transaction_id}` endpoint. It now only returns the transaction if it's in a valid block. It also returns a new header with a relative link to a status monitor. [Pull Request #1543](https://github.com/bigchaindb/bigchaindb/pull/1543)
* All instances of `txid` and `tx_id` were replaced with `transaction_id`, in the transaction model and the HTTP API. [Pull Request #1532](https://github.com/bigchaindb/bigchaindb/pull/1532)
* The hostname and port were removed from all URLs in all HTTP API responses. [Pull Request #1538](https://github.com/bigchaindb/bigchaindb/pull/1538)
* Relative links were replaced with JSON objects in HTTP API responses. [Pull Request #1541](https://github.com/bigchaindb/bigchaindb/pull/1541)
* In the outputs endpoint of the HTTP API, the query parameter `unspent` was changed to `spent` (so no more double negatives). If that query parameter isn't included, then all outputs matching the specified public key will be returned. If `spent=true`, then only the spent outputs will be returned. If `spent=false`, then only the unspent outputs will be returned. [Pull Request #1545](https://github.com/bigchaindb/bigchaindb/pull/1545)
* The supported crypto-conditions changed from version 01 of the crypto-conditions spec to version 02. [Pull Request #1562](https://github.com/bigchaindb/bigchaindb/pull/1562)
* The value of "version" inside a transaction must now be "1.0". (Before, it could be "0.anything".) [Pull Request #1574](https://github.com/bigchaindb/bigchaindb/pull/1574)

### Removed
* The `server.threads` configuration setting (for the Gunicorn HTTP server) was removed from the default set of BigchainDB configuration settings. [Pull Request #1488](https://github.com/bigchaindb/bigchaindb/pull/1488)

### Fixed
* The `GET /api/v1/outputs` endpoint was failing for some transactions with threshold conditions. Fixed in [Pull Request #1450](https://github.com/bigchaindb/bigchaindb/pull/1450)

### External Contributors
* @elopio - Pull Requests [#1415](https://github.com/bigchaindb/bigchaindb/pull/1415) and [#1491](https://github.com/bigchaindb/bigchaindb/pull/1491)
* @CsterKuroi - [Pull Request #1447](https://github.com/bigchaindb/bigchaindb/pull/1447)
* @tdsgit - [Pull Request #1512](https://github.com/bigchaindb/bigchaindb/pull/1512)
* @lavinasachdev3 - [Pull Request #1357](https://github.com/bigchaindb/bigchaindb/pull/1357)

### Notes
* We dropped support for Python 3.4. [Pull Request #1564](https://github.com/bigchaindb/bigchaindb/pull/1564)
* There were many improvements to our Kubernetes-based production deployment template (and the associated documentation).
* There is now a [BigchainDB Ruby driver](https://github.com/LicenseRocks/bigchaindb_ruby), created by @addywaddy at [license.rocks](https://github.com/bigchaindb/bigchaindb/pull/1437).
* The [BigchainDB JavaScript driver](https://github.com/bigchaindb/js-bigchaindb-driver) was moved to a different GitHub repo and is now officially maintained by the BigchainDB team.
* We continue to recommend using MongoDB.

## [0.10.3] - 2017-06-29
Tag name: v0.10.3

## Fixed
* Pin minor+ version of `cryptoconditions` to avoid upgrading to a non
compatible version.
[commit 97268a5](https://github.com/bigchaindb/bigchaindb/commit/97268a577bf27942a87d8eb838f4816165c84fd5)

## [0.10.2] - 2017-05-16
Tag name: v0.10.2

## Added
### Added
* Add Cross Origin Resource Sharing (CORS) support for the HTTP API.
[Commit 6cb7596](https://github.com/bigchaindb/bigchaindb/commit/6cb75960b05403c77bdae0fd327612482589efcb)

## Fixed
### Fixed
* Fixed `streams_v1` API link in response to `GET /api/v1`.
[Pull Request #1466](https://github.com/bigchaindb/bigchaindb/pull/1466)
* Fixed mismatch between docs and implementation for `GET /blocks?status=`
@@ -32,10 +95,10 @@ Tag name: v0.10.2
## [0.10.1] - 2017-04-19
Tag name: v0.10.1

## Added
### Added
* Documentation for the BigchainDB settings `wsserver.host` and `wsserver.port`. [Pull Request #1408](https://github.com/bigchaindb/bigchaindb/pull/1408)

## Fixed
### Fixed
* Fixed `Dockerfile`, which was failing to build. It now starts `FROM python:3.6` (instead of `FROM ubuntu:xenial`). [Pull Request #1410](https://github.com/bigchaindb/bigchaindb/pull/1410)
* Fixed the `Makefile` so that `release` depends on `dist`. [Pull Request #1405](https://github.com/bigchaindb/bigchaindb/pull/1405)
@@ -41,7 +41,7 @@ Familiarize yourself with how we do coding and documentation in the BigchainDB p
### Step 2 - Install some Dependencies

* [Install RethinkDB Server](https://rethinkdb.com/docs/install/)
* Make sure you have Python 3.4+ (preferably in a virtualenv)
* Make sure you have Python 3.5+ (preferably in a virtualenv)
* [Install BigchainDB Server's OS-level dependencies](https://docs.bigchaindb.com/projects/server/en/latest/appendices/install-os-level-deps.html)
* [Make sure you have the latest Python 3 version of pip and setuptools](https://docs.bigchaindb.com/projects/server/en/latest/appendices/install-latest-pip.html)
@@ -8,10 +8,11 @@ RUN apt-get -qq update \
    && pip install --no-cache-dir . \
    && apt-get autoremove \
    && apt-get clean
VOLUME ["/data"]
VOLUME ["/data", "/certs"]
WORKDIR /data
ENV BIGCHAINDB_CONFIG_PATH /data/.bigchaindb
ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984
ENV BIGCHAINDB_WSSERVER_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_SCHEME ws
ENTRYPOINT ["bigchaindb"]
CMD ["start"]
@@ -3,19 +3,19 @@ LABEL maintainer "dev@bigchaindb.com"

RUN apt-get update \
    && apt-get install -y vim \
    && pip install -U pip \
    && pip install pynacl \
    && apt-get autoremove \
    && apt-get clean

VOLUME ["/data"]
WORKDIR /data

ENV BIGCHAINDB_CONFIG_PATH /data/.bigchaindb
ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984
ENV BIGCHAINDB_WSSERVER_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_SCHEME ws

ARG backend

RUN mkdir -p /usr/src/app
COPY . /usr/src/app/
WORKDIR /usr/src/app
RUN pip install --no-cache-dir -e .[dev]
RUN bigchaindb -y configure mongodb
RUN bigchaindb -y configure "$backend"
@@ -6,7 +6,7 @@ This guide starts out with our general Python coding style guidelines and ends w

Our starting point is [PEP8](https://www.python.org/dev/peps/pep-0008/), the standard "Style Guide for Python Code." Many Python IDEs will check your code against PEP8. (Note that PEP8 isn't frozen; it actually changes over time, but slowly.)

BigchainDB uses Python 3.4+, so you can ignore all PEP8 guidelines specific to Python 2.
BigchainDB uses Python 3.5+, so you can ignore all PEP8 guidelines specific to Python 2.

### Python Docstrings
@@ -23,7 +23,6 @@ BigchainDB is a scalable blockchain database. [The whitepaper](https://www.bigch
* [Roadmap](https://github.com/bigchaindb/org/blob/master/ROADMAP.md)
* [Blog](https://medium.com/the-bigchaindb-blog)
* [Twitter](https://twitter.com/BigchainDB)
* [Google Group](https://groups.google.com/forum/#!forum/bigchaindb)

## Links for Developers
@@ -2,8 +2,14 @@

The release process for BigchainDB server differs slightly depending on whether it's a minor or a patch release.

BigchainDB follows [semantic versioning](http://semver.org/) (i.e. MAJOR.MINOR.PATCH), taking into account
that [major version 0.x does not export a stable API](http://semver.org/#spec-item-4).
BigchainDB follows
[the Python form of Semantic Versioning](https://packaging.python.org/tutorials/distributing-packages/#choosing-a-versioning-scheme)
(i.e. MAJOR.MINOR.PATCH),
which is almost identical
to [regular semantic versioning](http://semver.org/)
except release candidates are labelled like
`3.4.5rc2` not `3.4.5-rc2` (with no hyphen).
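As an aside (not part of the release doc itself), PEP 440 tooling treats the two spellings as the same version and normalizes away the hyphen; a quick check, assuming the `packaging` library is installed:

```python
from packaging.version import Version

# Both spellings denote the same release candidate under PEP 440;
# the canonical form is the one without the hyphen.
assert Version("3.4.5-rc2") == Version("3.4.5rc2")
print(Version("3.4.5-rc2"))  # prints: 3.4.5rc2
```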
## Minor release

@@ -14,6 +20,7 @@ A minor release is preceeded by a feature freeze and created from the 'master' b
1. Create and checkout a new branch for the minor release, named after the minor version, without a preceding 'v', e.g. `git checkout -b 0.9` (*not* 0.9.0; this new branch will be for e.g. 0.9.0, 0.9.1, 0.9.2, etc., each of which will be identified by a tagged commit)
1. In `bigchaindb/version.py`, update `__version__` and `__short_version__`, e.g. to `0.9.0` and `0.9` (with no `.dev` on the end)
1. Commit that change, and push the new branch to GitHub
1. On GitHub, use the new branch to create a new pull request and wait for all the tests to pass
1. Follow steps outlined in [Common Steps](#common-steps)
1. In the 'master' branch, edit `bigchaindb/version.py`, increment the minor version to the next planned release, e.g. `0.10.0.dev`. This is so people reading the latest docs will know that they're for the latest (master branch) version of BigchainDB Server, not the docs at the time of the most recent release (which are also available).
1. Go to [Docker Hub](https://hub.docker.com/), sign in, go to Settings - Build Settings, and under the build with Docker Tag Name equal to `latest`, change the Name to the number of the new release, e.g. `0.9`
37 benchmark.yml (new file)

@@ -0,0 +1,37 @@
version: '2'

services:
  bdb:
    build:
      context: .
      dockerfile: Dockerfile-dev
      args:
        backend: mongodb
    volumes:
      - ./bigchaindb:/usr/src/app/bigchaindb
      - ./tests:/usr/src/app/tests
      - ./docs:/usr/src/app/docs
      - ./k8s:/usr/src/app/k8s
      - ./setup.py:/usr/src/app/setup.py
      - ./setup.cfg:/usr/src/app/setup.cfg
      - ./pytest.ini:/usr/src/app/pytest.ini
      - ./tox.ini:/usr/src/app/tox.ini
      - ./scripts:/usr/src/app/scripts
    environment:
      BIGCHAINDB_DATABASE_BACKEND: mongodb
      BIGCHAINDB_DATABASE_HOST: mdb
      BIGCHAINDB_DATABASE_PORT: 27017
      BIGCHAINDB_SERVER_BIND: 0.0.0.0:9984
      BIGCHAINDB_GRAPHITE_HOST: graphite
    ports:
      - "9984"
    command: bigchaindb start

  graphite:
    image: hopsoft/graphite-statsd
    ports:
      - "2003-2004"
      - "2023-2024"
      - "8125/udp"
      - "8126"
      - "80"
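The `graphite` container above is the sink for the pipeline counters that BigchainDB emits through statsd (UDP port 8125, exposed above). A minimal, hypothetical sketch of that reporting path using the `statsd` package:

```python
import statsd

# BIGCHAINDB_GRAPHITE_HOST points at the graphite-statsd container in this compose file
client = statsd.StatsClient('graphite')  # defaults to UDP port 8125

# the same kind of counter the block and vote pipelines increment
client.incr('pipelines.block.throughput', 10)
```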
@@ -30,7 +30,6 @@ _base_database_mongodb = {
    'port': int(os.environ.get('BIGCHAINDB_DATABASE_PORT', 27017)),
    'name': os.environ.get('BIGCHAINDB_DATABASE_NAME', 'bigchain'),
    'replicaset': os.environ.get('BIGCHAINDB_DATABASE_REPLICASET', 'bigchain-rs'),
    'ssl': bool(os.environ.get('BIGCHAINDB_DATABASE_SSL', False)),
    'login': os.environ.get('BIGCHAINDB_DATABASE_LOGIN'),
    'password': os.environ.get('BIGCHAINDB_DATABASE_PASSWORD')
}
@@ -46,6 +45,12 @@ _database_mongodb = {
    'backend': os.environ.get('BIGCHAINDB_DATABASE_BACKEND', 'mongodb'),
    'connection_timeout': 5000,
    'max_tries': 3,
    'ssl': bool(os.environ.get('BIGCHAINDB_DATABASE_SSL', False)),
    'ca_cert': os.environ.get('BIGCHAINDB_DATABASE_CA_CERT'),
    'certfile': os.environ.get('BIGCHAINDB_DATABASE_CERTFILE'),
    'keyfile': os.environ.get('BIGCHAINDB_DATABASE_KEYFILE'),
    'keyfile_passphrase': os.environ.get('BIGCHAINDB_DATABASE_KEYFILE_PASSPHRASE'),
    'crlfile': os.environ.get('BIGCHAINDB_DATABASE_CRLFILE')
}
_database_mongodb.update(_base_database_mongodb)

@@ -64,6 +69,7 @@ config = {
        'workers': None,  # if none, the value will be cpu_count * 2 + 1
    },
    'wsserver': {
        'scheme': os.environ.get('BIGCHAINDB_WSSERVER_SCHEME') or 'ws',
        'host': os.environ.get('BIGCHAINDB_WSSERVER_HOST') or 'localhost',
        'port': int(os.environ.get('BIGCHAINDB_WSSERVER_PORT', 9985)),
    },
@@ -89,6 +95,9 @@ config = {
        'fmt_logfile': log_config['formatters']['file']['format'],
        'granular_levels': {},
    },
    'graphite': {
        'host': os.environ.get('BIGCHAINDB_GRAPHITE_HOST', 'localhost'),
    },
}

# We need to maintain a backup copy of the original config dict in case
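Note that the `ssl` entry above wraps the raw environment string in `bool()`, so any non-empty value, even the string `'false'`, counts as truthy. That is why the Travis configuration sets `BIGCHAINDB_DATABASE_SSL=` (empty) for the non-SSL builds and `true` for the SSL builds. A quick illustration:

```python
import os

os.environ['BIGCHAINDB_DATABASE_SSL'] = ''       # as in the non-SSL Travis jobs
print(bool(os.environ.get('BIGCHAINDB_DATABASE_SSL', False)))  # False

os.environ['BIGCHAINDB_DATABASE_SSL'] = 'false'  # caution: a non-empty string is truthy
print(bool(os.environ.get('BIGCHAINDB_DATABASE_SSL', False)))  # True
```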
@@ -16,7 +16,9 @@ logger = logging.getLogger(__name__)


def connect(backend=None, host=None, port=None, name=None, max_tries=None,
            connection_timeout=None, replicaset=None, ssl=None, login=None, password=None):
            connection_timeout=None, replicaset=None, ssl=None, login=None, password=None,
            ca_cert=None, certfile=None, keyfile=None, keyfile_passphrase=None,
            crlfile=None):
    """Create a new connection to the database backend.

    All arguments default to the current configuration's values if not
@@ -38,6 +40,8 @@ def connect(backend=None, host=None, port=None, name=None, max_tries=None,
        :exc:`~ConnectionError`: If the connection to the database fails.
        :exc:`~ConfigurationError`: If the given (or defaulted) :attr:`backend`
            is not supported or could not be loaded.
        :exc:`~AuthenticationError`: If there is a OperationFailure due to
            Authentication failure after connecting to the database.
    """

    backend = backend or bigchaindb.config['database']['backend']
@@ -53,6 +57,11 @@ def connect(backend=None, host=None, port=None, name=None, max_tries=None,
    ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
    login = login or bigchaindb.config['database'].get('login')
    password = password or bigchaindb.config['database'].get('password')
    ca_cert = ca_cert or bigchaindb.config['database'].get('ca_cert', None)
    certfile = certfile or bigchaindb.config['database'].get('certfile', None)
    keyfile = keyfile or bigchaindb.config['database'].get('keyfile', None)
    keyfile_passphrase = keyfile_passphrase or bigchaindb.config['database'].get('keyfile_passphrase', None)
    crlfile = crlfile or bigchaindb.config['database'].get('crlfile', None)

    try:
        module_name, _, class_name = BACKENDS[backend].rpartition('.')
@@ -66,13 +75,15 @@ def connect(backend=None, host=None, port=None, name=None, max_tries=None,
    logger.debug('Connection: {}'.format(Class))
    return Class(host=host, port=port, dbname=dbname,
                 max_tries=max_tries, connection_timeout=connection_timeout,
                 replicaset=replicaset, ssl=ssl, login=login, password=password)
                 replicaset=replicaset, ssl=ssl, login=login, password=password,
                 ca_cert=ca_cert, certfile=certfile, keyfile=keyfile,
                 keyfile_passphrase=keyfile_passphrase, crlfile=crlfile)


class Connection:
    """Connection class interface.

    All backend implementations should provide a connection class that
    All backend implementations should provide a connection class that inherits
    from and implements this class.
    """
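A hedged usage sketch of the extended `connect()` signature above; the certificate paths are placeholders, and every argument that is omitted falls back to `bigchaindb.config['database']`:

```python
from bigchaindb.backend.connection import connect

conn = connect(backend='mongodb',
               host='localhost', port=27017, name='bigchain',
               replicaset='bigchain-rs',
               ssl=True,
               ca_cert='/path/to/ca.crt',        # hypothetical paths
               certfile='/path/to/client.pem',
               keyfile='/path/to/client.pem',
               crlfile='/path/to/crl.pem',
               login='CN=client,OU=...,O=...')   # with X.509 auth, the login is the cert subject
```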
@@ -9,10 +9,6 @@ class ConnectionError(BackendError):
    """Exception raised when the connection to the backend fails."""


class AuthenticationError(ConnectionError):
    """Exception raised when MongoDB Authentication fails"""


class OperationError(BackendError):
    """Exception raised when a backend operation fails."""
@@ -1,5 +1,6 @@
import time
import logging
from ssl import CERT_REQUIRED

import pymongo

@@ -8,8 +9,7 @@ from bigchaindb.utils import Lazy
from bigchaindb.common.exceptions import ConfigurationError
from bigchaindb.backend.exceptions import (DuplicateKeyError,
                                           OperationError,
                                           ConnectionError,
                                           AuthenticationError)
                                           ConnectionError)
from bigchaindb.backend.connection import Connection

logger = logging.getLogger(__name__)
@@ -17,7 +17,10 @@ logger = logging.getLogger(__name__)

class MongoDBConnection(Connection):

    def __init__(self, replicaset=None, ssl=None, login=None, password=None, **kwargs):
    def __init__(self, replicaset=None, ssl=None, login=None, password=None,
                 ca_cert=None, certfile=None, keyfile=None,
                 keyfile_passphrase=None, crlfile=None, **kwargs):

        """Create a new Connection instance.

        Args:
@@ -32,6 +35,11 @@ class MongoDBConnection(Connection):
        self.ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
        self.login = login or bigchaindb.config['database'].get('login')
        self.password = password or bigchaindb.config['database'].get('password')
        self.ca_cert = ca_cert or bigchaindb.config['database'].get('ca_cert', None)
        self.certfile = certfile or bigchaindb.config['database'].get('certfile', None)
        self.keyfile = keyfile or bigchaindb.config['database'].get('keyfile', None)
        self.keyfile_passphrase = keyfile_passphrase or bigchaindb.config['database'].get('keyfile_passphrase', None)
        self.crlfile = crlfile or bigchaindb.config['database'].get('crlfile', None)

    @property
    def db(self):
@@ -69,50 +77,114 @@ class MongoDBConnection(Connection):
        Raises:
            :exc:`~ConnectionError`: If the connection to the database
                fails.
            :exc:`~AuthenticationError`: If there is a OperationFailure due to
                Authentication failure after connecting to the database.
            :exc:`~ConfigurationError`: If there is a ConfigurationError while
                connecting to the database.
        """

        try:
            # we should only return a connection if the replica set is
            # initialized. initialize_replica_set will check if the
            # replica set is initialized else it will initialize it.
            initialize_replica_set(self.host, self.port, self.connection_timeout,
                                   self.dbname, self.ssl, self.login, self.password)
            initialize_replica_set(self.host,
                                   self.port,
                                   self.connection_timeout,
                                   self.dbname,
                                   self.ssl,
                                   self.login,
                                   self.password,
                                   self.ca_cert,
                                   self.certfile,
                                   self.keyfile,
                                   self.keyfile_passphrase,
                                   self.crlfile)

            # FYI: this might raise a `ServerSelectionTimeoutError`,
            # that is a subclass of `ConnectionFailure`.
            # FYI: the connection process might raise a
            # `ServerSelectionTimeoutError`, that is a subclass of
            # `ConnectionFailure`.
            # The presence of ca_cert, certfile, keyfile, crlfile implies the
            # use of certificates for TLS connectivity.
            if self.ca_cert is None or self.certfile is None or \
                    self.keyfile is None or self.crlfile is None:
                client = pymongo.MongoClient(self.host,
                                             self.port,
                                             replicaset=self.replicaset,
                                             serverselectiontimeoutms=self.connection_timeout,
                                             ssl=self.ssl)

                if self.login is not None and self.password is not None:
                    client[self.dbname].authenticate(self.login, self.password)
            else:
                logger.info('Connecting to MongoDB over TLS/SSL...')
                client = pymongo.MongoClient(self.host,
                                             self.port,
                                             replicaset=self.replicaset,
                                             serverselectiontimeoutms=self.connection_timeout,
                                             ssl=self.ssl,
                                             ssl_ca_certs=self.ca_cert,
                                             ssl_certfile=self.certfile,
                                             ssl_keyfile=self.keyfile,
                                             ssl_pem_passphrase=self.keyfile_passphrase,
                                             ssl_crlfile=self.crlfile,
                                             ssl_cert_reqs=CERT_REQUIRED)
                if self.login is not None:
                    client[self.dbname].authenticate(self.login,
                                                     mechanism='MONGODB-X509')

            return client

        # `initialize_replica_set` might raise `ConnectionFailure` or `OperationFailure`.
        # `initialize_replica_set` might raise `ConnectionFailure`,
        # `OperationFailure` or `ConfigurationError`.
        except (pymongo.errors.ConnectionFailure,
                pymongo.errors.OperationFailure) as exc:
            if "Authentication fail" in str(exc):
                raise AuthenticationError() from exc
            raise ConnectionError() from exc
            logger.info('Exception in _connect(): {}'.format(exc))
            raise ConnectionError(str(exc)) from exc
        except pymongo.errors.ConfigurationError as exc:
            raise ConfigurationError from exc


def initialize_replica_set(host, port, connection_timeout, dbname, ssl, login, password):
def initialize_replica_set(host, port, connection_timeout, dbname, ssl, login,
                           password, ca_cert, certfile, keyfile,
                           keyfile_passphrase, crlfile):
    """Initialize a replica set. If already initialized skip."""

    # Setup a MongoDB connection
    # The reason we do this instead of `backend.connect` is that
    # `backend.connect` will connect you to a replica set but this fails if
    # you try to connect to a replica set that is not yet initialized
    conn = pymongo.MongoClient(host=host,
                               port=port,
    try:
        # The presence of ca_cert, certfile, keyfile, crlfile implies the
        # use of certificates for TLS connectivity.
        if ca_cert is None or certfile is None or keyfile is None or \
                crlfile is None:
            conn = pymongo.MongoClient(host,
                                       port,
                                       serverselectiontimeoutms=connection_timeout,
                                       ssl=ssl)

            if login is not None and password is not None:
                conn[dbname].authenticate(login, password)
        else:
            logger.info('Connecting to MongoDB over TLS/SSL...')
            conn = pymongo.MongoClient(host,
                                       port,
                                       serverselectiontimeoutms=connection_timeout,
                                       ssl=ssl,
                                       ssl_ca_certs=ca_cert,
                                       ssl_certfile=certfile,
                                       ssl_keyfile=keyfile,
                                       ssl_pem_passphrase=keyfile_passphrase,
                                       ssl_crlfile=crlfile,
                                       ssl_cert_reqs=CERT_REQUIRED)
            if login is not None:
                logger.info('Authenticating to the database...')
                conn[dbname].authenticate(login, mechanism='MONGODB-X509')

    except (pymongo.errors.ConnectionFailure,
            pymongo.errors.OperationFailure) as exc:
        logger.info('Exception in _connect(): {}'.format(exc))
        raise ConnectionError(str(exc)) from exc
    except pymongo.errors.ConfigurationError as exc:
        raise ConfigurationError from exc

    _check_replica_set(conn)
    host = '{}:{}'.format(bigchaindb.config['database']['host'],
@@ -129,6 +201,10 @@ def initialize_replica_set(host, port, connection_timeout, dbname, ssl, login, p
    else:
        _wait_for_replica_set_initialization(conn)
        logger.info('Initialized replica set')
    finally:
        if conn is not None:
            logger.info('Closing initial connection to MongoDB')
            conn.close()


def _check_replica_set(conn):
@@ -157,8 +157,8 @@ def get_spent(conn, transaction_id, output):
        {'$match': {
            'block.transactions.inputs': {
                '$elemMatch': {
                    'fulfills.txid': transaction_id,
                    'fulfills.output': output,
                    'fulfills.transaction_id': transaction_id,
                    'fulfills.output_index': output,
                },
            },
        }},
@@ -166,8 +166,8 @@ def get_spent(conn, transaction_id, output):
        {'$match': {
            'block.transactions.inputs': {
                '$elemMatch': {
                    'fulfills.txid': transaction_id,
                    'fulfills.output': output,
                    'fulfills.transaction_id': transaction_id,
                    'fulfills.output_index': output,
                },
            },
        }},
@@ -68,11 +68,11 @@ def create_bigchain_secondary_index(conn, dbname):
        .create_index('block.transactions.outputs.public_keys',
                      name='outputs')

    # secondary index on inputs/transaction links (txid, output)
    # secondary index on inputs/transaction links (transaction_id, output)
    conn.conn[dbname]['bigchain']\
        .create_index([
            ('block.transactions.inputs.fulfills.txid', ASCENDING),
            ('block.transactions.inputs.fulfills.output', ASCENDING),
            ('block.transactions.inputs.fulfills.transaction_id', ASCENDING),
            ('block.transactions.inputs.fulfills.output_index', ASCENDING),
        ], name='inputs')
@@ -122,7 +122,8 @@ def get_spent(connection, transaction_id, output):
        .get_all([transaction_id, output], index='inputs')
        .concat_map(lambda doc: doc['block']['transactions'])
        .filter(lambda transaction: transaction['inputs'].contains(
            lambda input_: input_['fulfills'] == {'txid': transaction_id, 'output': output})))
            lambda input_: input_['fulfills'] == {
                'transaction_id': transaction_id, 'output_index': output})))


@register_query(RethinkDBConnection)
@@ -286,7 +287,8 @@ def unwind_block_transactions(block):
def get_spending_transactions(connection, links):
    query = (
        r.table('bigchain')
        .get_all(*[(l['txid'], l['output']) for l in links], index='inputs')
        .get_all(*[(l['transaction_id'], l['output_index']) for l in links],
                 index='inputs')
        .concat_map(unwind_block_transactions)
        # filter transactions spending output
        .filter(lambda doc: r.expr(links).set_intersection(
@@ -79,16 +79,16 @@ def create_bigchain_secondary_index(connection, dbname):
                      .concat_map(lambda tx: tx['outputs']['public_keys'])
                      .reduce(lambda l, r: l + r), multi=True))

    # secondary index on inputs/transaction links (txid, output)
    # secondary index on inputs/transaction links (transaction_id, output)
    connection.run(
        r.db(dbname)
        .table('bigchain')
        .index_create('inputs',
                      r.row['block']['transactions']
                      .concat_map(lambda tx: tx['inputs']['fulfills'])
                      .with_fields('txid', 'output')
                      .map(lambda fulfills: [fulfills['txid'],
                                             fulfills['output']]),
                      .with_fields('transaction_id', 'output_index')
                      .map(lambda fulfills: [fulfills['transaction_id'],
                                             fulfills['output_index']]),
                      multi=True))

    # wait for rethinkdb to finish creating secondary indexes
@@ -96,7 +96,7 @@ def run_configure(args, skip_if_exists=False):
        val = conf['server'][key]
        conf['server'][key] = input_on_stderr('API Server {}? (default `{}`): '.format(key, val), val)

    for key in ('host', 'port'):
    for key in ('scheme', 'host', 'port'):
        val = conf['wsserver'][key]
        conf['wsserver'][key] = input_on_stderr('WebSocket Server {}? (default `{}`): '.format(key, val), val)
@@ -106,3 +106,7 @@ class SybilError(ValidationError):

class DuplicateTransaction(ValidationError):
    """Raised if a duplicated transaction is found"""


class ThresholdTooDeep(ValidationError):
    """Raised if threshold condition is too deep"""
@@ -56,7 +56,7 @@ properties:
      See: `Metadata`_.
  version:
    type: string
    pattern: "^0\\."
    pattern: "^1\\.0$"
    description: |
      BigchainDB transaction schema version.
definitions:
@@ -150,11 +150,10 @@ definitions:
      - uri
      properties:
        details:
          type: object
          additionalProperties: true
          "$ref": "#/definitions/condition_details"
        uri:
          type: string
          pattern: "^cc:([1-9a-f][0-9a-f]{0,3}|0):[1-9a-f][0-9a-f]{0,15}:[a-zA-Z0-9_-]{0,86}:([1-9][0-9]{0,17}|0)$"
          pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})?(.+)$"
    public_keys:
      "$ref": "#/definitions/public_keys"
      description: |
@@ -174,28 +173,14 @@ definitions:
          description: |
            List of public keys of the previous owners of the asset.
        fulfillment:
          anyOf:
          - type: object
            additionalProperties: false
            properties:
              bitmask:
                type: integer
              public_key:
                type: string
              type:
                type: string
              signature:
                anyOf:
                - type: string
                - type: 'null'
              type_id:
                type: integer
          description: |
            Fulfillment of an `Output.condition`_, or, put a different way, a payload
            that satisfies the condition of a previous output to prove that the
            creator(s) of this transaction have control over the listed asset.
          anyOf:
          - type: string
            pattern: "^cf:([1-9a-f][0-9a-f]{0,3}|0):[a-zA-Z0-9_-]*$"
            pattern: "^[a-zA-Z0-9_-]*$"
          - "$ref": "#/definitions/condition_details"
        fulfills:
          anyOf:
          - type: 'object'
@@ -203,14 +188,14 @@ definitions:
              Reference to the output that is being spent.
            additionalProperties: false
            required:
            - output
            - txid
            - output_index
            - transaction_id
            properties:
              output:
              output_index:
                "$ref": "#/definitions/offset"
                description: |
                  Index of the output containing the condition being fulfilled
              txid:
              transaction_id:
                "$ref": "#/definitions/sha3_hexdigest"
                description: |
                  Transaction ID containing the output to spend
@@ -224,3 +209,37 @@ definitions:
        additionalProperties: true
        minProperties: 1
      - type: 'null'
  condition_details:
    description: |
      Details needed to reconstruct the condition associated with an output.
      Currently, BigchainDB only supports ed25519 and threshold condition types.
    anyOf:
    - type: object
      additionalProperties: false
      required:
      - type
      - public_key
      properties:
        type:
          type: string
          pattern: "^ed25519-sha-256$"
        public_key:
          "$ref": "#/definitions/base58"
    - type: object
      additionalProperties: false
      required:
      - type
      - threshold
      - subconditions
      properties:
        type:
          type: "string"
          pattern: "^threshold-sha-256$"
        threshold:
          type: integer
          minimum: 1
          maximum: 100
        subconditions:
          type: array
          items:
            "$ref": "#/definitions/condition_details"
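For orientation, objects matching the new `condition_details` definition above would look roughly like the following (illustrative values only; the public key is a placeholder for a base58-encoded Ed25519 key):

```python
ed25519_details = {
    'type': 'ed25519-sha-256',
    'public_key': '<base58-encoded Ed25519 public key>',
}

threshold_details = {
    'type': 'threshold-sha-256',
    'threshold': 2,
    'subconditions': [ed25519_details, ed25519_details],  # nests recursively
}
```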
@ -14,6 +14,8 @@ properties:
        - type: object
          additionalProperties: true
        - type: 'null'
    required:
    - data
  inputs:
    type: array
    title: "Transaction inputs"
@ -12,6 +12,8 @@ properties:
      "$ref": "#/definitions/sha3_hexdigest"
      description: |
        ID of the transaction that created the asset.
    required:
    - id
  inputs:
    type: array
    title: "Transaction inputs"
@@ -1,16 +1,17 @@
from copy import deepcopy
from functools import reduce

from cryptoconditions import (Fulfillment, ThresholdSha256Fulfillment,
                              Ed25519Fulfillment)
from cryptoconditions.exceptions import ParsingError
import base58
from cryptoconditions import Fulfillment, ThresholdSha256, Ed25519Sha256
from cryptoconditions.exceptions import (
    ParsingError, ASN1DecodeError, ASN1EncodeError, UnsupportedTypeError)

from bigchaindb.common.crypto import PrivateKey, hash_data
from bigchaindb.common.exceptions import (KeypairMismatchException,
                                          InvalidHash, InvalidSignature,
                                          AmountError, AssetIdMismatch)
from bigchaindb.common.utils import serialize, gen_timestamp
import bigchaindb.version
                                          AmountError, AssetIdMismatch,
                                          ThresholdTooDeep)
from bigchaindb.common.utils import serialize


class Input(object):
@@ -65,16 +66,8 @@ class Input(object):
        """
        try:
            fulfillment = self.fulfillment.serialize_uri()
        except (TypeError, AttributeError):
            # NOTE: When a non-signed transaction is casted to a dict,
            # `self.inputs` value is lost, as in the node's
            # transaction model that is saved to the database, does not
            # account for its dictionary form but just for its signed uri
            # form.
            # Hence, when a non-signed fulfillment is to be cast to a
            # dict, we just call its internal `to_dict` method here and
            # its `from_dict` method in `Fulfillment.from_dict`.
            fulfillment = self.fulfillment.to_dict()
        except (TypeError, AttributeError, ASN1EncodeError):
            fulfillment = _fulfillment_to_details(self.fulfillment)

        try:
            # NOTE: `self.fulfills` can be `None` and that's fine
@@ -114,19 +107,74 @@ class Input(object):
        Raises:
            InvalidSignature: If an Input's URI couldn't be parsed.
        """
        fulfillment = data['fulfillment']
        if not isinstance(fulfillment, Fulfillment):
            try:
                fulfillment = Fulfillment.from_uri(data['fulfillment'])
            except ValueError:
                # TODO FOR CC: Throw an `InvalidSignature` error in this case.
            except ASN1DecodeError:
                # TODO Remove as it is legacy code, and simply fall back on
                # ASN1DecodeError
                raise InvalidSignature("Fulfillment URI couldn't been parsed")
            except TypeError:
                # NOTE: See comment about this special case in
                # `Input.to_dict`
                fulfillment = Fulfillment.from_dict(data['fulfillment'])
        fulfillment = _fulfillment_from_details(data['fulfillment'])
        fulfills = TransactionLink.from_dict(data['fulfills'])
        return cls(fulfillment, data['owners_before'], fulfills)


def _fulfillment_to_details(fulfillment):
    """
    Encode a fulfillment as a details dictionary

    Args:
        fulfillment: Crypto-conditions Fulfillment object
    """

    if fulfillment.type_name == 'ed25519-sha-256':
        return {
            'type': 'ed25519-sha-256',
            'public_key': base58.b58encode(fulfillment.public_key),
        }

    if fulfillment.type_name == 'threshold-sha-256':
        subconditions = [
            _fulfillment_to_details(cond['body'])
            for cond in fulfillment.subconditions
        ]
        return {
            'type': 'threshold-sha-256',
            'threshold': fulfillment.threshold,
            'subconditions': subconditions,
        }

    raise UnsupportedTypeError(fulfillment.type_name)


def _fulfillment_from_details(data):
    """
    Load a fulfillment for a signing spec dictionary

    Args:
        data: tx.output[].condition.details dictionary
    """
    if data['type'] == 'ed25519-sha-256':
        public_key = base58.b58decode(data['public_key'])
        return Ed25519Sha256(public_key=public_key)

    if data['type'] == 'threshold-sha-256':
        try:
            threshold = ThresholdSha256(data['threshold'])
            for cond in data['subconditions']:
                cond = _fulfillment_from_details(cond)
                threshold.add_subfulfillment(cond)
            return threshold
        except RecursionError:
            raise ThresholdTooDeep()

    raise UnsupportedTypeError(data.get('type'))
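A rough round-trip sketch for the two helpers above, assuming they can be imported from this module; the 32-byte public key is a dummy value:

```python
import base58
from bigchaindb.common.transaction import (_fulfillment_from_details,
                                           _fulfillment_to_details)

details = {
    'type': 'threshold-sha-256',
    'threshold': 1,
    'subconditions': [
        {'type': 'ed25519-sha-256',
         'public_key': base58.b58encode(b'\x01' * 32)},  # dummy key bytes
    ],
}

ffill = _fulfillment_from_details(details)   # ThresholdSha256 with one Ed25519Sha256 sub-fulfillment
print(type(ffill).__name__)                  # ThresholdSha256
print(_fulfillment_to_details(ffill))        # should reproduce the `details` dict above
```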
class TransactionLink(object):
    """An object for unidirectional linking to a Transaction's Output.

@@ -175,7 +223,7 @@ class TransactionLink(object):
            :class:`~bigchaindb.common.transaction.TransactionLink`
        """
        try:
            return cls(link['txid'], link['output'])
            return cls(link['transaction_id'], link['output_index'])
        except TypeError:
            return cls()

@@ -189,8 +237,8 @@ class TransactionLink(object):
            return None
        else:
            return {
                'txid': self.txid,
                'output': self.output,
                'transaction_id': self.txid,
                'output_index': self.output,
            }

    def to_uri(self, path=''):
@@ -259,7 +307,7 @@ class Output(object):
        # and fulfillment!
        condition = {}
        try:
            condition['details'] = self.fulfillment.to_dict()
            condition['details'] = _fulfillment_to_details(self.fulfillment)
        except AttributeError:
            pass

@@ -310,13 +358,14 @@ class Output(object):
            raise ValueError('`public_keys` needs to contain at least one'
                             'owner')
        elif len(public_keys) == 1 and not isinstance(public_keys[0], list):
            try:
                ffill = Ed25519Fulfillment(public_key=public_keys[0])
            except TypeError:
            if isinstance(public_keys[0], Fulfillment):
                ffill = public_keys[0]
            else:
                ffill = Ed25519Sha256(
                    public_key=base58.b58decode(public_keys[0]))
            return cls(ffill, public_keys, amount=amount)
        else:
            initial_cond = ThresholdSha256Fulfillment(threshold=threshold)
            initial_cond = ThresholdSha256(threshold=threshold)
            threshold_cond = reduce(cls._gen_condition, public_keys,
                                    initial_cond)
            return cls(threshold_cond, public_keys, amount=amount)
@@ -331,13 +380,13 @@ class Output(object):
            :meth:`~.Output.generate`.

        Args:
            initial (:class:`cryptoconditions.ThresholdSha256Fulfillment`):
            initial (:class:`cryptoconditions.ThresholdSha256`):
                A Condition representing the overall root.
            new_public_keys (:obj:`list` of :obj:`str`|str): A list of new
                owners or a single new owner.

        Returns:
            :class:`cryptoconditions.ThresholdSha256Fulfillment`:
            :class:`cryptoconditions.ThresholdSha256`:
        """
        try:
            threshold = len(new_public_keys)
@@ -345,7 +394,7 @@ class Output(object):
            threshold = None

        if isinstance(new_public_keys, list) and len(new_public_keys) > 1:
            ffill = ThresholdSha256Fulfillment(threshold=threshold)
            ffill = ThresholdSha256(threshold=threshold)
            reduce(cls._gen_condition, new_public_keys, ffill)
        elif isinstance(new_public_keys, list) and len(new_public_keys) <= 1:
            raise ValueError('Sublist cannot contain single owner')
@@ -354,16 +403,17 @@ class Output(object):
                new_public_keys = new_public_keys.pop()
            except AttributeError:
                pass
            try:
                ffill = Ed25519Fulfillment(public_key=new_public_keys)
            except TypeError:
                # NOTE: Instead of submitting base58 encoded addresses, a user
                # of this class can also submit fully instantiated
                # Cryptoconditions. In the case of casting
                # `new_public_keys` to a Ed25519Fulfillment with the
                # result of a `TypeError`, we're assuming that
                # `new_public_keys` is a Cryptocondition then.
            if isinstance(new_public_keys, Fulfillment):
                ffill = new_public_keys
            else:
                ffill = Ed25519Sha256(
                    public_key=base58.b58decode(new_public_keys))
        initial.add_subfulfillment(ffill)
        return initial

@@ -384,7 +434,7 @@ class Output(object):
            :class:`~bigchaindb.common.transaction.Output`
        """
        try:
            fulfillment = Fulfillment.from_dict(data['condition']['details'])
            fulfillment = _fulfillment_from_details(data['condition']['details'])
        except KeyError:
            # NOTE: Hashlock condition case
            fulfillment = data['condition']['uri']
@@ -415,13 +465,13 @@ class Transaction(object):
            ``id`` property.
        metadata (dict):
            Metadata to be stored along with the Transaction.
        version (int): Defines the version number of a Transaction.
        version (string): Defines the version number of a Transaction.
    """
    CREATE = 'CREATE'
    TRANSFER = 'TRANSFER'
    GENESIS = 'GENESIS'
    ALLOWED_OPERATIONS = (CREATE, TRANSFER, GENESIS)
    VERSION = '.'.join(bigchaindb.version.__short_version__.split('.')[:2])
    VERSION = '1.0'

    def __init__(self, operation, asset, inputs=None, outputs=None,
                 metadata=None, version=None):
@@ -441,7 +491,7 @@ class Transaction(object):
                lock.
            metadata (dict): Metadata to be stored along with the
                Transaction.
            version (int): Defines the version number of a Transaction.
            version (string): Defines the version number of a Transaction.
        """
        if operation not in Transaction.ALLOWED_OPERATIONS:
            allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS)
@@ -661,7 +711,7 @@ class Transaction(object):
            This method works only for the following Cryptoconditions
            currently:
                - Ed25519Fulfillment
                - ThresholdSha256Fulfillment
                - ThresholdSha256
            Furthermore, note that all keys required to fully sign the
            Transaction have to be passed to this method. A subset of all
            will cause this method to fail.
@@ -712,7 +762,7 @@ class Transaction(object):
            This method works only for the following Cryptoconditions
            currently:
                - Ed25519Fulfillment
                - ThresholdSha256Fulfillment.
                - ThresholdSha256.

            Args:
                input_ (:class:`~bigchaindb.common.transaction.
@@ -720,10 +770,10 @@ class Transaction(object):
                message (str): The message to be signed
                key_pairs (dict): The keys to sign the Transaction with.
        """
        if isinstance(input_.fulfillment, Ed25519Fulfillment):
        if isinstance(input_.fulfillment, Ed25519Sha256):
            return cls._sign_simple_signature_fulfillment(input_, message,
                                                          key_pairs)
        elif isinstance(input_.fulfillment, ThresholdSha256Fulfillment):
        elif isinstance(input_.fulfillment, ThresholdSha256):
            return cls._sign_threshold_signature_fulfillment(input_, message,
                                                             key_pairs)
        else:
@@ -749,7 +799,10 @@ class Transaction(object):
        try:
            # cryptoconditions makes no assumptions of the encoding of the
            # message to sign or verify. It only accepts bytestrings
            input_.fulfillment.sign(message.encode(), key_pairs[public_key])
            input_.fulfillment.sign(
                message.encode(),
                base58.b58decode(key_pairs[public_key].encode()),
            )
        except KeyError:
            raise KeypairMismatchException('Public key {} is not a pair to '
                                           'any of the private keys'
@@ -758,7 +811,7 @@ class Transaction(object):

    @classmethod
    def _sign_threshold_signature_fulfillment(cls, input_, message, key_pairs):
        """Signs a ThresholdSha256Fulfillment.
        """Signs a ThresholdSha256.

        Args:
            input_ (:class:`~bigchaindb.common.transaction.
@@ -778,7 +831,8 @@ class Transaction(object):
            # TODO FOR CC: `get_subcondition` is singular. One would not
            # expect to get a list back.
            ccffill = input_.fulfillment
            subffills = ccffill.get_subcondition_from_vk(owner_before)
            subffills = ccffill.get_subcondition_from_vk(
                base58.b58decode(owner_before))
            if not subffills:
                raise KeypairMismatchException('Public key {} cannot be found '
                                               'in the fulfillment'
@@ -793,7 +847,7 @@ class Transaction(object):
            # cryptoconditions makes no assumptions of the encoding of the
            # message to sign or verify. It only accepts bytestrings
            for subffill in subffills:
                subffill.sign(message.encode(), private_key)
                subffill.sign(message.encode(), base58.b58decode(private_key.encode()))
            return input_

    def inputs_valid(self, outputs=None):
@@ -882,7 +936,8 @@ class Transaction(object):
        ccffill = input_.fulfillment
        try:
            parsed_ffill = Fulfillment.from_uri(ccffill.serialize_uri())
        except (TypeError, ValueError, ParsingError):
        except (TypeError, ValueError,
                ParsingError, ASN1DecodeError, ASN1EncodeError):
            return False

        if operation in (Transaction.CREATE, Transaction.GENESIS):
@@ -897,8 +952,7 @@ class Transaction(object):

        # cryptoconditions makes no assumptions of the encoding of the
        # message to sign or verify. It only accepts bytestrings
        ffill_valid = parsed_ffill.validate(message=tx_serialized.encode(),
                                            now=gen_timestamp())
        ffill_valid = parsed_ffill.validate(message=tx_serialized.encode())
        return output_valid and ffill_valid

    def to_dict(self):
@@ -940,7 +994,7 @@ class Transaction(object):
        tx_dict = deepcopy(tx_dict)
        for input_ in tx_dict['inputs']:
            # NOTE: Not all Cryptoconditions return a `signature` key (e.g.
            # ThresholdSha256Fulfillment), so setting it to `None` in any
            # ThresholdSha256), so setting it to `None` in any
            # case could yield incorrect signatures. This is why we only
            # set it to `None` if it's set in the dict.
            input_['fulfillment'] = None
@@ -1,4 +1,5 @@
import random
import statsd
from time import time

from bigchaindb import exceptions as core_exceptions
@@ -71,6 +72,8 @@ class Bigchain(object):
        if not self.me or not self.me_private:
            raise exceptions.KeypairNotFoundException()

        self.statsd = statsd.StatsClient(bigchaindb.config['graphite']['host'])

    federation = property(lambda self: set(self.nodes_except_me + [self.me]))
    """ Set of federation member public keys """

@@ -399,20 +402,33 @@ class Bigchain(object):
            :obj:`list` of TransactionLink: list of ``txid`` s and ``output`` s
            pointing to another transaction's condition
        """
        return self.get_outputs_filtered(owner, include_spent=False)
        return self.get_outputs_filtered(owner, spent=False)

    @property
    def fastquery(self):
        return fastquery.FastQuery(self.connection, self.me)

    def get_outputs_filtered(self, owner, include_spent=True):
    def get_outputs_filtered(self, owner, spent=None):
        """
        Get a list of output links filtered on some criteria

        Args:
            owner (str): base58 encoded public_key.
            spent (bool): If ``True`` return only the spent outputs. If
                          ``False`` return only unspent outputs. If spent is
                          not specified (``None``) return all outputs.

        Returns:
            :obj:`list` of TransactionLink: list of ``txid`` s and ``output`` s
            pointing to another transaction's condition
        """
        outputs = self.fastquery.get_outputs_by_public_key(owner)
        if not include_spent:
            outputs = self.fastquery.filter_spent_outputs(outputs)
        if spent is None:
            return outputs
        elif spent is True:
            return self.fastquery.filter_unspent_outputs(outputs)
        elif spent is False:
            return self.fastquery.filter_spent_outputs(outputs)

    def get_transactions_filtered(self, asset_id, operation=None):
        """
@ -68,3 +68,18 @@ class FastQuery:
|
||||
for tx in txs
|
||||
for input_ in tx['inputs']}
|
||||
return [ff for ff in outputs if ff not in spends]
|
||||
|
||||
def filter_unspent_outputs(self, outputs):
|
||||
"""
|
||||
Remove outputs that have not been spent
|
||||
|
||||
Args:
|
||||
outputs: list of TransactionLink
|
||||
"""
|
||||
links = [o.to_dict() for o in outputs]
|
||||
res = query.get_spending_transactions(self.connection, links)
|
||||
txs = [tx for _, tx in self.filter_valid_items(res)]
|
||||
spends = {TransactionLink.from_dict(input_['fulfills'])
|
||||
for tx in txs
|
||||
for input_ in tx['inputs']}
|
||||
return [ff for ff in outputs if ff in spends]
|
||||
|
@ -117,6 +117,8 @@ class BlockPipeline:
|
||||
logger.info('Write new block %s with %s transactions',
|
||||
block.id, len(block.transactions))
|
||||
self.bigchain.write_block(block)
|
||||
self.bigchain.statsd.incr('pipelines.block.throughput',
|
||||
len(block.transactions))
|
||||
return block
|
||||
|
||||
def delete_tx(self, block):
|
||||
|
@ -137,9 +137,9 @@ class Vote:
|
||||
self.last_voted_id = block_id
|
||||
del self.counters[block_id]
|
||||
del self.validity[block_id]
|
||||
return vote
|
||||
return vote, num_tx
|
||||
|
||||
def write_vote(self, vote):
|
||||
def write_vote(self, vote, num_tx):
|
||||
"""Write vote to the database.
|
||||
|
||||
Args:
|
||||
@ -149,6 +149,7 @@ class Vote:
|
||||
logger.info("Voting '%s' for block %s", validity,
|
||||
vote['vote']['voting_for_block'])
|
||||
self.bigchain.write_vote(vote)
|
||||
self.bigchain.statsd.incr('pipelines.vote.throughput', num_tx)
|
||||
return vote
|
||||
|
||||
|
||||
|
@ -96,8 +96,8 @@ def condition_details_has_owner(condition_details, owner):
|
||||
bool: True if the public key is found in the condition details, False otherwise
|
||||
|
||||
"""
|
||||
if 'subfulfillments' in condition_details:
|
||||
result = condition_details_has_owner(condition_details['subfulfillments'], owner)
|
||||
if 'subconditions' in condition_details:
|
||||
result = condition_details_has_owner(condition_details['subconditions'], owner)
|
||||
if result:
|
||||
return True
|
||||
|
||||
|
@ -1,2 +1,2 @@
|
||||
__version__ = '0.11.0.dev'
|
||||
__short_version__ = '0.11.dev'
|
||||
__version__ = '1.0.0.dev'
|
||||
__short_version__ = '1.0.dev'
|
||||
|
@ -61,20 +61,7 @@ def create_app(*, debug=False, threads=1):
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
CORS(app,
|
||||
allow_headers=(
|
||||
'x-requested-with',
|
||||
'content-type',
|
||||
'accept',
|
||||
'origin',
|
||||
'authorization',
|
||||
'x-csrftoken',
|
||||
'withcredentials',
|
||||
'cache-control',
|
||||
'cookie',
|
||||
'session-id',
|
||||
),
|
||||
supports_credentials=True)
|
||||
CORS(app)
|
||||
|
||||
app.debug = debug
|
||||
|
||||
|
@ -3,7 +3,7 @@ Common classes and methods for API handlers
|
||||
"""
|
||||
import logging
|
||||
|
||||
from flask import jsonify, request
|
||||
from flask import jsonify
|
||||
|
||||
from bigchaindb import config
|
||||
|
||||
@ -21,14 +21,9 @@ def make_error(status_code, message=None):
|
||||
return response
|
||||
|
||||
|
||||
def base_url():
|
||||
return '%s://%s/' % (request.environ['wsgi.url_scheme'],
|
||||
request.environ['HTTP_HOST'])
|
||||
|
||||
|
||||
def base_ws_uri():
|
||||
"""Base websocket uri."""
|
||||
# TODO Revisit as this is a workaround to address issue
|
||||
# https://github.com/bigchaindb/bigchaindb/issues/1465.
|
||||
host = request.environ['HTTP_HOST'].split(':')[0]
|
||||
return 'ws://{}:{}'.format(host, config['wsserver']['port'])
|
||||
scheme = config['wsserver']['scheme']
|
||||
host = config['wsserver']['host']
|
||||
port = config['wsserver']['port']
|
||||
return '{}://{}:{}'.format(scheme, host, port)
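
As an illustration of the new behaviour, `base_ws_uri()` now derives the URI purely from configuration rather than from the incoming request; the config values below are assumptions for the example, not asserted defaults:

```python
from bigchaindb.web.views.base import base_ws_uri

# With an (illustrative) wsserver config of
#   {'scheme': 'ws', 'host': 'localhost', 'port': 9985}
# this returns 'ws://localhost:9985', regardless of the incoming HTTP request.
print(base_ws_uri())
```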
|
||||
|
@ -41,12 +41,12 @@ class BlockListApi(Resource):
|
||||
"valid", "invalid", "undecided".
|
||||
"""
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument('tx_id', type=str, required=True)
|
||||
parser.add_argument('transaction_id', type=str, required=True)
|
||||
parser.add_argument('status', type=str, case_sensitive=False,
|
||||
choices=[Bigchain.BLOCK_VALID, Bigchain.BLOCK_INVALID, Bigchain.BLOCK_UNDECIDED])
|
||||
|
||||
args = parser.parse_args(strict=True)
|
||||
tx_id = args['tx_id']
|
||||
tx_id = args['transaction_id']
|
||||
status = args['status']
|
||||
|
||||
pool = current_app.config['bigchain_pool']
|
||||
|
@ -4,7 +4,7 @@ import flask
|
||||
from flask_restful import Resource
|
||||
|
||||
import bigchaindb
|
||||
from bigchaindb.web.views.base import base_url, base_ws_uri
|
||||
from bigchaindb.web.views.base import base_ws_uri
|
||||
from bigchaindb import version
|
||||
from bigchaindb.web.websocket_server import EVENTS_ENDPOINT
|
||||
|
||||
@ -15,12 +15,11 @@ class RootIndex(Resource):
|
||||
'https://docs.bigchaindb.com/projects/server/en/v',
|
||||
version.__version__ + '/'
|
||||
]
|
||||
api_v1_url = base_url() + 'api/v1/'
|
||||
return flask.jsonify({
|
||||
'_links': {
|
||||
'docs': ''.join(docs_url),
|
||||
'api_v1': api_v1_url,
|
||||
'api': {
|
||||
'v1': get_api_v1_info('/api/v1/')
|
||||
},
|
||||
'docs': ''.join(docs_url),
|
||||
'software': 'BigchainDB',
|
||||
'version': version.__version__,
|
||||
'public_key': bigchaindb.config['keypair']['public'],
|
||||
@ -30,19 +29,26 @@ class RootIndex(Resource):
|
||||
|
||||
class ApiV1Index(Resource):
|
||||
def get(self):
|
||||
api_root = base_url() + 'api/v1/'
|
||||
return flask.jsonify(get_api_v1_info('/'))
|
||||
|
||||
|
||||
def get_api_v1_info(api_prefix):
|
||||
"""
|
||||
Return a dict with all the information specific for the v1 of the
|
||||
api.
|
||||
"""
|
||||
websocket_root = base_ws_uri() + EVENTS_ENDPOINT
|
||||
docs_url = [
|
||||
'https://docs.bigchaindb.com/projects/server/en/v',
|
||||
version.__version__,
|
||||
'/http-client-server-api.html',
|
||||
]
|
||||
return flask.jsonify({
|
||||
'_links': {
|
||||
|
||||
return {
|
||||
'docs': ''.join(docs_url),
|
||||
'self': api_root,
|
||||
'statuses': api_root + 'statuses/',
|
||||
'transactions': api_root + 'transactions/',
|
||||
'streams_v1': websocket_root,
|
||||
},
|
||||
})
|
||||
'transactions': '{}transactions/'.format(api_prefix),
|
||||
'statuses': '{}statuses/'.format(api_prefix),
|
||||
'assets': '{}assets/'.format(api_prefix),
|
||||
'outputs': '{}outputs/'.format(api_prefix),
|
||||
'streams': websocket_root
|
||||
}
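
For illustration, with `api_prefix='/api/v1/'` and the `1.0.0.dev` version string from this changeset, the returned dict would look roughly like the following; the websocket host and port are assumptions, not values taken from this diff:

```python
{
    'docs': 'https://docs.bigchaindb.com/projects/server/en/v1.0.0.dev/http-client-server-api.html',
    'transactions': '/api/v1/transactions/',
    'statuses': '/api/v1/statuses/',
    'assets': '/api/v1/assets/',
    'outputs': '/api/v1/outputs/',
    'streams': 'ws://localhost:9985/api/v1/streams/valid_transactions',
}
```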
|
||||
|
@ -15,14 +15,12 @@ class OutputListApi(Resource):
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument('public_key', type=parameters.valid_ed25519,
|
||||
required=True)
|
||||
parser.add_argument('unspent', type=parameters.valid_bool)
|
||||
args = parser.parse_args()
|
||||
parser.add_argument('spent', type=parameters.valid_bool)
|
||||
args = parser.parse_args(strict=True)
|
||||
|
||||
pool = current_app.config['bigchain_pool']
|
||||
include_spent = not args['unspent']
|
||||
|
||||
with pool() as bigchain:
|
||||
outputs = bigchain.get_outputs_filtered(args['public_key'],
|
||||
include_spent)
|
||||
# NOTE: We pass '..' as a path to create a valid relative URI
|
||||
return [u.to_uri('..') for u in outputs]
|
||||
args['spent'])
|
||||
return [{'transaction_id': output.txid, 'output_index': output.output}
|
||||
for output in outputs]
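
A hedged example of calling the reworked endpoint with `requests` (host, port, and the key/id values are placeholders, and the exact boolean literal accepted for `spent` depends on `parameters.valid_bool`):

```python
import requests

resp = requests.get('http://localhost:9984/api/v1/outputs',
                    params={'public_key': '<base58 public key>', 'spent': 'false'})
print(resp.json())
# e.g. [{'transaction_id': '<txid>', 'output_index': 0}]
```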
|
||||
|
@ -17,44 +17,29 @@ class StatusApi(Resource):
|
||||
``<status>`` is one of "valid", "invalid", "undecided", "backlog".
|
||||
"""
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument('tx_id', type=str)
|
||||
parser.add_argument('transaction_id', type=str)
|
||||
parser.add_argument('block_id', type=str)
|
||||
|
||||
args = parser.parse_args(strict=True)
|
||||
tx_id = args['tx_id']
|
||||
tx_id = args['transaction_id']
|
||||
block_id = args['block_id']
|
||||
|
||||
# logical xor - exactly one query argument required
|
||||
if bool(tx_id) == bool(block_id):
|
||||
return make_error(400, 'Provide exactly one query parameter. Choices are: block_id, tx_id')
|
||||
return make_error(400, 'Provide exactly one query parameter. Choices are: block_id, transaction_id')
|
||||
|
||||
pool = current_app.config['bigchain_pool']
|
||||
status, links = None, None
|
||||
status = None
|
||||
|
||||
with pool() as bigchain:
|
||||
if tx_id:
|
||||
status = bigchain.get_status(tx_id)
|
||||
links = {
|
||||
'tx': '/transactions/{}'.format(tx_id)
|
||||
}
|
||||
|
||||
elif block_id:
|
||||
_, status = bigchain.get_block(block_id=block_id, include_status=True)
|
||||
# TODO: enable once blocks endpoint is available
|
||||
# links = {
|
||||
# "block": "/blocks/{}".format(args['block_id'])
|
||||
# }
|
||||
|
||||
if not status:
|
||||
return make_error(404)
|
||||
|
||||
response = {
|
||||
return {
|
||||
'status': status
|
||||
}
|
||||
|
||||
if links:
|
||||
response.update({
|
||||
'_links': links
|
||||
})
|
||||
|
||||
return response
|
||||
|
@ -4,7 +4,7 @@ For more information please refer to the documentation: http://bigchaindb.com/ht
|
||||
"""
|
||||
import logging
|
||||
|
||||
from flask import current_app, request
|
||||
from flask import current_app, request, jsonify
|
||||
from flask_restful import Resource, reqparse
|
||||
|
||||
from bigchaindb.common.exceptions import SchemaValidationError, ValidationError
|
||||
@ -28,9 +28,9 @@ class TransactionApi(Resource):
|
||||
pool = current_app.config['bigchain_pool']
|
||||
|
||||
with pool() as bigchain:
|
||||
tx = bigchain.get_transaction(tx_id)
|
||||
tx, status = bigchain.get_transaction(tx_id, include_status=True)
|
||||
|
||||
if not tx:
|
||||
if not tx or status is not bigchain.TX_VALID:
|
||||
return make_error(404)
|
||||
|
||||
return tx.to_dict()
|
||||
@ -76,6 +76,7 @@ class TransactionListApi(Resource):
|
||||
)
|
||||
|
||||
with pool() as bigchain:
|
||||
bigchain.statsd.incr('web.tx.post')
|
||||
try:
|
||||
bigchain.validate_transaction(tx_obj)
|
||||
except ValidationError as e:
|
||||
@ -86,4 +87,16 @@ class TransactionListApi(Resource):
|
||||
else:
|
||||
bigchain.write_transaction(tx_obj)
|
||||
|
||||
return tx, 202
|
||||
response = jsonify(tx)
|
||||
response.status_code = 202
|
||||
|
||||
# NOTE: According to W3C, sending a relative URI is not allowed in the
|
||||
# Location Header:
|
||||
# - https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||
#
|
||||
# Flask is autocorrecting relative URIs. With the following command,
|
||||
# we're able to prevent this.
|
||||
response.autocorrect_location_header = False
|
||||
status_monitor = '../statuses?transaction_id={}'.format(tx_obj.id)
|
||||
response.headers['Location'] = status_monitor
|
||||
return response
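
A hedged sketch of what a client now sees when posting a transaction (the node URL is a placeholder and `signed_tx` is assumed to be an already-signed transaction dict):

```python
import requests

resp = requests.post('http://localhost:9984/api/v1/transactions/', json=signed_tx)
print(resp.status_code)           # 202: accepted, but not yet validated
print(resp.headers['Location'])   # ../statuses?transaction_id=<the transaction id>
```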
|
||||
|
@ -26,7 +26,7 @@ from bigchaindb.events import EventTypes
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
POISON_PILL = 'POISON_PILL'
|
||||
EVENTS_ENDPOINT = '/api/v1/streams/valid_tx'
|
||||
EVENTS_ENDPOINT = '/api/v1/streams/valid_transactions'
|
||||
|
||||
|
||||
def _multiprocessing_to_asyncio(in_queue, out_queue, loop):
|
||||
@ -91,7 +91,7 @@ class Dispatcher:
|
||||
asset_id = tx['id'] if tx['operation'] == 'CREATE' else tx['asset']['id']
|
||||
data = {'block_id': block['id'],
|
||||
'asset_id': asset_id,
|
||||
'tx_id': tx['id']}
|
||||
'transaction_id': tx['id']}
|
||||
str_buffer.append(json.dumps(data))
|
||||
|
||||
for _, websocket in self.subscribers.items():
|
||||
@ -111,10 +111,15 @@ def websocket_handler(request):
|
||||
|
||||
while True:
|
||||
# Consume input buffer
|
||||
try:
|
||||
msg = yield from websocket.receive()
|
||||
except RuntimeError as e:
|
||||
logger.debug('Websocket exception: %s', str(e))
|
||||
return websocket
|
||||
|
||||
if msg.type == aiohttp.WSMsgType.ERROR:
|
||||
logger.debug('Websocket exception: %s', websocket.exception())
|
||||
return
|
||||
return websocket
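
For completeness, a hedged client-side sketch of consuming the renamed event stream with aiohttp (host and port are assumptions; the path matches the new `EVENTS_ENDPOINT`):

```python
import asyncio

import aiohttp


async def listen(url='ws://localhost:9985/api/v1/streams/valid_transactions'):
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect(url) as ws:
            async for msg in ws:
                # Each message is a JSON string such as
                # {"block_id": "...", "asset_id": "...", "transaction_id": "..."}
                print(msg.data)

asyncio.get_event_loop().run_until_complete(listen())
```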
|
||||
|
||||
|
||||
def init_app(event_source, *, loop=None):
|
||||
|
@ -7,54 +7,12 @@ services:
|
||||
- "27017"
|
||||
command: mongod --replSet=bigchain-rs
|
||||
|
||||
rdb:
|
||||
image: rethinkdb
|
||||
ports:
|
||||
- "58080:8080"
|
||||
- "28015"
|
||||
volumes_from:
|
||||
- rdb-data
|
||||
|
||||
rdb-2:
|
||||
image: rethinkdb
|
||||
ports:
|
||||
- "8080"
|
||||
- "29015"
|
||||
command: rethinkdb --join rdb:29015 --bind all
|
||||
|
||||
rdb-data:
|
||||
image: rethinkdb:2.3.5
|
||||
volumes:
|
||||
- /data
|
||||
command: "true"
|
||||
|
||||
bdb-rdb:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile-dev
|
||||
container_name: docker-bigchaindb
|
||||
volumes:
|
||||
- ./bigchaindb:/usr/src/app/bigchaindb
|
||||
- ./tests:/usr/src/app/tests
|
||||
- ./docs:/usr/src/app/docs
|
||||
- ./k8s:/usr/src/app/k8s
|
||||
- ./setup.py:/usr/src/app/setup.py
|
||||
- ./setup.cfg:/usr/src/app/setup.cfg
|
||||
- ./pytest.ini:/usr/src/app/pytest.ini
|
||||
- ./tox.ini:/usr/src/app/tox.ini
|
||||
- ./Makefile:/usr/src/app/Makefile
|
||||
environment:
|
||||
BIGCHAINDB_DATABASE_BACKEND: rethinkdb
|
||||
BIGCHAINDB_DATABASE_HOST: rdb
|
||||
BIGCHAINDB_SERVER_BIND: 0.0.0.0:9984
|
||||
ports:
|
||||
- "9984"
|
||||
command: bigchaindb start
|
||||
|
||||
bdb:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile-dev
|
||||
args:
|
||||
backend: mongodb
|
||||
volumes:
|
||||
- ./bigchaindb:/usr/src/app/bigchaindb
|
||||
- ./tests:/usr/src/app/tests
|
||||
@ -64,6 +22,7 @@ services:
|
||||
- ./setup.cfg:/usr/src/app/setup.cfg
|
||||
- ./pytest.ini:/usr/src/app/pytest.ini
|
||||
- ./tox.ini:/usr/src/app/tox.ini
|
||||
- ../cryptoconditions:/usr/src/app/cryptoconditions
|
||||
environment:
|
||||
BIGCHAINDB_DATABASE_BACKEND: mongodb
|
||||
BIGCHAINDB_DATABASE_HOST: mdb
|
||||
|
@ -9,7 +9,7 @@ BigchainDB can store data of any kind (within reason), but it's designed to be p
|
||||
* The owners of an asset can specify (crypto-)conditions which must be satisfied by anyone wishing to transfer the asset to new owners. For example, a condition might be that at least 3 of the 5 current owners must cryptographically sign a transfer transaction.
* BigchainDB verifies that the conditions have been satisfied as part of checking the validity of transfer transactions. (Moreover, anyone can check that they were satisfied.)
|
||||
* BigchainDB prevents double-spending of an asset.
|
||||
* Validated transactions are strongly tamper-resistant; see [the section about immutability / tamper-resistance](immutable.html).
|
||||
* Validated transactions are strongly tamper-resistant; see :doc:`the page about immutability / tamper-resistance <immutable>`.
|
||||
|
||||
|
||||
BigchainDB Integration with Other Blockchains
|
||||
@ -21,4 +21,4 @@ We’re actively exploring ways that BigchainDB can be used with other blockchai
|
||||
|
||||
.. note::
|
||||
|
||||
We used the word "owners" somewhat loosely above. A more accurate word might be fulfillers, signers, controllers, or tranfer-enablers. See BigchainDB Server `issue #626 <https://github.com/bigchaindb/bigchaindb/issues/626>`_.
|
||||
We used the word "owners" somewhat loosely above. A more accurate word might be fulfillers, signers, controllers, or transfer-enablers. See BigchainDB Server `issue #626 <https://github.com/bigchaindb/bigchaindb/issues/626>`_.
|
||||
|
@ -1,7 +1,14 @@
|
||||
# Production-Ready?
|
||||
|
||||
BigchainDB is not production-ready. You can use it to build a prototype or proof-of-concept (POC); many people are already doing that.
|
||||
Once BigchainDB is production-ready, we'll make an announcement.
|
||||
|
||||
BigchainDB Server is currently in version 0.X. ([The Releases page on GitHub](https://github.com/bigchaindb/bigchaindb/releases) has the exact version number.) Once BigchainDB Server is production-ready, we'll issue an announcement.
|
||||
BigchainDB version numbers follow the conventions of *Semantic Versioning* as documented at [semver.org](http://semver.org/). This means, among other things:
|
||||
|
||||
* Before version 1.0, breaking API changes could happen in any new version, even in a change from version 0.Y.4 to 0.Y.5.
|
||||
|
||||
* Starting with version 1.0.0, breaking API changes will only happen when the MAJOR version changes (e.g. from 1.7.4 to 2.0.0, or from 4.9.3 to 5.0.0).
|
||||
|
||||
To review the release history of some particular BigchainDB software, go to the GitHub repository of that software and click on "Releases". For example, the release history of BigchainDB Server can be found at [https://github.com/bigchaindb/bigchaindb/releases](https://github.com/bigchaindb/bigchaindb/releases).
|
||||
|
||||
[The BigchainDB Roadmap](https://github.com/bigchaindb/org/blob/master/ROADMAP.md) will give you a sense of the things we intend to do with BigchainDB in the near term and the long term.
|
@ -7,16 +7,13 @@ BigchainDB will run the subset of smart contracts expressible using "crypto-cond
|
||||
|
||||
The owners of an asset can impose conditions on it that must be met for the asset to be transferred to new owners. Examples of possible conditions (crypto-conditions) include:
|
||||
|
||||
- The current owner must sign the transfer transaction (one which transfers ownership to new owners)
|
||||
- Three out of five current owners must sign the transfer transaction
|
||||
- (Shannon and Kelly) or Morgan must sign the transfer transaction
|
||||
- Anyone who provides the secret password (technically, the preimage of a known hash) can create a valid transfer transaction
|
||||
- The current owner must sign the transfer transaction (one which transfers ownership to new owners).
|
||||
- Three out of five current owners must sign the transfer transaction.
|
||||
- (Shannon and Kelly) or Morgan must sign the transfer transaction.
|
||||
|
||||
Crypto-conditions can be quite complex if-this-then-that type conditions, where the "this" can be a long boolean expression. Crypto-conditions can't include loops or recursion and will therefore always run/check in finite time.
|
||||
|
||||
BigchainDB also supports a timeout condition which enables it to support a form of escrow.
|
||||
|
||||
.. note::
|
||||
|
||||
We used the word "owners" somewhat loosely above. A more accurate word might be fulfillers, signers, controllers, or tranfer-enablers. See BigchainDB Server `issue #626 <https://github.com/bigchaindb/bigchaindb/issues/626>`_.
|
||||
We used the word "owners" somewhat loosely above. A more accurate word might be fulfillers, signers, controllers, or transfer-enablers. See BigchainDB Server `issue #626 <https://github.com/bigchaindb/bigchaindb/issues/626>`_.
|
||||
|
@ -1,11 +1,12 @@
|
||||
# Transaction Concepts
|
||||
|
||||
In BigchainDB, _Transactions_ are used to register, issue, create or transfer
|
||||
In BigchainDB, _transactions_ are used to register, issue, create or transfer
|
||||
things (e.g. assets).
|
||||
|
||||
Transactions are the most basic kind of record stored by BigchainDB. There are
|
||||
two kinds: CREATE transactions and TRANSFER transactions.
|
||||
|
||||
|
||||
## CREATE Transactions
|
||||
|
||||
A CREATE transaction can be used to register, issue, create or otherwise
|
||||
@ -14,29 +15,57 @@ one might register an identity or a creative work. The things are often called
|
||||
"assets" but they might not be literal assets.
|
||||
|
||||
BigchainDB supports divisible assets as of BigchainDB Server v0.8.0.
|
||||
That means you can create/register an asset with an initial quantity,
|
||||
e.g. 700 oak trees. Divisible assets can be split apart or recombined
|
||||
by transfer transactions (described more below).
|
||||
That means you can create/register an asset with an initial number of "shares."
|
||||
For example, A CREATE transaction could register a truckload of 50 oak trees.
|
||||
Each share of a divisible asset must be interchangeable with each other share;
|
||||
the shares must be fungible.
|
||||
|
||||
A CREATE transaction can have one or more outputs.
|
||||
Each output has an associated amount: the number of shares tied to that output.
|
||||
For example, if the asset consists of 50 oak trees,
|
||||
one output might have 35 oak trees for one set of owners,
|
||||
and the other output might have 15 oak trees for another set of owners.
|
||||
|
||||
Each output also has an associated condition: the condition that must be met
|
||||
(by a TRANSFER transaction) to transfer/spend the output.
|
||||
BigchainDB supports a variety of conditions,
|
||||
a subset of the [Interledger Protocol (ILP)](https://interledger.org/)
|
||||
crypto-conditions. For details, see
|
||||
[the documentation about Inputs and Outputs](https://docs.bigchaindb.com/projects/server/en/latest/data-models/inputs-outputs.html).
|
||||
|
||||
Each output also has a list of all the public keys associated
|
||||
with the conditions on that output.
|
||||
Loosely speaking, that list might be interpreted as the list of "owners."
|
||||
A more accurate word might be fulfillers, signers, controllers,
|
||||
or transfer-enablers.
|
||||
See BigchainDB Server [issue #626](https://github.com/bigchaindb/bigchaindb/issues/626).
|
||||
|
||||
A CREATE transaction must be signed by all the owners.
|
||||
(If you're looking for that signature,
|
||||
it's in the one "fulfillment" of the one input, albeit encoded.)
|
||||
|
||||
A CREATE transaction also establishes, in its outputs, the conditions that must
|
||||
be met to transfer the asset(s). The conditions may also be associated with a
|
||||
list of public keys that, depending on the condition, may have full or partial
|
||||
control over the asset(s). For example, there may be a condition that any
|
||||
transfer must be signed (cryptographically) by the private key associated with a
|
||||
given public key. More sophisticated conditions are possible. BigchainDB's
|
||||
conditions are based on the crypto-conditions of the [Interledger Protocol
|
||||
(ILP)](https://interledger.org/).
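
As a rough sketch (not part of the official text above), here is how a CREATE transaction might be prepared, signed, and sent with the Python driver; the node URL and asset data are made up for illustration, and method names may differ slightly between driver versions:

```python
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair

bdb = BigchainDB('http://localhost:9984')  # assumed node URL
alice = generate_keypair()

prepared = bdb.transactions.prepare(
    operation='CREATE',
    signers=alice.public_key,
    asset={'data': {'truckload': {'species': 'oak', 'count': 50}}})  # illustrative asset
signed = bdb.transactions.fulfill(prepared, private_keys=alice.private_key)
bdb.transactions.send(signed)  # POST the CREATE transaction to the node
```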
|
||||
|
||||
## TRANSFER Transactions
|
||||
|
||||
A TRANSFER transaction can transfer an asset
|
||||
by providing inputs which fulfill the current output conditions on the asset.
|
||||
It must also specify new transfer conditions.
|
||||
A TRANSFER transaction can transfer/spend one or more outputs
|
||||
on other transactions (CREATE transactions or other TRANSFER transactions).
|
||||
Those outputs must all be associated with the same asset;
|
||||
a TRANSFER transaction can only transfer shares of one asset at a time.
|
||||
|
||||
Each input on a TRANSFER transaction connects to one output
|
||||
on another transaction.
|
||||
Each input must satisfy the condition on the output it's trying
|
||||
to transfer/spend.
|
||||
|
||||
A TRANSFER transaction can have one or more outputs,
|
||||
just like a CREATE transaction (described above).
|
||||
The total number of shares coming in on the inputs must equal
|
||||
the total number of shares going out on the outputs.
|
||||
|
||||
**Example 1:** Suppose a red car is owned and controlled by Joe.
|
||||
Suppose the current transfer condition on the car says
|
||||
that any valid transfer must be signed by Joe.
|
||||
Joe and a buyer named Rae could build a TRANSFER transaction containing
|
||||
Joe could build a TRANSFER transaction containing
|
||||
an input with Joe's signature (to fulfill the current output condition)
|
||||
plus a new output condition saying that any valid transfer
|
||||
must be signed by Rae.
|
||||
@ -53,33 +82,18 @@ transferred if both Jack and Kelly sign.
|
||||
Note how the sum of the incoming paperclips must equal the sum
|
||||
of the outgoing paperclips (100).
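
Continuing that sketch (again hedged: the exact input field names vary between BigchainDB and driver versions), a TRANSFER spending output 0 of the CREATE transaction above might look roughly like this, where `bob` is another generated keypair:

```python
output = signed['outputs'][0]
transfer_input = {
    'fulfillment': output['condition']['details'],
    'fulfills': {'transaction_id': signed['id'], 'output_index': 0},
    'owners_before': output['public_keys'],
}
prepared_transfer = bdb.transactions.prepare(
    operation='TRANSFER',
    asset={'id': signed['id']},
    inputs=transfer_input,
    recipients=bob.public_key)
signed_transfer = bdb.transactions.fulfill(
    prepared_transfer, private_keys=alice.private_key)
bdb.transactions.send(signed_transfer)
```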
|
||||
|
||||
|
||||
## Transaction Validity
|
||||
|
||||
When a node is asked to check if a transaction is valid, it checks several
|
||||
things. Some things it checks are:
|
||||
things. We documented those things in a post on *The BigchainDB Blog*:
|
||||
["What is a Valid Transaction in BigchainDB?"](https://blog.bigchaindb.com/what-is-a-valid-transaction-in-bigchaindb-9a1a075a9598)
|
||||
(Note: That post was about BigchainDB Server v1.0.0.)
|
||||
|
||||
* Are all the fulfillments valid? (Do they correctly satisfy the conditions
|
||||
they claim to satisfy?)
|
||||
* If it's a creation transaction, is the asset valid?
|
||||
* If it's a transfer transaction:
|
||||
* Is it trying to fulfill a condition in a nonexistent transaction?
|
||||
* Is it trying to fulfill a condition that's not in a valid transaction?
|
||||
(It's okay if the condition is in a transaction in an invalid block; those
|
||||
transactions are ignored. Transactions in the backlog or undecided blocks
|
||||
are not ignored.)
|
||||
* Is it trying to fulfill a condition that has already been fulfilled, or
|
||||
that some other pending transaction (in the backlog or an undecided block)
|
||||
also aims to fulfill?
|
||||
* Is the asset ID in the transaction the same as the asset ID in all
|
||||
transactions whose conditions are being fulfilled?
|
||||
* Is the sum of the amounts in the fulfillments equal
|
||||
to the sum of the amounts in the new conditions?
|
||||
|
||||
If you're curious about the details of transaction validation, the code is in
|
||||
the `validate` method of the `Transaction` class, in `bigchaindb/models.py` (at
|
||||
the time of writing).
|
||||
## Example Transactions
|
||||
|
||||
Note: The check to see if the transaction ID is equal to the hash of the
|
||||
transaction body is actually done whenever the transaction is converted from a
|
||||
Python dict to a Transaction object, which must be done before the `validate`
|
||||
method can be called (since it's called on a Transaction object).
|
||||
There are example BigchainDB transactions in
|
||||
[the HTTP API documentation](https://docs.bigchaindb.com/projects/server/en/latest/http-client-server-api.html)
|
||||
and
|
||||
[the Python Driver documentation](https://docs.bigchaindb.com/projects/py-driver/en/latest/usage.html).
|
||||
|
@ -68,6 +68,7 @@ Content-Type: application/json
|
||||
|
||||
TPLS['post-tx-response'] = """\
|
||||
HTTP/1.1 202 Accepted
|
||||
Location: ../statuses?transaction_id=%(txid)s
|
||||
Content-Type: application/json
|
||||
|
||||
%(tx)s
|
||||
@ -75,7 +76,7 @@ Content-Type: application/json
|
||||
|
||||
|
||||
TPLS['get-statuses-tx-request'] = """\
|
||||
GET /statuses?tx_id=%(txid)s HTTP/1.1
|
||||
GET /statuses?transaction_id=%(txid)s HTTP/1.1
|
||||
Host: example.com
|
||||
|
||||
"""
|
||||
@ -96,10 +97,7 @@ HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"status": "valid",
|
||||
"_links": {
|
||||
"tx": "/transactions/%(txid)s"
|
||||
}
|
||||
"status": "valid"
|
||||
}
|
||||
"""
|
||||
|
||||
@ -126,10 +124,7 @@ HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"status": "valid",
|
||||
"_links": {
|
||||
"block": "/blocks/%(blockid)s"
|
||||
}
|
||||
"status": "valid"
|
||||
}
|
||||
"""
|
||||
|
||||
@ -150,7 +145,7 @@ Content-Type: application/json
|
||||
|
||||
|
||||
TPLS['get-block-txid-request'] = """\
|
||||
GET /api/v1/blocks?tx_id=%(txid)s HTTP/1.1
|
||||
GET /api/v1/blocks?transaction_id=%(txid)s HTTP/1.1
|
||||
Host: example.com
|
||||
|
||||
"""
|
||||
|
BIN
docs/server/source/_static/mongodb_cloud_manager_1.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 12 KiB |
@ -18,6 +18,7 @@ Appendices
|
||||
backend
|
||||
commands
|
||||
aws-setup
|
||||
aws-testing-cluster
|
||||
template-terraform-aws
|
||||
template-ansible
|
||||
azure-quickstart-template
|
||||
|
@ -5,7 +5,7 @@ You can check the version of `pip` you're using (in your current virtualenv) by
|
||||
pip -V
|
||||
```
|
||||
|
||||
If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.4, then you must install a `pip` version associated with Python 3.4+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).
|
||||
If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.5, then you must install a `pip` version associated with Python 3.5+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).
|
||||
|
||||
On Ubuntu 16.04, we found that this works:
|
||||
```text
|
||||
|
@ -1,463 +0,0 @@
|
||||
First Node or Bootstrap Node Setup
|
||||
==================================
|
||||
|
||||
This document is a work in progress and will evolve over time to include
|
||||
security, websocket and other settings.
|
||||
|
||||
|
||||
Step 1: Set Up the Cluster
|
||||
--------------------------
|
||||
|
||||
.. code:: bash
|
||||
|
||||
az group create --name bdb-test-cluster-0 --location westeurope --debug --output json
|
||||
|
||||
ssh-keygen -t rsa -C "k8s-bdb-test-cluster-0" -f ~/.ssh/k8s-bdb-test-cluster-0
|
||||
|
||||
az acs create --name k8s-bdb-test-cluster-0 \
|
||||
--resource-group bdb-test-cluster-0 \
|
||||
--master-count 3 \
|
||||
--agent-count 2 \
|
||||
--admin-username ubuntu \
|
||||
--agent-vm-size Standard_D2_v2 \
|
||||
--dns-prefix k8s-bdb-test-cluster-0 \
|
||||
--ssh-key-value ~/.ssh/k8s-bdb-test-cluster-0.pub \
|
||||
--orchestrator-type kubernetes \
|
||||
--debug --output json
|
||||
|
||||
az acs kubernetes get-credentials \
|
||||
--resource-group bdb-test-cluster-0 \
|
||||
--name k8s-bdb-test-cluster-0 \
|
||||
--debug --output json
|
||||
|
||||
echo -e "Host k8s-bdb-test-cluster-0.westeurope.cloudapp.azure.com\n ForwardAgent yes" >> ~/.ssh/config
|
||||
|
||||
|
||||
Step 2: Connect to the Cluster UI - (optional)
|
||||
----------------------------------------------
|
||||
|
||||
* Get the kubectl context for this cluster using ``kubectl config view``.
|
||||
|
||||
* For the above commands, the context would be ``k8s-bdb-test-cluster-0``.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 proxy -p 8001
|
||||
|
||||
Step 3. Configure the Cluster
|
||||
-----------------------------
|
||||
|
||||
* Use the ConfigMap in ``configuration/config-map.yaml`` file for configuring
|
||||
the cluster.
|
||||
|
||||
* Log in to the MongoDB Cloud Manager and select the group that will monitor
and back up this cluster from the dropdown box.
|
||||
|
||||
* Go to Settings, Group Settings and copy the ``Agent Api Key``.
|
||||
|
||||
* Replace the ``<api key here>`` field with this key.
|
||||
|
||||
* Since this is the first node of the cluster, ensure that the ``data.fqdn``
|
||||
field has the value ``mdb-instance-0``.
|
||||
|
||||
* We only support the value ``all`` in the ``data.allowed-hosts`` field for now.
|
||||
|
||||
* Create the ConfigMap
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f configuration/config-map.yaml
|
||||
|
||||
Step 4. Start the NGINX Service
|
||||
-------------------------------
|
||||
|
||||
* This will give us a public IP for the cluster.
|
||||
|
||||
* Once you complete this step, you might need to wait up to 10 mins for the
|
||||
public IP to be assigned.
|
||||
|
||||
* You have the option to use vanilla NGINX or an OpenResty NGINX integrated
|
||||
with 3scale API Gateway.
|
||||
|
||||
|
||||
Step 4.1. Vanilla NGINX
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* This configuration is located in the file ``nginx/nginx-svc.yaml``.
|
||||
|
||||
* Since this is the first node, rename ``metadata.name`` and ``metadata.labels.name``
|
||||
to ``ngx-instance-0``, and ``spec.selector.app`` to ``ngx-instance-0-dep``.
|
||||
|
||||
* Start the Kubernetes Service:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f nginx/nginx-svc.yaml
|
||||
|
||||
|
||||
Step 4.2. OpenResty NGINX + 3scale
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* You have to enable HTTPS for this one and will need an HTTPS certificate
|
||||
for your domain
|
||||
|
||||
* Assuming that the public key chain is named ``cert.pem`` and the private key is
``cert.key``, run the following commands to encode the certificates into a
single continuous string that can be embedded in YAML.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
cat cert.pem | base64 -w 0 > cert.pem.b64
|
||||
|
||||
cat cert.key | base64 -w 0 > cert.key.b64
|
||||
|
||||
|
||||
* Copy the contents of ``cert.pem.b64`` in the ``cert.pem`` field, and the
|
||||
contents of ``cert.key.b64`` in the ``cert.key`` field in the file
|
||||
``nginx-3scale/nginx-3scale-secret.yaml``
|
||||
|
||||
* Create the Kubernetes Secret:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-3scale/nginx-3scale-secret.yaml
|
||||
|
||||
* Since this is the first node, rename ``metadata.name`` and ``metadata.labels.name``
|
||||
to ``ngx-instance-0``, and ``spec.selector.app`` to ``ngx-instance-0-dep`` in
|
||||
``nginx-3scale/nginx-3scale-svc.yaml`` file.
|
||||
|
||||
* Start the Kubernetes Service:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-3scale/nginx-3scale-svc.yaml
|
||||
|
||||
|
||||
Step 5. Assign DNS Name to the NGINX Public IP
|
||||
----------------------------------------------
|
||||
|
||||
* The following command can help you find out if the nginx service started above
has been assigned a public IP or external IP address:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 get svc -w
|
||||
|
||||
* Once a public IP is assigned, you can log in to the Azure portal and map it to
|
||||
a DNS name.
|
||||
|
||||
* We usually start with bdb-test-cluster-0, bdb-test-cluster-1 and so on.
|
||||
|
||||
* Let us assume that we assigned the unique name of ``bdb-test-cluster-0`` here.
|
||||
|
||||
|
||||
Step 6. Start the Mongo Kubernetes Service
|
||||
------------------------------------------
|
||||
|
||||
* Change ``metadata.name`` and ``metadata.labels.name`` to
|
||||
``mdb-instance-0``, and ``spec.selector.app`` to ``mdb-instance-0-ss``.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb/mongo-svc.yaml
|
||||
|
||||
|
||||
Step 7. Start the BigchainDB Kubernetes Service
|
||||
-----------------------------------------------
|
||||
|
||||
* Change ``metadata.name`` and ``metadata.labels.name`` to
|
||||
``bdb-instance-0``, and ``spec.selector.app`` to ``bdb-instance-0-dep``.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f bigchaindb/bigchaindb-svc.yaml
|
||||
|
||||
|
||||
Step 8. Start the NGINX Kubernetes Deployment
|
||||
---------------------------------------------
|
||||
|
||||
* As in step 4, you have the option to use vanilla NGINX or an OpenResty NGINX
|
||||
integrated with 3scale API Gateway.
|
||||
|
||||
Step 8.1. Vanilla NGINX
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* This configuration is located in the file ``nginx/nginx-dep.yaml``.
|
||||
|
||||
* Since this is the first node, change the ``metadata.name`` and
|
||||
``spec.template.metadata.labels.app`` to ``ngx-instance-0-dep``.
|
||||
|
||||
* Set ``MONGODB_BACKEND_HOST`` env var to
|
||||
``mdb-instance-0.default.svc.cluster.local``.
|
||||
|
||||
* Set ``BIGCHAINDB_BACKEND_HOST`` env var to
|
||||
``bdb-instance-0.default.svc.cluster.local``.
|
||||
|
||||
* Set ``MONGODB_FRONTEND_PORT`` to
|
||||
``$(NGX_INSTANCE_0_SERVICE_PORT_NGX_PUBLIC_MDB_PORT)``.
|
||||
|
||||
* Set ``BIGCHAINDB_FRONTEND_PORT`` to
|
||||
``$(NGX_INSTANCE_0_SERVICE_PORT_NGX_PUBLIC_BDB_PORT)``.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f nginx/nginx-dep.yaml
|
||||
|
||||
Step 8.2. OpenResty NGINX + 3scale
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* This configuration is located in the file
|
||||
``nginx-3scale/nginx-3scale-dep.yaml``.
|
||||
|
||||
* Since this is the first node, change the ``metadata.name`` and
``spec.template.metadata.labels.app`` to ``ngx-instance-0-dep``.
|
||||
|
||||
* Set ``MONGODB_BACKEND_HOST`` env var to
|
||||
``mdb-instance-0.default.svc.cluster.local``.
|
||||
|
||||
* Set ``BIGCHAINDB_BACKEND_HOST`` env var to
|
||||
``bdb-instance-0.default.svc.cluster.local``.
|
||||
|
||||
* Set ``MONGODB_FRONTEND_PORT`` to
|
||||
``$(NGX_INSTANCE_0_SERVICE_PORT_NGX_PUBLIC_MDB_PORT)``.
|
||||
|
||||
* Set ``BIGCHAINDB_FRONTEND_PORT`` to
|
||||
``$(NGX_INSTANCE_0_SERVICE_PORT_NGX_PUBLIC_BDB_PORT)``.
|
||||
|
||||
* Also, replace the placeholder strings for the env vars with the values
|
||||
obtained from 3scale. You will need the Secret Token, Service ID, Version Header
|
||||
and Provider Key from 3scale.
|
||||
|
||||
* The ``THREESCALE_FRONTEND_API_DNS_NAME`` will be the DNS name registered for your
HTTPS certificate.
|
||||
|
||||
* You can set the ``THREESCALE_UPSTREAM_API_PORT`` to any port other than 9984,
|
||||
9985, 443, 8888 and 27017. We usually use port ``9999``.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-3scale/nginx-3scale-dep.yaml
|
||||
|
||||
|
||||
Step 9. Create a Kubernetes Storage Class for MongoDB
|
||||
-----------------------------------------------------
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb/mongo-sc.yaml
|
||||
|
||||
|
||||
Step 10. Create a Kubernetes PersistentVolumeClaim
|
||||
--------------------------------------------------
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb/mongo-pvc.yaml
|
||||
|
||||
|
||||
Step 11. Start a Kubernetes StatefulSet for MongoDB
|
||||
---------------------------------------------------
|
||||
|
||||
* Change ``spec.serviceName`` to ``mdb-instance-0``.
|
||||
|
||||
* Change the ``metadata.name``, ``template.metadata.name`` and
|
||||
``template.metadata.labels.app`` to ``mdb-instance-0-ss``.
|
||||
|
||||
* It might take up to 10 minutes for the disks to be created and attached to
|
||||
the pod.
|
||||
|
||||
* The UI might show that the pod has errored with the
|
||||
message "timeout expired waiting for volumes to attach/mount".
|
||||
|
||||
* Use the CLI below to check the status of the pod in this case,
|
||||
instead of the UI. This happens due to a bug in Azure ACS.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb/mongo-ss.yaml
|
||||
|
||||
* You can check the status of the pod using the command:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 get po -w
|
||||
|
||||
|
||||
Step 12. Start a Kubernetes Deployment for Bigchaindb
|
||||
-----------------------------------------------------
|
||||
|
||||
* Change both ``metadata.name`` and ``spec.template.metadata.labels.app``
|
||||
to ``bdb-instance-0-dep``.
|
||||
|
||||
* Set ``BIGCHAINDB_DATABASE_HOST`` to ``mdb-instance-0``.
|
||||
|
||||
* Set the appropriate ``BIGCHAINDB_KEYPAIR_PUBLIC``,
|
||||
``BIGCHAINDB_KEYPAIR_PRIVATE`` values.
|
||||
|
||||
* One way to generate BigchainDB keypair is to run a Python shell with
|
||||
the command
|
||||
``from bigchaindb_driver import crypto; crypto.generate_keypair()``.
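
A hedged illustration of that keypair generation (it assumes the ``bigchaindb_driver`` package is installed in your local Python environment):

.. code:: python

    from bigchaindb_driver import crypto

    keypair = crypto.generate_keypair()
    print(keypair.public_key)   # value for BIGCHAINDB_KEYPAIR_PUBLIC
    print(keypair.private_key)  # value for BIGCHAINDB_KEYPAIR_PRIVATE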
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f bigchaindb/bigchaindb-dep.yaml
|
||||
|
||||
|
||||
Step 13. Start a Kubernetes Deployment for MongoDB Monitoring Agent
|
||||
-------------------------------------------------------------------
|
||||
|
||||
* Change both ``metadata.name`` and ``spec.template.metadata.labels.app`` to
``mdb-mon-instance-0-dep``.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb-monitoring-agent/mongo-mon-dep.yaml
|
||||
|
||||
* Get the pod name and check its logs:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 get po
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 logs -f <pod name>
|
||||
|
||||
|
||||
Step 14. Configure MongoDB Cloud Manager for Monitoring
|
||||
-------------------------------------------------------
|
||||
|
||||
* Open `MongoDB Cloud Manager <https://cloud.mongodb.com>`_.
|
||||
|
||||
* Click ``Login`` under ``MongoDB Cloud Manager`` and log in to the Cloud Manager.
|
||||
|
||||
* Select the group from the dropdown box on the page.
|
||||
|
||||
* Go to Settings, Group Settings and add a Preferred Hostnames regexp as
|
||||
``^mdb-instance-[0-9]{1,2}$``. It may take up to 5 mins till this setting
|
||||
is in effect. You may refresh the browser window and verify whether the changes
|
||||
have been saved or not.
|
||||
|
||||
* Next, click the ``Deployment`` tab, and then the ``Manage Existing`` button.
|
||||
|
||||
* On the ``Import your deployment for monitoring`` page, enter the hostname as
|
||||
``mdb-instance-0``, port number as ``27017``, with no authentication and no
|
||||
TLS/SSL settings.
|
||||
|
||||
* Once the deployment is found, click the ``Continue`` button.
|
||||
This may take about a minute or two.
|
||||
|
||||
* Do not add ``Automation Agent`` when given an option to add it.
|
||||
|
||||
* Verify on the UI that data is being sent by the monitoring agent.
|
||||
|
||||
|
||||
Step 15. Start a Kubernetes Deployment for MongoDB Backup Agent
|
||||
---------------------------------------------------------------
|
||||
|
||||
* Change both ``metadata.name`` and ``spec.template.metadata.labels.app``
|
||||
to ``mdb-backup-instance-0-dep``.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb-backup-agent/mongo-backup-dep.yaml
|
||||
|
||||
* Get the pod name and check its logs:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 get po
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 logs -f <pod name>
|
||||
|
||||
|
||||
Step 16. Configure MongoDB Cloud Manager for Backup
|
||||
---------------------------------------------------
|
||||
|
||||
* Open `MongoDB Cloud Manager <https://cloud.mongodb.com>`_.
|
||||
|
||||
* Click ``Login`` under ``MongoDB Cloud Manager`` and log in to the Cloud
|
||||
Manager.
|
||||
|
||||
* Select the group from the dropdown box on the page.
|
||||
|
||||
* Click ``Backup`` tab.
|
||||
|
||||
* Click on the ``Begin Setup``.
|
||||
|
||||
* Click on ``Next``, select the replica set from the dropdown menu.
|
||||
|
||||
* Verify the details of your MongoDB instance and click on ``Start`` again.
|
||||
|
||||
* It might take up to 5 minutes to start the backup process.
|
||||
|
||||
* Verify that data is being backed up on the UI.
|
||||
|
||||
|
||||
Step 17. Verify that the Cluster is Correctly Set Up
|
||||
----------------------------------------------------
|
||||
|
||||
* Start the toolbox container in the cluster
|
||||
|
||||
.. code:: bash
|
||||
|
||||
kubectl --context k8s-bdb-test-cluster-0 \
|
||||
run -it toolbox \
|
||||
--image bigchaindb/toolbox \
|
||||
--image-pull-policy=Always \
|
||||
--restart=Never --rm
|
||||
|
||||
* Verify MongoDB instance
|
||||
|
||||
.. code:: bash
|
||||
|
||||
nslookup mdb-instance-0
|
||||
|
||||
dig +noall +answer _mdb-port._tcp.mdb-instance-0.default.svc.cluster.local SRV
|
||||
|
||||
curl -X GET http://mdb-instance-0:27017
|
||||
|
||||
* Verify BigchainDB instance
|
||||
|
||||
.. code:: bash
|
||||
|
||||
nslookup bdb-instance-0
|
||||
|
||||
dig +noall +answer _bdb-port._tcp.bdb-instance-0.default.svc.cluster.local SRV
|
||||
|
||||
dig +noall +answer _bdb-ws-port._tcp.bdb-instance-0.default.svc.cluster.local SRV
|
||||
|
||||
curl -X GET http://bdb-instance-0:9984
|
||||
|
||||
wsc ws://bdb-instance-0:9985/api/v1/streams/valid_tx
|
||||
|
||||
* Verify NGINX instance
|
||||
|
||||
.. code:: bash
|
||||
|
||||
nslookup ngx-instance-0
|
||||
|
||||
dig +noall +answer _ngx-public-mdb-port._tcp.ngx-instance-0.default.svc.cluster.local SRV
|
||||
|
||||
curl -X GET http://ngx-instance-0:27017 # results in curl: (56) Recv failure: Connection reset by peer
|
||||
|
||||
dig +noall +answer _ngx-public-bdb-port._tcp.ngx-instance-0.default.svc.cluster.local SRV
|
||||
|
||||
dig +noall +answer _ngx-public-ws-port._tcp.ngx-instance-0.default.svc.cluster.local SRV
|
||||
|
||||
* If you have run the vanilla NGINX instance, run
|
||||
|
||||
.. code:: bash
|
||||
|
||||
curl -X GET http://ngx-instance-0:80
|
||||
|
||||
wsc ws://ngx-instance-0:81/api/v1/streams/valid_tx
|
||||
|
||||
* If you have the OpenResty NGINX + 3scale instance, run
|
||||
|
||||
.. code:: bash
|
||||
|
||||
curl -X GET https://ngx-instance-0
|
||||
|
||||
* Check the MongoDB monitoring and backup agents on the MongoDB Cloud Manager portal to verify they are working fine.
|
||||
|
||||
* Send some transactions to BigchainDB and verify it's up and running!
|
||||
|
@ -1,476 +0,0 @@
|
||||
Kubernetes Template: Deploy a Single BigchainDB Node
|
||||
====================================================
|
||||
|
||||
This page describes how to deploy the first BigchainDB node
|
||||
in a BigchainDB cluster, or a stand-alone BigchainDB node,
|
||||
using `Kubernetes <https://kubernetes.io/>`_.
|
||||
It assumes you already have a running Kubernetes cluster.
|
||||
|
||||
If you want to add a new BigchainDB node to an existing BigchainDB cluster,
|
||||
refer to :doc:`the page about that <add-node-on-kubernetes>`.
|
||||
|
||||
|
||||
Step 1: Install kubectl
|
||||
-----------------------
|
||||
|
||||
kubectl is the Kubernetes CLI.
|
||||
If you don't already have it installed,
|
||||
then see the `Kubernetes docs to install it
|
||||
<https://kubernetes.io/docs/user-guide/prereqs/>`_.
|
||||
|
||||
|
||||
Step 2: Configure kubectl
|
||||
-------------------------
|
||||
|
||||
The default location of the kubectl configuration file is ``~/.kube/config``.
|
||||
If you don't have that file, then you need to get it.
|
||||
|
||||
**Azure.** If you deployed your Kubernetes cluster on Azure
|
||||
using the Azure CLI 2.0 (as per :doc:`our template <template-kubernetes-azure>`),
|
||||
then you can get the ``~/.kube/config`` file using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ az acs kubernetes get-credentials \
|
||||
--resource-group <name of resource group containing the cluster> \
|
||||
--name <ACS cluster name>
|
||||
|
||||
If it asks for a password (to unlock the SSH key)
|
||||
and you enter the correct password,
|
||||
but you get an error message,
|
||||
then try adding ``--ssh-key-file ~/.ssh/<name>``
|
||||
to the above command (i.e. the path to the private key).
|
||||
|
||||
|
||||
Step 3: Create Storage Classes
|
||||
------------------------------
|
||||
|
||||
MongoDB needs somewhere to store its data persistently,
|
||||
outside the container where MongoDB is running.
|
||||
Our MongoDB Docker container
|
||||
(based on the official MongoDB Docker container)
|
||||
exports two volume mounts with correct
|
||||
permissions from inside the container:
|
||||
|
||||
* The directory where the mongod instance stores its data: ``/data/db``.
|
||||
There's more explanation in the MongoDB docs about `storage.dbpath <https://docs.mongodb.com/manual/reference/configuration-options/#storage.dbPath>`_.
|
||||
|
||||
* The directory where the mongodb instance stores the metadata for a sharded
|
||||
cluster: ``/data/configdb/``.
|
||||
There's more explanation in the MongoDB docs about `sharding.configDB <https://docs.mongodb.com/manual/reference/configuration-options/#sharding.configDB>`_.
|
||||
|
||||
Explaining how Kubernetes handles persistent volumes,
|
||||
and the associated terminology,
|
||||
is beyond the scope of this documentation;
|
||||
see `the Kubernetes docs about persistent volumes
|
||||
<https://kubernetes.io/docs/user-guide/persistent-volumes>`_.
|
||||
|
||||
The first thing to do is create the Kubernetes storage classes.
|
||||
|
||||
**Azure.** First, you need an Azure storage account.
|
||||
If you deployed your Kubernetes cluster on Azure
|
||||
using the Azure CLI 2.0
|
||||
(as per :doc:`our template <template-kubernetes-azure>`),
|
||||
then the `az acs create` command already created two
|
||||
storage accounts in the same location and resource group
|
||||
as your Kubernetes cluster.
|
||||
Both should have the same "storage account SKU": ``Standard_LRS``.
|
||||
Standard storage is lower-cost and lower-performance.
|
||||
It uses hard disk drives (HDD).
|
||||
LRS means locally-redundant storage: three replicas
|
||||
in the same data center.
|
||||
Premium storage is higher-cost and higher-performance.
|
||||
It uses solid state drives (SSD).
|
||||
At the time of writing,
|
||||
when we created a storage account with SKU ``Premium_LRS``
|
||||
and tried to use that,
|
||||
the PersistentVolumeClaim would get stuck in a "Pending" state.
|
||||
For future reference, the command to create a storage account is
|
||||
`az storage account create <https://docs.microsoft.com/en-us/cli/azure/storage/account#create>`_.
|
||||
|
||||
|
||||
Get the file ``mongo-sc.yaml`` from GitHub using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-sc.yaml
|
||||
|
||||
You may have to update the ``parameters.location`` field in both the files to
|
||||
specify the location you are using in Azure.
|
||||
|
||||
Create the required storage classes using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl apply -f mongo-sc.yaml
|
||||
|
||||
|
||||
You can check if it worked using ``kubectl get storageclasses``.
|
||||
|
||||
**Azure.** Note that there is no line of the form
|
||||
``storageAccount: <azure storage account name>``
|
||||
under ``parameters:``. When we included one
|
||||
and then created a PersistentVolumeClaim based on it,
|
||||
the PersistentVolumeClaim would get stuck
|
||||
in a "Pending" state.
|
||||
Kubernetes just looks for a storageAccount
|
||||
with the specified skuName and location.
|
||||
|
||||
|
||||
Step 4: Create Persistent Volume Claims
|
||||
---------------------------------------
|
||||
|
||||
Next, you will create two PersistentVolumeClaim objects ``mongo-db-claim`` and
|
||||
``mongo-configdb-claim``.
|
||||
Get the file ``mongo-pvc.yaml`` from GitHub using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-pvc.yaml
|
||||
|
||||
Note how there's no explicit mention of Azure, AWS or any other cloud provider.
|
||||
``ReadWriteOnce`` (RWO) means the volume can be mounted as
|
||||
read-write by a single Kubernetes node.
|
||||
(``ReadWriteOnce`` is the *only* access mode supported
|
||||
by AzureDisk.)
|
||||
``storage: 20Gi`` means the volume has a size of 20
|
||||
`gibibytes <https://en.wikipedia.org/wiki/Gibibyte>`_.
|
||||
|
||||
You may want to update the ``spec.resources.requests.storage`` field in both
|
||||
the files to specify a different disk size.
|
||||
|
||||
Create the required Persistent Volume Claims using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl apply -f mongo-pvc.yaml
|
||||
|
||||
|
||||
You can check its status using: ``kubectl get pvc -w``
|
||||
|
||||
Initially, the status of persistent volume claims might be "Pending"
|
||||
but it should become "Bound" fairly quickly.
|
||||
|
||||
|
||||
Step 5: Create the Config Map - Optional
|
||||
----------------------------------------
|
||||
|
||||
This step is required only if you are planning to set up multiple
|
||||
`BigchainDB nodes
|
||||
<https://docs.bigchaindb.com/en/latest/terminology.html>`_.
|
||||
|
||||
MongoDB reads the local ``/etc/hosts`` file while bootstrapping a replica set
|
||||
to resolve the hostname provided to the ``rs.initiate()`` command. It needs to
|
||||
ensure that the replica set is being initialized in the same instance where
|
||||
the MongoDB instance is running.
|
||||
|
||||
To achieve this, you will create a ConfigMap with the FQDN of the MongoDB instance
|
||||
and populate the ``/etc/hosts`` file with this value so that a replica set can
|
||||
be created seamlessly.
|
||||
|
||||
Get the file ``mongo-cm.yaml`` from GitHub using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-cm.yaml
|
||||
|
||||
You may want to update the ``data.fqdn`` field in the file before creating the
|
||||
ConfigMap. ``data.fqdn`` field will be the DNS name of your MongoDB instance.
|
||||
This will be used by other MongoDB instances when forming a MongoDB
|
||||
replica set. It should resolve to the MongoDB instance in your cluster when
|
||||
you are done with the setup. This will help when you are adding more MongoDB
|
||||
instances to the replica set in the future.
|
||||
|
||||
|
||||
**Azure.**
|
||||
In Kubernetes on ACS, the name you populate in the ``data.fqdn`` field
|
||||
will be used to configure a DNS name for the public IP assigned to the
|
||||
Kubernetes Service that is the frontend for the MongoDB instance.
|
||||
We suggest using a name that will already be available in Azure.
|
||||
We use ``mdb-instance-0``, ``mdb-instance-1`` and so on in this document,
|
||||
which gives us ``mdb-instance-0.<azure location>.cloudapp.azure.com``,
|
||||
``mdb-instance-1.<azure location>.cloudapp.azure.com``, etc. as the FQDNs.
|
||||
The ``<azure location>`` is the Azure datacenter location you are using,
|
||||
which can also be obtained using the ``az account list-locations`` command.
|
||||
You can also try to assign a name to an Public IP in Azure before starting
|
||||
the process, or use ``nslookup`` with the name you have in mind to check
|
||||
if it's available for use.
|
||||
|
||||
You should ensure that the name specified in the ``data.fqdn`` field is
a unique one.
|
||||
|
||||
**Kubernetes on bare-metal or other cloud providers.**
|
||||
You need to provide the name resolution function
|
||||
by other means (using DNS providers like GoDaddy, CloudFlare or your own
|
||||
private DNS server). The DNS set up for other environments is currently
|
||||
beyond the scope of this document.
|
||||
|
||||
|
||||
Create the required ConfigMap using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl apply -f mongo-cm.yaml
|
||||
|
||||
|
||||
You can check its status using: ``kubectl get cm``
|
||||
|
||||
Now you are ready to run MongoDB and BigchainDB on our Kubernetes cluster.
|
||||
|
||||
|
||||
Step 6: Run MongoDB as a StatefulSet
|
||||
------------------------------------
|
||||
|
||||
Get the file ``mongo-ss.yaml`` from GitHub using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-ss.yaml
|
||||
|
||||
|
||||
Note how the MongoDB container uses the ``mongo-db-claim`` and the
|
||||
``mongo-configdb-claim`` PersistentVolumeClaims for its ``/data/db`` and
|
||||
``/data/configdb`` directories (mount path). Note also that we use the pod's
|
||||
``securityContext.capabilities.add`` specification to add the ``FOWNER``
|
||||
capability to the container.
|
||||
That is because the MongoDB container has the user ``mongodb``, with uid ``999``
|
||||
and group ``mongodb``, with gid ``999``.
|
||||
When this container runs on a host with a mounted disk, the writes fail when
|
||||
there is no user with uid ``999``.
|
||||
To avoid this, we use the Docker feature of ``--cap-add=FOWNER``.
|
||||
This bypasses the uid and gid permission checks during writes and allows data
|
||||
to be persisted to disk.
|
||||
Refer to the
|
||||
`Docker docs <https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities>`_
|
||||
for details.
|
||||
|
||||
As we gain more experience running MongoDB in testing and production, we will
|
||||
tweak the ``resources.limits.cpu`` and ``resources.limits.memory``.
|
||||
We will also stop exposing port ``27017`` globally and/or allow only certain
|
||||
hosts to connect to the MongoDB instance in the future.
|
||||
|
||||
Create the required StatefulSet using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl apply -f mongo-ss.yaml
|
||||
|
||||
You can check its status using the commands ``kubectl get statefulsets -w``
|
||||
and ``kubectl get svc -w``
|
||||
|
||||
You may have to wait for up to 10 minutes for the disk to be created
|
||||
and attached on the first run. The pod can fail several times with the message
|
||||
saying that the timeout for mounting the disk was exceeded.


Step 7: Initialize a MongoDB Replica Set - Optional
---------------------------------------------------

This step is required only if you are planning to set up multiple
`BigchainDB nodes
<https://docs.bigchaindb.com/en/latest/terminology.html>`_.


Log in to the running MongoDB instance and access the mongo shell using:

.. code:: bash

   $ kubectl exec -it mdb-0 -c mongodb -- /bin/bash
   root@mdb-0:/# mongo --port 27017

You will initiate the replica set by using the ``rs.initiate()`` command from the
mongo shell. Its syntax is:

.. code:: bash

   rs.initiate({
     _id : "<replica-set-name>",
     members: [ {
       _id : 0,
       host : "<fqdn of this instance>:<port number>"
     } ]
   })

An example command might look like:

.. code:: bash

   > rs.initiate({ _id : "bigchain-rs", members: [ { _id : 0, host : "mdb-instance-0.westeurope.cloudapp.azure.com:27017" } ] })


where ``mdb-instance-0.westeurope.cloudapp.azure.com`` is the value stored in
the ``data.fqdn`` field in the ConfigMap created using ``mongo-cm.yaml``.


You should see the mongo shell prompt change from ``>``
to ``bigchain-rs:OTHER>`` to ``bigchain-rs:SECONDARY>`` and finally
to ``bigchain-rs:PRIMARY>``.

You can use the ``rs.conf()`` and the ``rs.status()`` commands to check the
detailed replica set configuration now.
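
Later, if another MongoDB instance (from another BigchainDB node) needs to join
this replica set, it can be added from the primary's mongo shell with
``rs.add()``. The FQDN below is only a hypothetical example:

.. code:: bash

   bigchain-rs:PRIMARY> rs.add("mdb-instance-1.<azure location>.cloudapp.azure.com:27017")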


Step 8: Create a DNS record - Optional
--------------------------------------

This step is required only if you are planning to set up multiple
`BigchainDB nodes
<https://docs.bigchaindb.com/en/latest/terminology.html>`_.

**Azure.** Select the current Azure resource group and look for the ``Public IP``
resource. You should see at least 2 entries there - one for the Kubernetes
master and the other for the MongoDB instance. You may have to ``Refresh`` the
Azure web page listing the resources in a resource group for the latest
changes to be reflected.
Select the ``Public IP`` resource that is attached to your service (it should
have the Kubernetes cluster name along with a random string),
select ``Configuration``, add the DNS name that was added in the
ConfigMap earlier, click ``Save``, and wait for the changes to be applied.

To verify the DNS setting is operational, you can run ``nslookup <dns
name added in ConfigMap>`` from your local Linux shell.
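
For example, using the FQDN from the ``rs.initiate()`` example above
(substitute the DNS name you actually configured):

.. code:: bash

   $ nslookup mdb-instance-0.westeurope.cloudapp.azure.com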

This will ensure that when you scale the replica set later, other MongoDB
members in the replica set can reach this instance.


Step 9: Run BigchainDB as a Deployment
--------------------------------------

Get the file ``bigchaindb-dep.yaml`` from GitHub using:

.. code:: bash

   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/bigchaindb/bigchaindb-dep.yaml

Note that we set the ``BIGCHAINDB_DATABASE_HOST`` to ``mdb-svc``, which is the
name of the MongoDB service defined earlier.

We also hardcode the ``BIGCHAINDB_KEYPAIR_PUBLIC``,
``BIGCHAINDB_KEYPAIR_PRIVATE`` and ``BIGCHAINDB_KEYRING`` for now.

As we gain more experience running BigchainDB in testing and production, we
will tweak the ``resources.limits`` values for CPU and memory, and as richer
monitoring and probing become available in BigchainDB, we will tweak the
``livenessProbe`` and ``readinessProbe`` parameters.

We also plan to specify scheduling policies for the BigchainDB deployment so
that we ensure that BigchainDB and MongoDB are running on separate nodes, and
to build security around the globally exposed port ``9984``.

Create the required Deployment using:

.. code:: bash

   $ kubectl apply -f bigchaindb-dep.yaml

You can check its status using the command ``kubectl get deploy -w``.
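
To see whether BigchainDB connected to MongoDB successfully, you can also tail
the logs of the BigchainDB pod. The pod name below is a placeholder; get the
actual name from ``kubectl get pods``:

.. code:: bash

   $ kubectl get pods
   $ kubectl logs -f <name of the bigchaindb pod>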


Step 10: Run NGINX as a Deployment
----------------------------------

NGINX is used as a proxy to both the BigchainDB and MongoDB instances in the
node.
It proxies HTTP requests on port 80 to the BigchainDB backend, and TCP
connections on port 27017 to the MongoDB backend.

You can also configure a whitelist in NGINX to allow only connections from
other instances in the MongoDB replica set to access the backend MongoDB
instance.

Get the file ``nginx-cm.yaml`` from GitHub using:

.. code:: bash

   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/nginx/nginx-cm.yaml

The IP address whitelist can be explicitly configured in the ``nginx-cm.yaml``
file. You will need a list of the IP addresses of all the other MongoDB
instances in the cluster. If the MongoDB instances are specified by hostname,
those hostnames need to be resolved to the corresponding IP addresses. If the
IP address of any MongoDB instance changes, we can start a 'rolling upgrade' of
NGINX after updating the corresponding ConfigMap without affecting availability.


Create the ConfigMap for the whitelist using:

.. code:: bash

   $ kubectl apply -f nginx-cm.yaml

Get the file ``nginx-dep.yaml`` from GitHub using:

.. code:: bash

   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/nginx/nginx-dep.yaml

Create the NGINX deployment using:

.. code:: bash

   $ kubectl apply -f nginx-dep.yaml
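
If you later change the whitelist (for example, because a MongoDB instance got
a new IP address), one simple way to roll the change out is to re-apply the
ConfigMap and delete the NGINX pod so that the Deployment recreates it with the
updated configuration. This is only a sketch; the pod name is a placeholder:

.. code:: bash

   $ kubectl apply -f nginx-cm.yaml
   $ kubectl get pods
   $ kubectl delete pod <name of the nginx pod>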


Step 11: Verify the BigchainDB Node Setup
-----------------------------------------

Step 11.1: Testing Internally
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Run a container that provides utilities like ``nslookup``, ``curl`` and ``dig``
on the cluster and query the internal DNS and IP endpoints.

.. code:: bash

   $ kubectl run -it toolbox --image <docker image to run> --restart=Never --rm

There is a generic image based on alpine:3.5 with the required utilities
hosted at Docker Hub under `bigchaindb/toolbox <https://hub.docker.com/r/bigchaindb/toolbox/>`_.
The corresponding Dockerfile is in the bigchaindb/bigchaindb repository on GitHub, at `https://github.com/bigchaindb/bigchaindb/blob/master/k8s/toolbox/Dockerfile <https://github.com/bigchaindb/bigchaindb/blob/master/k8s/toolbox/Dockerfile>`_.

You can use it as below to get started immediately:

.. code:: bash

   $ kubectl run -it toolbox --image bigchaindb/toolbox --restart=Never --rm

It will drop you to the shell prompt.
Now you can query for the ``mdb`` and ``bdb`` service details.

.. code:: bash

   # nslookup mdb-svc
   # nslookup bdb-svc
   # nslookup ngx-svc
   # dig +noall +answer _mdb-port._tcp.mdb-svc.default.svc.cluster.local SRV
   # dig +noall +answer _bdb-port._tcp.bdb-svc.default.svc.cluster.local SRV
   # dig +noall +answer _ngx-public-mdb-port._tcp.ngx-svc.default.svc.cluster.local SRV
   # dig +noall +answer _ngx-public-bdb-port._tcp.ngx-svc.default.svc.cluster.local SRV
   # curl -X GET http://mdb-svc:27017
   # curl -X GET http://bdb-svc:9984
   # curl -X GET http://ngx-svc:80
   # curl -X GET http://ngx-svc:27017

The ``nslookup`` commands should output the configured IP addresses of the
services in the cluster.

The ``dig`` commands should return the port numbers configured for the
various services in the cluster.

Finally, the ``curl`` commands test the availability of the services
themselves.

Step 11.2: Testing Externally
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Try to access ``<dns/ip of your exposed bigchaindb service endpoint>:80``
in your browser. You should receive a JSON response that shows the BigchainDB
server version, among other things.

Try to access ``<dns/ip of your exposed mongodb service endpoint>:27017``
in your browser. If your IP is in the whitelist, you will receive a message
from the MongoDB instance stating that it doesn't allow HTTP connections to
the port anymore. If your IP is not in the whitelist, your access will be
blocked and you will not see any response from the MongoDB instance.
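
The same checks can be scripted with ``curl``. The hostnames below are only
placeholders for your node's public DNS names:

.. code:: bash

   $ curl -X GET http://<dns/ip of your exposed bigchaindb service endpoint>:80
   $ curl -X GET http://<dns/ip of your exposed mongodb service endpoint>:27017
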
@ -1,9 +0,0 @@
Clusters
========

.. toctree::
   :maxdepth: 1

   set-up-a-cluster
   aws-testing-cluster
@ -1,28 +0,0 @@
# Set Up a Cluster

This section is about how to set up a BigchainDB cluster where each node is operated by a different operator. If you want to set up and run a testing cluster on AWS (where all nodes are operated by you), then see [the section about that](aws-testing-cluster.html).


## Initial Questions

There are many questions that must be answered before setting up a BigchainDB cluster. For example:

* Do you have a governance process for making consortium-level decisions, such as how to admit new members?
* What will you store in creation transactions (data payload)? Is there a data schema?
* Will you use transfer transactions? Will they include a non-empty data payload?
* Who will be allowed to submit transactions? Who will be allowed to read or query transactions? How will you enforce the access rules?


## Set Up the Initial Cluster

The consortium must decide some things before setting up the initial cluster (initial set of BigchainDB nodes):

1. Who will operate each node in the initial cluster?
2. What will the replication factor be? (It should be 3 or more.)
3. Who will deploy the first node, second node, etc.?

Once those things have been decided, the cluster deployment process can begin. The process for deploying a production node is outlined in [the section on production nodes](../production-nodes/index.html).

Every time a new BigchainDB node is added, every other node must update their [BigchainDB keyring](../server-reference/configuration.html#keyring) (one of the BigchainDB configuration settings): they must add the public key of the new node.

To secure communications between BigchainDB nodes, each BigchainDB node can use a firewall or similar, and doing that will require additional coordination.
50 docs/server/source/clusters.md Normal file
@ -0,0 +1,50 @@
# Clusters

A **BigchainDB Cluster** is a set of connected **BigchainDB Nodes**, managed by a **BigchainDB Consortium** (i.e. an organization). Those terms are defined in the [BigchainDB Terminology page](https://docs.bigchaindb.com/en/latest/terminology.html).


## Consortium Structure & Governance

The consortium might be a company, a foundation, a cooperative, or [some other form of organization](https://en.wikipedia.org/wiki/Organizational_structure).
It must make many decisions, e.g. How will new members be added? Who can read the stored data? What kind of data will be stored?
A governance process is required to make those decisions, and therefore one of the first steps for any new consortium is to specify its governance process (if one doesn't already exist).
This documentation doesn't explain how to create a consortium, nor does it outline the possible governance processes.

It's worth noting that the decentralization of a BigchainDB cluster depends,
to some extent, on the decentralization of the associated consortium. See the pages about [decentralization](https://docs.bigchaindb.com/en/latest/decentralized.html) and [node diversity](https://docs.bigchaindb.com/en/latest/diversity.html).


## Relevant Technical Documentation

There are some pages and sections that will be of particular interest to anyone building or managing a BigchainDB cluster. In particular:

* [the page about how to set up and run a cluster node](production-nodes/setup-run-node.html),
* [our production deployment template](production-deployment-template/index.html), and
* [our old RethinkDB-based AWS deployment template](appendices/aws-testing-cluster.html).


## Cluster DNS Records and SSL Certificates

We now describe how *we* set up the external (public-facing) DNS records for a BigchainDB cluster. Your consortium may opt to do it differently.
There were several goals:

* Allow external users/clients to connect directly to any BigchainDB node in the cluster (over the internet), if they want.
* Each BigchainDB node operator should get an SSL certificate for their BigchainDB node, so that their BigchainDB node can serve the [BigchainDB HTTP API](http-client-server-api.html) via HTTPS. (The same certificate might also be used to serve the [WebSocket API](websocket-event-stream-api.html).)
* There should be no sharing of SSL certificates among BigchainDB node operators.
* Optional: Allow clients to connect to a "random" BigchainDB node in the cluster at one particular domain (or subdomain).


### Node Operator Responsibilities

1. Register a domain (or use one that you already have) for your BigchainDB node. You can use a subdomain if you like. For example, you might opt to use `abc-org73.net`, `api.dynabob8.io` or `figmentdb3.ninja`.
2. Get an SSL certificate for your domain or subdomain, and properly install it in your node (e.g. in your NGINX instance).
3. Create a DNS A Record mapping your domain or subdomain to the public IP address of your node (i.e. the one that serves the BigchainDB HTTP API).


### Consortium Responsibilities

Optional: The consortium managing the BigchainDB cluster could register a domain name and set up CNAME records mapping that domain name (or one of its subdomains) to each of the nodes in the cluster. For example, if the consortium registered `bdbcluster.io`, they could set up CNAME records like the following:

* CNAME record mapping `api.bdbcluster.io` to `abc-org73.net`
* CNAME record mapping `api.bdbcluster.io` to `api.dynabob8.io`
* CNAME record mapping `api.bdbcluster.io` to `figmentdb3.ninja`
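
Once such a record exists, anyone can check what a name resolves to with `dig`. The name below is taken from the example above; substitute your own:

```bash
dig +short api.bdbcluster.io
```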
@ -1,21 +1,20 @@
# The Digital Asset Model
# The Asset Model

To avoid redundant data in transactions, the digital asset model is different for `CREATE` and `TRANSFER` transactions.
To avoid redundant data in transactions, the asset model is different for `CREATE` and `TRANSFER` transactions.

A digital asset's properties are defined in a `CREATE` transaction with the following model:
In a `CREATE` transaction, the `"asset"` must contain exactly one key-value pair. The key must be `"data"` and the value can be any valid JSON document, or `null`. For example:
```json
{
    "data": "<json document>"
    "data": {
        "desc": "Gold-inlay bookmark owned by Xavier Bellomat Dickens III",
        "xbd_collection_id": 1857
    }
}
```

For `TRANSFER` transactions we only keep the asset ID:
In a `TRANSFER` transaction, the `"asset"` must contain exactly one key-value pair. The key must be `"id"` and the value must contain a transaction ID (i.e. a SHA3-256 hash: the ID of the `CREATE` transaction which created the asset, which also serves as the asset ID). For example:
```json
{
    "id": "<asset's CREATE transaction ID (sha3-256 hash)>"
    "id": "38100137cea87fb9bd751e2372abb2c73e7d5bcf39d940a5516a324d9c7fb88d"
}
```


- `id`: The ID of the `CREATE` transaction that created the asset.
- `data`: A user supplied JSON document with custom information about the asset. Defaults to null.

@ -3,7 +3,7 @@ Data Models

BigchainDB stores all data in the underlying database as JSON documents (conceptually, at least). There are three main kinds:

1. Transactions, which contain digital assets, inputs, outputs, and other things
1. Transactions, which contain assets, inputs, outputs, and other things
2. Blocks
3. Votes

@ -81,30 +81,20 @@ to spend the asset. For example:
|
||||
{
|
||||
"condition": {
|
||||
"details": {
|
||||
"bitmask": 41,
|
||||
"subfulfillments": [
|
||||
"type": "threshold-sha-256",
|
||||
"threshold": 2,
|
||||
"subconditions": [
|
||||
{
|
||||
"bitmask": 32,
|
||||
"public_key": "<new owner 1 public key>",
|
||||
"signature": null,
|
||||
"type": "fulfillment",
|
||||
"type_id": 4,
|
||||
"weight": 1
|
||||
"type": "ed25519-sha-256",
|
||||
},
|
||||
{
|
||||
"bitmask": 32,
|
||||
"public_key": "<new owner 2 public key>",
|
||||
"signature": null,
|
||||
"type": "fulfillment",
|
||||
"type_id": 4,
|
||||
"weight": 1
|
||||
"type": "ed25519-sha-256",
|
||||
}
|
||||
],
|
||||
"threshold": 2,
|
||||
"type": "fulfillment",
|
||||
"type_id": 2
|
||||
},
|
||||
"uri": "cc:2:29:ytNK3X6-bZsbF-nCGDTuopUIMi1HCyCkyPewm6oLI3o:206"},
|
||||
"uri": "ni:///sha-256;PNYwdxaRaNw60N6LDFzOWO97b8tJeragczakL8PrAPc?fpt=ed25519-sha-256&cost=131072"},
|
||||
"public_keys": [
|
||||
"<owner 1 public key>",
|
||||
"<owner 2 public key>"
|
||||
@ -112,11 +102,10 @@ to spend the asset. For example:
|
||||
}
|
||||
|
||||
|
||||
- ``subfulfillments``: a list of fulfillments
|
||||
- ``weight``: integer weight for each subfulfillment's contribution to the threshold
|
||||
- ``threshold``: threshold to reach for the subfulfillments to reach a valid fulfillment
|
||||
- ``subconditions``: a list of condition specs
|
||||
- ``threshold``: threshold to reach for the subconditions to reach a valid fulfillment
|
||||
|
||||
The ``weight``s and ``threshold`` could be adjusted. For example, if the ``threshold`` was changed to 1 above, then only one of the new owners would have to provide a signature to spend the asset.
|
||||
The ``threshold`` can be adjusted. For example, if the ``threshold`` was changed to 1 above, then only one of the new owners would have to provide a signature to spend the asset. If it is desired to give a different weight to a subcondition, it should be specified multiple times.
|
||||
|
||||
Inputs
|
||||
------
|
||||
@ -132,8 +121,8 @@ If there is only one *current owner*, the fulfillment will be a simple signature
|
||||
"owners_before": ["<public key of the owner before the transaction happened>"],
|
||||
"fulfillment": "cf:4:RxFzIE679tFBk8zwEgizhmTuciAylvTUwy6EL6ehddHFJOhK5F4IjwQ1xLu2oQK9iyRCZJdfWAefZVjTt3DeG5j2exqxpGliOPYseNkRAWEakqJ_UrCwgnj92dnFRAEE",
|
||||
"fulfills": {
|
||||
"output": 0,
|
||||
"txid": "11b3e7d893cc5fdfcf1a1706809c7def290a3b10b0bef6525d10b024649c42d3"
|
||||
"output_index": 0,
|
||||
"transaction_id": "11b3e7d893cc5fdfcf1a1706809c7def290a3b10b0bef6525d10b024649c42d3"
|
||||
}
|
||||
}
|
||||
|
||||
@ -151,8 +140,8 @@ If there are multiple *current owners*, the fulfillment will be a little differe
|
||||
"owners_before": ["<public key of the first owner before the transaction happened>","<public key of the second owner before the transaction happened>"],
|
||||
"fulfillment": "cf:2:AQIBAgEBYwAEYEv6O5HjHGl7OWo2Tu5mWcWQcL_OGrFuUjyej-dK3LM99TbZsRd8c9luQhU30xCH5AdNaupxg-pLHuk8DoSaDA1MHQGXUZ80a_cV-4UaaaCpdey8K0CEcJxre0X96hTHCwABAWMABGBnsuHExhuSj5Mdm-q0KoPgX4nAt0s00k1WTMCzuUpQIp6aStLoTSMlsvS4fmDtOSv9gubekKLuHTMAk-LQFSKF1JdzwaVWAA2UOv0v_OS2gY3A-r0kRq8HtzjYdcmVswUA",
|
||||
"fulfills": {
|
||||
"output": 0,
|
||||
"txid": "e4805f1bfc999d6409b38e3a4c3b2fafad7c1280eb0d441da7083e945dd89eb8"
|
||||
"output_index": 0,
|
||||
"transaction_id": "e4805f1bfc999d6409b38e3a4c3b2fafad7c1280eb0d441da7083e945dd89eb8"
|
||||
}
|
||||
}
|
||||
|
||||
@ -160,5 +149,5 @@ If there are multiple *current owners*, the fulfillment will be a little differe
|
||||
- ``owners_before``: A list of public keys of the owners before the transaction; in this case it has two owners, hence two public keys.
|
||||
- ``fulfillment``: A crypto-conditions URI that encodes the cryptographic fulfillments like signatures and others;'cf' indicates this is a fulfillment, '2' indicates the condition type is THRESHOLD-SHA-256 (while '4' in `One Current Owner`_ indicates its condition type is ED25519).
|
||||
- ``fulfills``: Pointer to an output from a previous transaction that is being spent
|
||||
- ``output``: The index of the output in a previous transaction
|
||||
- ``txid``: ID of the transaction
|
||||
- ``output_index``: The index of the output in a previous transaction
|
||||
- ``transaction_id``: ID of the transaction
|
||||
|
@ -1,17 +1,3 @@
|
||||
.. raw:: html
|
||||
|
||||
<style>
|
||||
.rst-content a.internal[href*='/schema/'] {
|
||||
border: solid 1px #e1e4e5;
|
||||
font-family: monospace;
|
||||
font-size: 12px;
|
||||
color: blue;
|
||||
padding: 2px 4px;
|
||||
background-color: white;
|
||||
}
|
||||
</style>
|
||||
|
||||
=====================
|
||||
The Transaction Model
|
||||
=====================
|
||||
|
||||
@ -20,33 +6,57 @@ A transaction has the following structure:
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"id": "<hash of transaction, excluding signatures (see explanation)>",
|
||||
"version": "<version number of the transaction model>",
|
||||
"inputs": ["<list of inputs>"],
|
||||
"outputs": ["<list of outputs>"],
|
||||
"operation": "<string>",
|
||||
"asset": "<digital asset description (explained in the next section)>",
|
||||
"metadata": "<any JSON document>"
|
||||
"id": "<ID of the transaction>",
|
||||
"version": "<Transaction schema version number>",
|
||||
"inputs": ["<List of inputs>"],
|
||||
"outputs": ["<List of outputs>"],
|
||||
"operation": "<String>",
|
||||
"asset": {"<Asset model; see below>"},
|
||||
"metadata": {"<Arbitrary transaction metadata>"}
|
||||
}
|
||||
|
||||
Here's some explanation of the contents of a :ref:`transaction <transaction>`:
|
||||
Here's some explanation of the contents:
|
||||
|
||||
- id: The :ref:`id <transaction.id>` of the transaction, and also the database primary key.
|
||||
- version: :ref:`Version <transaction.version>` number of the transaction model, so that software can support different transaction models.
|
||||
- **inputs**: List of inputs. Each :ref:`input <Input>` contains a pointer to an unspent output
|
||||
and a *crypto fulfillment* that satisfies the conditions of that output. A *fulfillment*
|
||||
is usually a signature proving the ownership of the asset.
|
||||
See :doc:`./inputs-outputs`.
|
||||
- **id**: The ID of the transaction and also the hash of the transaction (loosely speaking). See below for an explanation of how it's computed. It's also the database primary key.
|
||||
|
||||
- **outputs**: List of outputs. Each :ref:`output <Output>` contains *crypto-conditions* that need to be fulfilled by a transfer transaction in order to transfer ownership to new owners.
|
||||
See :doc:`./inputs-outputs`.
|
||||
- **version**: The version-number of :ref:`the transaction schema <Transaction Schema>`. As of BigchainDB Server 1.0.0, the only allowed value is ``"1.0"``.
|
||||
|
||||
- **operation**: String representation of the :ref:`operation <transaction.operation>` being performed (currently either "CREATE", "TRANSFER" or "GENESIS"). It determines how the transaction should be validated.
|
||||
- **inputs**: List of inputs.
|
||||
Each input spends/transfers a previous output by satisfying/fulfilling
|
||||
the crypto-conditions on that output.
|
||||
A CREATE transaction should have exactly one input.
|
||||
A TRANSFER transaction should have at least one input (i.e. ≥1).
|
||||
For more details, see the subsection about :ref:`inputs <Inputs>`.
|
||||
|
||||
- **asset**: Definition of the digital :ref:`asset <Asset>`. See next section.
|
||||
- **outputs**: List of outputs.
|
||||
Each output indicates the crypto-conditions which must be satisfied
|
||||
by anyone wishing to spend/transfer that output.
|
||||
It also indicates the number of shares of the asset tied to that output.
|
||||
For more details, see the subsection about :ref:`outputs <Outputs>`.
|
||||
|
||||
- **metadata**: User-provided transaction :ref:`metadata <metadata>`: Can be any JSON document, or `NULL`.
|
||||
- **operation**: A string indicating what kind of transaction this is,
|
||||
and how it should be validated.
|
||||
It can only be ``"CREATE"``, ``"TRANSFER"`` or ``"GENESIS"``
|
||||
(but there should only be one transaction whose operation is ``"GENESIS"``:
|
||||
the one in the GENESIS block).
|
||||
|
||||
Later, when we get to the models for the block and the vote, we'll see that both include a signature (from the node which created it). You may wonder why transactions don't have signatures... The answer is that they do! They're just hidden inside the ``fulfillment`` string of each input. A creation transaction is signed by whoever created it. A transfer transaction is signed by whoever currently controls or owns it.
|
||||
- **asset**: A JSON document for the asset associated with the transaction.
|
||||
(A transaction can only be associated with one asset.)
|
||||
See :ref:`the page about the asset model <The Asset Model>`.
|
||||
|
||||
What gets signed? For each input in the transaction, the "fullfillment message" that gets signed includes the JSON serialized body of the transaction, minus any fulfillment strings. The computed signature goes into creating the ``fulfillment`` string of the input.
|
||||
- **metadata**: User-provided transaction metadata.
|
||||
It can be any valid JSON document, or ``null``.
|
||||
|
||||
**How the transaction ID is computed.**
1) Build a Python dictionary containing ``version``, ``inputs``, ``outputs``, ``operation``, ``asset``, ``metadata`` and their values,
2) In each of the inputs, replace the value of each ``fulfillment`` with ``null``,
3) :ref:`Serialize <JSON Serialization>` that dictionary,
4) The transaction ID is just :ref:`the SHA3-256 hash <Hashes>` of the serialized dictionary.
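
As a rough illustration of those steps from a shell, the sketch below recomputes an ID
from a transaction stored in a file named ``tx.json``. It assumes ``jq`` and an OpenSSL
build with SHA3 support, and ``jq -S -c`` only approximates the canonical serialization
(sorted keys, no extra whitespace); see the serialization page for the exact rules.

.. code:: bash

   # Drop the id, null out every input's fulfillment, serialize, then hash
   $ jq -c -j -S 'del(.id) | .inputs[].fulfillment = null' tx.json | openssl dgst -sha3-256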
|
||||
|
||||
**About signing the transaction.**
|
||||
Later, when we get to the models for the block and the vote, we'll see that both include a signature (from the node which created it). You may wonder why transactions don't have signatures… The answer is that they do! They're just hidden inside the ``fulfillment`` string of each input. What gets signed (as of version 1.0.0) is everything inside the transaction, including the ``id``, but the value of each ``fulfillment`` is replaced with ``null``.
|
||||
|
||||
There are example BigchainDB transactions in
|
||||
:ref:`the HTTP API documentation <The HTTP Client-Server API>`
|
||||
and
|
||||
`the Python Driver documentation <https://docs.bigchaindb.com/projects/py-driver/en/latest/usage.html>`_.
|
||||
|
@ -4,17 +4,24 @@ A vote has the following structure:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "<RethinkDB-generated ID for the vote>",
|
||||
"node_pubkey": "<the public key of the voting node>",
|
||||
"node_pubkey": "<The public key of the voting node>",
|
||||
"vote": {
|
||||
"voting_for_block": "<id of the block the node is voting for>",
|
||||
"previous_block": "<id of the block previous to this one>",
|
||||
"is_block_valid": "<true|false>",
|
||||
"invalid_reason": "<None|DOUBLE_SPEND|TRANSACTIONS_HASH_MISMATCH|NODES_PUBKEYS_MISMATCH",
|
||||
"voting_for_block": "<ID of the block the node is voting on>",
|
||||
"previous_block": "<ID of the block previous to the block being voted on>",
|
||||
"is_block_valid": "<true OR false>",
|
||||
"invalid_reason": null,
|
||||
"timestamp": "<Unix time when the vote was generated, provided by the voting node>"
|
||||
},
|
||||
"signature": "<signature of vote>"
|
||||
"signature": "<Cryptographic signature of vote>"
|
||||
}
|
||||
```
|
||||
|
||||
Note: The `invalid_reason` was not being used and may be dropped in a future version of BigchainDB. See [Issue #217](https://github.com/bigchaindb/bigchaindb/issues/217) on GitHub.
|
||||
**Notes**
|
||||
|
||||
* Votes have no ID (or `"id"`), as far as users are concerned. (The backend database uses one internally, but it's of no concern to users and it's never reported to them via BigchainDB APIs.)
|
||||
|
||||
* At the time of writing, the value of `"invalid_reason"` was always `null`. In other words, it wasn't being used. It may be used or dropped in a future version of BigchainDB. See [Issue #217](https://github.com/bigchaindb/bigchaindb/issues/217) on GitHub.
|
||||
|
||||
* For more information about the vote `"timestamp"`, see [the page about timestamps in BigchainDB](https://docs.bigchaindb.com/en/latest/timestamps.html).
|
||||
|
||||
* For more information about how the `"signature"` is calculated, see [the page about cryptography in BigchainDB](../appendices/cryptography.html).
|
||||
|
@ -4,7 +4,8 @@ Drivers & Clients
|
||||
Libraries and Tools Maintained by the BigchainDB Team
|
||||
-----------------------------------------------------
|
||||
|
||||
* `The Python Driver <https://docs.bigchaindb.com/projects/py-driver/en/latest/index.html>`_
|
||||
* `Python Driver <https://docs.bigchaindb.com/projects/py-driver/en/latest/index.html>`_
|
||||
* `JavaScript / Node.js Driver <https://github.com/bigchaindb/js-bigchaindb-driver>`_
|
||||
* `The Transaction CLI <https://docs.bigchaindb.com/projects/cli/en/latest/>`_ is
|
||||
a command-line interface for building BigchainDB transactions.
|
||||
You may be able to call it from inside the language of
|
||||
@ -20,7 +21,6 @@ Community-Driven Libraries and Tools
|
||||
Some of these projects are a work in progress,
|
||||
but may still be useful.
|
||||
|
||||
* `JavaScript / Node.js driver <https://github.com/bigchaindb/js-bigchaindb-driver>`_
|
||||
* `Haskell transaction builder <https://github.com/bigchaindb/bigchaindb-hs>`_
|
||||
* `Go driver <https://github.com/zbo14/envoke/blob/master/bigchain/bigchain.go>`_
|
||||
* `Java driver <https://github.com/mgrand/bigchaindb-java-driver>`_
|
||||
|
@ -42,19 +42,19 @@ that allows you to discover the BigchainDB API endpoints:
|
||||
Transactions
|
||||
-------------------
|
||||
|
||||
.. http:get:: /api/v1/transactions/{tx_id}
|
||||
.. http:get:: /api/v1/transactions/{transaction_id}
|
||||
|
||||
Get the transaction with the ID ``tx_id``.
|
||||
Get the transaction with the ID ``transaction_id``.
|
||||
|
||||
This endpoint returns a transaction if it was included in a ``VALID`` block,
|
||||
if it is still waiting to be processed (``BACKLOG``) or is still in an
|
||||
undecided block (``UNDECIDED``). All instances of a transaction in invalid
|
||||
blocks are ignored and treated as if they don't exist. If a request is made
|
||||
for a transaction and instances of that transaction are found only in
|
||||
invalid blocks, then the response will be ``404 Not Found``.
|
||||
This endpoint returns a transaction if it was included in a ``VALID`` block.
|
||||
All instances of a transaction in invalid/undecided blocks or the backlog
|
||||
are ignored and treated as if they don't exist. If a request is made for a
|
||||
transaction and instances of that transaction are found only in
|
||||
invalid/undecided blocks or the backlog, then the response will be ``404 Not
|
||||
Found``.
|
||||
|
||||
:param tx_id: transaction ID
|
||||
:type tx_id: hex string
|
||||
:param transaction_id: transaction ID
|
||||
:type transaction_id: hex string
|
||||
|
||||
**Example request**:
|
||||
|
||||
@ -147,7 +147,16 @@ Transactions
|
||||
.. literalinclude:: http-samples/post-tx-response.http
|
||||
:language: http
|
||||
|
||||
.. note::
|
||||
If the server is returning a ``202`` HTTP status code, then the
|
||||
transaction has been accepted for processing. To check the status of the
|
||||
transaction, poll the link to the
|
||||
:ref:`status monitor <get_status_of_transaction>`
|
||||
provided in the ``Location`` header or listen to server's
|
||||
:ref:`WebSocket Event Stream API <The WebSocket Event Stream API>`.
|
||||
|
||||
:resheader Content-Type: ``application/json``
|
||||
:resheader Location: Relative link to a status monitor for the submitted transaction.
|
||||
|
||||
:statuscode 202: The pushed transaction was accepted in the ``BACKLOG``, but the processing has not been completed.
|
||||
:statuscode 400: The transaction was malformed and not accepted in the ``BACKLOG``.
|
||||
@ -157,21 +166,29 @@ Transaction Outputs
|
||||
-------------------
|
||||
|
||||
The ``/api/v1/outputs`` endpoint returns transaction outputs filtered by a
|
||||
given public key, and optionally filtered to only include outputs that have
|
||||
not already been spent.
|
||||
given public key, and optionally filtered to only include either spent or
|
||||
unspent outputs.
|
||||
|
||||
|
||||
.. http:get:: /api/v1/outputs?public_key={public_key}
|
||||
.. http:get:: /api/v1/outputs
|
||||
|
||||
Get transaction outputs by public key. The `public_key` parameter must be
|
||||
Get transaction outputs by public key. The ``public_key`` parameter must be
|
||||
a base58 encoded ed25519 public key associated with transaction output
|
||||
ownership.
|
||||
|
||||
Returns a list of links to transaction outputs.
|
||||
Returns a list of transaction outputs.
|
||||
|
||||
:param public_key: Base58 encoded public key associated with output ownership. This parameter is mandatory and without it the endpoint will return a ``400`` response code.
|
||||
:param unspent: Boolean value ("true" or "false") indicating if the result set should be limited to outputs that are available to spend. Defaults to "false".
|
||||
:param public_key: Base58 encoded public key associated with output
|
||||
ownership. This parameter is mandatory and without it
|
||||
the endpoint will return a ``400`` response code.
|
||||
:param spent: Boolean value ("true" or "false") indicating if the result set
|
||||
should include only spent or only unspent outputs. If not
|
||||
specified the result includes all the outputs (both spent
|
||||
and unspent) associated with the ``public_key``.
|
||||
|
||||
.. http:get:: /api/v1/outputs?public_key={public_key}
|
||||
|
||||
Return all outputs, both spent and unspent, for the ``public_key``.
|
||||
|
||||
**Example request**:
|
||||
|
||||
@ -188,8 +205,70 @@ not already been spent.
|
||||
Content-Type: application/json
|
||||
|
||||
[
|
||||
"../transactions/2d431073e1477f3073a4693ac7ff9be5634751de1b8abaa1f4e19548ef0b4b0e/outputs/0",
|
||||
"../transactions/2d431073e1477f3073a4693ac7ff9be5634751de1b8abaa1f4e19548ef0b4b0e/outputs/1"
|
||||
{
|
||||
"output_index": 0,
|
||||
"transaction_id": "2d431073e1477f3073a4693ac7ff9be5634751de1b8abaa1f4e19548ef0b4b0e"
|
||||
},
|
||||
{
|
||||
"output_index": 1,
|
||||
"transaction_id": "2d431073e1477f3073a4693ac7ff9be5634751de1b8abaa1f4e19548ef0b4b0e"
|
||||
}
|
||||
]
|
||||
|
||||
:statuscode 200: A list of outputs were found and returned in the body of the response.
|
||||
:statuscode 400: The request wasn't understood by the server, e.g. the ``public_key`` querystring was not included in the request.
|
||||
|
||||
.. http:get:: /api/v1/outputs?public_key={public_key}&spent=true
|
||||
|
||||
Return all **spent** outputs for ``public_key``.
|
||||
|
||||
**Example request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
GET /api/v1/outputs?public_key=1AAAbbb...ccc&spent=true HTTP/1.1
|
||||
Host: example.com
|
||||
|
||||
**Example response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
[
|
||||
{
|
||||
"output_index": 0,
|
||||
"transaction_id": "2d431073e1477f3073a4693ac7ff9be5634751de1b8abaa1f4e19548ef0b4b0e"
|
||||
}
|
||||
]
|
||||
|
||||
:statuscode 200: A list of outputs were found and returned in the body of the response.
|
||||
:statuscode 400: The request wasn't understood by the server, e.g. the ``public_key`` querystring was not included in the request.
|
||||
|
||||
.. http:get:: /api/v1/outputs?public_key={public_key}&spent=false
|
||||
|
||||
Return all **unspent** outputs for ``public_key``.
|
||||
|
||||
**Example request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
GET /api/v1/outputs?public_key=1AAAbbb...ccc&spent=false HTTP/1.1
|
||||
Host: example.com
|
||||
|
||||
**Example response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
[
|
||||
{
|
||||
"output_index": 1,
|
||||
"transaction_id": "2d431073e1477f3073a4693ac7ff9be5634751de1b8abaa1f4e19548ef0b4b0e"
|
||||
}
|
||||
]
|
||||
|
||||
:statuscode 200: A list of outputs were found and returned in the body of the response.
|
||||
@ -203,21 +282,19 @@ Statuses
|
||||
|
||||
Get the status of an asynchronously written transaction or block by their id.
|
||||
|
||||
A link to the resource is also provided in the returned payload under
|
||||
``_links``.
|
||||
|
||||
:query string tx_id: transaction ID
|
||||
:query string transaction_id: transaction ID
|
||||
:query string block_id: block ID
|
||||
|
||||
.. note::
|
||||
|
||||
Exactly one of the ``tx_id`` or ``block_id`` query parameters must be
|
||||
Exactly one of the ``transaction_id`` or ``block_id`` query parameters must be
|
||||
used together with this endpoint (see below for getting `transaction
|
||||
statuses <#get--statuses?tx_id=tx_id>`_ and `block statuses
|
||||
<#get--statuses?block_id=block_id>`_).
|
||||
|
||||
.. _get_status_of_transaction:
|
||||
|
||||
.. http:get:: /api/v1/statuses?tx_id={tx_id}
|
||||
.. http:get:: /api/v1/statuses?transaction_id={transaction_id}
|
||||
|
||||
Get the status of a transaction.
|
||||
|
||||
@ -236,7 +313,6 @@ Statuses
|
||||
:language: http
|
||||
|
||||
:resheader Content-Type: ``application/json``
|
||||
:resheader Location: Once the transaction has been persisted, this header will link to the actual resource.
|
||||
|
||||
:statuscode 200: A transaction with that ID was found.
|
||||
:statuscode 404: A transaction with that ID was not found.
|
||||
@ -255,16 +331,10 @@ Statuses
|
||||
|
||||
**Example response**:
|
||||
|
||||
.. literalinclude:: http-samples/get-statuses-block-invalid-response.http
|
||||
:language: http
|
||||
|
||||
**Example response**:
|
||||
|
||||
.. literalinclude:: http-samples/get-statuses-block-valid-response.http
|
||||
:language: http
|
||||
|
||||
:resheader Content-Type: ``application/json``
|
||||
:resheader Location: Once the block has been persisted, this header will link to the actual resource.
|
||||
|
||||
:statuscode 200: A block with that ID was found.
|
||||
:statuscode 404: A block with that ID was not found.
|
||||
@ -288,8 +358,8 @@ Assets
|
||||
|
||||
.. http:get:: /api/v1/assets?search={text_search}
|
||||
|
||||
Return all assets that match a given text search. The asset is returned
|
||||
with the ``id`` of the transaction that created the asset.
|
||||
Return all assets that match a given text search. The ``id`` of the asset
|
||||
is the same ``id`` of the transaction that created the asset.
|
||||
|
||||
If no assets match the text search it returns an empty list.
|
||||
|
||||
@ -388,12 +458,12 @@ Advanced Usage
|
||||
The following endpoints are more advanced and meant for debugging and transparency purposes.
|
||||
|
||||
More precisely, the `blocks endpoint <#blocks>`_ allows you to retrieve a block by ``block_id`` as well as the list of blocks that
a certain transaction with ``tx_id`` occurred in (a transaction can occur in multiple ``invalid`` blocks until it
a certain transaction with ``transaction_id`` occurred in (a transaction can occur in multiple ``invalid`` blocks until it
either gets rejected or validated by the system). This endpoint gives the ability to drill down on the lifecycle of a
transaction.
|
||||
|
||||
The `votes endpoint <#votes>`_ contains all the voting information for a specific block. So after retrieving the
|
||||
``block_id`` for a given ``tx_id``, one can now simply inspect the votes that happened at a specific time on that block.
|
||||
``block_id`` for a given ``transaction_id``, one can now simply inspect the votes that happened at a specific time on that block.
|
||||
|
||||
|
||||
Blocks
|
||||
@ -429,8 +499,8 @@ Blocks
|
||||
.. http:get:: /api/v1/blocks
|
||||
|
||||
The unfiltered ``/blocks`` endpoint without any query parameters returns a `400` status code.
|
||||
The list endpoint should be filtered with a ``tx_id`` query parameter,
|
||||
see the ``/blocks?tx_id={tx_id}&status={UNDECIDED|VALID|INVALID}``
|
||||
The list endpoint should be filtered with a ``transaction_id`` query parameter,
|
||||
see the ``/blocks?transaction_id={transaction_id}&status={UNDECIDED|VALID|INVALID}``
|
||||
`endpoint <#get--blocks?tx_id=tx_id&status=UNDECIDED|VALID|INVALID>`_.
|
||||
|
||||
|
||||
@ -449,9 +519,9 @@ Blocks
|
||||
|
||||
:statuscode 400: The request wasn't understood by the server, e.g. just requesting ``/blocks`` without the ``block_id``.
|
||||
|
||||
.. http:get:: /api/v1/blocks?tx_id={tx_id}&status={UNDECIDED|VALID|INVALID}
|
||||
.. http:get:: /api/v1/blocks?transaction_id={transaction_id}&status={UNDECIDED|VALID|INVALID}
|
||||
|
||||
Retrieve a list of ``block_id`` with their corresponding status that contain a transaction with the ID ``tx_id``.
|
||||
Retrieve a list of ``block_id`` with their corresponding status that contain a transaction with the ID ``transaction_id``.
|
||||
|
||||
Any blocks, be they ``UNDECIDED``, ``VALID`` or ``INVALID`` will be
|
||||
returned if no status filter is provided.
|
||||
@ -460,7 +530,7 @@ Blocks
|
||||
In case no block was found, an empty list and an HTTP status code
|
||||
``200 OK`` is returned, as the request was still successful.
|
||||
|
||||
:query string tx_id: transaction ID *(required)*
|
||||
:query string transaction_id: transaction ID *(required)*
|
||||
:query string status: Filter blocks by their status. One of ``VALID``, ``UNDECIDED`` or ``INVALID``.
|
||||
|
||||
**Example request**:
|
||||
@ -475,8 +545,8 @@ Blocks
|
||||
|
||||
:resheader Content-Type: ``application/json``
|
||||
|
||||
:statuscode 200: A list of blocks containing a transaction with ID ``tx_id`` was found and returned.
|
||||
:statuscode 400: The request wasn't understood by the server, e.g. just requesting ``/blocks``, without defining ``tx_id``.
|
||||
:statuscode 200: A list of blocks containing a transaction with ID ``transaction_id`` was found and returned.
|
||||
:statuscode 400: The request wasn't understood by the server, e.g. just requesting ``/blocks``, without defining ``transaction_id``.
|
||||
|
||||
|
||||
Votes
|
||||
|
@ -8,8 +8,8 @@ BigchainDB Server Documentation
|
||||
introduction
|
||||
quickstart
|
||||
production-nodes/index
|
||||
clusters-feds/index
|
||||
cloud-deployment-templates/index
|
||||
clusters
|
||||
production-deployment-template/index
|
||||
dev-and-test/index
|
||||
server-reference/index
|
||||
http-client-server-api
|
||||
|
@ -8,7 +8,7 @@ Note that there are a few kinds of nodes:
|
||||
|
||||
- A **dev/test node** is a node created by a developer working on BigchainDB Server, e.g. for testing new or changed code. A dev/test node is typically run on the developer's local machine.
|
||||
|
||||
- A **bare-bones node** is a node deployed in the cloud, either as part of a testing cluster or as a starting point before upgrading the node to be production-ready. Our cloud deployment templates deploy a bare-bones node, as do our scripts for deploying a testing cluster on AWS.
|
||||
- A **bare-bones node** is a node deployed in the cloud, either as part of a testing cluster or as a starting point before upgrading the node to be production-ready.
|
||||
|
||||
- A **production node** is a node that is part of a consortium's BigchainDB cluster. A production node has the most components and requirements.
|
||||
|
||||
@ -16,10 +16,14 @@ Note that there are a few kinds of nodes:
|
||||
## Setup Instructions for Various Cases
|
||||
|
||||
* [Set up a local stand-alone BigchainDB node for learning and experimenting: Quickstart](quickstart.html)
|
||||
* [Set up and run a bare-bones node in the cloud](cloud-deployment-templates/index.html)
|
||||
* [Set up and run a local dev/test node for developing and testing BigchainDB Server](dev-and-test/setup-run-node.html)
|
||||
* [Deploy a testing cluster on AWS](clusters-feds/aws-testing-cluster.html)
|
||||
* [Set up and run a cluster (including production nodes)](clusters-feds/set-up-a-cluster.html)
|
||||
* [Set up and run a BigchainDB cluster](clusters.html)
|
||||
|
||||
There are some old RethinkDB-based deployment instructions as well:
|
||||
|
||||
* [Deploy a bare-bones RethinkDB-based node on Azure](appendices/azure-quickstart-template.html)
|
||||
* [Deploy a bare-bones RethinkDB-based node on any Ubuntu machine with Ansible](appendices/template-ansible.html)
|
||||
* [Deploy a RethinkDB-based testing cluster on AWS](appendices/aws-testing-cluster.html)
|
||||
|
||||
Instructions for setting up a client will be provided once there's a public test net.
|
||||
|
||||
|
@ -71,10 +71,10 @@ Step 2: Prepare the New Kubernetes Cluster
|
||||
Follow the steps in the sections to set up Storage Classes and Persistent Volume
|
||||
Claims, and to run MongoDB in the new cluster:
|
||||
|
||||
1. :ref:`Add Storage Classes <Step 3: Create Storage Classes>`
|
||||
2. :ref:`Add Persistent Volume Claims <Step 4: Create Persistent Volume Claims>`
|
||||
3. :ref:`Create the Config Map <Step 5: Create the Config Map - Optional>`
|
||||
4. :ref:`Run MongoDB instance <Step 6: Run MongoDB as a StatefulSet>`
|
||||
1. :ref:`Add Storage Classes <Step 9: Create Kubernetes Storage Classes for MongoDB>`.
|
||||
2. :ref:`Add Persistent Volume Claims <Step 10: Create Kubernetes Persistent Volume Claims>`.
|
||||
3. :ref:`Create the Config Map <Step 3: Configure Your BigchainDB Node>`.
|
||||
4. :ref:`Run MongoDB instance <Step 11: Start a Kubernetes StatefulSet for MongoDB>`.
|
||||
|
||||
|
||||
Step 3: Add the New MongoDB Instance to the Existing Replica Set
|
||||
@ -166,13 +166,13 @@ show-config`` command to check that the keyring is updated.
|
||||
Step 7: Run NGINX as a Deployment
|
||||
---------------------------------
|
||||
|
||||
Please refer :ref:`this <Step 10: Run NGINX as a Deployment>` to
|
||||
Please see :ref:`this page <Step 8: Start the NGINX Kubernetes Deployment>` to
|
||||
set up NGINX in your new node.
|
||||
|
||||
|
||||
Step 8: Test Your New BigchainDB Node
|
||||
-------------------------------------
|
||||
|
||||
Please refer to the testing steps :ref:`here <Step 11: Verify the BigchainDB
|
||||
Please refer to the testing steps :ref:`here <Step 17: Verify the BigchainDB
|
||||
Node Setup>` to verify that your new BigchainDB node is working as expected.
|
||||
|
@ -33,13 +33,17 @@ by going to the ``bdb-cluster-ca/easy-rsa-3.0.1/easyrsa3`` directory and using:
|
||||
|
||||
./easyrsa build-ca
|
||||
|
||||
|
||||
You will be asked to enter a PEM pass phrase for encrypting the ``ca.key`` file.
|
||||
You will also be asked to enter a PEM pass phrase (for encrypting the ``ca.key`` file).
|
||||
Make sure to securely store that PEM pass phrase.
|
||||
If you lose it, you won't be able to add or remove entities from your PKI infrastructure in the future.
|
||||
|
||||
It will ask several other questions.
|
||||
You can accept all the defaults [in brackets] by pressing Enter.
|
||||
You will be prompted to enter the Distinguished Name (DN) information for this CA.
|
||||
For each field, you can accept the default value [in brackets] by pressing Enter.
|
||||
|
||||
.. warning::
|
||||
|
||||
Don't accept the default value of OU (``IT``). Instead, enter the value ``ROOT-CA``.
|
||||
|
||||
While ``Easy-RSA CA`` *is* a valid and acceptable Common Name,
|
||||
you should probably enter a name based on the name of the managing organization,
|
||||
e.g. ``Omega Ledger CA``.
|
||||
@ -51,7 +55,7 @@ by using the subcommand ``./easyrsa help``
|
||||
Step 3: Create an Intermediate CA
|
||||
---------------------------------
|
||||
|
||||
TODO(Krish)
|
||||
TODO
|
||||
|
||||
Step 4: Generate a Certificate Revocation List
|
||||
----------------------------------------------
|
||||
@ -62,9 +66,9 @@ You can generate a Certificate Revocation List (CRL) using:
|
||||
|
||||
./easyrsa gen-crl
|
||||
|
||||
You will need to run this command every time you revoke a certificate and the
|
||||
generated ``crl.pem`` needs to be uploaded to your infrastructure to prevent
|
||||
the revoked certificate from being used again.
|
||||
You will need to run this command every time you revoke a certificate.
|
||||
The generated ``crl.pem`` needs to be uploaded to your infrastructure to
|
||||
prevent the revoked certificate from being used again.
|
||||
|
||||
|
||||
Step 5: Secure the CA
|
@ -1,9 +1,8 @@
|
||||
How to Generate a Client Certificate for MongoDB
|
||||
================================================
|
||||
|
||||
This page enumerates the steps *we* use
|
||||
to generate a client certificate
|
||||
to be used by clients who want to connect to a TLS-secured MongoDB cluster.
|
||||
This page enumerates the steps *we* use to generate a client certificate to be
|
||||
used by clients who want to connect to a TLS-secured MongoDB cluster.
|
||||
We use Easy-RSA.
|
||||
|
||||
|
||||
@ -25,7 +24,7 @@ Step 2: Create the Client Private Key and CSR
|
||||
---------------------------------------------
|
||||
|
||||
You can create the client private key and certificate signing request (CSR)
|
||||
by going into the directory ``client-cert/easy-rsa-3.0.1/easyrsa``
|
||||
by going into the directory ``client-cert/easy-rsa-3.0.1/easyrsa3``
|
||||
and using:
|
||||
|
||||
.. code:: bash
|
||||
@ -34,26 +33,37 @@ and using:
|
||||
|
||||
./easyrsa gen-req bdb-instance-0 nopass
|
||||
|
||||
You should change the Common Name (e.g. ``bdb-instance-0``)
|
||||
to a value that reflects what the
|
||||
client certificate is being used for, e.g. ``mdb-mon-instance-3`` or ``mdb-bak-instance-4``. (The final integer is specific to your BigchainDB node in the BigchainDB cluster.)
|
||||
|
||||
You should change ``bdb-instance-0`` to a value based on the client
|
||||
the certificate is for.
|
||||
You will be prompted to enter the Distinguished Name (DN) information for this certificate. For each field, you can accept the default value [in brackets] by pressing Enter.
|
||||
|
||||
Tip: You can get help with the ``easyrsa`` command (and its subcommands)
|
||||
by using the subcommand ``./easyrsa help``
|
||||
.. warning::
|
||||
|
||||
Don't accept the default value of OU (``IT``). Instead, enter the value
|
||||
``BigchainDB-Instance``, ``MongoDB-Mon-Instance`` or ``MongoDB-Backup-Instance``
|
||||
as appropriate.
|
||||
|
||||
Aside: The ``nopass`` option means "do not encrypt the private key (default is encrypted)". You can get help with the ``easyrsa`` command (and its subcommands)
|
||||
by using the subcommand ``./easyrsa help``.
|
||||
|
||||
|
||||
Step 3: Get the Client Certificate Signed
|
||||
-----------------------------------------
|
||||
|
||||
The CSR file (created in the last step)
|
||||
should be located in ``pki/reqs/bdb-instance-0.req``.
|
||||
The CSR file created in the previous step
|
||||
should be located in ``pki/reqs/bdb-instance-0.req``
|
||||
(or whatever Common Name you used in the ``gen-req`` command above).
|
||||
You need to send it to the organization managing the cluster
|
||||
so that they can use their CA
|
||||
to sign the request.
|
||||
(The managing organization should already have a self-signed CA.)
|
||||
|
||||
If you are the admin of the managing organization's self-signed CA,
|
||||
then you can import the CSR and use Easy-RSA to sign it. For example:
|
||||
then you can import the CSR and use Easy-RSA to sign it.
|
||||
Go to your ``bdb-cluster-ca/easy-rsa-3.0.1/easyrsa3/``
|
||||
directory and do something like:
|
||||
|
||||
.. code:: bash
|
||||
|
@ -0,0 +1,96 @@
|
||||
Configure MongoDB Cloud Manager for Monitoring and Backup
|
||||
=========================================================
|
||||
|
||||
This document details the steps required to configure MongoDB Cloud Manager to
|
||||
enable monitoring and backup of data in a MongoDB Replica Set.
|
||||
|
||||
|
||||
Configure MongoDB Cloud Manager for Monitoring
|
||||
----------------------------------------------
|
||||
|
||||
* Once the Monitoring Agent is up and running, open
|
||||
`MongoDB Cloud Manager <https://cloud.mongodb.com>`_.
|
||||
|
||||
* Click ``Login`` under ``MongoDB Cloud Manager`` and log in to the Cloud
|
||||
Manager.
|
||||
|
||||
* Select the group from the dropdown box on the page.
|
||||
|
||||
* Go to Settings, Group Settings and add a ``Preferred Hostnames`` entry as
|
||||
a regexp based on the ``mdb-instance-name`` of the nodes in your cluster.
|
||||
It may take up to 5 mins till this setting takes effect.
|
||||
You may refresh the browser window and verify whether the changes have
|
||||
been saved or not.
|
||||
|
||||
For example, for the nodes in a cluster that are named ``mdb-instance-0``,
|
||||
``mdb-instance-1`` and so on, a regex like ``^mdb-instance-[0-9]{1,2}$``
|
||||
is recommended.
|
||||
|
||||
* Next, click the ``Deployment`` tab, and then the ``Manage Existing``
|
||||
button.
|
||||
|
||||
* On the ``Import your deployment for monitoring`` page, enter the hostname
|
||||
to be the same as the one set for ``mdb-instance-name`` in the global
|
||||
ConfigMap for a node.
|
||||
For example, if the ``mdb-instance-name`` is set to ``mdb-instance-0``,
|
||||
enter ``mdb-instance-0`` as the value in this field.
|
||||
|
||||
* Enter the port number as ``27017``, with no authentication.
|
||||
|
||||
* If you have authentication enabled, select the option to enable
|
||||
authentication and specify the authentication mechanism as per your
|
||||
deployment. The default BigchainDB production deployment currently
|
||||
supports ``X.509 Client Certificate`` as the authentication mechanism.
|
||||
|
||||
* If you have TLS enabled, select the option to enable TLS/SSL for MongoDB
|
||||
connections, and click ``Continue``. This should already be selected for
|
||||
you in case you selected ``X.509 Client Certificate`` above.
|
||||
|
||||
* Wait a minute or two for the deployment to be found and then
|
||||
click the ``Continue`` button again.
|
||||
|
||||
* Verify that you see your process on the Cloud Manager UI.
|
||||
It should look something like this:
|
||||
|
||||
.. image:: /_static/mongodb_cloud_manager_1.png
|
||||
|
||||
* Click ``Continue``.
|
||||
|
||||
* Verify on the UI that data is being sent by the monitoring agent to the
|
||||
Cloud Manager. It may take up to 5 minutes for data to appear on the UI.
|
||||
|
||||
|
||||
Configure MongoDB Cloud Manager for Backup
|
||||
------------------------------------------
|
||||
|
||||
* Once the Backup Agent is up and running, open
|
||||
`MongoDB Cloud Manager <https://cloud.mongodb.com>`_.
|
||||
|
||||
* Click ``Login`` under ``MongoDB Cloud Manager`` and log in to the Cloud
|
||||
Manager.
|
||||
|
||||
* Select the group from the dropdown box on the page.
|
||||
|
||||
* Click ``Backup`` tab.
|
||||
|
||||
* Hover over the ``Status`` column of your backup and click ``Start``
|
||||
to start the backup.
|
||||
|
||||
* Select the replica set on the side pane.
|
||||
|
||||
* If you have authentication enabled, select the authentication mechanism as
|
||||
per your deployment. The default BigchainDB production deployment currently
|
||||
supports ``X.509 Client Certificate`` as the authentication mechanism.
|
||||
|
||||
* If you have TLS enabled, select the checkbox ``Replica set allows TLS/SSL
|
||||
connections``. This should be selected by default in case you selected
|
||||
``X.509 Client Certificate`` as the auth mechanism above.
|
||||
|
||||
* Choose the ``WiredTiger`` storage engine.
|
||||
|
||||
* Verify the details of your MongoDB instance and click on ``Start``.
|
||||
|
||||
* It may take up to 5 minutes for the backup process to start.
|
||||
During this process, the UI will show the status of the backup process.
|
||||
|
||||
* Verify that data is being backed up on the UI.
|
@ -48,10 +48,10 @@ by copying the existing ``vars.example`` file
|
||||
and then editing it.
|
||||
You should change the
|
||||
country, province, city, org and email
|
||||
to the correct values for you.
|
||||
to the correct values for your organisation.
|
||||
(Note: The country, province, city, org and email are part of
|
||||
the `Distinguished Name <https://en.wikipedia.org/wiki/X.509#Certificates>`_ (DN).)
|
||||
The comments in the file explain what the variables mean.
|
||||
The comments in the file explain what each of the variables means.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
@ -60,23 +60,29 @@ The comments in the file explain what the variables mean.
|
||||
cp vars.example vars
|
||||
|
||||
echo 'set_var EASYRSA_DN "org"' >> vars
|
||||
echo 'set_var EASYRSA_REQ_OU "IT"' >> vars
|
||||
echo 'set_var EASYRSA_KEY_SIZE 4096' >> vars
|
||||
|
||||
echo 'set_var EASYRSA_REQ_COUNTRY "DE"' >> vars
|
||||
echo 'set_var EASYRSA_REQ_PROVINCE "Berlin"' >> vars
|
||||
echo 'set_var EASYRSA_REQ_CITY "Berlin"' >> vars
|
||||
echo 'set_var EASYRSA_REQ_ORG "BigchainDB GmbH"' >> vars
|
||||
echo 'set_var EASYRSA_REQ_OU "IT"' >> vars
|
||||
echo 'set_var EASYRSA_REQ_EMAIL "dev@bigchaindb.com"' >> vars
|
||||
|
||||
Note: Later, when building a CA or generating a certificate signing request, you will be prompted to enter a value for the OU (or to accept the default). You should change the default OU from ``IT`` to one of the following, as appropriate:
|
||||
``ROOT-CA``,
|
||||
``MongoDB-Instance``, ``BigchainDB-Instance``, ``MongoDB-Mon-Instance`` or
|
||||
``MongoDB-Backup-Instance``.
|
||||
To understand why, see `the MongoDB Manual <https://docs.mongodb.com/manual/tutorial/configure-x509-client-authentication/>`_.
|
||||
There are reminders to do this in the relevant docs.
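If you prefer not to re-type the OU at the prompt each time, one option
(following the same ``set_var`` pattern as the commands above) is to override
it in ``vars`` before building the CA or generating a CSR. For example, if
this copy of ``vars`` will be used to build the self-signed CA:

.. code:: bash

    # "ROOT-CA" is the OU for the self-signed CA; use MongoDB-Instance,
    # BigchainDB-Instance, MongoDB-Mon-Instance or MongoDB-Backup-Instance
    # instead, as appropriate for the certificate being requested.
    echo 'set_var EASYRSA_REQ_OU "ROOT-CA"' >> vars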
|
||||
|
||||
|
||||
Step 4: Maybe Edit x509-types/server
|
||||
------------------------------------
|
||||
|
||||
.. warning::
|
||||
|
||||
Only do this step if you are setting up a self-signed CA
|
||||
or creating a server/member certificate.
|
||||
Only do this step if you are setting up a self-signed CA.
|
||||
|
||||
Edit the file ``x509-types/server`` and change
|
||||
``extendedKeyUsage = serverAuth`` to
|
@ -22,6 +22,7 @@ Feel free to change things to suit your needs or preferences.
|
||||
node-on-kubernetes
|
||||
add-node-on-kubernetes
|
||||
upgrade-on-kubernetes
|
||||
first-node
|
||||
log-analytics
|
||||
easy-rsa
|
||||
cloud-manager
|
||||
node-config-map-and-secrets
|
@ -193,30 +193,55 @@ simply run the following command:
|
||||
$ kubectl create -f oms-daemonset.yaml
|
||||
|
||||
|
||||
Create an Email Alert
|
||||
---------------------
|
||||
Search the OMS Logs
|
||||
-------------------
|
||||
|
||||
Suppose you want to get an email whenever there's a logging message
|
||||
with the CRITICAL or ERROR logging level from any container.
|
||||
At the time of writing, it wasn't possible to create email alerts
|
||||
using the Azure Portal (as far as we could tell),
|
||||
but it *was* possible using the OMS Portal.
|
||||
(There are instructions to get to the OMS Portal
|
||||
in the section titled :ref:`Deploy the OMS Agents` above.)
|
||||
OMS should now be getting, storing and indexing all the logs
|
||||
from all the containers in your Kubernetes cluster.
|
||||
You can search the OMS logs from the Azure Portal
|
||||
or the OMS Portal, but at the time of writing,
|
||||
there was more functionality in the OMS Portal
|
||||
(e.g. the ability to create an Alert based on a search).
|
||||
|
||||
There are instructions to get to the OMS Portal
|
||||
in the section titled :ref:`Deploy the OMS Agents` above.
|
||||
Once you're in the OMS Portal, click on **Log Search**
|
||||
and enter the query string:
|
||||
and enter a query.
|
||||
Here are some example queries:
|
||||
|
||||
All logging messages containing the strings "critical" or "error" (not case-sensitive):
|
||||
|
||||
``Type=ContainerLog (critical OR error)``
|
||||
|
||||
If you don't see any query results,
|
||||
try experimenting with the query string and time range
|
||||
to convince yourself that it's working.
|
||||
For query syntax help, see the
|
||||
.. note::
|
||||
|
||||
You can filter the results even more by clicking on things in the left sidebar.
|
||||
For OMS Log Search syntax help, see the
|
||||
`Log Analytics search reference <https://docs.microsoft.com/en-us/azure/log-analytics/log-analytics-search-reference>`_.
|
||||
If you want to exclude the "404 Not Found" errors,
|
||||
use the query string
|
||||
"Type=ContainerLog (critical OR error) NOT(404)".
|
||||
Once you're satisfied with the query string,
|
||||
|
||||
All logging messages containing the string "error" but not "404":
|
||||
|
||||
``Type=ContainerLog error NOT(404)``
|
||||
|
||||
All logging messages containing the string "critical" but not "CriticalAddonsOnly":
|
||||
|
||||
``Type=ContainerLog critical NOT(CriticalAddonsOnly)``
|
||||
|
||||
All logging messages from containers running the Docker image bigchaindb/nginx_3scale:1.3, containing the string "GET" but not the strings "Go-http-client" or "runscope" (where those exclusions filter out tests by Kubernetes and Runscope):
|
||||
|
||||
``Type=ContainerLog Image="bigchaindb/nginx_3scale:1.3" GET NOT("Go-http-client") NOT(runscope)``
|
||||
|
||||
.. note::
|
||||
|
||||
We wrote a small Python 3 script to analyze the logs found by the above NGINX search.
|
||||
It's in ``k8s/logging-and-monitoring/analyze.py``. The docstring at the top
|
||||
of the script explains how to use it.
|
||||
|
||||
|
||||
Create an Email Alert
|
||||
---------------------
|
||||
|
||||
Once you're satisfied with an OMS Log Search query string,
|
||||
click the **🔔 Alert** icon in the top menu,
|
||||
fill in the form,
|
||||
and click **Save** when you're done.
|
@ -0,0 +1,161 @@
|
||||
How to Configure a BigchainDB Node
|
||||
==================================
|
||||
|
||||
This page outlines the steps to set the various configuration settings
|
||||
in your BigchainDB node.
|
||||
They are pushed to the Kubernetes cluster in two files,
|
||||
named ``config-map.yaml`` (a set of ConfigMaps)
|
||||
and ``secret.yaml`` (a set of Secrets).
|
||||
They are stored in the Kubernetes cluster's key-value store (etcd).
|
||||
|
||||
Make sure you did all the things listed in the section titled
|
||||
:ref:`Things Each Node Operator Must Do`
|
||||
(including generation of all the SSL certificates needed
|
||||
for MongoDB auth).
|
||||
|
||||
|
||||
Edit config-map.yaml
|
||||
--------------------
|
||||
|
||||
Make a copy of the file ``k8s/configuration/config-map.yaml``
|
||||
and edit the data values in the various ConfigMaps.
|
||||
That file already contains many comments to help you
|
||||
understand each data value, but we make some additional
|
||||
remarks on some of the values below.
|
||||
|
||||
Note: None of the data values in ``config-map.yaml`` need
|
||||
to be base64-encoded. (This is unlike ``secret.yaml``,
|
||||
where all data values must be base64-encoded.
|
||||
This is true of all Kubernetes ConfigMaps and Secrets.)
|
||||
|
||||
|
||||
vars.mdb-instance-name and Similar
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Your BigchainDB cluster organization should have a standard way
|
||||
of naming instances, so the instances in your BigchainDB node
|
||||
should conform to that standard (i.e. you can't just make up some names).
|
||||
There are some things worth noting about the ``mdb-instance-name``:
|
||||
|
||||
* MongoDB reads the local ``/etc/hosts`` file while bootstrapping a replica
|
||||
set to resolve the hostname provided to the ``rs.initiate()`` command.
|
||||
It needs to ensure that the replica set is being initialized in the same
|
||||
instance where the MongoDB instance is running.
|
||||
* We use the value in the ``mdb-instance-name`` field to achieve this.
|
||||
* This field will be the DNS name of your MongoDB instance, and Kubernetes
|
||||
maps this name to its internal DNS.
|
||||
* This field will also be used by other MongoDB instances when forming a
|
||||
MongoDB replica set.
|
||||
* We use ``mdb-instance-0``, ``mdb-instance-1`` and so on in our
|
||||
documentation. Your BigchainDB cluster may use a different naming convention.
|
||||
|
||||
bdb-keyring.bdb-keyring
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This lists the BigchainDB public keys
|
||||
of all *other* nodes in your BigchainDB cluster
|
||||
(not including the public key of your BigchainDB node). Cases:
|
||||
|
||||
* If you're deploying the first node in the cluster,
|
||||
the value should be ``""`` (an empty string).
|
||||
* If you're deploying the second node in the cluster,
|
||||
the value should be the BigchainDB public key of the first/original
|
||||
node in the cluster.
|
||||
For example,
|
||||
``"EPQk5i5yYpoUwGVM8VKZRjM8CYxB6j8Lu8i8SG7kGGce"``
|
||||
* If there are two or more other nodes already in the cluster,
|
||||
the value should be a colon-separated list
|
||||
of the BigchainDB public keys
|
||||
of those other nodes.
|
||||
For example,
|
||||
``"DPjpKbmbPYPKVAuf6VSkqGCf5jzrEh69Ldef6TrLwsEQ:EPQk5i5yYpoUwGVM8VKZRjM8CYxB6j8Lu8i8SG7kGGce"``
|
||||
|
||||
|
||||
Edit secret.yaml
|
||||
----------------
|
||||
|
||||
Make a copy of the file ``k8s/configuration/secret.yaml``
|
||||
and edit the data values in the various Secrets.
|
||||
That file includes many comments to explain the required values.
|
||||
**In particular, note that all values must be base64-encoded.**
|
||||
There are tips at the top of the file
|
||||
explaining how to convert values into base64-encoded values.
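For example, one way to produce (and double-check) a base64-encoded value
in a Linux shell is shown below; ``my-secret-value`` is just a placeholder
for your real value:

.. code:: bash

    # encode (-n avoids a trailing newline, -w 0 avoids line wrapping)
    $ echo -n "my-secret-value" | base64 -w 0
    bXktc2VjcmV0LXZhbHVl

    # decode again to verify
    $ echo "bXktc2VjcmV0LXZhbHVl" | base64 -d
    my-secret-value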
|
||||
|
||||
Your BigchainDB node might not need all the Secrets.
|
||||
For example, if you plan to access the BigchainDB API over HTTP, you
|
||||
don't need the ``https-certs`` Secret.
|
||||
You can delete the Secrets you don't need,
|
||||
or set their data values to ``""``.
|
||||
|
||||
Note that ``ca.pem`` is just another name for ``ca.crt``
|
||||
(the certificate of your BigchainDB cluster's self-signed CA).
|
||||
|
||||
|
||||
bdb-certs.bdb-user
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This is the user name that BigchainDB uses to authenticate itself to the
|
||||
backend MongoDB database.
|
||||
|
||||
We need to specify the user name *as seen in the certificate* issued to
|
||||
the BigchainDB instance in order to authenticate correctly. Use
|
||||
the following ``openssl`` command to extract the user name from the
|
||||
certificate:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ openssl x509 -in <path to the bigchaindb certificate> \
|
||||
-inform PEM -subject -nameopt RFC2253
|
||||
|
||||
You should see an output line that resembles:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
subject= emailAddress=dev@bigchaindb.com,CN=test-bdb-ssl,OU=BigchainDB-Instance,O=BigchainDB GmbH,L=Berlin,ST=Berlin,C=DE
|
||||
|
||||
The ``subject`` line states the complete user name we need to use for this
|
||||
field (``bdb-certs.bdb-user``), i.e.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
emailAddress=dev@bigchaindb.com,CN=test-bdb-ssl,OU=BigchainDB-Instance,O=BigchainDB GmbH,L=Berlin,ST=Berlin,C=DE
|
||||
|
||||
|
||||
threescale-credentials.*
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
If you're not using 3scale,
|
||||
you can delete the ``threescale-credentials`` Secret
|
||||
or leave all the values blank (``""``).
|
||||
|
||||
If you *are* using 3scale, you can get the value for ``frontend-api-dns-name``
|
||||
using something like ``echo "your.nodesubdomain.net" | base64 -w 0``.
|
||||
|
||||
To get the values for ``secret-token``, ``service-id``,
|
||||
``version-header`` and ``provider-key``, login to your 3scale admin,
|
||||
then click **APIs** and click on **Integration** for the relevant API.
|
||||
Scroll to the bottom of the page and click the small link
|
||||
in the lower right corner, labelled **Download the NGINX Config files**.
|
||||
You'll get a ``.zip`` file.
|
||||
Unzip it, then open the ``.conf`` file and the ``.lua`` file.
|
||||
You should be able to find all the values in those files.
|
||||
You have to be careful because it will have values for *all* your APIs,
|
||||
and some values vary from API to API.
|
||||
The ``version-header`` is the timestamp in a line that looks like:
|
||||
|
||||
.. code::
|
||||
|
||||
proxy_set_header X-3scale-Version "2017-06-28T14:57:34Z";
|
||||
|
||||
|
||||
Deploy Your config-map.yaml and secret.yaml
|
||||
-------------------------------------------
|
||||
|
||||
You can deploy your edited ``config-map.yaml`` and ``secret.yaml``
|
||||
files to your Kubernetes cluster using the commands:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl apply -f config-map.yaml
|
||||
|
||||
$ kubectl apply -f secret.yaml
|
@ -0,0 +1,818 @@
|
||||
Kubernetes Template: Deploy a Single BigchainDB Node
|
||||
====================================================
|
||||
|
||||
This page describes how to deploy the first BigchainDB node
|
||||
in a BigchainDB cluster, or a stand-alone BigchainDB node,
|
||||
using `Kubernetes <https://kubernetes.io/>`_.
|
||||
It assumes you already have a running Kubernetes cluster.
|
||||
|
||||
If you want to add a new BigchainDB node to an existing BigchainDB cluster,
|
||||
refer to :doc:`the page about that <add-node-on-kubernetes>`.
|
||||
|
||||
Below, we refer to many files by their directory and filename,
|
||||
such as ``configuration/config-map.yaml``. Those files are files in the
|
||||
`bigchaindb/bigchaindb repository on GitHub
|
||||
<https://github.com/bigchaindb/bigchaindb/>`_ in the ``k8s/`` directory.
|
||||
Make sure you're getting those files from the appropriate Git branch on
|
||||
GitHub, i.e. the branch for the version of BigchainDB that your BigchainDB
|
||||
cluster is using.
|
||||
|
||||
|
||||
Step 1: Install and Configure kubectl
|
||||
-------------------------------------
|
||||
|
||||
kubectl is the Kubernetes CLI.
|
||||
If you don't already have it installed,
|
||||
then see the `Kubernetes docs to install it
|
||||
<https://kubernetes.io/docs/user-guide/prereqs/>`_.
|
||||
|
||||
The default location of the kubectl configuration file is ``~/.kube/config``.
|
||||
If you don't have that file, then you need to get it.
|
||||
|
||||
**Azure.** If you deployed your Kubernetes cluster on Azure
|
||||
using the Azure CLI 2.0 (as per :doc:`our template <template-kubernetes-azure>`),
|
||||
then you can get the ``~/.kube/config`` file using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ az acs kubernetes get-credentials \
|
||||
--resource-group <name of resource group containing the cluster> \
|
||||
--name <ACS cluster name>
|
||||
|
||||
If it asks for a password (to unlock the SSH key)
|
||||
and you enter the correct password,
|
||||
but you get an error message,
|
||||
then try adding ``--ssh-key-file ~/.ssh/<name>``
|
||||
to the above command (i.e. the path to the private key).
|
||||
|
||||
.. note::
|
||||
|
||||
**About kubectl contexts.** You might manage several
|
||||
Kubernetes clusters. To make it easy to switch from one to another,
|
||||
kubectl has a notion of "contexts," e.g. the context for cluster 1 or
|
||||
the context for cluster 2. To find out the current context, do:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl config view
|
||||
|
||||
and then look for the ``current-context`` in the output.
|
||||
The output also lists all clusters, contexts and users.
|
||||
(You might have only one of each.)
|
||||
You can switch to a different context using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl config use-context <new-context-name>
|
||||
|
||||
You can also switch to a different context for just one command
|
||||
by inserting ``--context <context-name>`` into any kubectl command.
|
||||
For example:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 get pods
|
||||
|
||||
will get a list of the pods in the Kubernetes cluster associated
|
||||
with the context named ``k8s-bdb-test-cluster-0``.
|
||||
|
||||
Step 2: Connect to Your Cluster's Web UI (Optional)
|
||||
---------------------------------------------------
|
||||
|
||||
You can connect to your cluster's
|
||||
`Kubernetes Dashboard <https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/>`_
|
||||
(also called the Web UI) using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl proxy -p 8001
|
||||
|
||||
or, if you prefer to be explicit about the context (explained above):
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 proxy -p 8001
|
||||
|
||||
The output should be something like ``Starting to serve on 127.0.0.1:8001``.
|
||||
That means you can visit the dashboard in your web browser at
|
||||
`http://127.0.0.1:8001/ui <http://127.0.0.1:8001/ui>`_.
|
||||
|
||||
|
||||
Step 3: Configure Your BigchainDB Node
|
||||
--------------------------------------
|
||||
|
||||
See the page titled :ref:`How to Configure a BigchainDB Node`.
|
||||
|
||||
|
||||
Step 4: Start the NGINX Service
|
||||
-------------------------------
|
||||
|
||||
* This will give us a public IP for the cluster.
|
||||
|
||||
* Once you complete this step, you might need to wait up to 10 mins for the
|
||||
public IP to be assigned.
|
||||
|
||||
* You have the option to use vanilla NGINX without HTTPS support or an
|
||||
OpenResty NGINX integrated with 3scale API Gateway.
|
||||
|
||||
|
||||
Step 4.1: Vanilla NGINX
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* This configuration is located in the file ``nginx/nginx-svc.yaml``.
|
||||
|
||||
* Set the ``metadata.name`` and ``metadata.labels.name`` to the value
|
||||
set in ``ngx-instance-name`` in the ConfigMap above.
|
||||
|
||||
* Set the ``spec.selector.app`` to the value set in ``ngx-instance-name`` in
|
||||
the ConfigMap followed by ``-dep``. For example, if the value set in the
|
||||
``ngx-instance-name`` is ``ngx-instance-0``, set the
|
||||
``spec.selector.app`` to ``ngx-instance-0-dep``.
|
||||
|
||||
* Set ``ngx-public-mdb-port.port`` to 27017, or the port number on which you
|
||||
want to expose MongoDB service.
|
||||
Set the ``ngx-public-mdb-port.targetPort`` to the port number on which the
|
||||
Kubernetes MongoDB service will be present.
|
||||
|
||||
* Set ``ngx-public-api-port.port`` to 80, or the port number on which you want to
|
||||
expose BigchainDB API service.
|
||||
Set the ``ngx-public-api-port.targetPort`` to the port number on which the
|
||||
Kubernetes BigchainDB API service will be present.
|
||||
|
||||
* Set ``ngx-public-ws-port.port`` to 81, or the port number on which you want to
|
||||
expose BigchainDB Websocket service.
|
||||
Set the ``ngx-public-ws-port.targetPort`` to the port number on which the
|
||||
BigchainDB Websocket service will be present.
|
||||
|
||||
* Start the Kubernetes Service:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx/nginx-svc.yaml
|
||||
|
||||
|
||||
Step 4.2: OpenResty NGINX + 3scale
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* You have to enable HTTPS for this one and will need an HTTPS certificate
|
||||
for your domain.
|
||||
|
||||
* You should have already created the necessary Kubernetes Secrets in the previous
|
||||
step (e.g. ``https-certs`` and ``threescale-credentials``).
|
||||
|
||||
* This configuration is located in the file ``nginx-3scale/nginx-3scale-svc.yaml``.
|
||||
|
||||
* Set the ``metadata.name`` and ``metadata.labels.name`` to the value
|
||||
set in ``ngx-instance-name`` in the ConfigMap above.
|
||||
|
||||
* Set the ``spec.selector.app`` to the value set in ``ngx-instance-name`` in
|
||||
the ConfigMap followed by ``-dep``. For example, if the value set in the
|
||||
``ngx-instance-name`` is ``ngx-instance-0``, set the
|
||||
``spec.selector.app`` to ``ngx-instance-0-dep``.
|
||||
|
||||
* Set ``ngx-public-mdb-port.port`` to 27017, or the port number on which you
|
||||
want to expose MongoDB service.
|
||||
Set the ``ngx-public-mdb-port.targetPort`` to the port number on which the
|
||||
Kubernetes MongoDB service will be present.
|
||||
|
||||
* Set ``ngx-public-3scale-port.port`` to 8080, or the port number on which
|
||||
you want to let 3scale communicate with OpenResty NGINX for authentication.
|
||||
Set the ``ngx-public-3scale-port.targetPort`` to the port number on which
|
||||
this OpenResty NGINX service will be listening on for communication with
|
||||
3scale.
|
||||
|
||||
* Set ``ngx-public-bdb-port.port`` to 443, or the port number on which you want
|
||||
to expose BigchainDB API service.
|
||||
Set the ``ngx-public-api-port.targetPort`` to the port number on which the
|
||||
Kubernetes BigchainDB API service will be present.
|
||||
|
||||
* Set ``ngx-public-bdb-port-http.port`` to 80, or the port number on which you
|
||||
want to expose BigchainDB Websocket service.
|
||||
Set the ``ngx-public-bdb-port-http.targetPort`` to the port number on which the
|
||||
BigchainDB Websocket service will be present.
|
||||
|
||||
* Start the Kubernetes Service:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-3scale/nginx-3scale-svc.yaml
|
||||
|
||||
|
||||
Step 5: Assign DNS Name to the NGINX Public IP
|
||||
----------------------------------------------
|
||||
|
||||
* This step is required only if you are planning to set up multiple
|
||||
`BigchainDB nodes
|
||||
<https://docs.bigchaindb.com/en/latest/terminology.html>`_ or are using
|
||||
HTTPS certificates tied to a domain.
|
||||
|
||||
* The following command can help you find out if the NGINX service started
|
||||
above has been assigned a public IP or external IP address:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 get svc -w
|
||||
|
||||
* Once a public IP is assigned, you can map it to
|
||||
a DNS name.
|
||||
We usually assign ``bdb-test-cluster-0``, ``bdb-test-cluster-1`` and
|
||||
so on in our documentation.
|
||||
Let's assume that we assign the unique name of ``bdb-test-cluster-0`` here.
|
||||
|
||||
|
||||
**Set up DNS mapping in Azure.**
|
||||
Select the current Azure resource group and look for the ``Public IP``
|
||||
resource. You should see at least 2 entries there - one for the Kubernetes
|
||||
master and the other for the MongoDB instance. You may have to ``Refresh`` the
|
||||
Azure web page listing the resources in a resource group for the latest
|
||||
changes to be reflected.
|
||||
Select the ``Public IP`` resource that is attached to your service (it should
|
||||
have the Azure DNS prefix name along with a long random string, without the
|
||||
``master-ip`` string), select ``Configuration``, add the DNS name assigned above
|
||||
(for example, ``bdb-test-cluster-0``), click ``Save``, and wait for the
|
||||
changes to be applied.
|
||||
|
||||
To verify the DNS setting is operational, you can run ``nslookup <DNS
|
||||
name added in ConfigMap>`` from your local Linux shell.
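For example, if the DNS name you added was ``bdb-test-cluster-0`` and your
cluster is in the West Europe region, the lookup might look like the
following (the FQDN shown is hypothetical; it depends on your Azure region
and DNS settings):

.. code:: bash

    # hypothetical FQDN; replace with your own DNS name
    $ nslookup bdb-test-cluster-0.westeurope.cloudapp.azure.com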
|
||||
|
||||
This will ensure that when you scale the replica set later, other MongoDB
|
||||
members in the replica set can reach this instance.
|
||||
|
||||
|
||||
Step 6: Start the MongoDB Kubernetes Service
|
||||
--------------------------------------------
|
||||
|
||||
* This configuration is located in the file ``mongodb/mongo-svc.yaml``.
|
||||
|
||||
* Set the ``metadata.name`` and ``metadata.labels.name`` to the value
|
||||
set in ``mdb-instance-name`` in the ConfigMap above.
|
||||
|
||||
* Set the ``spec.selector.app`` to the value set in ``mdb-instance-name`` in
|
||||
the ConfigMap followed by ``-ss``. For example, if the value set in the
|
||||
``mdb-instance-name`` is ``mdb-instance-0``, set the
|
||||
``spec.selector.app`` to ``mdb-instance-0-ss``.
|
||||
|
||||
* Start the Kubernetes Service:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb/mongo-svc.yaml
|
||||
|
||||
|
||||
Step 7: Start the BigchainDB Kubernetes Service
|
||||
-----------------------------------------------
|
||||
|
||||
* This configuration is located in the file ``bigchaindb/bigchaindb-svc.yaml``.
|
||||
|
||||
* Set the ``metadata.name`` and ``metadata.labels.name`` to the value
|
||||
set in ``bdb-instance-name`` in the ConfigMap above.
|
||||
|
||||
* Set the ``spec.selector.app`` to the value set in ``bdb-instance-name`` in
|
||||
the ConfigMap followed by ``-dep``. For example, if the value set in the
|
||||
``bdb-instance-name`` is ``bdb-instance-0``, set the
|
||||
``spec.selector.app`` to ``bdb-instance-0-dep``.
|
||||
|
||||
* Start the Kubernetes Service:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 apply -f bigchaindb/bigchaindb-svc.yaml
|
||||
|
||||
|
||||
Step 8: Start the NGINX Kubernetes Deployment
|
||||
---------------------------------------------
|
||||
|
||||
* NGINX is used as a proxy to both the BigchainDB and MongoDB instances in
|
||||
the node. It proxies HTTP requests on port 80 to the BigchainDB backend,
|
||||
and TCP connections on port 27017 to the MongoDB backend.
|
||||
|
||||
* As in step 4, you have the option to use vanilla NGINX or an OpenResty
|
||||
NGINX integrated with 3scale API Gateway.
|
||||
|
||||
Step 8.1: Vanilla NGINX
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* This configuration is located in the file ``nginx/nginx-dep.yaml``.
|
||||
|
||||
* Set the ``metadata.name`` and ``spec.template.metadata.labels.app``
|
||||
to the value set in ``ngx-instance-name`` in the ConfigMap followed by a
|
||||
``-dep``. For example, if the value set in the ``ngx-instance-name`` is
|
||||
``ngx-instance-0``, set the fields to ``ngx-instance-0-dep``.
|
||||
|
||||
* Set ``MONGODB_BACKEND_HOST`` env var to
|
||||
the value set in ``mdb-instance-name`` in the ConfigMap, followed by
|
||||
``.default.svc.cluster.local``. For example, if the value set in the
|
||||
``mdb-instance-name`` is ``mdb-instance-0``, set the
|
||||
``MONGODB_BACKEND_HOST`` env var to
|
||||
``mdb-instance-0.default.svc.cluster.local``.
|
||||
|
||||
* Set ``BIGCHAINDB_BACKEND_HOST`` env var to
|
||||
the value set in ``bdb-instance-name`` in the ConfigMap, followed by
|
||||
``.default.svc.cluster.local``. For example, if the value set in the
|
||||
``bdb-instance-name`` is ``bdb-instance-0``, set the
|
||||
``BIGCHAINDB_BACKEND_HOST`` env var to
|
||||
``bdb-instance-0.default.svc.cluster.local``.
|
||||
|
||||
* Start the Kubernetes Deployment:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx/nginx-dep.yaml
|
||||
|
||||
|
||||
Step 8.2: OpenResty NGINX + 3scale
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* This configuration is located in the file
|
||||
``nginx-3scale/nginx-3scale-dep.yaml``.
|
||||
|
||||
* Set the ``metadata.name`` and ``spec.template.metadata.labels.app``
|
||||
to the value set in ``ngx-instance-name`` in the ConfigMap followed by a
|
||||
``-dep``. For example, if the value set in the ``ngx-instance-name`` is
|
||||
``ngx-instance-0``, set the fields to ``ngx-instance-0-dep``.
|
||||
|
||||
* Set ``MONGODB_BACKEND_HOST`` env var to
|
||||
the value set in ``mdb-instance-name`` in the ConfigMap, followed by
|
||||
``.default.svc.cluster.local``. For example, if the value set in the
|
||||
``mdb-instance-name`` is ``mdb-instance-0``, set the
|
||||
``MONGODB_BACKEND_HOST`` env var to
|
||||
``mdb-instance-0.default.svc.cluster.local``.
|
||||
|
||||
* Set ``BIGCHAINDB_BACKEND_HOST`` env var to
|
||||
the value set in ``bdb-instance-name`` in the ConfigMap, followed by
|
||||
``.default.svc.cluster.local``. For example, if the value set in the
|
||||
``bdb-instance-name`` is ``bdb-instance-0``, set the
|
||||
``BIGCHAINDB_BACKEND_HOST`` env var to
|
||||
``bdb-instance-0.default.svc.cluster.local``.
|
||||
|
||||
* Start the Kubernetes Deployment:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 apply -f nginx-3scale/nginx-3scale-dep.yaml
|
||||
|
||||
|
||||
Step 9: Create Kubernetes Storage Classes for MongoDB
|
||||
-----------------------------------------------------
|
||||
|
||||
MongoDB needs somewhere to store its data persistently,
|
||||
outside the container where MongoDB is running.
|
||||
Our MongoDB Docker container
|
||||
(based on the official MongoDB Docker container)
|
||||
exports two volume mounts with correct
|
||||
permissions from inside the container:
|
||||
|
||||
* The directory where the mongod instance stores its data: ``/data/db``.
|
||||
There's more explanation in the MongoDB docs about `storage.dbpath <https://docs.mongodb.com/manual/reference/configuration-options/#storage.dbPath>`_.
|
||||
|
||||
* The directory where the mongodb instance stores the metadata for a sharded
|
||||
cluster: ``/data/configdb/``.
|
||||
There's more explanation in the MongoDB docs about `sharding.configDB <https://docs.mongodb.com/manual/reference/configuration-options/#sharding.configDB>`_.
|
||||
|
||||
Explaining how Kubernetes handles persistent volumes,
|
||||
and the associated terminology,
|
||||
is beyond the scope of this documentation;
|
||||
see `the Kubernetes docs about persistent volumes
|
||||
<https://kubernetes.io/docs/user-guide/persistent-volumes>`_.
|
||||
|
||||
The first thing to do is create the Kubernetes storage classes.
|
||||
|
||||
**Set up Storage Classes in Azure.**
|
||||
First, you need an Azure storage account.
|
||||
If you deployed your Kubernetes cluster on Azure
|
||||
using the Azure CLI 2.0
|
||||
(as per :doc:`our template <template-kubernetes-azure>`),
|
||||
then the `az acs create` command already created two
|
||||
storage accounts in the same location and resource group
|
||||
as your Kubernetes cluster.
|
||||
Both should have the same "storage account SKU": ``Standard_LRS``.
|
||||
Standard storage is lower-cost and lower-performance.
|
||||
It uses hard disk drives (HDD).
|
||||
LRS means locally-redundant storage: three replicas
|
||||
in the same data center.
|
||||
Premium storage is higher-cost and higher-performance.
|
||||
It uses solid state drives (SSD).
|
||||
At the time of writing,
|
||||
when we created a storage account with SKU ``Premium_LRS``
|
||||
and tried to use that,
|
||||
the PersistentVolumeClaim would get stuck in a "Pending" state.
|
||||
For future reference, the command to create a storage account is
|
||||
`az storage account create <https://docs.microsoft.com/en-us/cli/azure/storage/account#create>`_.
|
||||
|
||||
|
||||
The Kubernetes template for configuration of Storage Class is located in the
|
||||
file ``mongodb/mongo-sc.yaml``.
|
||||
|
||||
You may have to update the ``parameters.location`` field in the file to
|
||||
specify the location you are using in Azure.
|
||||
|
||||
Create the required storage classes using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb/mongo-sc.yaml
|
||||
|
||||
|
||||
You can check if it worked using ``kubectl get storageclasses``.
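For example, using the same kubectl context as in the previous command:

.. code:: bash

    $ kubectl --context k8s-bdb-test-cluster-0 get storageclasses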
|
||||
|
||||
**Azure.** Note that there is no line of the form
|
||||
``storageAccount: <azure storage account name>``
|
||||
under ``parameters:``. When we included one
|
||||
and then created a PersistentVolumeClaim based on it,
|
||||
the PersistentVolumeClaim would get stuck
|
||||
in a "Pending" state.
|
||||
Kubernetes just looks for a storageAccount
|
||||
with the specified skuName and location.
|
||||
|
||||
|
||||
Step 10: Create Kubernetes Persistent Volume Claims
|
||||
---------------------------------------------------
|
||||
|
||||
Next, you will create two PersistentVolumeClaim objects ``mongo-db-claim`` and
|
||||
``mongo-configdb-claim``.
|
||||
|
||||
This configuration is located in the file ``mongodb/mongo-pvc.yaml``.
|
||||
|
||||
Note how there's no explicit mention of Azure, AWS or whatever.
|
||||
``ReadWriteOnce`` (RWO) means the volume can be mounted as
|
||||
read-write by a single Kubernetes node.
|
||||
(``ReadWriteOnce`` is the *only* access mode supported
|
||||
by AzureDisk.)
|
||||
``storage: 20Gi`` means the volume has a size of 20
|
||||
`gibibytes <https://en.wikipedia.org/wiki/Gibibyte>`_.
|
||||
|
||||
You may want to update the ``spec.resources.requests.storage`` field in both
|
||||
the files to specify a different disk size.
|
||||
|
||||
Create the required Persistent Volume Claims using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb/mongo-pvc.yaml
|
||||
|
||||
|
||||
You can check its status using: ``kubectl get pvc -w``
|
||||
|
||||
Initially, the status of persistent volume claims might be "Pending"
|
||||
but it should become "Bound" fairly quickly.
|
||||
|
||||
|
||||
Step 11: Start a Kubernetes StatefulSet for MongoDB
|
||||
---------------------------------------------------
|
||||
|
||||
* This configuration is located in the file ``mongodb/mongo-ss.yaml``.
|
||||
|
||||
* Set the ``spec.serviceName`` to the value set in ``mdb-instance-name`` in
|
||||
the ConfigMap.
|
||||
For example, if the value set in the ``mdb-instance-name``
|
||||
is ``mdb-instance-0``, set the field to ``mdb-instance-0``.
|
||||
|
||||
* Set ``metadata.name``, ``spec.template.metadata.name`` and
|
||||
``spec.template.metadata.labels.app`` to the value set in
|
||||
``mdb-instance-name`` in the ConfigMap, followed by
|
||||
``-ss``.
|
||||
For example, if the value set in the
|
||||
``mdb-instance-name`` is ``mdb-instance-0``, set the fields to the value
|
||||
``mdb-instance-0-ss``.
|
||||
|
||||
* Note how the MongoDB container uses the ``mongo-db-claim`` and the
|
||||
``mongo-configdb-claim`` PersistentVolumeClaims for its ``/data/db`` and
|
||||
``/data/configdb`` directories (mount paths).
|
||||
|
||||
* Note also that we use the pod's ``securityContext.capabilities.add``
|
||||
specification to add the ``FOWNER`` capability to the container. That is
|
||||
because the MongoDB container has the user ``mongodb``, with uid ``999`` and
|
||||
group ``mongodb``, with gid ``999``.
|
||||
When this container runs on a host with a mounted disk, the writes fail
|
||||
when there is no user with uid ``999``. To avoid this, we use the Docker
|
||||
feature of ``--cap-add=FOWNER``. This bypasses the uid and gid permission
|
||||
checks during writes and allows data to be persisted to disk.
|
||||
Refer to the `Docker docs
|
||||
<https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities>`_
|
||||
for details.
|
||||
|
||||
* As we gain more experience running MongoDB in testing and production, we
|
||||
will tweak the ``resources.limits.cpu`` and ``resources.limits.memory``.
|
||||
|
||||
* Create the MongoDB StatefulSet using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb/mongo-ss.yaml
|
||||
|
||||
* It might take up to 10 minutes for the disks, specified in the Persistent
|
||||
Volume Claims above, to be created and attached to the pod.
|
||||
The UI might show that the pod has errored with the message
|
||||
"timeout expired waiting for volumes to attach/mount". Use the CLI below
|
||||
to check the status of the pod in this case, instead of the UI.
|
||||
This happens due to a bug in Azure ACS.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 get pods -w
|
||||
|
||||
|
||||
Step 12: Configure Users and Access Control for MongoDB
|
||||
-------------------------------------------------------
|
||||
|
||||
* In this step, you will create a user on MongoDB with authorization
|
||||
to create more users and assign
|
||||
roles to them.
|
||||
Note: You need to do this only when setting up the first MongoDB node of
|
||||
the cluster.
|
||||
|
||||
* Find out the name of your MongoDB pod by reading the output
|
||||
of the ``kubectl ... get pods`` command at the end of the last step.
|
||||
It should be something like ``mdb-instance-0-ss-0``.
|
||||
|
||||
* Log in to the MongoDB pod using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 exec -it <name of your MongoDB pod> bash
|
||||
|
||||
* Open a mongo shell using the certificates
|
||||
already present at ``/etc/mongod/ssl/``
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ mongo --host localhost --port 27017 --verbose --ssl \
|
||||
--sslCAFile /etc/mongod/ssl/ca.pem \
|
||||
--sslPEMKeyFile /etc/mongod/ssl/mdb-instance.pem
|
||||
|
||||
* Initialize the replica set using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
> rs.initiate( {
|
||||
_id : "bigchain-rs",
|
||||
members: [ {
|
||||
_id : 0,
|
||||
host :"<hostname>:27017"
|
||||
} ]
|
||||
} )
|
||||
|
||||
The ``hostname`` in this case will be the value set in
|
||||
``mdb-instance-name`` in the ConfigMap.
|
||||
For example, if the value set in the ``mdb-instance-name`` is
|
||||
``mdb-instance-0``, set the ``hostname`` above to the value ``mdb-instance-0``.
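For instance, with ``mdb-instance-name`` set to ``mdb-instance-0``,
the filled-in command would be:

.. code:: bash

    > rs.initiate( {
        _id : "bigchain-rs",
        members: [ {
            _id : 0,
            host : "mdb-instance-0:27017"
        } ]
    } )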
|
||||
|
||||
* The instance should be elected as the ``PRIMARY`` in the replica set (since
|
||||
this is the only instance in the replica set so far).
|
||||
This can be observed from the mongo shell prompt,
|
||||
which will read ``PRIMARY>``.
|
||||
|
||||
* Create a user ``adminUser`` on the ``admin`` database with the
|
||||
authorization to create other users. This will only work the first time you
|
||||
log in to the mongo shell. For further details, see `localhost
|
||||
exception <https://docs.mongodb.com/manual/core/security-users/#localhost-exception>`_
|
||||
in MongoDB.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
PRIMARY> use admin
|
||||
PRIMARY> db.createUser( {
|
||||
user: "adminUser",
|
||||
pwd: "superstrongpassword",
|
||||
roles: [ { role: "userAdminAnyDatabase", db: "admin" } ]
|
||||
} )
|
||||
|
||||
* Exit and restart the mongo shell using the above command.
|
||||
Authenticate as the ``adminUser`` we created earlier:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
PRIMARY> use admin
|
||||
PRIMARY> db.auth("adminUser", "superstrongpassword")
|
||||
|
||||
``db.auth()`` returns 0 when authentication is not successful,
|
||||
and 1 when successful.
|
||||
|
||||
* We need to specify the user name *as seen in the certificate* issued to
|
||||
the BigchainDB instance in order to authenticate correctly. Use
|
||||
the following ``openssl`` command to extract the user name from the
|
||||
certificate:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ openssl x509 -in <path to the bigchaindb certificate> \
|
||||
-inform PEM -subject -nameopt RFC2253
|
||||
|
||||
You should see an output line that resembles:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
subject= emailAddress=dev@bigchaindb.com,CN=test-bdb-ssl,OU=BigchainDB-Instance,O=BigchainDB GmbH,L=Berlin,ST=Berlin,C=DE
|
||||
|
||||
The ``subject`` line states the complete user name we need to use for
|
||||
creating the user on the mongo shell as follows:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
PRIMARY> db.getSiblingDB("$external").runCommand( {
|
||||
createUser: 'emailAddress=dev@bigchaindb.com,CN=test-bdb-ssl,OU=BigchainDB-Instance,O=BigchainDB GmbH,L=Berlin,ST=Berlin,C=DE',
|
||||
writeConcern: { w: 'majority' , wtimeout: 5000 },
|
||||
roles: [
|
||||
{ role: 'clusterAdmin', db: 'admin' },
|
||||
{ role: 'readWriteAnyDatabase', db: 'admin' }
|
||||
]
|
||||
} )
|
||||
|
||||
* You can similarly create users for MongoDB Monitoring Agent and MongoDB
|
||||
Backup Agent. For example:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
PRIMARY> db.getSiblingDB("$external").runCommand( {
|
||||
createUser: 'emailAddress=dev@bigchaindb.com,CN=test-mdb-mon-ssl,OU=MongoDB-Mon-Instance,O=BigchainDB GmbH,L=Berlin,ST=Berlin,C=DE',
|
||||
writeConcern: { w: 'majority' , wtimeout: 5000 },
|
||||
roles: [
|
||||
{ role: 'clusterMonitor', db: 'admin' }
|
||||
]
|
||||
} )
|
||||
|
||||
PRIMARY> db.getSiblingDB("$external").runCommand( {
|
||||
createUser: 'emailAddress=dev@bigchaindb.com,CN=test-mdb-bak-ssl,OU=MongoDB-Bak-Instance,O=BigchainDB GmbH,L=Berlin,ST=Berlin,C=DE',
|
||||
writeConcern: { w: 'majority' , wtimeout: 5000 },
|
||||
roles: [
|
||||
{ role: 'backup', db: 'admin' }
|
||||
]
|
||||
} )
|
||||
|
||||
|
||||
Step 13: Start a Kubernetes Deployment for MongoDB Monitoring Agent
|
||||
-------------------------------------------------------------------
|
||||
|
||||
* This configuration is located in the file
|
||||
``mongodb-monitoring-agent/mongo-mon-dep.yaml``.
|
||||
|
||||
* Set ``metadata.name``, ``spec.template.metadata.name`` and
|
||||
``spec.template.metadata.labels.app`` to the value set in
|
||||
``mdb-mon-instance-name`` in the ConfigMap, followed by
|
||||
``-dep``.
|
||||
For example, if the value set in the
|
||||
``mdb-mon-instance-name`` is ``mdb-mon-instance-0``, set the fields to the
|
||||
value ``mdb-mon-instance-0-dep``.
|
||||
|
||||
* Start the Kubernetes Deployment using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb-monitoring-agent/mongo-mon-dep.yaml
|
||||
|
||||
|
||||
Step 14: Start a Kubernetes Deployment for MongoDB Backup Agent
|
||||
---------------------------------------------------------------
|
||||
|
||||
* This configuration is located in the file
|
||||
``mongodb-backup-agent/mongo-backup-dep.yaml``.
|
||||
|
||||
* Set ``metadata.name``, ``spec.template.metadata.name`` and
|
||||
``spec.template.metadata.labels.app`` to the value set in
|
||||
``mdb-bak-instance-name`` in the ConfigMap, followed by
|
||||
``-dep``.
|
||||
For example, if the value set in the
|
||||
``mdb-bak-instance-name`` is ``mdb-bak-instance-0``, set the fields to the
|
||||
value ``mdb-bak-instance-0-dep``.
|
||||
|
||||
* Start the Kubernetes Deployment using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 apply -f mongodb-backup-agent/mongo-backup-dep.yaml
|
||||
|
||||
|
||||
Step 15: Start a Kubernetes Deployment for BigchainDB
|
||||
-----------------------------------------------------
|
||||
|
||||
* This configuration is located in the file
|
||||
``bigchaindb/bigchaindb-dep.yaml``.
|
||||
|
||||
* Set ``metadata.name`` and ``spec.template.metadata.labels.app`` to the
|
||||
value set in ``bdb-instance-name`` in the ConfigMap, followed by
|
||||
``-dep``.
|
||||
For example, if the value set in the
|
||||
``bdb-instance-name`` is ``bdb-instance-0``, set the fields to the
|
||||
value ``bdb-instance-0-dep``.
|
||||
|
||||
* Set the value of ``BIGCHAINDB_KEYPAIR_PRIVATE`` (not base64-encoded).
|
||||
(In the future, we'd like to pull the BigchainDB private key from
|
||||
the Secret named ``bdb-private-key``,
|
||||
but a Secret can only be mounted as a file,
|
||||
so BigchainDB Server would have to be modified to look for it
|
||||
in a file.)
|
||||
|
||||
* As we gain more experience running BigchainDB in testing and production,
|
||||
we will tweak the ``resources.limits`` values for CPU and memory, and as
|
||||
richer monitoring and probing becomes available in BigchainDB, we will
|
||||
tweak the ``livenessProbe`` and ``readinessProbe`` parameters.
|
||||
|
||||
* Create the BigchainDB Deployment using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 apply -f bigchaindb/bigchaindb-dep.yaml
|
||||
|
||||
|
||||
* You can check its status using the command ``kubectl get deployments -w``
|
||||
|
||||
|
||||
Step 16: Configure the MongoDB Cloud Manager
|
||||
--------------------------------------------
|
||||
|
||||
Refer to the
|
||||
:ref:`documentation <Configure MongoDB Cloud Manager for Monitoring and Backup>`
|
||||
for details on how to configure the MongoDB Cloud Manager to enable
|
||||
monitoring and backup.
|
||||
|
||||
|
||||
Step 17: Verify the BigchainDB Node Setup
|
||||
-----------------------------------------
|
||||
|
||||
Step 17.1: Testing Internally
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
To test the setup of your BigchainDB node, you could use a Docker container
|
||||
that provides utilities like ``nslookup``, ``curl`` and ``dig``.
|
||||
For example, you could use a container based on our
|
||||
`bigchaindb/toolbox <https://hub.docker.com/r/bigchaindb/toolbox/>`_ image.
|
||||
(The corresponding
|
||||
`Dockerfile <https://github.com/bigchaindb/bigchaindb/blob/master/k8s/toolbox/Dockerfile>`_
|
||||
is in the ``bigchaindb/bigchaindb`` repository on GitHub.)
|
||||
You can use it as below to get started immediately:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context k8s-bdb-test-cluster-0 \
|
||||
run -it toolbox \
|
||||
--image bigchaindb/toolbox \
|
||||
--image-pull-policy=Always \
|
||||
--restart=Never --rm
|
||||
|
||||
It will drop you into a shell prompt inside the container.
|
||||
|
||||
To test the MongoDB instance:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ nslookup mdb-instance-0
|
||||
|
||||
$ dig +noall +answer _mdb-port._tcp.mdb-instance-0.default.svc.cluster.local SRV
|
||||
|
||||
$ curl -X GET http://mdb-instance-0:27017
|
||||
|
||||
The ``nslookup`` command should output the configured IP address of the service
|
||||
(in the cluster).
|
||||
The ``dig`` command should return the configured port numbers.
|
||||
The ``curl`` command tests the availability of the service.
|
||||
|
||||
To test the BigchainDB instance:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ nslookup bdb-instance-0
|
||||
|
||||
$ dig +noall +answer _bdb-port._tcp.bdb-instance-0.default.svc.cluster.local SRV
|
||||
|
||||
$ curl -X GET http://bdb-instance-0:9984
|
||||
|
||||
To test the NGINX instance:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ nslookup ngx-instance-0
|
||||
|
||||
$ dig +noall +answer _ngx-public-mdb-port._tcp.ngx-instance-0.default.svc.cluster.local SRV
|
||||
|
||||
$ dig +noall +answer _ngx-public-bdb-port._tcp.ngx-instance-0.default.svc.cluster.local SRV
|
||||
|
||||
$ curl -X GET http://ngx-instance-0:27017
|
||||
|
||||
The ``curl`` command should fail with the message
|
||||
``curl: (7) Failed to connect to ngx-instance-0 port 27017: Connection refused``.
|
||||
|
||||
If you ran the vanilla NGINX instance, run:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ curl -X GET http://ngx-instance-0:80
|
||||
|
||||
If you ran the OpenResty NGINX + 3scale instance, run:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ curl -X GET https://ngx-instance-0
|
||||
|
||||
|
||||
Step 17.2: Testing Externally
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Check the MongoDB monitoring and backup agent on the MongoDB Cloud Manager
|
||||
portal to verify they are working fine.
|
||||
|
||||
Try to access the ``<DNS/IP of your exposed BigchainDB service endpoint>:80``
|
||||
in your browser. You should receive a JSON response that shows the BigchainDB
|
||||
server version, among other things.
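For example, if you mapped the NGINX public IP to the hypothetical DNS name
``bdb-test-cluster-0.westeurope.cloudapp.azure.com`` in Step 5, you could
also check this from the command line:

.. code:: bash

    # hypothetical FQDN; replace with your own DNS name or public IP
    $ curl -X GET http://bdb-test-cluster-0.westeurope.cloudapp.azure.com:80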
|
||||
|
||||
Use the Python Driver to send some transactions to the BigchainDB node and
|
||||
verify that your node or cluster works as expected.
|
@ -1,8 +1,8 @@
|
||||
How to Revoke an SSL/TLS Certificate
|
||||
====================================
|
||||
|
||||
This page enumerates the steps *we* take to revoke a self-signed SSL/TLS certificate
|
||||
in a cluster.
|
||||
This page enumerates the steps *we* take to revoke a self-signed SSL/TLS
|
||||
certificate in a cluster.
|
||||
It can only be done by someone with access to the self-signed CA
|
||||
associated with the cluster's managing organization.
|
||||
|
||||
@ -23,11 +23,11 @@ certificate:
|
||||
|
||||
./easyrsa revoke <filename>
|
||||
|
||||
|
||||
This will update the CA database with the revocation details.
|
||||
The next step is to use the updated database to issue an up-to-date
|
||||
certificate revocation list (CRL).
|
||||
|
||||
|
||||
Step 2: Generate a New CRL
|
||||
--------------------------
|
||||
|
||||
@ -40,3 +40,4 @@ Generate a new CRL for your infrastructure using:
|
||||
The generated ``crl.pem`` file needs to be uploaded to your infrastructure to
|
||||
prevent the revoked certificate from being used again.
|
||||
|
||||
In particular, the generated ``crl.pem`` file should be sent to all BigchainDB node operators in your BigchainDB cluster, so that they can update it in their MongoDB instance and their BigchainDB Server instance.
|
@ -26,7 +26,7 @@ Step 2: Create the Server Private Key and CSR
|
||||
---------------------------------------------
|
||||
|
||||
You can create the server private key and certificate signing request (CSR)
|
||||
by going into the directory ``member-cert/easy-rsa-3.0.1/easyrsa``
|
||||
by going into the directory ``member-cert/easy-rsa-3.0.1/easyrsa3``
|
||||
and using something like:
|
||||
|
||||
.. code:: bash
|
||||
@ -35,32 +35,36 @@ and using something like:
|
||||
|
||||
./easyrsa --req-cn=mdb-instance-0 --subject-alt-name=DNS:localhost,DNS:mdb-instance-0 gen-req mdb-instance-0 nopass
|
||||
|
||||
You must replace the common name (``mdb-instance-0`` above)
|
||||
with the common name of *your* MongoDB instance
|
||||
(which should be the same as the hostname of your MongoDB instance).
|
||||
You should replace the Common Name (``mdb-instance-0`` above) with the correct name for *your* MongoDB instance in the cluster, e.g. ``mdb-instance-5`` or ``mdb-instance-12``. (This name is decided by the organization managing the cluster.)
|
||||
|
||||
You need to provide the ``DNS:localhost`` SAN during certificate generation for
|
||||
using the ``localhost exception`` in the MongoDB instance.
|
||||
You will be prompted to enter the Distinguished Name (DN) information for this certificate.
|
||||
For each field, you can accept the default value [in brackets] by pressing Enter.
|
||||
|
||||
.. warning::
|
||||
|
||||
Don't accept the default value of OU (``IT``). Instead, enter the value ``MongoDB-Instance``.
|
||||
|
||||
Aside: You need to provide the ``DNS:localhost`` SAN during certificate generation
|
||||
for using the ``localhost exception`` in the MongoDB instance.
|
||||
All certificates can have this attribute without compromising security as the
|
||||
``localhost exception`` works only the first time.
|
||||
|
||||
Tip: You can get help with the ``easyrsa`` command (and its subcommands)
|
||||
by using the subcommand ``./easyrsa help``
|
||||
|
||||
|
||||
Step 3: Get the Server Certificate Signed
|
||||
-----------------------------------------
|
||||
|
||||
The CSR file (created in the last step)
|
||||
should be located in ``pki/reqs/mdb-instance-0.req``.
|
||||
The CSR file created in the last step
|
||||
should be located in ``pki/reqs/mdb-instance-0.req``
|
||||
(where the integer ``0`` may be different for you).
|
||||
You need to send it to the organization managing the cluster
|
||||
so that they can use their CA
|
||||
to sign the request.
|
||||
(The managing organization should already have a self-signed CA.)
|
||||
|
||||
If you are the admin of the managing organization's self-signed CA,
|
||||
then you can import the CSR and use Easy-RSA to sign it. For example:
|
||||
then you can import the CSR and use Easy-RSA to sign it.
|
||||
Go to your ``bdb-cluster-ca/easy-rsa-3.0.1/easyrsa3/``
|
||||
directory and do something like:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
@ -83,10 +87,3 @@ private keys.
|
||||
|
||||
cat mdb-instance-0.crt mdb-instance-0.key > mdb-instance-0.pem
|
||||
|
||||
|
||||
Step 5: Update the MongoDB Config File
|
||||
--------------------------------------
|
||||
|
||||
In the MongoDB configuration file,
|
||||
set the ``net.ssl.PEMKeyFile`` parameter to the path of the ``mdb-instance-0.pem`` file,
|
||||
and the ``net.ssl.CAFile`` parameter to the ``ca.crt`` file.
|
@ -45,11 +45,12 @@ on most common operating systems
|
||||
<https://docs.microsoft.com/en-us/cli/azure/install-az-cli2>`_.
|
||||
Do that.
|
||||
|
||||
First, update the Azure CLI to the latest version:
|
||||
If you already *have* the Azure CLI installed, you may want to update it.
|
||||
|
||||
.. code:: bash
|
||||
.. warning::
|
||||
|
||||
``az component update`` isn't supported if you installed the CLI using some of Microsoft's provided installation instructions. See `the Microsoft docs for update instructions <https://docs.microsoft.com/en-us/cli/azure/install-az-cli2>`_.
|
||||
|
||||
$ az component update
|
||||
|
||||
Next, login to your account using:
|
||||
|
||||
@ -101,7 +102,7 @@ Finally, you can deploy an ACS using something like:
|
||||
--agent-vm-size Standard_D2_v2 \
|
||||
--dns-prefix <make up a name> \
|
||||
--ssh-key-value ~/.ssh/<name>.pub \
|
||||
--orchestrator-type kubernetes
|
||||
--orchestrator-type kubernetes \
|
||||
--debug --output json
|
||||
|
||||
|
||||
@ -138,7 +139,7 @@ of a master node from the Azure Portal. For example:
|
||||
|
||||
.. note::
|
||||
|
||||
All the master nodes should have the *same* IP address and hostname
|
||||
All the master nodes should have the *same* public IP address and hostname
|
||||
(also called the Master FQDN).
|
||||
|
||||
The "agent" nodes shouldn't get public IP addresses or hostnames,
|
@ -53,6 +53,26 @@ Similarly, other instances must also have unique names in the cluster.
|
||||
#. Name of the MongoDB backup agent instance (``mdb-bak-instance-*``)
|
||||
|
||||
|
||||
☐ Generate four keys and corresponding certificate signing requests (CSRs):
|
||||
|
||||
#. Server Certificate (a.k.a. Member Certificate) for the MongoDB instance
|
||||
#. Client Certificate for BigchainDB Server to identify itself to MongoDB
|
||||
#. Client Certificate for MongoDB Monitoring Agent to identify itself to MongoDB
|
||||
#. Client Certificate for MongoDB Backup Agent to identify itself to MongoDB
|
||||
|
||||
Ask the managing organization to use its self-signed CA to sign those four CSRs.
|
||||
They should send you:
|
||||
|
||||
* Four certificates (one for each CSR you sent them).
|
||||
* One ``ca.crt`` file: their CA certificate.
|
||||
* One ``crl.pem`` file: a certificate revocation list.
|
||||
|
||||
For help, see the pages:
|
||||
|
||||
* :ref:`How to Generate a Server Certificate for MongoDB`
|
||||
* :ref:`How to Generate a Client Certificate for MongoDB`
|
||||
|
||||
|
||||
☐ Every node in a BigchainDB cluster needs its own
|
||||
BigchainDB keypair (i.e. a public key and corresponding private key).
|
||||
You can generate a BigchainDB keypair for your node, for example,
|
||||
@ -73,15 +93,17 @@ Don't share your private key.
|
||||
That list of public keys is known as the BigchainDB "keyring."
|
||||
|
||||
|
||||
☐ Ask the managing organization
|
||||
for the FQDN used to serve the BigchainDB APIs
|
||||
(e.g. ``api.orgname.net`` or ``bdb.clustername.com``).
|
||||
|
||||
|
||||
☐ Make up an FQDN for your BigchainDB node (e.g. ``mynode.mycorp.com``).
|
||||
Make sure you've registered the associated domain name (e.g. ``mycorp.com``),
|
||||
and have an SSL certificate for the FQDN.
|
||||
(You can get an SSL certificate from any SSL certificate provider).
|
||||
(You can get an SSL certificate from any SSL certificate provider.)
|
||||
|
||||
|
||||
☐ Ask the managing organization
|
||||
for the FQDN used to serve the BigchainDB APIs
|
||||
(e.g. ``api.orgname.net`` or ``bdb.clustername.com``)
|
||||
and for a copy of the associated SSL/TLS certificate.
|
||||
Also, ask for the user name to use for authenticating to MongoDB.
|
||||
|
||||
|
||||
☐ If the cluster uses 3scale for API authentication, monitoring and billing,
|
||||
@ -89,35 +111,20 @@ you must ask the managing organization for all relevant 3scale credentials.
|
||||
|
||||
|
||||
☐ If the cluster uses MongoDB Cloud Manager for monitoring and backup,
|
||||
you must ask the managing organization for the ``Agent Api Key``.
|
||||
(Each Cloud Manager backup will have its own ``Agent Api Key``.
|
||||
If there's one Cloud Manager backup,
|
||||
there will be one ``Agent Api Key`` for the whole cluster.)
|
||||
|
||||
|
||||
☐ Generate four keys and corresponding certificate signing requests (CSRs):
|
||||
|
||||
#. Server Certificate (a.k.a. Member Certificate) for the MongoDB instance
|
||||
#. Client Certificate for BigchainDB Server to identify itself to MongoDB
|
||||
#. Client Certificate for MongoDB Monitoring Agent to identify itself to MongoDB
|
||||
#. Client Certificate for MongoDB Backup Agent to identify itself to MongoDB
|
||||
|
||||
Ask the managing organization to use its self-signed CA to sign those certificates.
|
||||
|
||||
For help, see the pages:
|
||||
|
||||
* :ref:`How to Generate a Server Certificate for MongoDB`
|
||||
* :ref:`How to Generate a Client Certificate for MongoDB`
|
||||
you must ask the managing organization for the ``Group ID`` and the
``Agent API Key``.
(Each Cloud Manager "group" has its own ``Group ID``. A ``Group ID`` can
have more than one ``Agent API Key`` associated with it. It can be found under
**Settings - Group Settings**. It was added to Cloud Manager to make it easier
to rotate the ``Agent API Key`` periodically while keeping a constant
``Group ID``.)
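If you will later store those two values in a Kubernetes Secret
(see ``k8s/configuration/secret.yaml``), they must be base64-encoded first,
for example:

.. code:: bash

    # Base64-encode the Group ID and the Agent API Key for use in secret.yaml
    echo "<your Group ID>" | base64 -w 0
    echo "<your Agent API Key>" | base64 -w 0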
|
||||
|
||||
|
||||
☐ :doc:`Deploy a Kubernetes cluster on Azure <template-kubernetes-azure>`.
|
||||
|
||||
|
||||
☐ Create the Kubernetes Configuration for this node.
|
||||
We will use Kubernetes ConfigMaps and Secrets to hold all the information
|
||||
gathered above.
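For example, once those files are filled in, a minimal sketch
(assuming ``kubectl`` is configured to talk to your cluster, and that the
ConfigMap file sits next to ``secret.yaml`` in ``k8s/configuration/``):

.. code:: bash

    cd k8s/configuration/
    # Create the ConfigMaps and the Secrets on the Kubernetes cluster
    kubectl apply -f config-map.yaml
    kubectl apply -f secret.yaml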
|
||||
|
||||
|
||||
☐ Deploy your BigchainDB node on your Kubernetes cluster.
|
||||
|
||||
TODO: Links to instructions for first-node-in-cluster or second-or-later-node-in-cluster
|
||||
☐ You can now proceed to set up your BigchainDB node based on whether it is the
|
||||
:ref:`first node in a new cluster
|
||||
<Kubernetes Template: Deploy a Single BigchainDB Node>` or a
|
||||
:ref:`node that will be added to an existing cluster
|
||||
<Kubernetes Template: Add a BigchainDB Node to an Existing BigchainDB Cluster>`.
|
@ -13,4 +13,4 @@ We make some assumptions about production nodes:
|
||||
|
||||
You can use RethinkDB when building prototypes, but we don't advise or support using it in production.
|
||||
|
||||
We don't provide a detailed cookbook explaining how to secure a server, or other things that a sysadmin should know. (We do provide some [templates](../cloud-deployment-templates/index.html), but those are just a starting point.)
|
||||
We don't provide a detailed cookbook explaining how to secure a server, or other things that a sysadmin should know. We do provide some templates, but those are just starting points.
|
||||
|
@ -5,9 +5,9 @@
|
||||
|
||||
## OS Requirements
|
||||
|
||||
BigchainDB Server requires Python 3.4+ and Python 3.4+ [will run on any modern OS](https://docs.python.org/3.4/using/index.html), but we recommend using an LTS version of [Ubuntu Server](https://www.ubuntu.com/server) or a similarly server-grade Linux distribution.
|
||||
BigchainDB Server requires Python 3.5+ and Python 3.5+ [will run on any modern OS](https://docs.python.org/3.5/using/index.html), but we recommend using an LTS version of [Ubuntu Server](https://www.ubuntu.com/server) or a similarly server-grade Linux distribution.
|
||||
|
||||
_Don't use macOS_ (formerly OS X, formerly Mac OS X), because it's not a server-grade operating system. Also, BigchaindB Server uses the Python multiprocessing package and [some functionality in the multiprocessing package doesn't work on Mac OS X](https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.qsize).
|
||||
_Don't use macOS_ (formerly OS X, formerly Mac OS X), because it's not a server-grade operating system. Also, BigchainDB Server uses the Python multiprocessing package and [some functionality in the multiprocessing package doesn't work on Mac OS X](https://docs.python.org/3.5/library/multiprocessing.html#multiprocessing.Queue.qsize).
|
||||
|
||||
|
||||
## General Considerations
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
This is a page of general guidelines for setting up a production BigchainDB node. Before continuing, make sure you've read the pages about production node [assumptions](node-assumptions.html), [components](node-components.html) and [requirements](node-requirements.html).
|
||||
|
||||
Note: These are just guidelines. You can modify them to suit your needs. For example, if you want to initialize the MongoDB replica set before installing BigchainDB, you _can_ do that. If you'd prefer to use Docker and Kubernetes, you can (and [we have a template](../cloud-deployment-templates/node-on-kubernetes.html)). We don't cover all possible setup procedures here.
|
||||
Note: These are just guidelines. You can modify them to suit your needs. For example, if you want to initialize the MongoDB replica set before installing BigchainDB, you _can_ do that. If you'd prefer to use Docker and Kubernetes, you can (and [we have a template](../production-deployment-template/index.html)). We don't cover all possible setup procedures here.
|
||||
|
||||
|
||||
## Security Guidelines
|
||||
@ -50,16 +50,16 @@ Consult the MongoDB documentation for its recommendations regarding storage hard
|
||||
|
||||
### Install BigchainDB Server Dependencies
|
||||
|
||||
Before you can install BigchainDB Server, you must [install its OS-level dependencies](../appendices/install-os-level-deps.html) and you may have to [install Python 3.4+](https://www.python.org/downloads/).
|
||||
Before you can install BigchainDB Server, you must [install its OS-level dependencies](../appendices/install-os-level-deps.html) and you may have to [install Python 3.5+](https://www.python.org/downloads/).
|
||||
|
||||
### How to Install BigchainDB Server with pip
|
||||
|
||||
BigchainDB is distributed as a Python package on PyPI so you can install it using `pip`. First, make sure you have an up-to-date Python 3.4+ version of `pip` installed:
|
||||
BigchainDB is distributed as a Python package on PyPI so you can install it using `pip`. First, make sure you have an up-to-date Python 3.5+ version of `pip` installed:
|
||||
```text
|
||||
pip -V
|
||||
```
|
||||
|
||||
If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.4, then you must install a `pip` version associated with Python 3.4+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).
|
||||
If it says that `pip` isn't installed, or it says `pip` is associated with a Python version less than 3.5, then you must install a `pip` version associated with Python 3.5+. In the following instructions, we call it `pip3` but you may be able to use `pip` if that refers to the same thing. See [the `pip` installation instructions](https://pip.pypa.io/en/stable/installing/).
|
||||
|
||||
On Ubuntu 16.04, we found that this works:
|
||||
```text
|
||||
|
@ -4,7 +4,7 @@ This page has instructions to set up a single stand-alone BigchainDB node for le
|
||||
|
||||
A. Install MongoDB as the database backend. (There are other options but you can ignore them for now.)
|
||||
|
||||
[Install MongoDB Server 3.4+](https://docs.mongodb.com/manual/tutorial/install-mongodb-on-ubuntu/)
|
||||
[Install MongoDB Server 3.4+](https://docs.mongodb.com/manual/tutorial/install-mongodb-on-ubuntu/)
|
||||
|
||||
B. Run MongoDB. Open a Terminal and run the command:
|
||||
```text
|
||||
|
@ -21,7 +21,7 @@ For convenience, here's a list of all the relevant environment variables (docume
|
||||
`BIGCHAINDB_SERVER_BIND`<br>
|
||||
`BIGCHAINDB_SERVER_LOGLEVEL`<br>
|
||||
`BIGCHAINDB_SERVER_WORKERS`<br>
|
||||
`BIGCHAINDB_SERVER_THREADS`<br>
|
||||
`BIGCHAINDB_WSSERVER_SCHEME`<br>
|
||||
`BIGCHAINDB_WSSERVER_HOST`<br>
|
||||
`BIGCHAINDB_WSSERVER_PORT`<br>
|
||||
`BIGCHAINDB_CONFIG_PATH`<br>
|
||||
@ -36,6 +36,15 @@ For convenience, here's a list of all the relevant environment variables (docume
|
||||
`BIGCHAINDB_LOG_FMT_CONSOLE`<br>
|
||||
`BIGCHAINDB_LOG_FMT_LOGFILE`<br>
|
||||
`BIGCHAINDB_LOG_GRANULAR_LEVELS`<br>
|
||||
`BIGCHAINDB_DATABASE_SSL`<br>
|
||||
`BIGCHAINDB_DATABASE_LOGIN`<br>
|
||||
`BIGCHAINDB_DATABASE_PASSWORD`<br>
|
||||
`BIGCHAINDB_DATABASE_CA_CERT`<br>
|
||||
`BIGCHAINDB_DATABASE_CERTFILE`<br>
|
||||
`BIGCHAINDB_DATABASE_KEYFILE`<br>
|
||||
`BIGCHAINDB_DATABASE_KEYFILE_PASSPHRASE`<br>
|
||||
`BIGCHAINDB_DATABASE_CRLFILE`<br>
|
||||
`BIGCHAINDB_GRAPHITE_HOST`<br>
|
||||
|
||||
The local config file is `$HOME/.bigchaindb` by default (a file which might not even exist), but you can tell BigchainDB to use a different file by using the `-c` command-line option, e.g. `bigchaindb -c path/to/config_file.json start`
|
||||
or using the `BIGCHAINDB_CONFIG_PATH` environment variable, e.g. `BIGCHAINDB_CONFIG_PATH=.my_bigchaindb_config bigchaindb start`.
|
||||
@ -43,7 +52,7 @@ Note that the `-c` command line option will always take precedence if both the `
|
||||
|
||||
You can read the current default values in the file [bigchaindb/\_\_init\_\_.py](https://github.com/bigchaindb/bigchaindb/blob/master/bigchaindb/__init__.py). (The link is to the latest version.)
|
||||
|
||||
Running `bigchaindb -y configure rethinkdb` will generate a local config file in `$HOME/.bigchaindb` with all the default values, with two exceptions: It will generate a valid private/public keypair, rather than using the default keypair (`None` and `None`).
|
||||
Running `bigchaindb -y configure mongodb` will generate a local config file in `$HOME/.bigchaindb` with all the default values (for using MongoDB as the database backend), with two exceptions: it will generate a valid private/public keypair, rather than using the default keypair (`None` and `None`).
|
||||
|
||||
|
||||
## keypair.public & keypair.private
|
||||
@ -64,7 +73,7 @@ export BIGCHAINDB_KEYPAIR_PRIVATE=5C5Cknco7YxBRP9AgB1cbUVTL4FAcooxErLygw1DeG2D
|
||||
}
|
||||
```
|
||||
|
||||
Internally (i.e. in the Python code), both keys have a default value of `None`, but that's not a valid key. Therefore you can't rely on the defaults for the keypair. If you want to run BigchainDB, you must provide a valid keypair, either in the environment variables or in the local config file. You can generate a local config file with a valid keypair (and default everything else) using `bigchaindb -y configure rethinkdb`.
|
||||
Internally (i.e. in the Python code), both keys have a default value of `None`, but that's not a valid key. Therefore you can't rely on the defaults for the keypair. If you want to run BigchainDB, you must provide a valid keypair, either in the environment variables or in the local config file. You can generate a local config file with a valid keypair (and default everything else) using `bigchaindb -y configure mongodb`.
|
||||
|
||||
|
||||
## keyring
|
||||
@ -93,15 +102,28 @@ Note how the keys in the list are separated by colons.
|
||||
## database.*
|
||||
|
||||
The settings with names of the form `database.*` are for the database backend
|
||||
(currently either RethinkDB or MongoDB). They are:
|
||||
(currently either MongoDB or RethinkDB). They are:
|
||||
|
||||
* `database.backend` is either `rethinkdb` or `mongodb`.
|
||||
* `database.backend` is either `mongodb` or `rethinkdb`.
|
||||
* `database.host` is the hostname (FQDN) of the backend database.
|
||||
* `database.port` is self-explanatory.
|
||||
* `database.name` is a user-chosen name for the database inside RethinkDB or MongoDB, e.g. `bigchain`.
|
||||
* `database.name` is a user-chosen name for the database inside MongoDB or RethinkDB, e.g. `bigchain`.
|
||||
* `database.replicaset` is only relevant if using MongoDB; it's the name of the MongoDB replica set, e.g. `bigchain-rs`.
|
||||
* `database.connection_timeout` is the maximum number of milliseconds that BigchainDB will wait before giving up on one attempt to connect to the database backend.
|
||||
* `database.max_tries` is the maximum number of times that BigchainDB will try to establish a connection with the database backend. If 0, then it will try forever.
|
||||
* `database.ssl` is a flag that determines if BigchainDB connects to the
|
||||
backend database over TLS/SSL or not. This can be set to either `true` or
|
||||
`false` (the default).
|
||||
Note: This parameter is only supported for the MongoDB backend currently.
|
||||
* `database.login` and `database.password` are the login and password used to
|
||||
authenticate to the database before performing any operations, specified in
|
||||
plaintext. The default values for both are currently `null`, which means that
|
||||
BigchainDB will not authenticate with the backend database.
|
||||
Note: These parameters are only supported for the MongoDB backend currently.
|
||||
* `database.ca_cert`, `database.certfile`, `database.keyfile` and `database.crlfile` are the paths to the CA, signed certificate, private key and certificate revocation list files respectively.
|
||||
Note: These parameters are only supported for the MongoDB backend currently.
|
||||
* `database.keyfile_passphrase` is the private key decryption passphrase, specified in plaintext.
|
||||
Note: This parameter is only supported for the MongoDB backend currently.
|
||||
|
||||
**Example using environment variables**
|
||||
```text
|
||||
@ -137,7 +159,15 @@ If you used `bigchaindb -y configure mongodb` to create a default local config f
|
||||
"name": "bigchain",
|
||||
"replicaset": "bigchain-rs",
|
||||
"connection_timeout": 5000,
|
||||
"max_tries": 3
|
||||
"max_tries": 3,
|
||||
"login": null,
|
||||
"password": null
|
||||
"ssl": false,
|
||||
"ca_cert": null,
|
||||
"crlfile": null,
|
||||
"certfile": null,
|
||||
"keyfile": null,
|
||||
"keyfile_passphrase": null,
|
||||
}
|
||||
```
|
||||
|
||||
@ -146,20 +176,19 @@ If you used `bigchaindb -y configure mongodb` to create a default local config f
|
||||
|
||||
These settings are for the [Gunicorn HTTP server](http://gunicorn.org/), which is used to serve the [HTTP client-server API](../http-client-server-api.html).
|
||||
|
||||
`server.bind` is where to bind the Gunicorn HTTP server socket. It's a string. It can be any valid value for [Gunicorn's bind setting](http://docs.gunicorn.org/en/stable/settings.html#bind). If you want to allow IPv4 connections from anyone, on port 9984, use '0.0.0.0:9984'. In a production setting, we recommend you use Gunicorn behind a reverse proxy server. If Gunicorn and the reverse proxy are running on the same machine, then use 'localhost:PORT' where PORT is _not_ 9984 (because the reverse proxy needs to listen on port 9984). Maybe use PORT=9983 in that case because we know 9983 isn't used. If Gunicorn and the reverse proxy are running on different machines, then use 'A.B.C.D:9984' where A.B.C.D is the IP address of the reverse proxy. There's [more information about deploying behind a reverse proxy in the Gunicorn documentation](http://docs.gunicorn.org/en/stable/deploy.html). (They call it a proxy.)
|
||||
`server.bind` is where to bind the Gunicorn HTTP server socket. It's a string. It can be any valid value for [Gunicorn's bind setting](http://docs.gunicorn.org/en/stable/settings.html#bind). If you want to allow IPv4 connections from anyone, on port 9984, use `0.0.0.0:9984`. In a production setting, we recommend you use Gunicorn behind a reverse proxy server. If Gunicorn and the reverse proxy are running on the same machine, then use `localhost:PORT` where PORT is _not_ 9984 (because the reverse proxy needs to listen on port 9984). Maybe use PORT=9983 in that case because we know 9983 isn't used. If Gunicorn and the reverse proxy are running on different machines, then use `A.B.C.D:9984` where A.B.C.D is the IP address of the reverse proxy. There's [more information about deploying behind a reverse proxy in the Gunicorn documentation](http://docs.gunicorn.org/en/stable/deploy.html). (They call it a proxy.)
|
||||
|
||||
`server.loglevel` sets the log level of Gunicorn's Error log outputs. See
|
||||
[Gunicorn's documentation](http://docs.gunicorn.org/en/latest/settings.html#loglevel)
|
||||
for more information.
|
||||
|
||||
`server.workers` is [the number of worker processes](http://docs.gunicorn.org/en/stable/settings.html#workers) for handling requests. If `None` (the default), the value will be (cpu_count * 2 + 1). Each worker process has a single thread. The HTTP server will be able to handle `server.workers` requests simultaneously.
|
||||
`server.workers` is [the number of worker processes](http://docs.gunicorn.org/en/stable/settings.html#workers) for handling requests. If `None` (the default), the value will be (2 × cpu_count + 1). Each worker process has a single thread. The HTTP server will be able to handle `server.workers` requests simultaneously.
|
||||
|
||||
**Example using environment variables**
|
||||
```text
|
||||
export BIGCHAINDB_SERVER_BIND=0.0.0.0:9984
|
||||
export BIGCHAINDB_SERVER_LOGLEVEL=debug
|
||||
export BIGCHAINDB_SERVER_WORKERS=5
|
||||
export BIGCHAINDB_SERVER_THREADS=5
|
||||
```
|
||||
|
||||
**Example config file snippet**
|
||||
@ -181,12 +210,14 @@ export BIGCHAINDB_SERVER_THREADS=5
|
||||
```
|
||||
|
||||
|
||||
## wsserver.host and wsserver.port
|
||||
## wsserver.scheme, wsserver.host and wsserver.port
|
||||
|
||||
These settings are for the
|
||||
[aiohttp server](https://aiohttp.readthedocs.io/en/stable/index.html),
|
||||
which is used to serve the
|
||||
[WebSocket Event Stream API](../websocket-event-stream-api.html).
|
||||
`wsserver.scheme` should be either `"ws"` or `"wss"`
|
||||
(but setting it to `"wss"` does *not* enable SSL/TLS).
|
||||
`wsserver.host` is where to bind the aiohttp server socket and
|
||||
`wsserver.port` is the corresponding port.
|
||||
If you want to allow connections from anyone, on port 9985,
|
||||
@ -194,6 +225,7 @@ set `wsserver.host` to 0.0.0.0 and `wsserver.port` to 9985.
|
||||
|
||||
**Example using environment variables**
|
||||
```text
|
||||
export BIGCHAINDB_WSSERVER_SCHEME=ws
|
||||
export BIGCHAINDB_WSSERVER_HOST=0.0.0.0
|
||||
export BIGCHAINDB_WSSERVER_PORT=9985
|
||||
```
|
||||
@ -201,6 +233,7 @@ export BIGCHAINDB_WSSERVER_PORT=9985
|
||||
**Example config file snippet**
|
||||
```js
|
||||
"wsserver": {
|
||||
"scheme": "wss",
|
||||
"host": "0.0.0.0",
|
||||
"port": 65000
|
||||
}
|
||||
@ -209,6 +242,7 @@ export BIGCHAINDB_WSSERVER_PORT=9985
|
||||
**Default values (from a config file)**
|
||||
```js
|
||||
"wsserver": {
|
||||
"scheme": "ws",
|
||||
"host": "localhost",
|
||||
"port": 9985
|
||||
}
|
||||
@ -462,3 +496,29 @@ logging of the `core.py` module to be more verbose, you would set the
|
||||
```
|
||||
|
||||
**Defaults to**: `"{}"`
|
||||
|
||||
|
||||
## graphite.host
|
||||
|
||||
The host name or IP address of a server listening for statsd events on UDP
|
||||
port 8125. This defaults to `localhost`, and if no statsd collector is running,
|
||||
the events are simply dropped by the operating system.
|
||||
|
||||
**Example using environment variables**
|
||||
```text
|
||||
export BIGCHAINDB_GRAPHITE_HOST=10.0.0.5
|
||||
```
|
||||
|
||||
**Example config file snippet**
|
||||
```js
|
||||
"graphite": {
|
||||
"host": "10.0.0.5"
|
||||
}
|
||||
```
|
||||
|
||||
**Default values (from a config file)**
|
||||
```js
|
||||
"graphite": {
|
||||
"host": "localhost"
|
||||
}
|
||||
```
|
||||
|
@ -26,17 +26,15 @@ It's a good idea to make sure that the node you're connecting with
|
||||
has advertised support for the Event Stream API. To do so, send a HTTP GET
|
||||
request to the node's :ref:`API Root Endpoint`
|
||||
(e.g. ``http://localhost:9984/api/v1/``) and check that the
|
||||
response contains a ``streams_<version>`` property in ``_links``:
|
||||
response contains a ``streams`` property:
|
||||
|
||||
.. code:: JSON
|
||||
|
||||
{
|
||||
"_links": {
|
||||
...,
|
||||
"streams_v1": "ws://example.com:9985/api/v1/streams/valid_tx",
|
||||
"streams": "ws://example.com:9985/api/v1/streams/valid_transactions",
|
||||
...
|
||||
}
|
||||
}
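One quick way to check (a sketch using ``curl``, assuming a node is running
locally on the default port):

.. code:: bash

    # Fetch the API root document and look for the "streams" link
    curl -s http://localhost:9984/api/v1/ | grep streams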
|
||||
|
||||
|
||||
Connection Keep-Alive
|
||||
@ -58,8 +56,8 @@ BigchainDB node will be ignored.
|
||||
Streams will always be under the WebSocket protocol (so ``ws://`` or
|
||||
``wss://``) and accessible as extensions to the ``/api/v<version>/streams/``
|
||||
API root URL (for example, `validated transactions <#valid-transactions>`_
|
||||
would be accessible under ``/api/v1/streams/valid_tx``). If you're running your
|
||||
own BigchainDB instance and need help determining its root URL,
|
||||
would be accessible under ``/api/v1/streams/valid_transactions``). If you're
|
||||
running your own BigchainDB instance and need help determining its root URL,
|
||||
then see the page titled :ref:`Determining the API Root URL`.
|
||||
|
||||
All messages sent in a stream are in the JSON format.
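For example, to eyeball a stream from the command line (a sketch assuming the
third-party ``wscat`` tool is installed and the node's WebSocket API listens
on the default host and port):

.. code:: bash

    # Open a WebSocket connection to the valid-transactions stream
    wscat -c ws://localhost:9985/api/v1/streams/valid_transactions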
|
||||
@ -79,7 +77,7 @@ All messages sent in a stream are in the JSON format.
|
||||
Valid Transactions
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
``/valid_tx``
|
||||
``/valid_transactions``
|
||||
|
||||
Streams an event for any newly validated transactions. Message bodies contain
|
||||
the transaction's ID, associated asset ID, and containing block's ID.
|
||||
@ -89,7 +87,7 @@ Example message:
|
||||
.. code:: JSON
|
||||
|
||||
{
|
||||
"tx_id": "<sha3-256 hash>",
|
||||
"transaction_id": "<sha3-256 hash>",
|
||||
"asset_id": "<sha3-256 hash>",
|
||||
"block_id": "<sha3-256 hash>"
|
||||
}
|
||||
@ -100,6 +98,6 @@ Example message:
|
||||
Transactions in BigchainDB are validated in batches ("blocks") and will,
|
||||
therefore, be streamed in batches. Each block can contain up to 1000
|
||||
transactions, ordered by the time at which they were included in the block.
|
||||
The ``/valid_tx`` stream will send these transactions in the same order
|
||||
that the block stored them in, but this does **NOT** guarantee that you
|
||||
will recieve the events in that same order.
|
||||
The ``/valid_transactions`` stream will send these transactions in the same
|
||||
order that the block stored them in, but this does **NOT** guarantee that
|
||||
you will receive the events in that same order.
|
||||
|
@ -1,9 +1,3 @@
|
||||
###############################################################
|
||||
# This config file runs bigchaindb:0.10.1 as a k8s Deployment #
|
||||
# and it connects to the mongodb backend running as a #
|
||||
# separate pod #
|
||||
###############################################################
|
||||
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
@ -18,13 +12,16 @@ spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: bigchaindb
|
||||
image: bigchaindb/bigchaindb:0.10.2
|
||||
image: bigchaindb/bigchaindb:1.0.0rc1
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- start
|
||||
env:
|
||||
- name: BIGCHAINDB_DATABASE_HOST
|
||||
value: mdb-instance-0
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: vars
|
||||
key: mdb-instance-name
|
||||
- name: BIGCHAINDB_DATABASE_PORT
|
||||
value: "27017"
|
||||
- name: BIGCHAINDB_DATABASE_REPLICASET
|
||||
@ -40,7 +37,10 @@ spec:
|
||||
- name: BIGCHAINDB_WSSERVER_PORT
|
||||
value: "9985"
|
||||
- name: BIGCHAINDB_KEYPAIR_PUBLIC
|
||||
value: "<public key here>"
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: bdb-public-key
|
||||
key: bdb-public-key
|
||||
- name: BIGCHAINDB_KEYPAIR_PRIVATE
|
||||
value: "<private key here>"
|
||||
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
|
||||
@ -51,9 +51,22 @@ spec:
|
||||
value: "120"
|
||||
- name: BIGCHAINDB_LOG_LEVEL_CONSOLE
|
||||
value: debug
|
||||
- name: BIGCHAINDB_DATABASE_CA_CERT
|
||||
value: /etc/bigchaindb/ssl/ca.pem
|
||||
- name: BIGCHAINDB_DATABASE_CRLFILE
|
||||
value: /etc/bigchaindb/ssl/crlfile
|
||||
- name: BIGCHAINDB_DATABASE_CERTFILE
|
||||
value: /etc/bigchaindb/ssl/bdb-instance.pem
|
||||
- name: BIGCHAINDB_DATABASE_KEYFILE
|
||||
value: /etc/bigchaindb/ssl/bdb-instance.key
|
||||
- name: BIGCHAINDB_DATABASE_LOGIN
|
||||
value: /etc/bigchaindb/ssl/bdb-user
|
||||
# The following env var is not required for the bootstrap/first node
|
||||
#- name: BIGCHAINDB_KEYRING
|
||||
# value: ""
|
||||
# valueFrom:
|
||||
# configMapKeyRef:
|
||||
# name: bdb-keyring
|
||||
# key: bdb-keyring
|
||||
ports:
|
||||
- containerPort: 9984
|
||||
hostPort: 9984
|
||||
@ -63,6 +76,10 @@ spec:
|
||||
hostPort: 9985
|
||||
name: bdb-ws-port
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: bdb-certs
|
||||
mountPath: /etc/bigchaindb/ssl/
|
||||
readOnly: true
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
@ -80,3 +97,8 @@ spec:
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: bdb-certs
|
||||
secret:
|
||||
secretName: bdb-certs
|
||||
defaultMode: 0400
|
||||
|
@ -1,30 +1,22 @@
|
||||
#######################################################
|
||||
# This YAML file desribes a ConfigMap for the cluster #
|
||||
#######################################################
|
||||
## Note: data values do NOT have to be base64-encoded in this file.
|
||||
|
||||
## vars holds environment variables common to this BigchainDB node
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: mdb-mon
|
||||
name: vars
|
||||
namespace: default
|
||||
data:
|
||||
api-key: "<api key here>"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: mdb-backup
|
||||
namespace: default
|
||||
data:
|
||||
api-key: "<api key here>"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: mdb-fqdn
|
||||
namespace: default
|
||||
data:
|
||||
fqdn: mdb-instance-0
|
||||
# MongoDB
|
||||
mdb-instance-name: "<name of the mdb instance>"
|
||||
# BigchainDB
|
||||
bdb-instance-name: "<name of the bdb instance>"
|
||||
# NGINX
|
||||
ngx-instance-name: "<name of the ngx instance>"
|
||||
# MongoDB Monitoring Agent
|
||||
mdb-mon-instance-name: "<name of the mdb monitoring agent instance>"
|
||||
# MongoDB Backup Agent
|
||||
mdb-bak-instance-name: "<name of the mdb backup agent instance>"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
@ -32,5 +24,24 @@ metadata:
|
||||
name: mongodb-whitelist
|
||||
namespace: default
|
||||
data:
|
||||
# We only support "all" currently
|
||||
allowed-hosts: "all"
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: bdb-keyring
|
||||
namespace: default
|
||||
data:
|
||||
# Colon-separated list of all *other* nodes' BigchainDB public keys.
|
||||
bdb-keyring: "<':' separated list of public keys>"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: bdb-public-key
|
||||
namespace: default
|
||||
data:
|
||||
# BigchainDB public key of *this* node.
|
||||
# Example: "EPQk5i5yYpoUwGVM8VKZRjM8CYxB6j8Lu8i8SG7kGGce"
|
||||
bdb-public-key: "<public key>"
|
||||
|
119
k8s/configuration/secret.yaml
Normal file
119
k8s/configuration/secret.yaml
Normal file
@ -0,0 +1,119 @@
|
||||
# All secret data should be base64 encoded before embedding them here.
|
||||
# Short strings can be encoded using, e.g.
|
||||
# echo "secret string" | base64 -w 0 > secret.string.b64
|
||||
# Files (e.g. certificates) can be encoded using, e.g.
|
||||
# cat cert.pem | base64 -w 0 > cert.pem.b64
|
||||
# then copy the contents of cert.pem.b64 (for example) below.
|
||||
# Ref: https://kubernetes.io/docs/concepts/configuration/secret/
|
||||
# Unused values can be set to ""
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: cloud-manager-credentials
|
||||
namespace: default
|
||||
type: Opaque
|
||||
data:
|
||||
# Base64-encoded Group ID
|
||||
group-id: "<b64 encoded Group ID>"
|
||||
# Base64-encoded Agent API Key
|
||||
agent-api-key: "<b64 encoded Agent API Key>"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: bdb-private-key
|
||||
namespace: default
|
||||
type: Opaque
|
||||
data:
|
||||
# Base64-encoded BigchainDB private key of *this* node
|
||||
private.key: "<b64 encoded BigchainDB private key>"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: mdb-certs
|
||||
namespace: default
|
||||
type: Opaque
|
||||
data:
|
||||
# Base64-encoded, concatenated certificate and private key
|
||||
mdb-instance.pem: "<b64 encoded, concatenated certificate and private key>"
|
||||
# Base64-encoded CA certificate (ca.crt)
|
||||
ca.pem: "<b64 encoded CA certificate>"
|
||||
# Base64-encoded MongoDB CRL
|
||||
mdb-crl.pem: "<b64 encoded CRL data>"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: mdb-mon-certs
|
||||
namespace: default
|
||||
type: Opaque
|
||||
data:
|
||||
# Base64-encoded, concatenated certificate and private key
|
||||
mdb-mon-instance.pem: "<b64 encoded, concatenated certificate and private key>"
|
||||
# Base64-encoded CA certificate (ca.crt)
|
||||
ca.pem: "<b64 encoded CA certificate>"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: mdb-bak-certs
|
||||
namespace: default
|
||||
type: Opaque
|
||||
data:
|
||||
# Base64-encoded, concatenated certificate and private key
|
||||
mdb-bak-instance.pem: "<b64 encoded, concatenated certificate and private key>"
|
||||
# Base64-encoded CA certificate (ca.crt)
|
||||
ca.pem: "<b64 encoded CA certificate>"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: bdb-certs
|
||||
namespace: default
|
||||
type: Opaque
|
||||
data:
|
||||
# Base64-encoded CA certificate (ca.crt)
|
||||
ca.pem: "<b64 encoded CA certificate>"
|
||||
# Base64-encoded CRL file
|
||||
crlfile: "<b64 encoded CRL>"
|
||||
# Base64-encoded BigchainDB instance certificate
|
||||
bdb-instance.pem: "<b64 encoded certificate>"
|
||||
# Base64-encoded private key
|
||||
bdb-instance.key: "<b64 encoded private key>"
|
||||
# Base64-encoded instance authentication credentials
|
||||
bdb-user: "<b64 encoded user name>"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: https-certs
|
||||
namespace: default
|
||||
type: Opaque
|
||||
data:
|
||||
# Base64-encoded HTTPS private key
|
||||
cert.key: "<b64 encoded HTTPS private key>"
|
||||
# Base64-encoded HTTPS certificate chain
|
||||
# starting with your primary SSL cert (e.g. your_domain.crt)
|
||||
# followed by all intermediate certs.
|
||||
# If the cert is from DigiCert, download "Best format for nginx".
|
||||
cert.pem: "<b64 encoded HTTPS certificate chain>"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: threescale-credentials
|
||||
namespace: default
|
||||
type: Opaque
|
||||
data:
|
||||
secret-token: "<b64 encoded 3scale secret-token>"
|
||||
service-id: "<b64 encoded 3scale service-id>"
|
||||
version-header: "<b64 encoded 3scale version-header>"
|
||||
provider-key: "<b64 encoded 3scale provider-key>"
|
||||
# The frontend-api-dns-name will be the DNS name registered for your HTTPS
|
||||
# certificate.
|
||||
frontend-api-dns-name: "<b64 encoded DNS/FQDN>"
|
||||
# The upstream-api-port can be set to any port other than 9984, 9985, 443,
|
||||
# 8888 and 27017. We usually use port '9999', which is 'OTk5OQo=' in base 64.
|
||||
upstream-api-port: "OTk5OQo="
|
k8s/logging-and-monitoring/analyze.py (new file, 77 lines)
@ -0,0 +1,77 @@
|
||||
"""
|
||||
A little Python script to do some analysis of the NGINX logs.
|
||||
To get the relevant NGINX logs:
|
||||
1. Go to the OMS Portal
|
||||
2. Create a new Log Search
|
||||
3. Use a search string such as:
|
||||
|
||||
Type=ContainerLog Image="bigchaindb/nginx_3scale:1.3" GET NOT("Go-http-client") NOT(runscope)
|
||||
|
||||
(This gets all logs from the NGINX container, only those with the word "GET",
|
||||
excluding those with the string "Go-http-client" [internal Kubernetes traffic],
|
||||
excluding those with the string "runscope" [Runscope tests].)
|
||||
|
||||
4. In the left sidebar, at the top, use the dropdown menu to select the time range,
|
||||
e.g. "Data based on last 7 days". Pay attention to the number of results and
|
||||
the time series chart in the left sidebar. Are there any spikes?
|
||||
5. Export the search results. A CSV file will be saved on your local machine.
|
||||
6. $ python3 analyze.py logs.csv
|
||||
|
||||
Thanks to https://gist.github.com/hreeder/f1ffe1408d296ce0591d
|
||||
"""
|
||||
|
||||
import sys
|
||||
import csv
|
||||
import re
|
||||
from dateutil.parser import parse
|
||||
|
||||
|
||||
lineformat = re.compile(r'(?P<ipaddress>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) - - '
|
||||
r'\[(?P<dateandtime>\d{2}\/[a-z]{3}\/\d{4}:\d{2}:\d{2}:\d{2} '
|
||||
r'(\+|\-)\d{4})\] ((\"(GET|POST) )(?P<url>.+)(http\/1\.1")) '
|
||||
r'(?P<statuscode>\d{3}) '
|
||||
r'(?P<bytessent>\d+) '
|
||||
r'(["](?P<refferer>(\-)|(.+))["]) '
|
||||
r'(["](?P<useragent>.+)["])',
|
||||
re.IGNORECASE)
|
||||
|
||||
filepath = sys.argv[1]
|
||||
|
||||
logline_list = []
|
||||
with open(filepath) as csvfile:
|
||||
csvreader = csv.reader(csvfile, delimiter=',')
|
||||
for row in csvreader:
|
||||
if row and (row[8] != 'LogEntry'):
|
||||
# because the first line is just the column headers, such as 'LogEntry'
|
||||
logline = row[8]
|
||||
print(logline + '\n')
|
||||
logline_data = re.search(lineformat, logline)
|
||||
if logline_data:
|
||||
logline_dict = logline_data.groupdict()
|
||||
logline_list.append(logline_dict)
|
||||
# so logline_list is a list of dicts
|
||||
# print('{}'.format(logline_dict))
|
||||
|
||||
# Analysis
|
||||
|
||||
total_bytes_sent = 0
|
||||
tstamp_list = []
|
||||
|
||||
for lldict in logline_list:
|
||||
total_bytes_sent += int(lldict['bytessent'])
|
||||
dt = lldict['dateandtime']
|
||||
# https://tinyurl.com/lqjnhot
|
||||
dtime = parse(dt[:11] + " " + dt[12:])
|
||||
tstamp_list.append(dtime.timestamp())
|
||||
|
||||
print('Number of log lines seen: {}'.format(len(logline_list)))
|
||||
|
||||
# Time range
|
||||
trange_sec = max(tstamp_list) - min(tstamp_list)
|
||||
trange_days = trange_sec / 60.0 / 60.0 / 24.0
|
||||
print('Time range seen (days): {}'.format(trange_days))
|
||||
|
||||
print('Total bytes sent: {}'.format(total_bytes_sent))
|
||||
|
||||
print('Average bytes sent per day (out via GET): {}'.
|
||||
format(total_bytes_sent / trange_days))
|
@ -6,7 +6,10 @@ ARG FILE_URL="https://cloud.mongodb.com/download/agent/backup/"$DEB_FILE
|
||||
WORKDIR /
|
||||
RUN apt update \
|
||||
&& apt -y upgrade \
|
||||
&& apt -y install --no-install-recommends curl ca-certificates logrotate \
|
||||
&& apt -y install --no-install-recommends \
|
||||
curl \
|
||||
ca-certificates \
|
||||
logrotate \
|
||||
libsasl2-2 \
|
||||
&& curl -OL $FILE_URL \
|
||||
&& dpkg -i $DEB_FILE \
|
||||
@ -16,4 +19,6 @@ RUN apt update \
|
||||
&& apt clean
|
||||
COPY mongodb_backup_agent_entrypoint.bash /
|
||||
RUN chown -R mongodb-mms-agent:mongodb-mms-agent /etc/mongodb-mms/
|
||||
VOLUME /etc/mongod/ssl
|
||||
#USER mongodb-mms-agent - BUG(Krish) Uncomment after tests are complete
|
||||
ENTRYPOINT ["/mongodb_backup_agent_entrypoint.bash"]
|
||||
|
@ -1,5 +1,5 @@
|
||||
#!/bin/bash
|
||||
|
||||
docker build -t bigchaindb/mongodb-backup-agent:2.0 .
|
||||
docker build -t bigchaindb/mongodb-backup-agent:3.0 .
|
||||
|
||||
docker push bigchaindb/mongodb-backup-agent:2.0
|
||||
docker push bigchaindb/mongodb-backup-agent:3.0
|
||||
|
@ -4,21 +4,29 @@ set -euo pipefail
|
||||
|
||||
MONGODB_BACKUP_CONF_FILE=/etc/mongodb-mms/backup-agent.config
|
||||
|
||||
mms_api_key=`printenv MMS_API_KEY`
|
||||
mms_api_keyfile_path=`printenv MMS_API_KEYFILE_PATH`
|
||||
mms_groupid_keyfile_path=`printenv MMS_GROUPID_KEYFILE_PATH`
|
||||
ca_crt_path=`printenv CA_CRT_PATH`
|
||||
backup_crt_path=`printenv BACKUP_PEM_PATH`
|
||||
|
||||
if [[ -z "${mms_api_key}" || \
|
||||
if [[ -z "${mms_api_keyfile_path}" || \
|
||||
-z "${ca_crt_path}" || \
|
||||
-z "${backup_crt_path}" ]]; then
|
||||
-z "${backup_crt_path}" || \
|
||||
-z "${mms_groupid_keyfile_path}" ]]; then
|
||||
echo "Invalid environment settings detected. Exiting!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sed -i '/mmsApiKey/d' ${MONGODB_BACKUP_CONF_FILE}
|
||||
sed -i '/mmsGroupId/d' ${MONGODB_BACKUP_CONF_FILE}
|
||||
sed -i '/mothership/d' ${MONGODB_BACKUP_CONF_FILE}
|
||||
|
||||
# Get the api key from file
|
||||
mms_api_key=`cat ${mms_api_keyfile_path}`
|
||||
mms_groupid_key=`cat ${mms_groupid_keyfile_path}`
|
||||
|
||||
echo "mmsApiKey="${mms_api_key} >> ${MONGODB_BACKUP_CONF_FILE}
|
||||
echo "mmsGroupId="${mms_groupid_key} >> ${MONGODB_BACKUP_CONF_FILE}
|
||||
echo "mothership=api-backup.eu-west-1.mongodb.com" >> ${MONGODB_BACKUP_CONF_FILE}
|
||||
|
||||
# Append SSL settings to the config file
|
||||
|
@ -1,27 +1,58 @@
|
||||
############################################################
|
||||
# This config file defines a k8s Deployment for the #
|
||||
# bigchaindb/mongodb-backup-agent Docker image #
|
||||
# #
|
||||
# It connects to a MongoDB instance in a separate pod, #
|
||||
# all remote MongoDB instances in the cluster, #
|
||||
# and also to MongoDB Cloud Manager (an external service). #
|
||||
# Notes: #
|
||||
# MongoDB agents connect to Cloud Manager on port 443. #
|
||||
############################################################
|
||||
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: mdb-backup-instance-0-dep
|
||||
name: mdb-bak-instance-0-dep
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
name: mdb-bak-instance-0-dep
|
||||
labels:
|
||||
app: mdb-backup-instance-0-dep
|
||||
app: mdb-bak-instance-0-dep
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: mdb-backup
|
||||
image: bigchaindb/mongodb-backup-agent:1.0
|
||||
image: bigchaindb/mongodb-backup-agent:3.0
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
- name: MMS_API_KEY
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: mdb-backup
|
||||
key: api-key
|
||||
- name: MMS_API_KEYFILE_PATH
|
||||
value: /etc/mongod/cloud/agent-api-key
|
||||
- name: MMS_GROUPID_KEYFILE_PATH
|
||||
value: /etc/mongod/cloud/group-id
|
||||
- name: CA_CRT_PATH
|
||||
value: /etc/mongod/ssl/ca.pem
|
||||
- name: BACKUP_PEM_PATH
|
||||
value: /etc/mongod/ssl/mdb-bak-instance.pem
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
volumeMounts:
|
||||
- name: mdb-bak-certs
|
||||
mountPath: /etc/mongod/ssl/
|
||||
readOnly: true
|
||||
- name: cloud-manager-credentials
|
||||
mountPath: /etc/mongod/cloud/
|
||||
readOnly: true
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: mdb-bak-certs
|
||||
secret:
|
||||
secretName: mdb-bak-certs
|
||||
defaultMode: 0400
|
||||
- name: cloud-manager-credentials
|
||||
secret:
|
||||
secretName: cloud-manager-credentials
|
||||
defaultMode: 0400
|
||||
|
@ -18,7 +18,10 @@ ARG FILE_URL="https://cloud.mongodb.com/download/agent/monitoring/"$DEB_FILE
|
||||
WORKDIR /
|
||||
RUN apt update \
|
||||
&& apt -y upgrade \
|
||||
&& apt -y install --no-install-recommends curl ca-certificates logrotate \
|
||||
&& apt -y install --no-install-recommends \
|
||||
curl \
|
||||
ca-certificates \
|
||||
logrotate \
|
||||
libsasl2-2 \
|
||||
&& curl -OL $FILE_URL \
|
||||
&& dpkg -i $DEB_FILE \
|
||||
@ -50,5 +53,6 @@ RUN apt update \
|
||||
|
||||
COPY mongodb_mon_agent_entrypoint.bash /
|
||||
RUN chown -R mongodb-mms-agent:mongodb-mms-agent /etc/mongodb-mms/
|
||||
VOLUME /etc/mongod/ssl
|
||||
#USER mongodb-mms-agent - BUG(Krish) Uncomment after tests are complete
|
||||
ENTRYPOINT ["/mongodb_mon_agent_entrypoint.bash"]
|
||||
|
@ -1,5 +1,5 @@
|
||||
#!/bin/bash
|
||||
|
||||
docker build -t bigchaindb/mongodb-monitoring-agent:2.0 .
|
||||
docker build -t bigchaindb/mongodb-monitoring-agent:3.0 .
|
||||
|
||||
docker push bigchaindb/mongodb-monitoring-agent:2.0
|
||||
docker push bigchaindb/mongodb-monitoring-agent:3.0
|
||||
|
@ -8,24 +8,33 @@ set -euo pipefail
|
||||
|
||||
MONGODB_MON_CONF_FILE=/etc/mongodb-mms/monitoring-agent.config
|
||||
|
||||
mms_api_key=`printenv MMS_API_KEY`
|
||||
mms_api_keyfile_path=`printenv MMS_API_KEYFILE_PATH`
|
||||
mms_groupid_keyfile_path=`printenv MMS_GROUPID_KEYFILE_PATH`
|
||||
ca_crt_path=`printenv CA_CRT_PATH`
|
||||
monitoring_crt_path=`printenv MONITORING_PEM_PATH`
|
||||
|
||||
if [[ -z "${mms_api_key}" || \
|
||||
if [[ -z "${mms_api_keyfile_path}" || \
|
||||
-z "${ca_crt_path}" || \
|
||||
-z "${monitoring_crt_path}" ]]; then
|
||||
-z "${monitoring_crt_path}" || \
|
||||
-z "${mms_groupid_keyfile_path}" ]]; then
|
||||
echo "Invalid environment settings detected. Exiting!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Delete all lines containing "mmsApiKey" in the MongoDB Monitoring Agent
|
||||
# config file /etc/mongodb-mms/monitoring-agent.config
|
||||
# Delete the line containing "mmsApiKey" and the line containing "mmsGroupId"
|
||||
# in the MongoDB Monitoring Agent config file
|
||||
# /etc/mongodb-mms/monitoring-agent.config
|
||||
sed -i '/mmsApiKey/d' $MONGODB_MON_CONF_FILE
|
||||
sed -i '/mmsGroupId/d' $MONGODB_MON_CONF_FILE
|
||||
|
||||
# Get the api key from file
|
||||
mms_api_key=`cat ${mms_api_keyfile_path}`
|
||||
mms_groupid_key=`cat ${mms_groupid_keyfile_path}`
|
||||
|
||||
# Append a new line of the form
|
||||
# mmsApiKey=value_of_MMS_API_KEY
|
||||
echo "mmsApiKey="${mms_api_key} >> ${MONGODB_MON_CONF_FILE}
|
||||
echo "mmsGroupId="${mms_groupid_key} >> ${MONGODB_MON_CONF_FILE}
|
||||
|
||||
# Append SSL settings to the config file
|
||||
echo "useSslForAllConnections=true" >> ${MONGODB_MON_CONF_FILE}
|
||||
|
@ -1,6 +1,6 @@
|
||||
############################################################
|
||||
# This config file defines a k8s Deployment for the #
|
||||
# bigchaindb/mongodb-monitoring-agent:latest Docker image #
|
||||
# bigchaindb/mongodb-monitoring-agent Docker image #
|
||||
# #
|
||||
# It connects to a MongoDB instance in a separate pod, #
|
||||
# all remote MongoDB instances in the cluster, #
|
||||
@ -17,22 +17,42 @@ spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
name: mdb-mon-instance-0-dep
|
||||
labels:
|
||||
app: mdb-mon-instance-0-dep
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: mdb-mon
|
||||
image: bigchaindb/mongodb-monitoring-agent:1.0
|
||||
image: bigchaindb/mongodb-monitoring-agent:3.0
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
- name: MMS_API_KEY
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: mdb-mon
|
||||
key: api-key
|
||||
- name: MMS_API_KEYFILE_PATH
|
||||
value: /etc/mongod/cloud/agent-api-key
|
||||
- name: MMS_GROUPID_KEYFILE_PATH
|
||||
value: /etc/mongod/cloud/group-id
|
||||
- name: CA_CRT_PATH
|
||||
value: /etc/mongod/ssl/ca.pem
|
||||
- name: MONITORING_PEM_PATH
|
||||
value: /etc/mongod/ssl/mdb-mon-instance.pem
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
volumeMounts:
|
||||
- name: mdb-mon-certs
|
||||
mountPath: /etc/mongod/ssl/
|
||||
readOnly: true
|
||||
- name: cloud-manager-credentials
|
||||
mountPath: /etc/mongod/cloud/
|
||||
readOnly: true
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: mdb-mon-certs
|
||||
secret:
|
||||
secretName: mdb-mon-certs
|
||||
defaultMode: 0400
|
||||
- name: cloud-manager-credentials
|
||||
secret:
|
||||
secretName: cloud-manager-credentials
|
||||
defaultMode: 0400
|
||||
|
@ -4,10 +4,9 @@ WORKDIR /
|
||||
RUN apt-get update \
|
||||
&& apt-get -y upgrade \
|
||||
&& apt-get autoremove \
|
||||
&& apt-get clean \
|
||||
&& mkdir /mongo-ssl
|
||||
&& apt-get clean
|
||||
COPY mongod.conf.template /etc/mongod.conf
|
||||
COPY mongod_entrypoint.bash /
|
||||
VOLUME /data/db /data/configdb /mongo-ssl
|
||||
VOLUME /data/db /data/configdb /etc/mongod/ssl
|
||||
EXPOSE 27017
|
||||
ENTRYPOINT ["/mongod_entrypoint.bash"]
|
||||
|
@ -9,9 +9,11 @@
|
||||
* We also need a way to overwrite certain parameters to suit our use case.
|
||||
|
||||
|
||||
### Step 1: Build the Latest Container
|
||||
|
||||
`docker build -t bigchaindb/mongodb:3.4.4 .` from the root of this project.
|
||||
### Step 1: Build and Push the Latest Container
|
||||
Use the `docker_build_and_push.bash` script to build the latest docker image
|
||||
and upload it to Docker Hub.
|
||||
Ensure that the image tag is updated to a new version number to properly
|
||||
reflect any changes made to the container.
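For example (assuming you are in `k8s/mongodb/container/` and are logged in to a Docker Hub account that can push to the `bigchaindb` organization):

```text
./docker_build_and_push.bash
```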
|
||||
|
||||
|
||||
### Step 2: Run the Container
|
||||
@ -25,7 +27,7 @@ docker run \
|
||||
--volume=<host dir for mongodb data files>:/data/db \
|
||||
--volume=<host dir for mongodb config data files>:/data/configdb \
|
||||
--volume=<host dir with the required TLS certificates>:/mongo-ssl:ro \
|
||||
bigchaindb/mongodb:3.4.4 \
|
||||
bigchaindb/mongodb:3.0 \
|
||||
--mongodb-port <mongod port number for external connections> \
|
||||
--mongodb-key-file-path /mongo-ssl/<private key file name>.pem \
|
||||
--mongodb-key-file-password <password for the private key file> \
|
||||
|
k8s/mongodb/container/docker_build_and_push.bash (new executable file, 5 lines)
@ -0,0 +1,5 @@
|
||||
#!/bin/bash
|
||||
|
||||
docker build -t bigchaindb/mongodb:3.0 .
|
||||
|
||||
docker push bigchaindb/mongodb:3.0
|
@ -65,14 +65,15 @@ net:
|
||||
#weakCertificateValidation: false
|
||||
#allowInvalidCertificates: false
|
||||
|
||||
#security: TODO
|
||||
# authorization: enabled
|
||||
# clusterAuthMode: x509
|
||||
security:
|
||||
authorization: enabled
|
||||
clusterAuthMode: x509
|
||||
|
||||
setParameter:
|
||||
enableLocalhostAuthBypass: true
|
||||
#notablescan: 1 TODO
|
||||
#logUserIds: 1 TODO
|
||||
#notablescan: 1
|
||||
logUserIds: 1
|
||||
authenticationMechanisms: MONGODB-X509,SCRAM-SHA-1
|
||||
|
||||
storage:
|
||||
dbPath: /data/db
|
||||
|
Some files were not shown because too many files have changed in this diff.