Compare commits

master..v2.0.0a1

No commits in common. "master" and "v2.0.0a1" have entirely different histories.

581 changed files with 19,012 additions and 17,342 deletions

@@ -1,14 +0,0 @@
#!/bin/bash
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
set -e -x
if [[ ${BIGCHAINDB_CI_ABCI} == 'enable' ]]; then
sleep 3600
else
bigchaindb -l DEBUG start
fi
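For context, everything in this deleted entrypoint hinges on one environment variable. A minimal sketch of exercising both branches locally, assuming the script is saved as `entrypoint.sh`:

```bash
# Hypothetical local check of both branches (script name assumed):
BIGCHAINDB_CI_ABCI=enable ./entrypoint.sh &   # ABCI CI mode: the process just sleeps
./entrypoint.sh                               # default: start bigchaindb with DEBUG logging
```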

@@ -1,12 +1,7 @@
#!/bin/bash
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
set -e -x
if [[ -z ${TOXENV} ]] && [[ ${BIGCHAINDB_CI_ABCI} != 'enable' ]] && [[ ${BIGCHAINDB_ACCEPTANCE_TEST} != 'enable' ]]; then
codecov -v -f htmlcov/coverage.xml
if [[ -z ${TOXENV} ]]; then
codecov -v
fi
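On the master side, `-f htmlcov/coverage.xml` points codecov at the XML report written by the unit-test step shown later in this diff. A sketch of the full local sequence, reusing commands that appear verbatim in these scripts:

```bash
# Produce the coverage report, then upload it (both commands appear in this diff):
docker-compose exec bigchaindb pytest -v --cov=bigchaindb --cov-report xml:htmlcov/coverage.xml
codecov -v -f htmlcov/coverage.xml
```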

@@ -1,9 +1,4 @@
#!/bin/bash
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
if [[ -z ${TOXENV} ]]; then
sudo apt-get update

@@ -1,18 +1,7 @@
#!/bin/bash
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
set -e -x
if [[ -z ${TOXENV} ]]; then
if [[ ${BIGCHAINDB_CI_ABCI} == 'enable' ]]; then
docker-compose up -d bigchaindb
else
docker-compose up -d bdb
fi
docker-compose up -d bdb
fi
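`bdb` and `bigchaindb` are docker-compose service names, so this branch decides how much of the stack comes up. A quick way to check what a service actually started, assuming the repo's docker-compose.yml:

```bash
docker-compose up -d bdb   # bring up the test stack (service name from the script above)
docker-compose ps          # list all containers started, including depends_on dependencies
```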

@@ -1,9 +1,4 @@
#!/bin/bash
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
set -e -x
@@ -11,10 +6,6 @@ pip install --upgrade pip
if [[ -n ${TOXENV} ]]; then
pip install --upgrade tox
elif [[ ${BIGCHAINDB_CI_ABCI} == 'enable' ]]; then
docker-compose build --no-cache --build-arg abci_status=enable bigchaindb
elif [[ $BIGCHAINDB_INTEGRATION_TEST == 'enable' ]]; then
docker-compose build bigchaindb python-driver
else
docker-compose build --no-cache bigchaindb
pip install --upgrade codecov

@@ -1,18 +1,9 @@
#!/bin/bash
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
set -e -x
if [[ -n ${TOXENV} ]]; then
tox -e ${TOXENV}
elif [[ ${BIGCHAINDB_CI_ABCI} == 'enable' ]]; then
docker-compose exec bigchaindb pytest -v -m abci
elif [[ ${BIGCHAINDB_ACCEPTANCE_TEST} == 'enable' ]]; then
./run-acceptance-test.sh
else
docker-compose exec bigchaindb pytest -v --cov=bigchaindb --cov-report xml:htmlcov/coverage.xml
docker-compose run --rm --no-deps bigchaindb pytest -v --cov=bigchaindb
fi
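The dispatch mirrors the Travis matrix shown later: jobs that set `TOXENV` run lint or docs environments through tox, and everything else runs pytest inside containers. Running one tox environment by hand, with environment names taken from the .travis.yml diff below:

```bash
export TOXENV=flake8        # or docsroot / docsserver on the master side
pip install --upgrade tox   # same install step the CI uses
tox -e "$TOXENV"
```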

@@ -1,10 +1,3 @@
<!---
Copyright © 2020 Interplanetary Database Association e.V.,
BigchainDB and IPDB software contributors.
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
# How to Contribute to the BigchainDB Project
There are many ways you can contribute to the BigchainDB project, some very easy and others more involved.

@@ -1,30 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Logs or terminal output**
If applicable, add textual content to help explain your problem.
**Desktop (please complete the following information):**
- Distribution: [e.g. Ubuntu 18.04]
- BigchainDB version:
- Tendermint version:
- MongoDB version:
- Python full version: [e.g. Python 3.6.6]
**Additional context**
Add any other context about the problem here.

.gitignore (vendored): 2 changed lines

@@ -95,5 +95,3 @@ network/*/data
# Docs that are fetched at build time
docs/contributing/source/cross-project-policies/*.md
.DS_Store

@@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
repos:
- repo: git://github.com/pre-commit/pre-commit-hooks
sha: v1.1.1
@@ -22,4 +17,4 @@ repos:
args: ['--select=D204,D201,D209,D210,D212,D300,D403']
# negate the exclude to only apply the hooks to 'bigchaindb' and 'tests' folder
exclude: '^(?!bigchaindb/)(?!tests/)(?!acceptance/)'
exclude: '^(?!bigchaindb/)(?!tests/)'
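The exclude pattern works through negative lookaheads: a path matches, and is therefore skipped by the hooks, unless it begins with one of the whitelisted folders. A quick sanity check with GNU grep, whose `-P` flag supports Perl-style lookaheads:

```bash
# Paths matching the pattern are EXCLUDED from the hooks:
echo "docs/conf.py"       | grep -qP '^(?!bigchaindb/)(?!tests/)' && echo "excluded"
echo "bigchaindb/core.py" | grep -qP '^(?!bigchaindb/)(?!tests/)' || echo "checked by hooks"
```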

@@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
build:
image: latest

View File

@@ -1,11 +1,6 @@
# Copyright © 2020, 2021 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
sudo: required
dist: focal
dist: trusty
services:
- docker
@@ -15,15 +10,14 @@ cache: pip
python:
- 3.6
- 3.7
- 3.8
env:
global:
- DOCKER_COMPOSE_VERSION=1.29.2
- DOCKER_COMPOSE_VERSION=1.19.0
matrix:
- TOXENV=flake8
- TOXENV=docsroot
- TOXENV=docsserver
matrix:
fast_finish: true
@@ -32,39 +26,6 @@ matrix:
env:
- BIGCHAINDB_DATABASE_BACKEND=localmongodb
- BIGCHAINDB_DATABASE_SSL=
- python: 3.6
env:
- BIGCHAINDB_DATABASE_BACKEND=localmongodb
- BIGCHAINDB_DATABASE_SSL=
- BIGCHAINDB_CI_ABCI=enable
- python: 3.6
env:
- BIGCHAINDB_ACCEPTANCE_TEST=enable
- python: 3.7
env:
- BIGCHAINDB_DATABASE_BACKEND=localmongodb
- BIGCHAINDB_DATABASE_SSL=
- python: 3.7
env:
- BIGCHAINDB_DATABASE_BACKEND=localmongodb
- BIGCHAINDB_DATABASE_SSL=
- BIGCHAINDB_CI_ABCI=enable
- python: 3.7
env:
- BIGCHAINDB_ACCEPTANCE_TEST=enable
- python: 3.8
env:
- BIGCHAINDB_DATABASE_BACKEND=localmongodb
- BIGCHAINDB_DATABASE_SSL=
- python: 3.8
env:
- BIGCHAINDB_DATABASE_BACKEND=localmongodb
- BIGCHAINDB_DATABASE_SSL=
- BIGCHAINDB_CI_ABCI=enable
- python: 3.8
env:
- BIGCHAINDB_ACCEPTANCE_TEST=enable
before_install: sudo .ci/travis-before-install.sh
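Each matrix entry is just a set of exported variables layered over the shared `.ci/` scripts, so a single job can be approximated locally. A sketch, assuming a Debian/Ubuntu host:

```bash
# Approximate one matrix job locally (Debian/Ubuntu host assumed):
export BIGCHAINDB_DATABASE_BACKEND=localmongodb
export BIGCHAINDB_DATABASE_SSL=
sudo .ci/travis-before-install.sh   # then run the remaining .ci/ stage scripts in order
```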

@@ -1,10 +1,3 @@
<!---
Copyright © 2020 Interplanetary Database Association e.V.,
BigchainDB and IPDB software contributors.
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
# Change Log (Release Notes)
All _notable_ changes to this project will be documented in this file (`CHANGELOG.md`).
@@ -22,344 +15,8 @@ For reference, the possible headings are:
* **Fixed** for any bug fixes.
* **Security** to invite users to upgrade in case of vulnerabilities.
* **External Contributors** to list contributors outside of BigchainDB GmbH.
* **Known Issues**
* **Notes**
## [2.2.2] - 2020-08-12
### Security
Several dependencies updated, including Flask, which had a vulnerability.
### Fixed
* Updated priv_validator key format in stack script (#2707)
### External Contributors
* @aostrun - [#2708](https://github.com/bigchaindb/bigchaindb/pull/2708)
## [2.2.1] - 2020-04-14
### Fixed
A Gevent library API update was incompatible with bigchaindb-abci version 1.0.1.
Updated bigchaindb-abci.
## [2.2.0] - 2020-02-20
### Added
Support for multiple ABCI versions.
## [2.1.0] - 2019-11-06
### Added
Option for last transaction retrieval added.
## [2.0] - 2019-09-26
### Changed
Migrated from Tendermint 0.22.8 to 0.31.5.
## [2.0 Beta 9] - 2018-11-27
### Changed
Removed support for TLSv1 and TLSv1.1 in all NGINX config files. Kept support for TLSv1.2 and added support for TLSv1.3. [Pull Request #2601](https://github.com/bigchaindb/bigchaindb/pull/2601)
### Fixed
Fixed two issues with schema validation. Pull requests [#2606](https://github.com/bigchaindb/bigchaindb/pull/2606) & [#2607](https://github.com/bigchaindb/bigchaindb/pull/2607)
### External Contributors
[@gamjapark](https://github.com/gamjapark) and team translated all the [BigchainDB root docs](https://docs.bigchaindb.com/en/latest/korean/index.html) into Korean. [Pull Request #2603](https://github.com/bigchaindb/bigchaindb/pull/2603)
## [2.0 Beta 8] - 2018-11-03
### Changed
* Revised the [Simple Deployment Template](http://docs.bigchaindb.com/projects/server/en/latest/simple-deployment-template/index.html) in the docs. Added NGINX to the mix. Pull Requests [#2578](https://github.com/bigchaindb/bigchaindb/pull/2578) and [#2579](https://github.com/bigchaindb/bigchaindb/pull/2579)
* Revised `nginx/nginx.conf` to enable CORS. [Pull Request #2580](https://github.com/bigchaindb/bigchaindb/pull/2580)
### Fixed
* Fixed a typo in the Kubernetes ConfigMap template. [Pull Request #2583](https://github.com/bigchaindb/bigchaindb/pull/2583)
### External Contributors
[@gamjapark](https://github.com/gamjapark) translated the main `README.md` file into Korean. [Pull Request #2592](https://github.com/bigchaindb/bigchaindb/pull/2592)
## [2.0 Beta 7] - 2018-09-28
Tag name: v2.0.0b7
### Added
Completed the implementation of chain-migration elections (BEP-42). Pull requests [#2553](https://github.com/bigchaindb/bigchaindb/pull/2553), [#2556](https://github.com/bigchaindb/bigchaindb/pull/2556), [#2558](https://github.com/bigchaindb/bigchaindb/pull/2558), [#2563](https://github.com/bigchaindb/bigchaindb/pull/2563) and [#2566](https://github.com/bigchaindb/bigchaindb/pull/2566)
### Changed
* Code that used the Python driver's (deprecated) transactions.send() method now uses its transactions.send_commit() method instead. [Pull request #2547](https://github.com/bigchaindb/bigchaindb/pull/2547)
* Code that implied pluggable "consensus" now implies pluggable transaction "validation" (a more accurate word). [Pull request #2561](https://github.com/bigchaindb/bigchaindb/pull/2561)
### Removed
Benchmark logs. [Pull request #2565](https://github.com/bigchaindb/bigchaindb/pull/2565)
### Fixed
A bug caused by an incorrect MongoDB query. [Pull request #2567](https://github.com/bigchaindb/bigchaindb/pull/2567)
### Notes
There's now better documentation about logs, log rotation, and the `server.bind` config setting. Pull requests [#2546](https://github.com/bigchaindb/bigchaindb/pull/2546) and [#2575](https://github.com/bigchaindb/bigchaindb/pull/2575)
## [2.0 Beta 6] - 2018-09-17
Tag name: v2.0.0b6
### Added
* [New documentation about privacy and handling private data](https://docs.bigchaindb.com/en/latest/private-data.html). [Pull request #2437](https://github.com/bigchaindb/bigchaindb/pull/2437)
* New documentation about log rotation. Also rotate Tendermint logs if started using Monit. [Pull request #2528](https://github.com/bigchaindb/bigchaindb/pull/2528)
* Began implementing one of the migration strategies outlined in [BEP-42](https://github.com/bigchaindb/BEPs/tree/master/42). That involved creating a more general-purpose election process and commands. Pull requests [#2488](https://github.com/bigchaindb/bigchaindb/pull/2488), [#2495](https://github.com/bigchaindb/bigchaindb/pull/2495), [#2498](https://github.com/bigchaindb/bigchaindb/pull/2498), [#2515](https://github.com/bigchaindb/bigchaindb/pull/2515), [#2535](https://github.com/bigchaindb/bigchaindb/pull/2535)
* Used memoization to avoid doing some validation checks multiple times. [Pull request #2490](https://github.com/bigchaindb/bigchaindb/pull/2490)
* Created an all-in-one Docker image containing BigchainDB Server, Tendermint and MongoDB. It was created for a particular user and is not recommended for production use unless you really know what you're doing. [Pull request #2424](https://github.com/bigchaindb/bigchaindb/pull/2424)
### Changed
* The supported versions of Tendermint are now hard-wired into BigchainDB Server: it checks to see what version the connected Tendermint has, and if it's not compatible, BigchainDB Server exits with an error message. [Pull request #2541](https://github.com/bigchaindb/bigchaindb/pull/2541)
* The docs no longer say to install the highest version of Tendermint: they say to install a specific version. [Pull request #2524](https://github.com/bigchaindb/bigchaindb/pull/2524)
* The setup docs include more recommended settings for `config.toml`. [Pull request #2516](https://github.com/bigchaindb/bigchaindb/pull/2516)
* The process to add, remove or update the voting power of a validator at run time (using the `bigchaindb upsert-validator` subcommands) was completely changed and is now fully working. See [issue #2372](https://github.com/bigchaindb/bigchaindb/issues/2372) and all the pull requests it references. Pull requests [#2439](https://github.com/bigchaindb/bigchaindb/pull/2439) and [#2440](https://github.com/bigchaindb/bigchaindb/pull/2440)
* The license on the documentation was changed from CC-BY-SA-4 to CC-BY-4. [Pull request #2427](https://github.com/bigchaindb/bigchaindb/pull/2427)
* Re-activated and/or updated some unit tests that had been deactivated during the migration to Tendermint. Pull requests [#2390](https://github.com/bigchaindb/bigchaindb/pull/2390), [#2415](https://github.com/bigchaindb/bigchaindb/pull/2415), [#2452](https://github.com/bigchaindb/bigchaindb/pull/2452), [#2456](https://github.com/bigchaindb/bigchaindb/pull/2456)
* Updated RapidJSON to a newer, faster version. [Pull request #2470](https://github.com/bigchaindb/bigchaindb/pull/2470)
* The Java driver is now officially supported. [Pull request #2478](https://github.com/bigchaindb/bigchaindb/pull/2478)
* The MongoDB indexes on transaction id and block height were changed to be [unique indexes](https://docs.mongodb.com/manual/core/index-unique/). [Pull request #2492](https://github.com/bigchaindb/bigchaindb/pull/2492)
* Updated the required `cryptoconditions` package to a newer one. [Pull request #2494](https://github.com/bigchaindb/bigchaindb/pull/2494)
### Removed
* Removed some old code and tests. Pull requests
[#2374](https://github.com/bigchaindb/bigchaindb/pull/2374),
[#2452](https://github.com/bigchaindb/bigchaindb/pull/2452),
[#2474](https://github.com/bigchaindb/bigchaindb/pull/2474),
[#2476](https://github.com/bigchaindb/bigchaindb/pull/2476),
[#2491](https://github.com/bigchaindb/bigchaindb/pull/2491)
### Fixed
* Fixed the Events API so that it only sends valid transactions to subscribers. Also changed how it works internally, so now it is more reliable. [Pull request #2529](https://github.com/bigchaindb/bigchaindb/pull/2529)
* Fixed a bug where MongoDB database initialization would abort if a collection already existed. [Pull request #2520](https://github.com/bigchaindb/bigchaindb/pull/2520)
* Fixed a unit test that was failing randomly. [Pull request #2423](https://github.com/bigchaindb/bigchaindb/pull/2423)
* Fixed the validator curl port. [Pull request #2447](https://github.com/bigchaindb/bigchaindb/pull/2447)
* Fixed an error in the docs about the HTTP POST /transactions endpoint. [Pull request #2481](https://github.com/bigchaindb/bigchaindb/pull/2481)
* Fixed a unit test that could loop forever. [Pull request #2486](https://github.com/bigchaindb/bigchaindb/pull/2486)
* Fixed a bug when validating a CREATE + TRANSFER. [Pull request #2487](https://github.com/bigchaindb/bigchaindb/pull/2487)
* Fixed the HTTP response when posting a transaction in commit mode. [Pull request #2510](https://github.com/bigchaindb/bigchaindb/pull/2510)
* Fixed a crash that happened when attempting to restart BigchainDB at Tendermint block height 1. [Pull request #2519](https://github.com/bigchaindb/bigchaindb/pull/2519)
### External Contributors
@danacr - [Pull request #2447](https://github.com/bigchaindb/bigchaindb/pull/2447)
### Notes
The docs section titled "Production Deployment Template" was renamed to "Kubernetes Deployment Template" and we no longer consider it the go-to deployment template. The "Simple Deployment Template" is simpler, easier to understand, and less expensive (unless you are with an organization that already has a big Kubernetes cluster).
## [2.0 Beta 5] - 2018-08-01
Tag name: v2.0.0b5
### Changed
* Supported version of Tendermint `0.22.3` -> `0.22.8`. [Pull request #2429](https://github.com/bigchaindb/bigchaindb/pull/2429).
### Fixed
* Stateful validation raises a DoubleSpend exception if there is any other transaction that spends the same output(s) even if it has the same transaction ID. [Pull request #2422](https://github.com/bigchaindb/bigchaindb/pull/2422).
## [2.0 Beta 4] - 2018-07-30
Tag name: v2.0.0b4
### Added
- Added scripts for creating a configuration to manage processes with Monit. [Pull request #2410](https://github.com/bigchaindb/bigchaindb/pull/2410).
### Fixed
- Redundant asset and metadata queries were removed. [Pull request #2409](https://github.com/bigchaindb/bigchaindb/pull/2409).
- Signal handling was fixed for BigchainDB processes. [Pull request #2395](https://github.com/bigchaindb/bigchaindb/pull/2395).
- Some of the abruptly closed sockets that used to stay in memory are being cleaned up now. [Pull request 2408](https://github.com/bigchaindb/bigchaindb/pull/2408).
- Fixed the bug when WebSockets powering Events API became unresponsive. [Pull request #2413](https://github.com/bigchaindb/bigchaindb/pull/2413).
### Notes
* The instructions on how to write a BEP were simplified. [Pull request #2347](https://github.com/bigchaindb/bigchaindb/pull/2347).
* A section about troubleshooting was added to the network setup guide. [Pull request #2398](https://github.com/bigchaindb/bigchaindb/pull/2398).
* Some of the core code was given a better package structure. [Pull request #2401](https://github.com/bigchaindb/bigchaindb/pull/2401).
* Some of the previously disabled unit tests were re-enabled and updated. Pull requests [#2404](https://github.com/bigchaindb/bigchaindb/pull/2404) and [#2402](https://github.com/bigchaindb/bigchaindb/pull/2402).
* Some building blocks for dynamically adding new validators were introduced. [Pull request #2392](https://github.com/bigchaindb/bigchaindb/pull/2392).
## [2.0 Beta 3] - 2018-07-18
Tag name: v2.0.0b3
### Fixed
Fixed a bug in transaction validation. For some more-complex situations, it would say that a valid transaction was invalid. This bug was actually fixed before; it was [issue #1271](https://github.com/bigchaindb/bigchaindb/issues/1271). The unit test for it was turned off while we integrated Tendermint. Then the query implementation code got changed, reintroducing the bug, but the unit test was off so the bug wasn't caught. When we turned the test back on, shortly after releasing Beta 2, it failed, unveiling the bug. [Pull request #2389](https://github.com/bigchaindb/bigchaindb/pull/2389)
## [2.0 Beta 2] - 2018-07-16
Tag name: v2.0.0b2
### Added
* Added new configuration settings `tendermint.host` and `tendermint.port`. [Pull request #2342](https://github.com/bigchaindb/bigchaindb/pull/2342)
* Added tests to ensure that BigchainDB gracefully handles "nasty" strings in keys and values. [Pull request #2334](https://github.com/bigchaindb/bigchaindb/pull/2334)
* Added a new logging handler to capture benchmark stats to a separate file. [Pull request #2349](https://github.com/bigchaindb/bigchaindb/pull/2349)
### Changed
* Changed the names of BigchainDB processes (Python processes) to include 'bigchaindb', so they are easier to spot and find. [Pull request #2354](https://github.com/bigchaindb/bigchaindb/pull/2354)
* Updated all code to support the latest version of Tendermint. Note that the BigchainDB ABCI server now listens to port 26657 instead of 46657. Pull requests [#2375](https://github.com/bigchaindb/bigchaindb/pull/2375) and [#2380](https://github.com/bigchaindb/bigchaindb/pull/2380)
### Removed
Removed all support and code for the old backlog_reassign_delay setting. [Pull request #2332](https://github.com/bigchaindb/bigchaindb/pull/2332)
### Fixed
* Fixed a bug that sometimes arose when using Docker Compose. (Tendermint would freeze.) [Pull request #2341](https://github.com/bigchaindb/bigchaindb/pull/2341)
* Fixed a bug in the code that creates a MongoDB index for the "id" in the transactions collection. It works now, and performance is improved. [Pull request #2378](https://github.com/bigchaindb/bigchaindb/pull/2378)
* The logging server would keep running in some tear-down scenarios. It doesn't do that any more. [Pull request #2304](https://github.com/bigchaindb/bigchaindb/pull/2304)
### External Contributors
@hrntknr - [Pull request #2331](https://github.com/bigchaindb/bigchaindb/pull/2331)
### Known Issues
The `bigchaindb upsert-validator` subcommand is not working yet, but a solution ([BEP-21](https://github.com/bigchaindb/BEPs/tree/master/21)) has been finalized and will be implemented before we release the final BigchainDB 2.0.
### Notes
* A lot of old/dead code was deleted. Pull requests
[#2319](https://github.com/bigchaindb/bigchaindb/pull/2319),
[#2338](https://github.com/bigchaindb/bigchaindb/pull/2338),
[#2357](https://github.com/bigchaindb/bigchaindb/pull/2357),
[#2365](https://github.com/bigchaindb/bigchaindb/pull/2365),
[#2366](https://github.com/bigchaindb/bigchaindb/pull/2366),
[#2368](https://github.com/bigchaindb/bigchaindb/pull/2368) and
[#2374](https://github.com/bigchaindb/bigchaindb/pull/2374)
* Improved the documentation page "How to setup a BigchainDB Network". [Pull Request #2312](https://github.com/bigchaindb/bigchaindb/pull/2312)
## [2.0 Beta 1] - 2018-06-01
Tag name: v2.0.0b1
### Fixed
* Fixed a bug that arose with some code that treated transactions-waiting-for-block-inclusion as if they were already stored in MongoDB (i.e. already in a block). [Pull request #2318](https://github.com/bigchaindb/bigchaindb/pull/2318)
* If a user asked for a block and it happened to be an empty block, BigchainDB returned 404 Not Found, as if the block did not exist. Now it returns a 200 OK with a block containing no transactions, i.e. an empty block. [Pull request #2321](https://github.com/bigchaindb/bigchaindb/pull/2321)
### Known Issues
* An issue was found with the `bigchaindb upsert-validator` command. A solution was proposed in [BEP-19](https://github.com/bigchaindb/BEPs/pull/45) and is being implemented in [pull request #2314](https://github.com/bigchaindb/bigchaindb/pull/2314)
* If you run BigchainDB locally using `make start` (i.e. using Docker Compose) and then you put the node under heavy write load, Tendermint can become unresponsive and running `make stop` can hang.
* There seems to be one or more issues with Tendermint when it is put under heavy load (i.e. even when BigchainDB isn't involved). See Tendermint issues [#1394](https://github.com/tendermint/tendermint/issues/1394), [#1642](https://github.com/tendermint/tendermint/issues/1642) and [#1661](https://github.com/tendermint/tendermint/issues/1661)
### Notes
* There's a [new docs page](https://docs.bigchaindb.com/projects/server/en/v2.0.0b1/simple-network-setup.html) about how to set up a network where each node uses a single virtual machine.
* We checked, and BigchainDB 2.0 Beta 1 works with MongoDB 3.6 (and 3.4).
* Support for RethinkDB is completely gone.
## [2.0 Alpha 6] - 2018-05-17
Tag name: v2.0.0a6
### Changed
Upgraded PyMongo to version 3.6 (which is compatible with MongoDB 3.6, 3.4 [and more](https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility)). [Pull request #2298](https://github.com/bigchaindb/bigchaindb/pull/2298)
### Fixed
When deploying a node using our Docker Compose file, it didn't expose port 46656, which is used by Tendermint for inter-node communications, so the node couldn't communicate with other nodes. We fixed that in [pull request #2299](https://github.com/bigchaindb/bigchaindb/pull/2299)
### Notes
We ran all our tests using MongoDB 3.6 and they all passed, so it seems safe to use BigchainDB with MongoDB 3.6 from now on.
## [2.0 Alpha 5] - 2018-05-11
Tag name: v2.0.0a5
### Changed
To resolve [issue #2279](https://github.com/bigchaindb/bigchaindb/issues/2279), we made some changes to the `bigchaindb-abci` package (which is our fork of `py-abci`) and told BigchainDB to use the new version (`bigchaindb-abci==0.4.5`). [Pull request #2281](https://github.com/bigchaindb/bigchaindb/pull/2281).
## [2.0 Alpha 4] - 2018-05-09
Tag name: v2.0.0a4
### Changed
The Kubernetes liveness probe for the BigchainDB StatefulSet was improved to check the Tendermint /status endpoint in addition to the Tendermint /abci_info endpoint. [Pull request #2275](https://github.com/bigchaindb/bigchaindb/pull/2275)
### Fixed
[Pull request #2270](https://github.com/bigchaindb/bigchaindb/pull/2270) resolved [issue #2269](https://github.com/bigchaindb/bigchaindb/issues/2269).
### Notes
There's a new [page in the docs about storing files in BigchainDB](https://docs.bigchaindb.com/en/latest/store-files.html). [Pull request #2259](https://github.com/bigchaindb/bigchaindb/pull/2259)
## [2.0 Alpha 3] - 2018-05-03
Tag name: v2.0.0a3
### Changed
* Upgraded BigchainDB Server code to use the latest version of Tendermint: version 0.19.2. Pull requests [#2249](https://github.com/bigchaindb/bigchaindb/pull/2249), [#2252](https://github.com/bigchaindb/bigchaindb/pull/2252) and [#2253](https://github.com/bigchaindb/bigchaindb/pull/2253)
* Made some fixes to `py-abci` (an external Python package) and used our fixed version with BigchainDB. Those fixes resolved several known issues, including [issue #2182](https://github.com/bigchaindb/bigchaindb/issues/2182) and issues with large transactions in general. Note: At the time of writing, our fixes to `py-abci` hadn't been merged into the main `py-abci` repository or its latest release on PyPI; we were using our own special `bigchaindb-abci` package (which included our fixes). Pull requests [#2250](https://github.com/bigchaindb/bigchaindb/pull/2250) and [#2261](https://github.com/bigchaindb/bigchaindb/pull/2261)
* If BigchainDB Server crashes and then comes back, Tendermint Core doesn't try to reconnect to it. That's just how Tendermint Core works. We revised our Kubernetes-based production deployment template to resolve that issue: BigchainDB Server and Tendermint Core are now in the same Kubernetes StatefulSet; if the connection between them ever goes down, then Kubernetes restarts the whole StatefulSet. [Pull request #2242](https://github.com/bigchaindb/bigchaindb/pull/2242)
### Fixed
Re-enabled multi-threading. [Pull request #2258](https://github.com/bigchaindb/bigchaindb/pull/2258)
### Known Issues
Tendermint changed how it responds to a request to store data (via the [Tendermint Broadcast API](https://tendermint.com/docs/tendermint-core/using-tendermint.html#broadcast-api)) between version 0.12 and 0.19.2. We started modifying the code of BigchainDB Server to account for those changes in responses (in [pull request #2239](https://github.com/bigchaindb/bigchaindb/pull/2239)), but we found that there's a difference between what the Tendermint documentation _says_ about those responses and how Tendermint actually responds. We need to determine Tendermint's intent before we can finalize that pull request.
### Notes
We were focused on getting the public BigchainDB Testnet stable during the development of BigchainDB 2.0 Alpha 3, and we think we largely succeeded. Because of that focus, we delayed the deployment of an internal test network until later. It would have had the same instabilities as the public BigchainDB Testnet anyway. In the future, we'll always test a new version of BigchainDB on our internal test network before deploying it on the public BigchainDB Testnet. (That wasn't possible this time around, because there was no old/stable version of BigchainDB 2.n to run on the public BigchainDB Testnet while we tested BigchainDB 2.[n+1] internally.)
## [2.0 Alpha 2] - 2018-04-18
Tag name: v2.0.0a2
### Added
An implementation of [BEP-8 (BigchainDB Enhancement Proposal #8)](https://github.com/bigchaindb/BEPs/tree/master/8), which makes sure a node can recover from a system fault (e.g. a crash) into a consistent state, i.e. a state where the data in the node's local MongoDB database is consistent with the data stored in the blockchain. Pull requests [#2135](https://github.com/bigchaindb/bigchaindb/pull/2135) and [#2207](https://github.com/bigchaindb/bigchaindb/pull/2207)
### Changed
When someone uses the HTTP API to send a new transaction to a BigchainDB network using the [POST /api/v1/transactions?mode={mode}](https://docs.bigchaindb.com/projects/server/en/master/http-client-server-api.html#post--api-v1-transactions?mode=mode) endpoint, they now get back a more informative HTTP response, so they can better understand what happened. This applies only when mode is `commit` or `sync`, because `async` _means_ that the response is immediate, without waiting to see what happened to the transaction. [Pull request #2198](https://github.com/bigchaindb/bigchaindb/pull/2198)
### Known Issues
* If BigchainDB Server crashes and then is restarted, Tendermint Core won't try to reconnect to BigchainDB Server and so all operations requiring that connection won't work. We only understood this recently. We'll write a blog post explaining what we intend to do about it.
* The known issues in 2.0 Alpha (listed below) are still there.
## [2.0 Alpha] - 2018-04-03
Tag name: v2.0.0a1

@@ -1,10 +1,3 @@
<!---
Copyright © 2020 Interplanetary Database Association e.V.,
BigchainDB and IPDB software contributors.
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
# Contributor Code of Conduct
As contributors and maintainers of this project, and in the interest of
@@ -42,7 +35,7 @@ This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior directed at yourself or another community member may be
reported by contacting a project maintainer at [contact@bigchaindb.com](mailto:contact@bigchaindb.com). All
reported by contacting a project maintainer at [conduct@bigchaindb.com](mailto:conduct@bigchaindb.com). All
complaints will be reviewed and investigated and will result in a response that
is appropriate to the circumstances. Maintainers are
obligated to maintain confidentiality with regard to the reporter of an

@@ -1,12 +1,11 @@
FROM python:3.6
LABEL maintainer "contact@ipdb.global"
LABEL maintainer "dev@bigchaindb.com"
RUN mkdir -p /usr/src/app
COPY . /usr/src/app/
WORKDIR /usr/src/app
RUN apt-get -qq update \
&& apt-get -y upgrade \
&& apt-get install -y jq \
&& pip install . \
&& pip install --no-cache-dir . \
&& apt-get autoremove \
&& apt-get clean

@@ -1,51 +0,0 @@
FROM alpine:3.9
LABEL maintainer "contact@ipdb.global"
ARG TM_VERSION=v0.31.5
RUN mkdir -p /usr/src/app
ENV HOME /root
COPY . /usr/src/app/
WORKDIR /usr/src/app
RUN apk --update add sudo bash \
&& apk --update add python3 openssl ca-certificates git \
&& apk --update add --virtual build-dependencies python3-dev \
libffi-dev openssl-dev build-base jq \
&& apk add --no-cache libstdc++ dpkg gnupg \
&& pip3 install --upgrade pip cffi \
&& pip install -e . \
&& apk del build-dependencies \
&& rm -f /var/cache/apk/*
# Install mongodb and monit
RUN apk --update add mongodb monit
# Install Tendermint
RUN wget https://github.com/tendermint/tendermint/releases/download/${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip \
&& unzip tendermint_${TM_VERSION}_linux_amd64.zip \
&& mv tendermint /usr/local/bin/ \
&& rm tendermint_${TM_VERSION}_linux_amd64.zip
ENV TMHOME=/tendermint
# Set permissions required for mongodb
RUN mkdir -p /data/db /data/configdb \
&& chown -R mongodb:mongodb /data/db /data/configdb
# BigchainDB environment variables
ENV BIGCHAINDB_DATABASE_PORT 27017
ENV BIGCHAINDB_DATABASE_BACKEND localmongodb
ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984
ENV BIGCHAINDB_WSSERVER_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_SCHEME ws
ENV BIGCHAINDB_WSSERVER_ADVERTISED_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME ws
ENV BIGCHAINDB_TENDERMINT_PORT 26657
VOLUME /data/db /data/configdb /tendermint
EXPOSE 27017 28017 9984 9985 26656 26657 26658
WORKDIR $HOME
ENTRYPOINT ["/usr/src/app/pkg/scripts/all-in-one.bash"]
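This deleted Dockerfile is the all-in-one image mentioned in the 2.0 Beta 6 changelog entry below: BigchainDB Server, MongoDB, Tendermint and Monit in a single container, not recommended for production. A hedged build-and-run sketch, with the Dockerfile path and image tag assumed:

```bash
# Build and run the all-in-one image (Dockerfile name and tag are assumptions):
docker build -f Dockerfile-all-in-one -t bigchaindb:all-in-one .
docker run -d --name bigchaindb \
  -p 9984:9984 -p 9985:9985 -p 27017:27017 -p 26657:26657 \
  bigchaindb:all-in-one   # a subset of the ports from the EXPOSE line above
```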

@@ -1,30 +0,0 @@
FROM alpine:latest
LABEL maintainer "contact@ipdb.global"
RUN mkdir -p /usr/src/app
COPY . /usr/src/app/
WORKDIR /usr/src/app
RUN apk --update add sudo \
&& apk --update add python3 py-pip openssl ca-certificates git \
&& apk --update add --virtual build-dependencies python3-dev \
libffi-dev openssl-dev build-base \
&& apk add --no-cache libstdc++ \
&& pip3 install --upgrade pip cffi \
&& pip install -e . \
&& apk del build-dependencies \
&& rm -f /var/cache/apk/*
# When developing with Python in a docker container, we are using PYTHONUNBUFFERED
# to force stdin, stdout and stderr to be totally unbuffered and to capture logs/outputs
ENV PYTHONUNBUFFERED 0
ENV BIGCHAINDB_DATABASE_PORT 27017
ENV BIGCHAINDB_DATABASE_BACKEND $backend
ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984
ENV BIGCHAINDB_WSSERVER_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_SCHEME ws
ENV BIGCHAINDB_WSSERVER_ADVERTISED_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME ws
ENV BIGCHAINDB_TENDERMINT_PORT 26657
ARG backend
RUN bigchaindb -y configure "$backend"
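Because `backend` is a build argument consumed by `bigchaindb -y configure`, the database backend is frozen into the image at build time. An assumed invocation:

```bash
# Build the Alpine dev image with the backend baked in (file name and tag assumed):
docker build -f Dockerfile-alpine --build-arg backend=localmongodb -t bigchaindb:alpine-dev .
```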

@@ -1,15 +1,14 @@
ARG python_version=3.6
FROM python:${python_version}
LABEL maintainer "contact@ipdb.global"
FROM python:3.6
LABEL maintainer "dev@bigchaindb.com"
RUN apt-get update \
&& apt-get install -y git \
&& apt-get install -y vim \
&& pip install -U pip \
&& pip install pynacl \
&& apt-get autoremove \
&& apt-get clean
ARG backend
ARG abci_status
# When developing with Python in a docker container, we are using PYTHONUNBUFFERED
# to force stdin, stdout and stderr to be totally unbuffered and to capture logs/outputs
@@ -24,12 +23,11 @@ ENV BIGCHAINDB_WSSERVER_SCHEME ws
ENV BIGCHAINDB_WSSERVER_ADVERTISED_HOST 0.0.0.0
ENV BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME ws
ENV BIGCHAINDB_TENDERMINT_PORT 26657
ENV BIGCHAINDB_CI_ABCI ${abci_status}
ENV BIGCHAINDB_TENDERMINT_PORT 46657
RUN mkdir -p /usr/src/app
COPY . /usr/src/app/
WORKDIR /usr/src/app
RUN pip install -e .[dev]
RUN bigchaindb -y configure
RUN pip install --no-cache-dir -e .[dev]
RUN bigchaindb -y configure "$backend"
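One side of this diff parameterizes both the Python version and the ABCI flag; the CI install script earlier feeds `abci_status` in through docker-compose. A direct docker equivalent might look like this, with the Dockerfile name assumed and the build-arg names taken from the diff:

```bash
# Direct build with both build args (Dockerfile name is an assumption):
docker build -f Dockerfile-dev \
  --build-arg python_version=3.8 \
  --build-arg abci_status=enable \
  -t bigchaindb:dev .
```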

@@ -0,0 +1,65 @@
# How to Handle Pull Requests
This document is for whoever has the ability to merge pull requests in the Git repositories associated with BigchainDB.
If the pull request is from an employee of BigchainDB GmbH, then you can ignore this document.
If the pull request is from someone who is _not_ an employee of BigchainDB, then:
* Have they agreed to the Individual Contributor Agreement in the past? (Troy, Greg, and others have a list.) If yes, then you can merge the PR and ignore the rest of this document.
* Do they belong to a company or organization which agreed to the Entity Contributor Agreement in the past, and will they be contributing on behalf of that company or organization? (Troy, Greg, and others have a list.) If yes, then you can merge the PR and ignore the rest of this document.
* Otherwise, go to the pull request in question and post a comment using this template:
Hi @nameofuser
Before we can merge this pull request, we need you or your organization to agree to one of our contributor agreements. One of the big concerns for people using and developing open source software is that someone who contributed to the code might claim the code infringes on their copyright or patent. To guard against this, we ask all our contributors to sign a Contributor License Agreement. This gives us the right to use the code contributed and any patents the contribution relies on. It also gives us and our users comfort that they won't be sued for using open source software. We know it's a hassle, but it makes the project more reliable in the long run. Thank you for your understanding and your contribution!
If you are contributing on behalf of yourself (and not on behalf of your employer or another organization you are part of) then you should:
1. Go to: https://www.bigchaindb.com/cla/
2. Read the Individual Contributor Agreement
3. Fill in the form "For Individuals"
4. Check the box to agree
5. Click the SEND button
If you're contributing as an employee, and/or you want all employees of your employing organization to be covered by our contributor agreement, then someone in your organization with the authority to enter agreements on behalf of all employees must do the following:
1. Go to: https://www.bigchaindb.com/cla/
2. Read the Entity Contributor Agreement
3. Fill in the form "For Organizations"
4. Check the box to agree
5. Click the SEND button
We will email you (or your employer) with further instructions.
(END OF COMMENT)
Once they click SEND, we (BigchainDB) will get an email with the information in the form. (Troy gets those emails for sure, I'm not sure who else.) The next step is to send an email to the email address submitted in the form, saying something like (where the stuff in [square brackets] should be replaced):
Hi [NAME],
The next step is for you to copy the following block of text into the comments of Pull Request #[NN] on GitHub:
BEGIN BLOCK
This is to confirm that I agreed to and accepted the BigchainDB [Entity/Individual] Contributor Agreement at https://www.bigchaindb.com/cla/ and to represent and warrant that I have authority to do so.
[Insert long random string here. One good source of those is https://www.grc.com/passwords.htm ]
END BLOCK
(END OF EMAIL)
The next step is to wait for them to copy that comment into the comments of the indicated pull request. Once they do so, it's safe to merge the pull request.
## How to Handle CLA Agreement Emails with No Associated Pull Request
Reply with an email like this:
Hi [First Name],
Today I got an email (copied below) to tell me that you agreed to the BigchainDB Contributor License Agreement. Did you intend to do that?
If no, then you can ignore this email.
If yes, then there's another step to connect your email address with your GitHub account. To do that, you must first create a pull request in one of the BigchainDB repositories on GitHub. Once you've done that, please reply to this email with a link to the pull request. Then I'll send you a special block of text to paste into the comments on that pull request.

LICENSE: 201 changed lines

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,24 +1,27 @@
# Copyrights and Licenses
# Code Licenses
## Copyrights
Except as noted in the **Exceptions** section below, for all code in this repository, BigchainDB GmbH ("We") either:
For all the code and documentation in this repository, the copyright is owned by one or more of the following:
1. owns the copyright, or
2. owns the right to sublicense it under any license (because all external contributors must agree to a Contributor License Agreement).
- BigchainDB GmbH
- A BigchainDB contributor who agreed to a BigchainDB Contributor License Agreement (CLA) with BigchainDB GmbH. (See [BEP-16](https://github.com/bigchaindb/BEPs/tree/master/16).)
- A BigchainDB contributor who signed off on the Developer Certificate of Origin (DCO) for all their contributions. (See [BEP-24](https://github.com/bigchaindb/BEPs/tree/master/24).)
- (Rarely, see the **Exceptions** section below) A third party who licensed the code in question under an open source license.
Therefore We can choose how to license all the code in this repository (except for the Exceptions). We can license it to Joe Xname under one license and Company Yname under a different license.
## Code Licenses
The two general options are:
All code in this repository, including short code snippets in the documentation, but not including the **Exceptions** noted below, is licensed under the Apache License, Version 2.0, the full text of which can be found at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0).
1. You can get it under a commercial license for a fee. We can negotiate the terms of that license. It's not like we have some standard take-it-or-leave-it commercial license. If you want to modify it and keep your modifications private, then that's certainly possible. Just ask.
2. You can get it under the AGPLv3 license for free. You don't even have to ask us. That's because all code in _this_ repository is licensed under the GNU Affero General Public License version 3 (AGPLv3), the full text of which can be found at [http://www.gnu.org/licenses/agpl.html](http://www.gnu.org/licenses/agpl.html).
For the licenses on all other BigchainDB-related code (i.e. in other repositories), see the LICENSE file in the associated repository.
If you don't like the AGPL license, then contact us to get a different license.
## Documentation Licenses
All short code snippets embedded in the official BigchainDB _documentation_ are licensed under the Apache License, Version 2.0, the full text of which can be found at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0).
The official BigchainDB documentation, _except for the short code snippets embedded within it_, is licensed under a Creative Commons Attribution 4.0 International license, the full text of which can be found at [http://creativecommons.org/licenses/by/4.0/legalcode](http://creativecommons.org/licenses/by/4.0/legalcode).
For the licenses on all other BigchainDB-related code, see the LICENSE file in the associated repository.
## Exceptions
# Documentation Licenses
The official BigchainDB documentation, _except for the short code snippets embedded within it_, is licensed under a Creative Commons Attribution-ShareAlike 4.0 International license, the full text of which can be found at [http://creativecommons.org/licenses/by-sa/4.0/legalcode](http://creativecommons.org/licenses/by-sa/4.0/legalcode).
# Exceptions
The contents of the `k8s/nginx-openresty/` directory are licensed as described in the `LICENSE.md` file in that directory.

@@ -1,4 +1,4 @@
.PHONY: help run start stop logs test test-unit test-unit-watch test-acceptance cov doc doc-acceptance clean reset release dist check-deps clean-build clean-pyc clean-test
.PHONY: check-deps help run test test-all coverage clean clean-build clean-pyc clean-test docs servedocs release dist install
.DEFAULT_GOAL := help
@@ -56,10 +56,7 @@ help: ## Show this help
@$(HELP) < $(MAKEFILE_LIST)
run: check-deps ## Run BigchainDB from source (stop it with ctrl+c)
# although bigchaindb has tendermint and mongodb in depends_on,
# launch them first otherwise tendermint will get stuck upon sending yet another log
# due to some docker-compose issue; does not happen when containers are run as daemons
@$(DC) up --no-deps mongodb tendermint bigchaindb
@$(DC) up bigchaindb
start: check-deps ## Run BigchainDB from source and daemonize it (stop with `make stop`)
@$(DC) up -d bigchaindb
@@ -70,30 +67,21 @@ stop: check-deps ## Stop BigchainDB
logs: check-deps ## Attach to the logs
@$(DC) logs -f bigchaindb
test: check-deps test-unit test-acceptance ## Run unit and acceptance tests
test: check-deps ## Run all tests once
@$(DC) run --rm bigchaindb pytest -v
test-unit: check-deps ## Run all tests once
@$(DC) up -d bdb
@$(DC) exec bigchaindb pytest
test-unit-watch: check-deps ## Run all tests and wait. Every time you change code, tests will be run again
@$(DC) run --rm --no-deps bigchaindb pytest -f
test-acceptance: check-deps ## Run all acceptance tests
@./run-acceptance-test.sh
test-watch: check-deps ## Run all tests and wait. Every time you change code, tests will be run again
@$(DC) run --rm bigchaindb pytest -f -v
cov: check-deps ## Check code coverage and open the result in the browser
@$(DC) run --rm bigchaindb pytest -v --cov=bigchaindb --cov-report html
$(BROWSER) htmlcov/index.html
doc: check-deps ## Generate HTML documentation and open it in the browser
doc: ## Generate HTML documentation and open it in the browser
@$(DC) run --rm --no-deps bdocs make -C docs/root html
@$(DC) run --rm --no-deps bdocs make -C docs/server html
$(BROWSER) docs/root/build/html/index.html
doc-acceptance: check-deps ## Create documentation for acceptance tests
@$(DC) run --rm python-acceptance pycco -i -s /src -d /docs
$(BROWSER) acceptance/python/docs/index.html
clean: clean-build clean-pyc clean-test ## Remove all build, test, coverage and Python artifacts
@$(ECHO) "Cleaning was successful."
@@ -136,7 +124,6 @@ clean-pyc: # Remove Python file artifacts
@find . -name '__pycache__' -exec rm -fr {} +
clean-test: # Remove test and coverage artifacts
@find . -name '.pytest_cache' -exec rm -fr {} +
@rm -fr .tox/
@rm -f .coverage
@rm -fr htmlcov/
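Throughout the Makefile, `$(DC)` wraps docker-compose, so any target can be replayed by hand. The master-side `cov` target, for instance, expands roughly to:

```bash
# Manual equivalent of `make cov` (per the target above); $(BROWSER) just opens the file:
docker-compose run --rm bigchaindb pytest -v --cov=bigchaindb --cov-report html
xdg-open htmlcov/index.html   # or `open` on macOS
```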

@@ -1,10 +1,3 @@
<!---
Copyright © 2020 Interplanetary Database Association e.V.,
BigchainDB and IPDB software contributors.
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
# Python Style Guide
This guide starts out with our general Python coding style guidelines and ends with a section on how we write & run (Python) tests.
@@ -92,6 +85,6 @@ flake8 --max-line-length 119 bigchaindb/
## Writing and Running (Python) Tests
The content of this section was moved to [`bigchaindb/tests/README.md`](https://github.com/bigchaindb/bigchaindb/blob/master/tests/README.md).
The content of this section was moved to [`bigchaindb/tests/README.md`](./tests/README.md).
Note: We automatically run all tests on all pull requests (using Travis CI), so you should definitely run all tests locally before you submit a pull request. See the above-linked README file for instructions.

@@ -1,37 +1,20 @@
<!---
Copyright © 2020 Interplanetary Database Association e.V.,
BigchainDB and IPDB software contributors.
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
<!--- There is no shield to get the latest version
(including pre-release versions) from PyPI,
so show the latest GitHub release instead.
--->
[![PyPI](https://img.shields.io/pypi/status/bigchaindb.svg?maxAge=2592000)](https://pypi.python.org/pypi/BigchainDB)
[![PyPI](https://img.shields.io/pypi/v/bigchaindb.svg)](https://pypi.python.org/pypi/BigchainDB)
[![Travis branch](https://img.shields.io/travis/bigchaindb/bigchaindb/master.svg)](https://travis-ci.org/bigchaindb/bigchaindb)
[![Codecov branch](https://img.shields.io/codecov/c/github/bigchaindb/bigchaindb/master.svg)](https://codecov.io/github/bigchaindb/bigchaindb?branch=master)
[![Latest release](https://img.shields.io/github/release/bigchaindb/bigchaindb/all.svg)](https://github.com/bigchaindb/bigchaindb/releases)
[![Status on PyPI](https://img.shields.io/pypi/status/bigchaindb.svg)](https://pypi.org/project/BigchainDB/)
[![Travis branch](https://img.shields.io/travis/bigchaindb/bigchaindb/master.svg)](https://travis-ci.com/bigchaindb/bigchaindb)
[![Documentation Status](https://readthedocs.org/projects/bigchaindb-server/badge/?version=latest)](https://docs.bigchaindb.com/projects/server/en/latest/)
[![Join the chat at https://gitter.im/bigchaindb/bigchaindb](https://badges.gitter.im/bigchaindb/bigchaindb.svg)](https://gitter.im/bigchaindb/bigchaindb?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# BigchainDB Server
BigchainDB is the blockchain database. This repository is for _BigchainDB Server_.
BigchainDB is a blockchain database.
## The Basics
## Run and test BigchainDB from the `master` branch
Running and testing the latest version of BigchainDB is easy. Make sure you have a recent version of [Docker Compose](https://docs.docker.com/compose/install/) installed on your host.
* [Try the Quickstart](https://docs.bigchaindb.com/projects/server/en/latest/quickstart.html)
* [Read the BigchainDB 2.0 whitepaper](https://www.bigchaindb.com/whitepaper/)
* [Check out the _Hitchhiker's Guide to BigchainDB_](https://www.bigchaindb.com/developers/guide/)
## Run and Test BigchainDB Server from the `master` Branch
Running and testing the latest version of BigchainDB Server is easy. Make sure you have a recent version of [Docker Compose](https://docs.docker.com/compose/install/) installed. When you are ready, fire up a terminal and run:
```text
Whenever you are ready, fire up a terminal and run:
```
git clone https://github.com/bigchaindb/bigchaindb.git
cd bigchaindb
make run
@ -40,19 +23,26 @@ make run
BigchainDB should be reachable now on `http://localhost:9984/`.
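As a quick smoke test, you can hit the HTTP API root; a minimal sketch, assuming the `requests` package is installed and the node started by `make run` is up:

```python
# Query the API root; a healthy node answers with a JSON document
# describing the server (e.g. its version and available endpoints).
import requests

info = requests.get('http://localhost:9984/').json()
print(info)
```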
There are also other commands you can execute:
* `make start`: Run BigchainDB from source and daemonize it (stop it with `make stop`).
* `make stop`: Stop BigchainDB.
* `make logs`: Attach to the logs.
* `make test`: Run all unit and acceptance tests.
* `make test-unit-watch`: Run all tests and wait. Every time you change code, tests will be run again.
* `make cov`: Check code coverage and open the result in the browser.
* `make doc`: Generate HTML documentation and open it in the browser.
* `make clean`: Remove all build, test, coverage and Python artifacts.
* `make reset`: Stop and REMOVE all containers. WARNING: you will LOSE all data stored in BigchainDB.
- `make start`: Run BigchainDB from source and daemonize it (stop it with `make stop`).
- `make stop`: Stop BigchainDB.
- `make logs`: Attach to the logs.
- `make test`: Run all tests.
- `make test-watch`: Run all tests and wait. Every time you change code, tests will be run again.
- `make cov`: Check code coverage and open the result in the browser.
- `make doc`: Generate HTML documentation and open it in the browser.
- `make clean`: Remove all build, test, coverage and Python artifacts.
- `make reset`: Stop and REMOVE all containers. WARNING: you will LOSE all data stored in BigchainDB.
To view all commands available, run `make`.
## Get Started with BigchainDB Server
### [Quickstart](https://docs.bigchaindb.com/projects/server/en/latest/quickstart.html)
### [Set Up & Run a Dev/Test Node](https://docs.bigchaindb.com/projects/server/en/latest/dev-and-test/index.html)
### [Run BigchainDB Server with Docker](https://docs.bigchaindb.com/projects/server/en/latest/appendices/run-with-docker.html)
### [Run BigchainDB Server with Vagrant](https://docs.bigchaindb.com/projects/server/en/latest/appendices/run-with-vagrant.html)
### [Run BigchainDB Server with Ansible](https://docs.bigchaindb.com/projects/server/en/latest/appendices/run-with-ansible.html)
## Links for Everyone
* [BigchainDB.com](https://www.bigchaindb.com/) - the main BigchainDB website, including newsletter signup
@ -64,7 +54,7 @@ To view all commands available, run `make`.
* [All BigchainDB Documentation](https://docs.bigchaindb.com/en/latest/)
* [BigchainDB Server Documentation](https://docs.bigchaindb.com/projects/server/en/latest/index.html)
* [CONTRIBUTING.md](.github/CONTRIBUTING.md) - how to contribute
* [CONTRIBUTING.md](CONTRIBUTING.md) - how to contribute
* [Community guidelines](CODE_OF_CONDUCT.md)
* [Open issues](https://github.com/bigchaindb/bigchaindb/issues)
* [Open pull requests](https://github.com/bigchaindb/bigchaindb/pulls)

View File

@ -1,77 +0,0 @@
<!---
Copyright © 2020 Interplanetary Database Association e.V.,
BigchainDB and IPDB software contributors.
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
<!--- There is no shield to get the latest version
(including pre-release versions) from PyPI,
so show the latest GitHub release instead.
--->
[![Codecov branch](https://img.shields.io/codecov/c/github/bigchaindb/bigchaindb/master.svg)](https://codecov.io/github/bigchaindb/bigchaindb?branch=master)
[![Latest release](https://img.shields.io/github/release/bigchaindb/bigchaindb/all.svg)](https://github.com/bigchaindb/bigchaindb/releases)
[![Status on PyPI](https://img.shields.io/pypi/status/bigchaindb.svg)](https://pypi.org/project/BigchainDB/)
[![Travis branch](https://img.shields.io/travis/bigchaindb/bigchaindb/master.svg)](https://travis-ci.com/bigchaindb/bigchaindb)
[![Documentation Status](https://readthedocs.org/projects/bigchaindb-server/badge/?version=latest)](https://docs.bigchaindb.com/projects/server/en/latest/)
[![Join the chat at https://gitter.im/bigchaindb/bigchaindb](https://badges.gitter.im/bigchaindb/bigchaindb.svg)](https://gitter.im/bigchaindb/bigchaindb?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# BigchainDB Server
BigchainDB is the blockchain database. This is the repository for _BigchainDB Server_.
## The Basics
* [Try the Quickstart](https://docs.bigchaindb.com/projects/server/en/latest/quickstart.html)
* [Read the BigchainDB 2.0 whitepaper](https://www.bigchaindb.com/whitepaper/)
* [Check out the _Hitchhiker's Guide to BigchainDB_](https://www.bigchaindb.com/developers/guide/)
## Run and Test BigchainDB Server from the `master` Branch
Running and testing the latest version of BigchainDB Server is easy. Make sure you have a recent version of [Docker Compose](https://docs.docker.com/compose/install/) installed. When you are ready, fire up a terminal and run:
```text
git clone https://github.com/bigchaindb/bigchaindb.git
cd bigchaindb
make run
```
BigchainDB should now be reachable at `http://localhost:9984/`.
There are also other commands you can run:
* `make start`: Run BigchainDB from source and daemonize it (stop it with `make stop`).
* `make stop`: Stop BigchainDB.
* `make logs`: Attach to the logs.
* `make test`: Run all unit and acceptance tests.
* `make test-unit-watch`: Run all tests and wait. Every time you change code, tests will be run again.
* `make cov`: Check code coverage and open the result in the browser.
* `make doc`: Generate HTML documentation and open it in the browser.
* `make clean`: Remove all build, test, coverage and Python artifacts.
* `make reset`: Stop and REMOVE all containers. WARNING: you will LOSE all data stored in BigchainDB.
To view all available commands, run `make`.
## Links for Everyone
* [BigchainDB.com](https://www.bigchaindb.com/) - the main BigchainDB website, including newsletter signup
* [Roadmap](https://github.com/bigchaindb/org/blob/master/ROADMAP.md)
* [Blog](https://medium.com/the-bigchaindb-blog)
* [Twitter](https://twitter.com/BigchainDB)
## Links for Developers
* [All BigchainDB Documentation](https://docs.bigchaindb.com/en/latest/)
* [BigchainDB Server Documentation](https://docs.bigchaindb.com/projects/server/en/latest/index.html)
* [CONTRIBUTING.md](.github/CONTRIBUTING.md) - how to contribute
* [Community guidelines](CODE_OF_CONDUCT.md)
* [Open issues](https://github.com/bigchaindb/bigchaindb/issues)
* [Open pull requests](https://github.com/bigchaindb/bigchaindb/pulls)
* [Gitter chat room](https://gitter.im/bigchaindb/bigchaindb)
## Legal
* [Licenses](LICENSES.md) - open source & open content
* [Imprint](https://www.bigchaindb.com/imprint/)
* [Contact Us](https://www.bigchaindb.com/contact/)

View File

@ -1,65 +0,0 @@
[![Codecov branch](https://img.shields.io/codecov/c/github/bigchaindb/bigchaindb/master.svg)](https://codecov.io/github/bigchaindb/bigchaindb?branch=master)
[![Latest release](https://img.shields.io/github/release/bigchaindb/bigchaindb/all.svg)](https://github.com/bigchaindb/bigchaindb/releases)
[![Status on PyPI](https://img.shields.io/pypi/status/bigchaindb.svg)](https://pypi.org/project/BigchainDB/)
[![Travis branch](https://img.shields.io/travis/bigchaindb/bigchaindb/master.svg)](https://travis-ci.org/bigchaindb/bigchaindb)
[![Documentation Status](https://readthedocs.org/projects/bigchaindb-server/badge/?version=latest)](https://docs.bigchaindb.com/projects/server/en/latest/)
[![Join the chat at https://gitter.im/bigchaindb/bigchaindb](https://badges.gitter.im/bigchaindb/bigchaindb.svg)](https://gitter.im/bigchaindb/bigchaindb?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# BigchainDB Server
BigchainDB is the blockchain database. This is the repository for _BigchainDB Server_.
### The Basics
* [Try the Quickstart](https://docs.bigchaindb.com/projects/server/en/latest/quickstart.html)
* [Read the BigchainDB 2.0 whitepaper](https://www.bigchaindb.com/whitepaper/)
* [Check out the _Hitchhiker's Guide to BigchainDB_](https://www.bigchaindb.com/developers/guide/)
### Run and Test BigchainDB Server from the `master` Branch
Running and testing the latest version of BigchainDB Server is easy. Make sure you have a recent version of [Docker Compose](https://docs.docker.com/compose/install/) installed. When you are ready, run the following in a terminal:
```text
git clone https://github.com/bigchaindb/bigchaindb.git
cd bigchaindb
make run
```
BigchainDB should now be reachable at `http://localhost:9984/`.
There are also other commands you can run:
* `make start`: Run BigchainDB from source and daemonize it (stop it with `make stop`).
* `make stop`: Stop BigchainDB.
* `make logs`: Attach to the logs.
* `make test`: Run all unit and acceptance tests.
* `make test-unit-watch`: Run all tests and wait. Every time you change code, tests will be run again.
* `make cov`: Check code coverage and open the result in the browser.
* `make doc`: Generate HTML documentation and open it in the browser.
* `make clean`: Remove all build, test, coverage and Python artifacts.
* `make reset`: Stop and REMOVE all containers. WARNING: you will LOSE all data stored in BigchainDB.
To view all available commands, run `make`.
### Links for Everyone
* [BigchainDB.com](https://www.bigchaindb.com/) - the main BigchainDB website, including newsletter signup
* [Roadmap](https://github.com/bigchaindb/org/blob/master/ROADMAP.md)
* [Blog](https://medium.com/the-bigchaindb-blog)
* [Twitter](https://twitter.com/BigchainDB)
### Links for Developers
* [All BigchainDB Documentation](https://docs.bigchaindb.com/en/latest/)
* [BigchainDB Server Documentation](https://docs.bigchaindb.com/projects/server/en/latest/index.html)
* [CONTRIBUTING.md](https://github.com/bigchaindb/bigchaindb/blob/master/.github/CONTRIBUTING.md) - how to contribute
* [Community guidelines](https://github.com/bigchaindb/bigchaindb/blob/master/CODE_OF_CONDUCT.md)
* [Open issues](https://github.com/bigchaindb/bigchaindb/issues)
* [Open pull requests](https://github.com/bigchaindb/bigchaindb/pulls)
* [Gitter chat room](https://gitter.im/bigchaindb/bigchaindb)
### Legal
* [Licenses](https://github.com/bigchaindb/bigchaindb/blob/master/LICENSES.md) - open source & open content
* [Imprint](https://www.bigchaindb.com/imprint/)
* [Contact Us](https://www.bigchaindb.com/contact/)

View File

@ -1,10 +1,3 @@
<!---
Copyright © 2020 Interplanetary Database Association e.V.,
BigchainDB and IPDB software contributors.
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
# Our Release Process
## Notes
@ -19,10 +12,6 @@ to [regular semantic versioning](http://semver.org/), but there's no hyphen, e.g
- `4.5.2a1` not `4.5.2-a1` for the first Alpha release
- `3.4.5rc2` not `3.4.5-rc2` for Release Candidate 2
**Note 1:** For Git tags (which are used to identify releases on GitHub), we append a `v` in front. For example, the Git tag for version `2.0.0a1` was `v2.0.0a1`.
**Note 2:** For Docker image tags (e.g. on Docker Hub), we use longer version names for Alpha, Beta and Release Candidate releases. For example, the Docker image tag for version `2.0.0a2` was `2.0.0-alpha2`.
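If you want to sanity-check one of these version strings, the `packaging` library (which implements the PEP 440 rules this scheme follows) can parse and order them; a quick sketch:

```python
# PEP 440 pre-release versions carry no hyphen and sort before the final release.
from packaging.version import Version

assert Version('2.0.0a1').is_prerelease
assert Version('2.0.0a1') < Version('2.0.0rc1') < Version('2.0.0')
```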
We use `0.9` and `0.9.0` as example version and short-version values below. You should replace those with the correct values for your new version.
We follow [BEP-1](https://github.com/bigchaindb/BEPs/tree/master/1), which is our variant of C4, the Collective Code Construction Contract, so a release is just a [tagged commit](https://git-scm.com/book/en/v2/Git-Basics-Tagging) on the `master` branch, i.e. a label for a particular Git commit.
@ -34,68 +23,39 @@ The following steps are what we do to release a new version of _BigchainDB Serve
1. Create a pull request where you make the following changes:
- Update `CHANGELOG.md`
- Update all Docker image tags in all Kubernetes YAML files (in the `k8s/` directory).
For example, in the files:
- `k8s/bigchaindb/bigchaindb-ss.yaml` and
- `k8s/dev-setup/bigchaindb.yaml`
find the line of the form `image: bigchaindb/bigchaindb:0.8.1` and change the version number to the new version number, e.g. `0.9.0`. (This is the Docker image that Kubernetes should pull from Docker Hub.)
Keep in mind that this is a _Docker image tag_ so our naming convention is
a bit different; see Note 2 in the **Notes** section above.
- In `k8s/bigchaindb/bigchaindb-dep.yaml` _and_ in `k8s/dev-setup/bigchaindb.yaml`, find the line of the form `image: bigchaindb/bigchaindb:0.8.1` and change the version number to the new version number, e.g. `0.9.0`. (This is the Docker image that Kubernetes should pull from Docker Hub.)
- In `bigchaindb/version.py`:
- update `__version__` to e.g. `0.9.0` (with no `.dev` on the end)
- update `__short_version__` to e.g. `0.9` (with no `.dev` on the end)
- In the docs about installing BigchainDB (and Tendermint), and in the associated scripts, recommend/install a version of Tendermint that _actually works_ with the soon-to-be-released version of BigchainDB. You can find all such references by doing a search for the previously-recommended version number, such as `0.31.5`.
- In `setup.py`, _maybe_ update the development status item in the `classifiers` list. For example, one allowed value is `"Development Status :: 5 - Production/Stable"`. The [allowed values are listed at pypi.python.org](https://pypi.python.org/pypi?%3Aaction=list_classifiers).
2. **Wait for all the tests to pass!**
3. Merge the pull request into the `master` branch.
4. Go to the [bigchaindb/bigchaindb Releases page on GitHub](https://github.com/bigchaindb/bigchaindb/releases)
1. **Wait for all the tests to pass!**
1. Merge the pull request into the `master` branch.
1. Go to the [bigchaindb/bigchaindb Releases page on GitHub](https://github.com/bigchaindb/bigchaindb/releases)
and click the "Draft a new release" button.
5. Fill in the details:
1. Fill in the details:
- **Tag version:** version number preceded by `v`, e.g. `v0.9.1`
- **Target:** the last commit that was just merged. In other words, that commit will get a Git tag with the value given for tag version above.
- **Title:** Same as tag version above, e.g `v0.9.1`
- **Description:** The body of the changelog entry (Added, Changed, etc.)
6. Click "Publish release" to publish the release on GitHub.
7. On your local computer, make sure you're on the `master` branch and that it's up-to-date with the `master` branch in the bigchaindb/bigchaindb repository (e.g. `git pull upstream master`). We're going to use that to push a new `bigchaindb` package to PyPI.
8. Make sure you have a `~/.pypirc` file containing credentials for PyPI.
9. Do `make release` to build and publish the new `bigchaindb` package on PyPI. For this step you need to have `twine` installed. If you get an error like `Makefile:135: recipe for target 'clean-pyc' failed` then try doing
```text
sudo chown -R $(whoami):$(whoami) .
```
10. [Log in to readthedocs.org](https://readthedocs.org/accounts/login/) and go to the **BigchainDB Server** project, then:
- Click on "Builds", select "latest" from the drop-down menu, then click the "Build Version:" button.
- Wait for the build of "latest" to finish. This can take a few minutes.
1. Click "Publish release" to publish the release on GitHub.
1. On your local computer, make sure you're on the `master` branch and that it's up-to-date with the `master` branch in the bigchaindb/bigchaindb repository (e.g. `git fetch upstream` and `git merge upstream/master`). We're going to use that to push a new `bigchaindb` package to PyPI.
1. Make sure you have a `~/.pypirc` file containing credentials for PyPI.
1. Do `make release` to build and publish the new `bigchaindb` package on PyPI.
1. [Log in to readthedocs.org](https://readthedocs.org/accounts/login/) and go to the **BigchainDB Server** project, then:
- Go to Admin --> Advanced Settings
and make sure that "Default branch:" (i.e. what "latest" points to)
is set to the new release's tag, e.g. `v0.9.1`.
(It won't be an option if you didn't wait for the build of "latest" to finish.)
Then scroll to the bottom and click "Save".
(Don't miss the `v` in front.)
- Go to Admin --> Versions
and under **Choose Active Versions**, do these things:
1. Make sure that the new version's tag is "Active" and "Public"
2. Make sure the **stable** branch is _not_ active.
3. Scroll to the bottom of the page and click "Save".
11. Go to [Docker Hub](https://hub.docker.com/) and sign in, then:
- Click on "Organizations"
- Click on "bigchaindb"
- Click on "bigchaindb/bigchaindb"
- Click on "Build Settings"
- Find the row where "Docker Tag Name" equals `latest`
and change the value of "Name" to the name (Git tag)
of the new release, e.g. `v0.9.0`.
- If the release is an Alpha, Beta or Release Candidate release,
then a new row must be added.
You can do that by clicking the green "+" (plus) icon.
The contents of the new row should be similar to the existing rows
of previous releases like that.
- Click on "Tags"
- Delete the "latest" tag (so we can rebuild it)
- Click on "Build Settings" again
- Click on the "Trigger" button for the "latest" tag and make sure it worked by clicking on "Tags" again
- If the release is an Alpha, Beta or Release Candidate release,
then click on the "Trigger" button for that tag as well.
1. Make sure the **stable** branch is _not_ active.
1. Scroll to the bottom of the page and click the "Submit" button.
1. Go to [Docker Hub](https://hub.docker.com/), sign in, go to bigchaindb/bigchaindb, and go to Settings --> Build Settings. Find the row where "Docker Tag Name" equals `latest` and change the value of "Name" to the name (Git tag) of the new release, e.g. `v0.9.0`.
Congratulations, you have released a new version of BigchainDB Server!
## Post-Release Steps
In the `master` branch, open `bigchaindb/version.py` and increment the version to the next planned release, e.g. `0.10.0.dev`. Note: if you just released `X.Y.Zrc1`, then bump the release candidate number instead, e.g. to `X.Y.Zrc2`. This step is so people reading the latest docs will know that they're for the latest (`master` branch) version of BigchainDB Server, not the docs at the time of the most recent release (which are also available).
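For illustration, `bigchaindb/version.py` might look like this after such a bump (a sketch; the exact numbers depend on the release you just made, and the `__short_version__` value is an assumption based on the conventions above):

```python
# bigchaindb/version.py (sketch): development versions carry a .dev suffix
__version__ = '0.10.0.dev'
__short_version__ = '0.10.dev'
```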

View File

@ -1,10 +1,3 @@
<!---
Copyright © 2020 Interplanetary Database Association e.V.,
BigchainDB and IPDB software contributors.
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
# BigchainDB Roadmap
We moved the BigchainDB Roadmap to the bigchaindb/org repository; see:

View File

@ -1,27 +0,0 @@
<!---
Copyright © 2020 Interplanetary Database Association e.V.,
BigchainDB and IPDB software contributors.
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
# Acceptance test suite
This directory contains the acceptance test suite for BigchainDB.
The suite uses Docker Compose to set up a single BigchainDB node, run all tests, and finally stop the node. In the future we will add support for a four node network setup.
## Running the tests
It should be as easy as `make test-acceptance`.
Note that `make test-acceptance` will take some time to start the node and to shut it down. If you are developing a test, or you wish to run a specific test in the acceptance test suite, first start the node with `make start`. After the node is running, you can run `pytest` inside the `python-acceptance` container with:
```bash
docker-compose run --rm python-acceptance pytest <use whatever option you need>
```
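For example, `docker-compose run --rm python-acceptance pytest -k basic` would run only the tests whose names match `basic` (the `-k` filter here is just an illustration; use whatever pytest options you need).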
## Writing and documenting the tests
Tests are sometimes difficult to read. For acceptance tests, we try to be really explicit about what the test is doing, so please write code that is *simple* and easy to understand. We decided to use literate-programming documentation. To generate the documentation run:
```bash
make doc-acceptance
```

View File

@ -1 +0,0 @@
docs

View File

@ -1,9 +0,0 @@
FROM python:3.6.3
RUN mkdir -p /src
RUN pip install --upgrade \
pycco \
websocket-client~=0.47.0 \
pytest~=3.0 \
bigchaindb-driver~=0.6.2 \
blns

View File

@ -1,125 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
# # Basic Acceptance Test
# Here we check that the primitives of the system behave as expected.
# As you will see, this script tests basic stuff like:
#
# - create a transaction
# - check if the transaction is stored
# - check for the outputs of a given public key
# - transfer the transaction to another key
#
# We run a series of checks for each step, that is, retrieving the transaction from
# the remote system, and also checking the `outputs` of a given public key.
#
# This acceptance test is a rip-off of our
# [tutorial](https://docs.bigchaindb.com/projects/py-driver/en/latest/usage.html).
# ## Imports
# We need some utils from the `os` package, since we will interact with
# env variables.
import os
# For this test case we import and use the Python Driver.
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair
def test_basic():
# ## Set up a connection to BigchainDB
# To use BigchainDB we need a connection. Here we create one. By default we
# connect to localhost, but you can override this value using the env variable
# called `BIGCHAINDB_ENDPOINT`; a valid value must include the scheme:
# `https://example.com:9984`
bdb = BigchainDB(os.environ.get('BIGCHAINDB_ENDPOINT'))
# ## Create keypairs
# This test requires the interaction between two actors with their own keypair.
# The two keypairs will be called—drum roll—Alice and Bob.
alice, bob = generate_keypair(), generate_keypair()
# ## Alice registers her bike in BigchainDB
# Alice has a nice bike, and here she creates the "digital twin"
# of her bike.
bike = {'data': {'bicycle': {'serial_number': 420420}}}
# She prepares a `CREATE` transaction...
prepared_creation_tx = bdb.transactions.prepare(
operation='CREATE',
signers=alice.public_key,
asset=bike)
# ... and she fulfills it with her private key.
fulfilled_creation_tx = bdb.transactions.fulfill(
prepared_creation_tx,
private_keys=alice.private_key)
# We will use the `id` of this transaction several times, so we store it in
# a variable with a short and easy name
bike_id = fulfilled_creation_tx['id']
# Now she is ready to send it to the BigchainDB Network.
sent_creation_tx = bdb.transactions.send_commit(fulfilled_creation_tx)
# And just to be 100% sure, she also checks if she can retrieve
# it from the BigchainDB node.
assert bdb.transactions.retrieve(bike_id), 'Cannot find transaction {}'.format(bike_id)
# Alice is now the proud owner of one unspent asset.
assert len(bdb.outputs.get(alice.public_key, spent=False)) == 1
assert bdb.outputs.get(alice.public_key)[0]['transaction_id'] == bike_id
# ## Alice transfers her bike to Bob
# After registering her bike, Alice is ready to transfer it to Bob.
# She needs to create a new `TRANSFER` transaction.
# A `TRANSFER` transaction contains a pointer to the original asset. The original asset
# is identified by the `id` of the `CREATE` transaction that defined it.
transfer_asset = {'id': bike_id}
# Alice wants to spend the one and only output available, the one with index `0`.
output_index = 0
output = fulfilled_creation_tx['outputs'][output_index]
# Here, she defines the `input` of the `TRANSFER` transaction. The `input` contains
# several keys:
#
# - `fulfillment`, taken from the previous `CREATE` transaction.
# - `fulfills`, that specifies which condition she is fulfilling.
# - `owners_before`.
transfer_input = {'fulfillment': output['condition']['details'],
'fulfills': {'output_index': output_index,
'transaction_id': fulfilled_creation_tx['id']},
'owners_before': output['public_keys']}
# Now that all the elements are set, she creates the actual transaction...
prepared_transfer_tx = bdb.transactions.prepare(
operation='TRANSFER',
asset=transfer_asset,
inputs=transfer_input,
recipients=bob.public_key)
# ... and signs it with her private key.
fulfilled_transfer_tx = bdb.transactions.fulfill(
prepared_transfer_tx,
private_keys=alice.private_key)
# She finally sends the transaction to a BigchainDB node.
sent_transfer_tx = bdb.transactions.send_commit(fulfilled_transfer_tx)
# And just to be 100% sure, she also checks if she can retrieve
# it from the BigchainDB node.
assert bdb.transactions.retrieve(fulfilled_transfer_tx['id']) == sent_transfer_tx
# Now Alice has zero unspent transactions.
assert len(bdb.outputs.get(alice.public_key, spent=False)) == 0
# While Bob has one.
assert len(bdb.outputs.get(bob.public_key, spent=False)) == 1
# Bob double checks what he got was the actual bike.
bob_tx_id = bdb.outputs.get(bob.public_key, spent=False)[0]['transaction_id']
assert bdb.transactions.retrieve(bob_tx_id) == sent_transfer_tx

View File

@ -1,181 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
# # Divisible assets integration testing
# This test checks if we can successfully divide assets.
# The script tests various things like:
#
# - create a transaction with a divisible asset and issue them to someone
# - check if the transaction is stored and has the right amount of tokens
# - spend some tokens
# - try to spend more tokens than available
#
# We run a series of checks for each step, that is retrieving
# the transaction from the remote system, and also checking the `amount`
# of a given transaction.
#
# This integration test is a rip-off of our
# [tutorial](https://docs.bigchaindb.com/projects/py-driver/en/latest/usage.html).
# ## Imports
# We need some utils from the `os` package, since we will interact with
# env variables.
# We need the `pytest` package to catch the `BadRequest` exception properly.
# And of course, we also need the `BadRequest`.
import os
import pytest
from bigchaindb_driver.exceptions import BadRequest
# For this test case we import and use the Python Driver.
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair
def test_divisible_assets():
# ## Set up a connection to BigchainDB
# Check [test_basic.py](./test_basic.html) to get some more details
# about the endpoint.
bdb = BigchainDB(os.environ.get('BIGCHAINDB_ENDPOINT'))
# Oh look, it is Alice again and she brought her friend Bob along.
alice, bob = generate_keypair(), generate_keypair()
# ## Alice creates a time sharing token
# Alice wants to go on vacation, while Bob's bike just broke down.
# Alice decides to rent her bike to Bob while she is gone.
# So she prepares a `CREATE` transaction to issue 10 tokens.
# First, she prepares an asset for a time sharing token. As you can see in
# the description, Bob and Alice agree that each token can be used to ride
# the bike for one hour.
bike_token = {
'data': {
'token_for': {
'bike': {
'serial_number': 420420
}
},
'description': 'Time share token. Each token equals one hour of riding.',
},
}
# She prepares a `CREATE` transaction and issues 10 tokens.
# Here, Alice defines in a tuple that she wants to assign
# these 10 tokens to Bob.
prepared_token_tx = bdb.transactions.prepare(
operation='CREATE',
signers=alice.public_key,
recipients=[([bob.public_key], 10)],
asset=bike_token)
# She fulfills and sends the transaction.
fulfilled_token_tx = bdb.transactions.fulfill(
prepared_token_tx,
private_keys=alice.private_key)
bdb.transactions.send_commit(fulfilled_token_tx)
# We store the `id` of the transaction to use it later on.
bike_token_id = fulfilled_token_tx['id']
# Let's check if the transaction was successful.
assert bdb.transactions.retrieve(bike_token_id), \
'Cannot find transaction {}'.format(bike_token_id)
# Bob owns 10 tokens now.
assert bdb.transactions.retrieve(bike_token_id)['outputs'][0][
'amount'] == '10'
# ## Bob wants to use the bike
# Now that Bob got the tokens and the sun is shining, he wants to get out
# with the bike for three hours.
# To use the bike he has to send the tokens back to Alice.
# To learn about the details of transferring a transaction check out
# [test_basic.py](./test_basic.html)
transfer_asset = {'id': bike_token_id}
output_index = 0
output = fulfilled_token_tx['outputs'][output_index]
transfer_input = {'fulfillment': output['condition']['details'],
'fulfills': {'output_index': output_index,
'transaction_id': fulfilled_token_tx[
'id']},
'owners_before': output['public_keys']}
# To use the tokens, Bob has to reassign 7 tokens to himself and send the
# amount he wants to use to Alice.
prepared_transfer_tx = bdb.transactions.prepare(
operation='TRANSFER',
asset=transfer_asset,
inputs=transfer_input,
recipients=[([alice.public_key], 3), ([bob.public_key], 7)])
# He signs and sends the transaction.
fulfilled_transfer_tx = bdb.transactions.fulfill(
prepared_transfer_tx,
private_keys=bob.private_key)
sent_transfer_tx = bdb.transactions.send_commit(fulfilled_transfer_tx)
# First, Bob checks if the transaction was successful.
assert bdb.transactions.retrieve(
fulfilled_transfer_tx['id']) == sent_transfer_tx
# There are two outputs in the transaction now.
# The first output shows that Alice got back 3 tokens...
assert bdb.transactions.retrieve(
fulfilled_transfer_tx['id'])['outputs'][0]['amount'] == '3'
# ... while Bob still has 7 left.
assert bdb.transactions.retrieve(
fulfilled_transfer_tx['id'])['outputs'][1]['amount'] == '7'
# ## Bob wants to ride the bike again
# It's been a week and Bob wants to ride the bike again.
# Now he wants to ride for 8 hours; that's a lot, Bob!
# He prepares the transaction again.
transfer_asset = {'id': bike_token_id}
# This time we need an `output_index` of 1, since we have two outputs
# in the `fulfilled_transfer_tx` we created before. The first output with
# index 0 is for Alice and the second output is for Bob.
# Since Bob wants to spend more of his tokens he has to provide the
# correct output with the correct amount of tokens.
output_index = 1
output = fulfilled_transfer_tx['outputs'][output_index]
transfer_input = {'fulfillment': output['condition']['details'],
'fulfills': {'output_index': output_index,
'transaction_id': fulfilled_transfer_tx['id']},
'owners_before': output['public_keys']}
# This time Bob only provides Alice in the `recipients` because he wants
# to spend all his tokens.
prepared_transfer_tx = bdb.transactions.prepare(
operation='TRANSFER',
asset=transfer_asset,
inputs=transfer_input,
recipients=[([alice.public_key], 8)])
fulfilled_transfer_tx = bdb.transactions.fulfill(
prepared_transfer_tx,
private_keys=bob.private_key)
# Oh Bob, what have you done?! You tried to spend more tokens than you had.
# Remember Bob, last time you spent 3 tokens already,
# so you only have 7 left.
with pytest.raises(BadRequest) as error:
bdb.transactions.send_commit(fulfilled_transfer_tx)
# Now Bob gets an error saying that the amount he wanted to spend is
# higher than the amount of tokens he has left.
assert error.value.args[0] == 400
message = 'Invalid transaction (AmountError): The amount used in the ' \
'inputs `7` needs to be same as the amount used in the ' \
'outputs `8`'
assert error.value.args[2]['message'] == message
# We have to stop this test now, I am sorry, but Bob is pretty upset
# about his mistake. See you next time :)

View File

@ -1,48 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
# # Double Spend testing
# This test challenges the system with double spends.
import os
from uuid import uuid4
from threading import Thread
import queue
import bigchaindb_driver.exceptions
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair
def test_double_create():
bdb = BigchainDB(os.environ.get('BIGCHAINDB_ENDPOINT'))
alice = generate_keypair()
results = queue.Queue()
tx = bdb.transactions.fulfill(
bdb.transactions.prepare(
operation='CREATE',
signers=alice.public_key,
asset={'data': {'uuid': str(uuid4())}}),
private_keys=alice.private_key)
def send_and_queue(tx):
try:
bdb.transactions.send_commit(tx)
results.put('OK')
except bigchaindb_driver.exceptions.TransportError:
results.put('FAIL')
t1 = Thread(target=send_and_queue, args=(tx, ))
t2 = Thread(target=send_and_queue, args=(tx, ))
t1.start()
t2.start()
results = [results.get(timeout=2), results.get(timeout=2)]
assert results.count('OK') == 1
assert results.count('FAIL') == 1

View File

@ -1,126 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
# # Multiple owners integration testing
# This test checks if we can successfully create and transfer a transaction
# with multiple owners.
# The script tests various things like:
#
# - create a transaction with multiple owners
# - check if the transaction is stored and has the right number of public keys
# - transfer the transaction to a third person
#
# We run a series of checks for each step, that is retrieving
# the transaction from the remote system, and also checking the public keys
# of a given transaction.
#
# This integration test is a rip-off of our
# [tutorial](https://docs.bigchaindb.com/projects/py-driver/en/latest/usage.html).
# ## Imports
# We need some utils from the `os` package, since we will interact with
# env variables.
import os
# For this test case we import and use the Python Driver.
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair
def test_multiple_owners():
# ## Set up a connection to BigchainDB
# Check [test_basic.py](./test_basic.html) to get some more details
# about the endpoint.
bdb = BigchainDB(os.environ.get('BIGCHAINDB_ENDPOINT'))
# Hey Alice and Bob, nice to see you again!
alice, bob = generate_keypair(), generate_keypair()
# ## Alice and Bob create a transaction
# Alice and Bob just moved into a shared flat; no one can afford these
# high rents anymore. Bob suggests getting a dish washer for the
# kitchen. Alice agrees, and here they go, creating the asset for their
# dish washer.
dw_asset = {
'data': {
'dish washer': {
'serial_number': 1337
}
}
}
# They prepare a `CREATE` transaction. To have multiple owners, both
# Bob and Alice need to be the recipients.
prepared_dw_tx = bdb.transactions.prepare(
operation='CREATE',
signers=alice.public_key,
recipients=(alice.public_key, bob.public_key),
asset=dw_asset)
# Now they both sign the transaction by providing their private keys.
# And send it afterwards.
fulfilled_dw_tx = bdb.transactions.fulfill(
prepared_dw_tx,
private_keys=[alice.private_key, bob.private_key])
bdb.transactions.send_commit(fulfilled_dw_tx)
# We store the `id` of the transaction to use it later on.
dw_id = fulfilled_dw_tx['id']
# Let's check if the transaction was successful.
assert bdb.transactions.retrieve(dw_id), \
'Cannot find transaction {}'.format(dw_id)
# The transaction should have two public keys in the outputs.
assert len(
bdb.transactions.retrieve(dw_id)['outputs'][0]['public_keys']) == 2
# ## Alice and Bob transfer a transaction to Carol.
# Alice and Bob save a lot of money living together. They often go out
# for dinner and don't cook at home. But now they don't have any dishes to
# wash, so they decide to sell the dish washer to their friend Carol.
# Hey Carol, nice to meet you!
carol = generate_keypair()
# Alice and Bob prepare the transaction to transfer the dish washer to
# Carol.
transfer_asset = {'id': dw_id}
output_index = 0
output = fulfilled_dw_tx['outputs'][output_index]
transfer_input = {'fulfillment': output['condition']['details'],
'fulfills': {'output_index': output_index,
'transaction_id': fulfilled_dw_tx[
'id']},
'owners_before': output['public_keys']}
# Now they create the transaction...
prepared_transfer_tx = bdb.transactions.prepare(
operation='TRANSFER',
asset=transfer_asset,
inputs=transfer_input,
recipients=carol.public_key)
# ... and sign it with their private keys, then send it.
fulfilled_transfer_tx = bdb.transactions.fulfill(
prepared_transfer_tx,
private_keys=[alice.private_key, bob.private_key])
sent_transfer_tx = bdb.transactions.send_commit(fulfilled_transfer_tx)
# They check if the transaction was successful.
assert bdb.transactions.retrieve(
fulfilled_transfer_tx['id']) == sent_transfer_tx
# The owners before should include both Alice and Bob.
assert len(
bdb.transactions.retrieve(fulfilled_transfer_tx['id'])['inputs'][0][
'owners_before']) == 2
# While the new owner is Carol.
assert bdb.transactions.retrieve(fulfilled_transfer_tx['id'])[
'outputs'][0]['public_keys'][0] == carol.public_key

View File

@ -1,101 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
# ## Testing potentially hazardous strings
# This test uses a library of `naughty` strings (code injections, weird unicode chars., etc.) as both keys and values.
# We look for either a successful tx or, in the case where a naughty string used as a key violates some key
# constraints, a well-formatted error message.
# ## Imports
# We need some utils from the `os` package, since we will interact with
# env variables.
import os
# Since the naughty strings get encoded and decoded in odd ways,
# we'll use a regex to sweep those details under the rug.
import re
# We'll use a nice library of naughty strings...
from blns import blns
# And parameterize our test so each one is treated as a separate test case
import pytest
# For this test case we import and use the Python Driver.
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair
from bigchaindb_driver.exceptions import BadRequest
naughty_strings = blns.all()
# This is our base test case, but we'll reuse it to send naughty strings as both keys and values.
def send_naughty_tx(asset, metadata):
# ## Set up a connection to BigchainDB
# Check [test_basic.py](./test_basic.html) to get some more details
# about the endpoint.
bdb = BigchainDB(os.environ.get('BIGCHAINDB_ENDPOINT'))
# Here's Alice.
alice = generate_keypair()
# Alice is in a naughty mood today, so she creates a tx with some naughty strings
prepared_transaction = bdb.transactions.prepare(
operation='CREATE',
signers=alice.public_key,
asset=asset,
metadata=metadata)
# She fulfills the transaction
fulfilled_transaction = bdb.transactions.fulfill(
prepared_transaction,
private_keys=alice.private_key)
# The fulfilled tx gets sent to the BDB network
try:
sent_transaction = bdb.transactions.send_commit(fulfilled_transaction)
except BadRequest as e:
sent_transaction = e
# If her key contained a '.', began with a '$', or contained a NUL character
regex = r'.*\..*|\$.*|.*\x00.*'
key = next(iter(metadata))
if re.match(regex, key):
# Then she expects a nicely formatted error code
status_code = sent_transaction.status_code
error = sent_transaction.error
regex = (
r'\{\s*\n*'
r'\s*"message":\s*"Invalid transaction \(ValidationError\):\s*'
r'Invalid key name.*The key name cannot contain characters.*\n*'
r'\s*"status":\s*400\n*'
r'\s*\}\n*')
assert status_code == 400
assert re.fullmatch(regex, error), sent_transaction
# Otherwise, she expects to see her transaction in the database
elif 'id' in sent_transaction.keys():
tx_id = sent_transaction['id']
assert bdb.transactions.retrieve(tx_id)
# If neither condition was true, then something weird happened...
else:
raise TypeError(sent_transaction)
@pytest.mark.parametrize("naughty_string", naughty_strings, ids=naughty_strings)
def test_naughty_keys(naughty_string):
asset = {'data': {naughty_string: 'nice_value'}}
metadata = {naughty_string: 'nice_value'}
send_naughty_tx(asset, metadata)
@pytest.mark.parametrize("naughty_string", naughty_strings, ids=naughty_strings)
def test_naughty_values(naughty_string):
asset = {'data': {'nice_key': naughty_string}}
metadata = {'nice_key': naughty_string}
send_naughty_tx(asset, metadata)

View File

@ -1,132 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
# # Stream Acceptance Test
# This test checks if the event stream works correctly. The basic idea of this
# test is to generate some random **valid** transactions, send them to a
# BigchainDB node, and expect those transactions to be returned by the valid
# transactions Stream API. During this test, two threads work together,
# sharing a queue to exchange events.
#
# - The *main thread* first creates and sends the transactions to BigchainDB;
# then it runs through all events in the shared queue to check if all
# transactions sent have been validated by BigchainDB.
# - The *listen thread* listens to the events coming from BigchainDB and puts
# them in a queue shared with the main thread.
import os
import queue
import json
from threading import Thread, Event
from uuid import uuid4
# For this script, we need to set up a websocket connection; that's the reason
# we import the
# [websocket](https://github.com/websocket-client/websocket-client) module
from websocket import create_connection
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair
def test_stream():
# ## Set up the test
# We use the env variable `BIGCHAINDB_ENDPOINT` to know where to connect.
# Check [test_basic.py](./test_basic.html) for more information.
BDB_ENDPOINT = os.environ.get('BIGCHAINDB_ENDPOINT')
# *That's pretty bad, but let's do it like this for now.*
WS_ENDPOINT = 'ws://{}:9985/api/v1/streams/valid_transactions'.format(BDB_ENDPOINT.rsplit(':')[0])
bdb = BigchainDB(BDB_ENDPOINT)
# Hello to Alice again, she is pretty active in those tests, good job
# Alice!
alice = generate_keypair()
# We need a few variables to keep the state; specifically, we need `sent` to
# keep track of all transactions Alice sent to BigchainDB, while `received`
# holds the transactions BigchainDB validated and sent back to her.
sent = []
received = queue.Queue()
# In this test we use a websocket. The websocket must be started **before**
# sending transactions to BigchainDB, otherwise we might lose some
# transactions. The `ws_ready` event is used to synchronize the main thread
# with the listen thread.
ws_ready = Event()
# ## Listening to events
# This is the function run by the complementary thread.
def listen():
# First we connect to the remote endpoint using the WebSocket protocol.
ws = create_connection(WS_ENDPOINT)
# After the connection has been set up, we can signal the main thread
# to proceed (continue reading, it should make sense in a second.)
ws_ready.set()
# It's time to consume all events coming from the BigchainDB stream API.
# Every time a new event is received, it is put in the queue shared
# with the main thread.
while True:
result = ws.recv()
received.put(result)
# Put `listen` in a thread, and start it. Note that `listen` is a local
# function and it can access all variables in the enclosing function.
t = Thread(target=listen, daemon=True)
t.start()
# ## Pushing the transactions to BigchainDB
# After starting the listen thread, we wait for it to connect, and then we
# proceed.
ws_ready.wait()
# Here we prepare, sign, and send ten different `CREATE` transactions. To
# make sure each transaction is different from the other, we generate a
# random `uuid`.
for _ in range(10):
tx = bdb.transactions.fulfill(
bdb.transactions.prepare(
operation='CREATE',
signers=alice.public_key,
asset={'data': {'uuid': str(uuid4())}}),
private_keys=alice.private_key)
# We don't want to wait for each transaction to be in a block. By using
# `async` mode, we make sure that the driver returns as soon as the
# transaction is pushed to the BigchainDB API. Remember: we expect all
# transactions to be in the shared queue: this is a two-phase test;
# first we send a bunch of transactions, then we check if they are
# valid (and, in this case, they should be).
bdb.transactions.send_async(tx)
# The `id` of every sent transaction is then stored in a list.
sent.append(tx['id'])
# ## Check the valid transactions coming from BigchainDB
# Now we are ready to check if BigchainDB did its job. A simple way to
# check if all sent transactions have been processed is to **remove** from
# `sent` the transactions we get from the *listen thread*. At one point in
# time, `sent` should be empty, and we exit the test.
while sent:
# To avoid waiting forever, we have an arbitrary timeout of 5
# seconds: it should be enough time for BigchainDB to create
# blocks; in fact, a new block is created every second. If we hit
# the timeout, then game over ¯\\\_(ツ)\_/¯
try:
event = received.get(timeout=5)
txid = json.loads(event)['transaction_id']
except queue.Empty:
assert False, 'Did not receive all expected transactions'
# Last thing is to try to remove the `txid` from the set of sent
# transactions. If this test is running in parallel with others, we
# might get a transaction id of another test, and `remove` can fail.
# It's OK if this happens.
try:
sent.remove(txid)
except ValueError:
pass

View File

@ -1,27 +1,20 @@
<!---
Copyright © 2020 Interplanetary Database Association e.V.,
BigchainDB and IPDB software contributors.
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
# Overview
A high-level description of the files and subdirectories of BigchainDB.
## Files
### [`lib.py`](lib.py)
### [`core.py`](./core.py)
The `BigchainDB` class is defined here. Most node-level operations and database interactions are found in this file. This is the place to start if you are interested in implementing a server API, since many of these class methods concern BigchainDB interacting with the outside world.
The `Bigchain` class is defined here. Most node-level operations and database interactions are found in this file. This is the place to start if you are interested in implementing a server API, since many of these class methods concern BigchainDB interacting with the outside world.
### [`models.py`](./models.py)
`Block`, `Transaction`, and `Asset` classes are defined here. The classes mirror the block and transaction structure from the [documentation](https://docs.bigchaindb.com/projects/server/en/latest/data-models/index.html), but also include methods for validation and signing.
### [`validation.py`](./validation.py)
### [`consensus.py`](./consensus.py)
Base class for validation methods (verification of votes, blocks, and transactions). The actual logic is mostly found in `transaction` and `block` models, defined in [`models.py`](./models.py).
Base class for consensus methods (verification of votes, blocks, and transactions). The actual logic is mostly found in `transaction` and `block` models, defined in [`models.py`](./models.py).
### [`processes.py`](./processes.py)
@ -33,6 +26,10 @@ Methods for managing the configuration, including loading configuration files, a
## Folders
### [`pipelines`](./pipelines)
Structure and implementation of various subprocesses started in [`processes.py`](./processes.py).
### [`commands`](./commands)
Contains code for the [CLI](https://docs.bigchaindb.com/projects/server/en/latest/server-reference/bigchaindb-cli.html) for BigchainDB.

View File

@ -1,28 +1,28 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import copy
import logging
from bigchaindb.log import DEFAULT_LOGGING_CONFIG as log_config
from bigchaindb.lib import BigchainDB # noqa
from bigchaindb.migrations.chain_migration_election import ChainMigrationElection
from bigchaindb.version import __version__ # noqa
from bigchaindb.core import App # noqa
from bigchaindb.log.configs import SUBSCRIBER_LOGGING_CONFIG as log_config
# from functools import reduce
# PORT_NUMBER = reduce(lambda x, y: x * y, map(ord, 'BigchainDB')) % 2**16
# basically, the port number is 9984
_base_database_rethinkdb = {
'host': 'localhost',
'port': 28015,
'name': 'bigchain',
}
# The following variable is used by `bigchaindb configure` to
# prompt the user for database values. We cannot rely on
# _base_database_localmongodb.keys() because dicts are unordered.
# I tried to configure
# _base_database_rethinkdb.keys() or _base_database_mongodb.keys()
# because dicts are unordered. I tried to configure
_database_keys_map = {
'localmongodb': ('host', 'port', 'name'),
'mongodb': ('host', 'port', 'name', 'replicaset'),
'rethinkdb': ('host', 'port', 'name')
}
_base_database_localmongodb = {
@ -34,6 +34,35 @@ _base_database_localmongodb = {
'password': None,
}
_base_database_mongodb = {
'host': 'localhost',
'port': 27017,
'name': 'bigchain',
'replicaset': 'bigchain-rs',
'login': None,
'password': None,
}
_database_rethinkdb = {
'backend': 'rethinkdb',
'connection_timeout': 5000,
'max_tries': 3,
}
_database_rethinkdb.update(_base_database_rethinkdb)
_database_mongodb = {
'backend': 'mongodb',
'connection_timeout': 5000,
'max_tries': 3,
'ssl': False,
'ca_cert': None,
'certfile': None,
'keyfile': None,
'keyfile_passphrase': None,
'crlfile': None,
}
_database_mongodb.update(_base_database_mongodb)
_database_localmongodb = {
'backend': 'localmongodb',
'connection_timeout': 5000,
@ -49,6 +78,8 @@ _database_localmongodb.update(_base_database_localmongodb)
_database_map = {
'localmongodb': _database_localmongodb,
'mongodb': _database_mongodb,
'rethinkdb': _database_rethinkdb
}
config = {
@ -58,7 +89,8 @@ config = {
'bind': 'localhost:9984',
'loglevel': logging.getLevelName(
log_config['handlers']['console']['level']).lower(),
'workers': None, # if None, the value will be cpu_count * 2 + 1
'threads': 1,
'workers': 1, # if None, the value will be cpu_count * 2 + 1
},
'wsserver': {
'scheme': 'ws',
@ -68,13 +100,14 @@ config = {
'advertised_host': 'localhost',
'advertised_port': 9985,
},
'tendermint': {
'host': 'localhost',
'port': 26657,
'version': 'v0.31.5', # look for __tm_supported_versions__
},
# FIXME: hardcoding to localmongodb for now
'database': _database_map['localmongodb'],
'keypair': {
'public': None,
'private': None,
},
'keyring': [],
'backlog_reassign_delay': 120,
'log': {
'file': log_config['handlers']['file']['filename'],
'error_file': log_config['handlers']['errors']['filename'],
@ -87,6 +120,7 @@ config = {
'fmt_console': log_config['formatters']['console']['format'],
'fmt_logfile': log_config['formatters']['file']['format'],
'granular_levels': {},
'port': log_config['root']['port']
},
}
@ -94,13 +128,5 @@ config = {
# the user wants to reconfigure the node. Check ``bigchaindb.config_utils``
# for more info.
_config = copy.deepcopy(config)
from bigchaindb.common.transaction import Transaction # noqa
from bigchaindb import models # noqa
from bigchaindb.upsert_validator import ValidatorElection # noqa
from bigchaindb.elections.vote import Vote # noqa
Transaction.register_type(Transaction.CREATE, models.Transaction)
Transaction.register_type(Transaction.TRANSFER, models.Transaction)
Transaction.register_type(ValidatorElection.OPERATION, ValidatorElection)
Transaction.register_type(ChainMigrationElection.OPERATION, ChainMigrationElection)
Transaction.register_type(Vote.OPERATION, Vote)
from bigchaindb.core import Bigchain # noqa
from bigchaindb.version import __version__ # noqa

View File

@ -1,20 +1,14 @@
<!---
Copyright © 2020 Interplanetary Database Association e.V.,
BigchainDB and IPDB software contributors.
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
# Backend Interfaces
## Structure
- [`changefeed.py`](./changefeed.py): Changefeed-related interfaces
- [`connection.py`](./connection.py): Database connection-related interfaces
- [`query.py`](./query.py): Database query-related interfaces, dispatched through single-dispatch
- [`schema.py`](./schema.py): Database setup and schema-related interfaces, dispatched through
single-dispatch
Built-in implementations (e.g. [MongoDB's](./localmongodb)) are provided in sub-directories and
Built-in implementations (e.g. [RethinkDB's](./rethinkdb)) are provided in sub-directories and
have their connection type's location exposed as `BACKENDS` in [`connection.py`](./connection.py).
## Single-Dispatched Interfaces
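Roughly, the pattern looks like this; a minimal sketch mirroring the `get_config` interface defined in `admin.py`, with a hypothetical `ExampleConnection` standing in for a real backend connection class:

```python
from functools import singledispatch


class ExampleConnection:
    """Hypothetical connection type; real ones are exposed via ``BACKENDS``."""


@singledispatch
def get_config(connection, *, table):
    # Generic stub: reached when no implementation is registered
    # for type(connection).
    raise NotImplementedError


@get_config.register(ExampleConnection)
def _(connection, *, table):
    # A real backend would query its database here.
    return {'table': table, 'shards': 1, 'replicas': 1}


print(get_config(ExampleConnection(), table='bigchain'))
```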

View File

@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Generic backend database interfaces expected by BigchainDB.
The interfaces in this module allow BigchainDB to be agnostic about its
@ -12,6 +7,7 @@ configuration or the ``BIGCHAINDB_DATABASE_BACKEND`` environment variable.
"""
# Include the backend interfaces
from bigchaindb.backend import schema, query # noqa
from bigchaindb.backend import admin, changefeed, schema, query # noqa
from bigchaindb.backend.connection import connect # noqa
from bigchaindb.backend.changefeed import get_changefeed # noqa
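# A usage sketch (not part of this module): open a connection explicitly,
# overriding the configured defaults. The keyword names match the signature
# of ``connect`` in connection.py; the values below are examples only.
#
#     from bigchaindb.backend import connect
#     conn = connect(backend='localmongodb', host='localhost',
#                    port=27017, name='bigchain')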

View File

@ -0,0 +1,34 @@
"""Database configuration functions."""
from functools import singledispatch
@singledispatch
def get_config(connection, *, table):
raise NotImplementedError
@singledispatch
def reconfigure(connection, *, table, shards, replicas, **kwargs):
raise NotImplementedError
@singledispatch
def set_shards(connection, *, shards):
raise NotImplementedError
@singledispatch
def set_replicas(connection, *, replicas):
raise NotImplementedError
@singledispatch
def add_replicas(connection, replicas):
raise NotImplementedError('This command is specific to the '
'MongoDB backend.')
@singledispatch
def remove_replicas(connection, replicas):
raise NotImplementedError('This command is specific to the '
'MongoDB backend.')

View File

@ -0,0 +1,90 @@
"""Changefeed interfaces for backends."""
from functools import singledispatch
from multipipes import Node
import bigchaindb
class ChangeFeed(Node):
"""Create a new changefeed.
It extends :class:`multipipes.Node` to make it pluggable in other
Pipelines instances, and makes use of ``self.outqueue`` to output
the data.
A changefeed is a real time feed on inserts, updates, and deletes, and
is volatile. This class is a helper to create changefeeds. Moreover,
it provides a way to specify a ``prefeed`` of iterable data to output
before the actual changefeed.
"""
INSERT = 1
DELETE = 2
UPDATE = 4
def __init__(self, table, operation, *, prefeed=None, connection=None):
"""Create a new ChangeFeed.
Args:
table (str): name of the table to listen to for changes.
operation (int): can be ChangeFeed.INSERT, ChangeFeed.DELETE, or
ChangeFeed.UPDATE. Combining multiple operations is possible
with the bitwise ``|`` operator
(e.g. ``ChangeFeed.INSERT | ChangeFeed.UPDATE``)
prefeed (:class:`~collections.abc.Iterable`, optional): whatever
set of data you want to be published first.
connection (:class:`~bigchaindb.backend.connection.Connection`, optional): # noqa
A connection to the database. If no connection is provided a
default connection will be created.
"""
super().__init__(name='changefeed')
self.prefeed = prefeed if prefeed else []
self.table = table
self.operation = operation
if connection:
self.connection = connection
else:
self.connection = bigchaindb.backend.connect(
**bigchaindb.config['database'])
def run_forever(self):
"""Main loop of the ``multipipes.Node``
This method is responsible for first feeding the prefeed to the
outqueue and after that starting the changefeed and recovering from any
errors that may occur in the backend.
"""
raise NotImplementedError
def run_changefeed(self):
"""Backend specific method to run the changefeed.
The changefeed is usually a backend cursor that is not closed when all
the results are exhausted. Instead it remains open waiting for new
results.
This method should also filter each result based on the ``operation``
and put all matching results on the outqueue of ``multipipes.Node``.
"""
raise NotImplementedError
@singledispatch
def get_changefeed(connection, table, operation, *, prefeed=None):
"""Return a ChangeFeed.
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
table (str): name of the table to listen to for changes.
operation (int): can be ChangeFeed.INSERT, ChangeFeed.DELETE, or
ChangeFeed.UPDATE. Combining multiple operations is possible
with the bitwise ``|`` operator
(e.g. ``ChangeFeed.INSERT | ChangeFeed.UPDATE``)
prefeed (iterable): whatever set of data you want to be published
first.
"""
raise NotImplementedError
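A short sketch of how the bitwise operation flags behave; the constants mirror ``ChangeFeed.INSERT``, ``DELETE``, and ``UPDATE`` above, and the assertions are illustrative.

# The flags are powers of two, so they can be OR-ed together and tested
# with a bitwise AND.
INSERT, DELETE, UPDATE = 1, 2, 4

operation = INSERT | UPDATE      # listen for inserts and updates only

assert operation & INSERT        # inserts match
assert not (operation & DELETE)  # deletes are filtered out
assert operation & UPDATE        # updates match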

View File

@ -1,19 +1,16 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import logging
from importlib import import_module
from itertools import repeat
from importlib import import_module
import logging
import bigchaindb
from bigchaindb.backend.exceptions import ConnectionError
from bigchaindb.backend.utils import get_bigchaindb_config_value, get_bigchaindb_config_value_or_key_error
from bigchaindb.common.exceptions import ConfigurationError
from bigchaindb.backend.exceptions import ConnectionError
BACKENDS = {
'localmongodb': 'bigchaindb.backend.localmongodb.connection.LocalMongoDBConnection',
'mongodb': 'bigchaindb.backend.mongodb.connection.MongoDBConnection',
'rethinkdb': 'bigchaindb.backend.rethinkdb.connection.RethinkDBConnection'
}
logger = logging.getLogger(__name__)
@ -48,28 +45,24 @@ def connect(backend=None, host=None, port=None, name=None, max_tries=None,
Authentication failure after connecting to the database.
"""
backend = backend or get_bigchaindb_config_value_or_key_error('backend')
host = host or get_bigchaindb_config_value_or_key_error('host')
port = port or get_bigchaindb_config_value_or_key_error('port')
dbname = name or get_bigchaindb_config_value_or_key_error('name')
backend = backend or bigchaindb.config['database']['backend']
host = host or bigchaindb.config['database']['host']
port = port or bigchaindb.config['database']['port']
dbname = name or bigchaindb.config['database']['name']
# Not sure how to handle this here. This setting is only relevant for
# mongodb.
# I added **kwargs for both RethinkDBConnection and MongoDBConnection
# to handle these additional args. In the case of RethinkDBConnection
# it just does not do anything with them.
#
# UPD: RethinkDBConnection is not here anymore because we no longer support RethinkDB.
# The problem described above might be reconsidered next time we introduce a backend,
# if it ever happens.
replicaset = replicaset or get_bigchaindb_config_value('replicaset')
ssl = ssl if ssl is not None else get_bigchaindb_config_value('ssl', False)
login = login or get_bigchaindb_config_value('login')
password = password or get_bigchaindb_config_value('password')
ca_cert = ca_cert or get_bigchaindb_config_value('ca_cert')
certfile = certfile or get_bigchaindb_config_value('certfile')
keyfile = keyfile or get_bigchaindb_config_value('keyfile')
keyfile_passphrase = keyfile_passphrase or get_bigchaindb_config_value('keyfile_passphrase', None)
crlfile = crlfile or get_bigchaindb_config_value('crlfile')
replicaset = replicaset or bigchaindb.config['database'].get('replicaset')
ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
login = login or bigchaindb.config['database'].get('login')
password = password or bigchaindb.config['database'].get('password')
ca_cert = ca_cert or bigchaindb.config['database'].get('ca_cert', None)
certfile = certfile or bigchaindb.config['database'].get('certfile', None)
keyfile = keyfile or bigchaindb.config['database'].get('keyfile', None)
keyfile_passphrase = keyfile_passphrase or bigchaindb.config['database'].get('keyfile_passphrase', None)
crlfile = crlfile or bigchaindb.config['database'].get('crlfile', None)
try:
module_name, _, class_name = BACKENDS[backend].rpartition('.')
@ -118,7 +111,7 @@ class Connection:
self.host = host or dbconf['host']
self.port = port or dbconf['port']
self.dbname = dbname or dbconf['name']
self.connection_timeout = connection_timeout if connection_timeout is not None \
self.connection_timeout = connection_timeout if connection_timeout is not None\
else dbconf['connection_timeout']
self.max_tries = max_tries if max_tries is not None else dbconf['max_tries']
self.max_tries_counter = range(self.max_tries) if self.max_tries != 0 else repeat(0)
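A brief usage sketch of the ``connect`` factory; the explicit values are illustrative, and any omitted argument falls back to ``bigchaindb.config['database']``.

from bigchaindb.backend import connect

# Explicit values are illustrative; omit them to use the configured defaults.
conn = connect(backend='localmongodb',
               host='localhost',
               port=27017,
               name='bigchain')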

View File

@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
from bigchaindb.exceptions import BigchainDBError

View File

@ -1,20 +1,13 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""MongoDB backend implementation.
Contains a MongoDB-specific implementation of the
:mod:`~bigchaindb.backend.schema` and :mod:`~bigchaindb.backend.query` interfaces.
:mod:`~bigchaindb.backend.schema` interface.
You can specify BigchainDB to use MongoDB as its database backend by either
setting ``database.backend`` to ``'localmongodb'`` in your configuration file, or
setting the ``BIGCHAINDB_DATABASE_BACKEND`` environment variable to
``'localmongodb'``.
MongoDB is the default database backend for BigchainDB.
If configured to use MongoDB, BigchainDB will automatically return instances
of :class:`~bigchaindb.backend.localmongodb.LocalMongoDBConnection` for
:func:`~bigchaindb.backend.connection.connect` and dispatch calls of the

View File

@ -1,136 +1,5 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import logging
from ssl import CERT_REQUIRED
import pymongo
from bigchaindb.backend.connection import Connection
from bigchaindb.backend.exceptions import (DuplicateKeyError,
OperationError,
ConnectionError)
from bigchaindb.backend.utils import get_bigchaindb_config_value
from bigchaindb.common.exceptions import ConfigurationError
from bigchaindb.utils import Lazy
logger = logging.getLogger(__name__)
from bigchaindb.backend.mongodb.connection import MongoDBConnection
class LocalMongoDBConnection(Connection):
def __init__(self, replicaset=None, ssl=None, login=None, password=None,
ca_cert=None, certfile=None, keyfile=None,
keyfile_passphrase=None, crlfile=None, **kwargs):
"""Create a new Connection instance.
Args:
replicaset (str, optional): the name of the replica set to
connect to.
**kwargs: arbitrary keyword arguments provided by the
configuration's ``database`` settings
"""
super().__init__(**kwargs)
self.replicaset = replicaset or get_bigchaindb_config_value('replicaset')
self.ssl = ssl if ssl is not None else get_bigchaindb_config_value('ssl', False)
self.login = login or get_bigchaindb_config_value('login')
self.password = password or get_bigchaindb_config_value('password')
self.ca_cert = ca_cert or get_bigchaindb_config_value('ca_cert')
self.certfile = certfile or get_bigchaindb_config_value('certfile')
self.keyfile = keyfile or get_bigchaindb_config_value('keyfile')
self.keyfile_passphrase = keyfile_passphrase or get_bigchaindb_config_value('keyfile_passphrase')
self.crlfile = crlfile or get_bigchaindb_config_value('crlfile')
@property
def db(self):
return self.conn[self.dbname]
def query(self):
return Lazy()
def collection(self, name):
"""Return a lazy object that can be used to compose a query.
Args:
name (str): the name of the collection to query.
"""
return self.query()[self.dbname][name]
def run(self, query):
try:
try:
return query.run(self.conn)
except pymongo.errors.AutoReconnect:
logger.warning('Lost connection to the database, '
'retrying query.')
return query.run(self.conn)
except pymongo.errors.AutoReconnect as exc:
raise ConnectionError from exc
except pymongo.errors.DuplicateKeyError as exc:
raise DuplicateKeyError from exc
except pymongo.errors.OperationFailure as exc:
print(f'DETAILS: {exc.details}')
raise OperationError from exc
def _connect(self):
"""Try to connect to the database.
Raises:
:exc:`~ConnectionError`: If the connection to the database
fails.
:exc:`~AuthenticationError`: If there is an OperationFailure due to
an authentication failure after connecting to the database.
:exc:`~ConfigurationError`: If there is a ConfigurationError while
connecting to the database.
"""
try:
# FYI: the connection process might raise a
# `ServerSelectionTimeoutError`, that is a subclass of
# `ConnectionFailure`.
# The presence of ca_cert, certfile, keyfile, crlfile implies the
# use of certificates for TLS connectivity.
if self.ca_cert is None or self.certfile is None or \
self.keyfile is None or self.crlfile is None:
client = pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout,
ssl=self.ssl,
**MONGO_OPTS)
if self.login is not None and self.password is not None:
client[self.dbname].authenticate(self.login, self.password)
else:
logger.info('Connecting to MongoDB over TLS/SSL...')
client = pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout,
ssl=self.ssl,
ssl_ca_certs=self.ca_cert,
ssl_certfile=self.certfile,
ssl_keyfile=self.keyfile,
ssl_pem_passphrase=self.keyfile_passphrase,
ssl_crlfile=self.crlfile,
ssl_cert_reqs=CERT_REQUIRED,
**MONGO_OPTS)
if self.login is not None:
client[self.dbname].authenticate(self.login,
mechanism='MONGODB-X509')
return client
except (pymongo.errors.ConnectionFailure,
pymongo.errors.OperationFailure) as exc:
logger.info('Exception in _connect(): {}'.format(exc))
raise ConnectionError(str(exc)) from exc
except pymongo.errors.ConfigurationError as exc:
raise ConfigurationError from exc
MONGO_OPTS = {
'socketTimeoutMS': 20000,
}
class LocalMongoDBConnection(MongoDBConnection):
pass
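To illustrate the ``Lazy`` query composition behind ``collection()`` and ``run()`` (defined here on the master side, and inherited from ``MongoDBConnection`` on the v2.0.0a1 side), a minimal sketch; the transaction id is hypothetical.

from bigchaindb.backend import connect

conn = connect()   # falls back to bigchaindb.config['database']
tx_id = 'abc123'   # hypothetical transaction id
# collection() returns a Lazy object that records attribute accesses and
# method calls; run() replays them against the live pymongo client.
tx = conn.run(
    conn.collection('transactions')
        .find_one({'id': tx_id}, {'_id': 0}))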

View File

@ -1,21 +1,29 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Query implementation for MongoDB"""
from pymongo import DESCENDING
from bigchaindb import backend
from bigchaindb.backend.exceptions import DuplicateKeyError
from bigchaindb.common.exceptions import MultipleValidatorOperationError
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
from bigchaindb.common.transaction import Transaction
from bigchaindb.backend import mongodb
from bigchaindb.backend.query import VALIDATOR_UPDATE_ID
register_query = module_dispatch_registrar(backend.query)
@register_query(LocalMongoDBConnection)
def store_transaction(conn, signed_transaction):
try:
return conn.run(
conn.collection('transactions')
.insert_one(signed_transaction))
except DuplicateKeyError:
pass
@register_query(LocalMongoDBConnection)
def store_transactions(conn, signed_transactions):
return conn.run(conn.collection('transactions')
@ -24,9 +32,12 @@ def store_transactions(conn, signed_transactions):
@register_query(LocalMongoDBConnection)
def get_transaction(conn, transaction_id):
return conn.run(
conn.collection('transactions')
.find_one({'id': transaction_id}, {'_id': 0}))
try:
return conn.run(
conn.collection('transactions')
.find_one({'id': transaction_id}, {'_id': 0}))
except IndexError:
pass
@register_query(LocalMongoDBConnection)
@ -82,32 +93,23 @@ def get_asset(conn, asset_id):
pass
@register_query(LocalMongoDBConnection)
def get_assets(conn, asset_ids):
return conn.run(
conn.collection('assets')
.find({'id': {'$in': asset_ids}},
projection={'_id': False}))
@register_query(LocalMongoDBConnection)
def get_spent(conn, transaction_id, output):
query = {'inputs':
{'$elemMatch':
{'$and': [{'fulfills.transaction_id': transaction_id},
{'fulfills.output_index': output}]}}}
return conn.run(
conn.collection('transactions')
.find(query, {'_id': 0}))
try:
return conn.run(
conn.collection('transactions')
.find_one({'inputs.fulfills.transaction_id': transaction_id,
'inputs.fulfills.output_index': output},
{'_id': 0}))
except IndexError:
pass
@register_query(LocalMongoDBConnection)
def get_latest_block(conn):
return conn.run(
conn.collection('blocks')
.find_one(projection={'_id': False},
sort=[('height', DESCENDING)]))
.find_one(sort=[('height', DESCENDING)]))
@register_query(LocalMongoDBConnection)
@ -121,45 +123,35 @@ def store_block(conn, block):
@register_query(LocalMongoDBConnection)
def get_txids_filtered(conn, asset_id, operation=None, last_tx=None):
def get_txids_filtered(conn, asset_id, operation=None):
match_create = {
'operation': 'CREATE',
'id': asset_id
}
match_transfer = {
'operation': 'TRANSFER',
'asset.id': asset_id
}
match = {
Transaction.CREATE: {'operation': 'CREATE', 'id': asset_id},
Transaction.TRANSFER: {'operation': 'TRANSFER', 'asset.id': asset_id},
None: {'$or': [{'asset.id': asset_id}, {'id': asset_id}]},
}[operation]
cursor = conn.run(conn.collection('transactions').find(match))
if last_tx:
cursor = cursor.sort([('$natural', DESCENDING)]).limit(1)
if operation == Transaction.CREATE:
match = match_create
elif operation == Transaction.TRANSFER:
match = match_transfer
else:
match = {'$or': [match_create, match_transfer]}
pipeline = [
{'$match': match}
]
cursor = conn.run(
conn.collection('transactions')
.aggregate(pipeline))
return (elem['id'] for elem in cursor)
@register_query(LocalMongoDBConnection)
def text_search(conn, search, *, language='english', case_sensitive=False,
diacritic_sensitive=False, text_score=False, limit=0, table='assets'):
cursor = conn.run(
conn.collection(table)
.find({'$text': {
'$search': search,
'$language': language,
'$caseSensitive': case_sensitive,
'$diacriticSensitive': diacritic_sensitive}},
{'score': {'$meta': 'textScore'}, '_id': False})
.sort([('score', {'$meta': 'textScore'})])
.limit(limit))
if text_score:
return cursor
return (_remove_text_score(obj) for obj in cursor)
def _remove_text_score(asset):
asset.pop('score', None)
return asset
def text_search(*args, **kwargs):
return mongodb.query.text_search(*args, **kwargs)
@register_query(LocalMongoDBConnection)
@ -174,18 +166,15 @@ def get_owned_ids(conn, owner):
@register_query(LocalMongoDBConnection)
def get_spending_transactions(conn, inputs):
transaction_ids = [i['transaction_id'] for i in inputs]
output_indexes = [i['output_index'] for i in inputs]
query = {'inputs':
{'$elemMatch':
{'$and':
[
{'fulfills.transaction_id': {'$in': transaction_ids}},
{'fulfills.output_index': {'$in': output_indexes}}
]}}}
cursor = conn.run(
conn.collection('transactions').find(query, {'_id': False}))
conn.collection('transactions').aggregate([
{'$match': {
'inputs.fulfills': {
'$in': inputs,
},
}},
{'$project': {'_id': False}}
]))
return cursor
@ -205,6 +194,33 @@ def get_block_with_transaction(conn, txid):
projection={'_id': False, 'height': True}))
@register_query(LocalMongoDBConnection)
def delete_zombie_transactions(conn):
txns = conn.run(conn.collection('transactions').find({}))
for txn in txns:
txn_id = txn['id']
block = list(get_block_with_transaction(conn, txn_id))
if len(block) == 0:
delete_transaction(conn, txn_id)
def delete_transaction(conn, txn_id):
conn.run(
conn.collection('transactions').delete_one({'id': txn_id}))
conn.run(
conn.collection('assets').delete_one({'id': txn_id}))
conn.run(
conn.collection('metadata').delete_one({'id': txn_id}))
@register_query(LocalMongoDBConnection)
def delete_latest_block(conn):
block = get_latest_block(conn)
txn_ids = block['transactions']
delete_transactions(conn, txn_ids)
conn.run(conn.collection('blocks').delete_one({'height': block['height']}))
@register_query(LocalMongoDBConnection)
def delete_transactions(conn, txn_ids):
conn.run(conn.collection('assets').delete_many({'id': {'$in': txn_ids}}))
@ -231,7 +247,7 @@ def store_unspent_outputs(conn, *unspent_outputs):
def delete_unspent_outputs(conn, *unspent_outputs):
if unspent_outputs:
return conn.run(
conn.collection('utxos').delete_many({
conn.collection('utxos').remove({
'$or': [{
'$and': [
{'transaction_id': unspent_output['transaction_id']},
@ -251,127 +267,25 @@ def get_unspent_outputs(conn, *, query=None):
@register_query(LocalMongoDBConnection)
def store_pre_commit_state(conn, state):
def store_validator_update(conn, validator_update):
try:
return conn.run(
conn.collection('validators')
.insert_one(validator_update))
except DuplicateKeyError:
raise MultipleValidatorOperationError('Validator update already exists')
@register_query(LocalMongoDBConnection)
def get_validator_update(conn, update_id=VALIDATOR_UPDATE_ID):
return conn.run(
conn.collection('pre_commit')
.replace_one({}, state, upsert=True)
)
@register_query(LocalMongoDBConnection)
def get_pre_commit_state(conn):
return conn.run(conn.collection('pre_commit').find_one())
@register_query(LocalMongoDBConnection)
def store_validator_set(conn, validators_update):
height = validators_update['height']
return conn.run(
conn.collection('validators').replace_one(
{'height': height},
validators_update,
upsert=True
)
)
@register_query(LocalMongoDBConnection)
def delete_validator_set(conn, height):
return conn.run(
conn.collection('validators').delete_many({'height': height})
)
@register_query(LocalMongoDBConnection)
def store_election(conn, election_id, height, is_concluded):
return conn.run(
conn.collection('elections').replace_one(
{'election_id': election_id,
'height': height},
{'election_id': election_id,
'height': height,
'is_concluded': is_concluded},
upsert=True,
)
)
@register_query(LocalMongoDBConnection)
def store_elections(conn, elections):
return conn.run(
conn.collection('elections').insert_many(elections)
)
@register_query(LocalMongoDBConnection)
def delete_elections(conn, height):
return conn.run(
conn.collection('elections').delete_many({'height': height})
)
@register_query(LocalMongoDBConnection)
def get_validator_set(conn, height=None):
query = {}
if height is not None:
query = {'height': {'$lte': height}}
cursor = conn.run(
conn.collection('validators')
.find(query, projection={'_id': False})
.sort([('height', DESCENDING)])
.limit(1)
)
return next(cursor, None)
.find_one({'update_id': update_id}, projection={'_id': False}))
@register_query(LocalMongoDBConnection)
def get_election(conn, election_id):
query = {'election_id': election_id}
def delete_validator_update(conn, update_id=VALIDATOR_UPDATE_ID):
return conn.run(
conn.collection('elections')
.find_one(query, projection={'_id': False},
sort=[('height', DESCENDING)])
)
@register_query(LocalMongoDBConnection)
def get_asset_tokens_for_public_key(conn, asset_id, public_key):
query = {'outputs.public_keys': [public_key],
'asset.id': asset_id}
cursor = conn.run(
conn.collection('transactions').aggregate([
{'$match': query},
{'$project': {'_id': False}}
]))
return cursor
@register_query(LocalMongoDBConnection)
def store_abci_chain(conn, height, chain_id, is_synced=True):
return conn.run(
conn.collection('abci_chains').replace_one(
{'height': height},
{'height': height, 'chain_id': chain_id,
'is_synced': is_synced},
upsert=True,
)
)
@register_query(LocalMongoDBConnection)
def delete_abci_chain(conn, height):
return conn.run(
conn.collection('abci_chains').delete_many({'height': height})
)
@register_query(LocalMongoDBConnection)
def get_latest_abci_chain(conn):
return conn.run(
conn.collection('abci_chains')
.find_one(projection={'_id': False}, sort=[('height', DESCENDING)])
conn.collection('validators')
.delete_one({'update_id': update_id})
)
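As a usage sketch, the functions registered above are called through the generic ``bigchaindb.backend.query`` module, which dispatches on the connection type; the search term and limit are illustrative.

from bigchaindb.backend import connect, query

conn = connect()
# Dispatches to the implementation registered for conn's type.
for asset in query.text_search(conn, 'bicycle', limit=10):
    print(asset)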

View File

@ -1,16 +1,11 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Utils to initialize and drop the database."""
import logging
from pymongo import ASCENDING, DESCENDING, TEXT
from pymongo.errors import CollectionInvalid
from bigchaindb import backend
from bigchaindb.common import exceptions
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
@ -19,48 +14,12 @@ logger = logging.getLogger(__name__)
register_schema = module_dispatch_registrar(backend.schema)
INDEXES = {
'transactions': [
('id', dict(unique=True, name='transaction_id')),
('asset.id', dict(name='asset_id')),
('outputs.public_keys', dict(name='outputs')),
([('inputs.fulfills.transaction_id', ASCENDING),
('inputs.fulfills.output_index', ASCENDING)], dict(name='inputs')),
],
'assets': [
('id', dict(name='asset_id', unique=True)),
([('$**', TEXT)], dict(name='text')),
],
'blocks': [
([('height', DESCENDING)], dict(name='height', unique=True)),
],
'metadata': [
('id', dict(name='transaction_id', unique=True)),
([('$**', TEXT)], dict(name='text')),
],
'utxos': [
([('transaction_id', ASCENDING),
('output_index', ASCENDING)], dict(name='utxo', unique=True)),
],
'pre_commit': [
('height', dict(name='height', unique=True)),
],
'elections': [
([('height', DESCENDING), ('election_id', ASCENDING)],
dict(name='election_id_height', unique=True)),
],
'validators': [
('height', dict(name='height', unique=True)),
],
'abci_chains': [
('height', dict(name='height', unique=True)),
('chain_id', dict(name='chain_id', unique=True)),
],
}
@register_schema(LocalMongoDBConnection)
def create_database(conn, dbname):
if dbname in conn.conn.database_names():
raise exceptions.DatabaseAlreadyExists('Database `{}` already exists'
.format(dbname))
logger.info('Create database `%s`.', dbname)
# TODO: read and write concerns can be declared here
conn.conn.get_database(dbname)
@ -68,23 +27,95 @@ def create_database(conn, dbname):
@register_schema(LocalMongoDBConnection)
def create_tables(conn, dbname):
for table_name in backend.schema.TABLES:
for table_name in ['transactions', 'utxos', 'assets', 'blocks', 'metadata', 'validators']:
logger.info('Create `%s` table.', table_name)
# create the table
# TODO: read and write concerns can be declared here
try:
logger.info(f'Create `{table_name}` table.')
conn.conn[dbname].create_collection(table_name)
except CollectionInvalid:
logger.info(f'Collection {table_name} already exists.')
create_indexes(conn, dbname, table_name, INDEXES[table_name])
conn.conn[dbname].create_collection(table_name)
def create_indexes(conn, dbname, collection, indexes):
logger.info(f'Ensure secondary indexes for `{collection}`.')
for fields, kwargs in indexes:
conn.conn[dbname][collection].create_index(fields, **kwargs)
@register_schema(LocalMongoDBConnection)
def create_indexes(conn, dbname):
create_transactions_secondary_index(conn, dbname)
create_assets_secondary_index(conn, dbname)
create_blocks_secondary_index(conn, dbname)
create_metadata_secondary_index(conn, dbname)
create_utxos_secondary_index(conn, dbname)
create_validators_secondary_index(conn, dbname)
@register_schema(LocalMongoDBConnection)
def drop_database(conn, dbname):
conn.conn.drop_database(dbname)
def create_transactions_secondary_index(conn, dbname):
logger.info('Create `transactions` secondary index.')
# to query the transactions for a transaction id, this field is unique
conn.conn[dbname]['transactions'].create_index('id',
name='transaction_id')
# secondary index for asset uuid, this field is unique
conn.conn[dbname]['transactions']\
.create_index('asset.id', name='asset_id')
# secondary index on the public keys of outputs
conn.conn[dbname]['transactions']\
.create_index('outputs.public_keys',
name='outputs')
# secondary index on inputs/transaction links (transaction_id, output)
conn.conn[dbname]['transactions']\
.create_index([
('inputs.fulfills.transaction_id', ASCENDING),
('inputs.fulfills.output_index', ASCENDING),
], name='inputs')
def create_assets_secondary_index(conn, dbname):
logger.info('Create `assets` secondary index.')
# unique index on the id of the asset.
# the id is the txid of the transaction that created the asset
conn.conn[dbname]['assets'].create_index('id',
name='asset_id',
unique=True)
# full text search index
conn.conn[dbname]['assets'].create_index([('$**', TEXT)], name='text')
def create_blocks_secondary_index(conn, dbname):
conn.conn[dbname]['blocks']\
.create_index([('height', DESCENDING)], name='height')
def create_metadata_secondary_index(conn, dbname):
logger.info('Create `metadata` secondary index.')
# the id is the txid of the transaction where metadata was defined
conn.conn[dbname]['metadata'].create_index('id',
name='transaction_id',
unique=True)
# full text search index
conn.conn[dbname]['metadata'].create_index([('$**', TEXT)], name='text')
def create_utxos_secondary_index(conn, dbname):
logger.info('Create `utxos` secondary index.')
conn.conn[dbname]['utxos'].create_index(
[('transaction_id', ASCENDING), ('output_index', ASCENDING)],
name='utxo',
unique=True,
)
def create_validators_secondary_index(conn, dbname):
logger.info('Create `validators` secondary index.')
conn.conn[dbname]['validators'].create_index('update_id',
name='update_id',
unique=True,)
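For orientation, a sketch of driving the dispatched schema functions above; the database name is illustrative, and the explicit ``create_indexes`` call matches the v2.0.0a1 side (on the master side it happens inside ``create_tables``).

from bigchaindb.backend import connect, schema

conn = connect()
dbname = 'bigchain'                    # illustrative database name
schema.create_database(conn, dbname)   # raises DatabaseAlreadyExists if present
schema.create_tables(conn, dbname)
schema.create_indexes(conn, dbname)    # v2.0.0a1 layout only
schema.drop_database(conn, dbname)     # tear everything down again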

View File

@ -0,0 +1,22 @@
"""MongoDB backend implementation.
Contains a MongoDB-specific implementation of the
:mod:`~bigchaindb.backend.changefeed`, :mod:`~bigchaindb.backend.query`, and
:mod:`~bigchaindb.backend.schema` interfaces.
You can specify BigchainDB to use MongoDB as its database backend by either
setting ``database.backend`` to ``'mongodb'`` in your configuration file, or
setting the ``BIGCHAINDB_DATABASE_BACKEND`` environment variable to
``'mongodb'``.
If configured to use MongoDB, BigchainDB will automatically return instances
of :class:`~bigchaindb.backend.mongodb.MongoDBConnection` for
:func:`~bigchaindb.backend.connection.connect` and dispatch calls of the
generic backend interfaces to the implementations in this module.
"""
# Register the single dispatched modules on import.
from bigchaindb.backend.mongodb import admin, schema, query, changefeed # noqa
# MongoDBConnection should always be accessed via
# ``bigchaindb.backend.connect()``.

View File

@ -0,0 +1,86 @@
"""Database configuration functions."""
import logging
from pymongo.errors import OperationFailure
from bigchaindb.backend import admin
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.exceptions import OperationError
from bigchaindb.backend.mongodb.connection import MongoDBConnection
logger = logging.getLogger(__name__)
register_admin = module_dispatch_registrar(admin)
@register_admin(MongoDBConnection)
def add_replicas(connection, replicas):
"""Add a set of replicas to the replicaset
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
replicas (:obj:`list` of :obj:`str`): replica addresses in the
form "hostname:port".
Raises:
OperationError: If the reconfiguration fails due to a MongoDB
:exc:`OperationFailure`
"""
# get current configuration
conf = connection.conn.admin.command('replSetGetConfig')
# MongoDB does not automatically add an id for the members, so we need
# to choose one that does not exist yet. The safest way is to use
# incrementing ids, so we first find the highest id already in
# the set and continue from there.
cur_id = max([member['_id'] for member in conf['config']['members']])
# add the nodes to the members list of the replica set
for replica in replicas:
cur_id += 1
conf['config']['members'].append({'_id': cur_id, 'host': replica})
# increase the configuration version number
# when reconfiguring, mongodb expects a version number higher than the one
# it currently has
conf['config']['version'] += 1
# apply new configuration
try:
connection.conn.admin.command('replSetReconfig', conf['config'])
except OperationFailure as exc:
raise OperationError(exc.details['errmsg'])
@register_admin(MongoDBConnection)
def remove_replicas(connection, replicas):
"""Remove a set of replicas from the replicaset
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
replicas (:obj:`list` of :obj:`str`): replica addresses in the
form "hostname:port".
Raises:
OperationError: If the reconfiguration fails due to a MongoDB
:exc:`OperationFailure`
"""
# get the current configuration
conf = connection.conn.admin.command('replSetGetConfig')
# remove the nodes from the members list in the replica set
conf['config']['members'] = list(
filter(lambda member: member['host'] not in replicas,
conf['config']['members'])
)
# increase the configuration version number
conf['config']['version'] += 1
# apply new configuration
try:
connection.conn.admin.command('replSetReconfig', conf['config'])
except OperationFailure as exc:
raise OperationError(exc.details['errmsg'])
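A short usage sketch of the admin commands above, dispatched through the generic ``bigchaindb.backend.admin`` module; the replica addresses are hypothetical ``hostname:port`` strings.

from bigchaindb.backend import admin, connect

connection = connect(backend='mongodb')
# Each address is appended as a new member with an incremented _id,
# then a replSetReconfig command is issued.
admin.add_replicas(connection, ['mdb-node-2:27017', 'mdb-node-3:27017'])
admin.remove_replicas(connection, ['mdb-node-3:27017'])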

View File

@ -0,0 +1,112 @@
import logging
import time
import pymongo
from bigchaindb import backend
from bigchaindb.backend.changefeed import ChangeFeed
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.mongodb.connection import MongoDBConnection
from bigchaindb.backend.exceptions import BackendError
logger = logging.getLogger(__name__)
register_changefeed = module_dispatch_registrar(backend.changefeed)
class MongoDBChangeFeed(ChangeFeed):
"""This class implements a MongoDB changefeed as a multipipes Node.
We emulate the behaviour of the RethinkDB changefeed by using a tailable
cursor that listens for events on the oplog.
"""
def run_forever(self):
for element in self.prefeed:
self.outqueue.put(element)
table = self.table
dbname = self.connection.dbname
# Last timestamp in the oplog. We only care about operations happening
# in the future.
last_ts = self.connection.run(
self.connection.query().local.oplog.rs.find()
.sort('$natural', pymongo.DESCENDING).limit(1)
.next()['ts'])
for record in run_changefeed(self.connection, table, last_ts):
is_insert = record['op'] == 'i'
is_delete = record['op'] == 'd'
is_update = record['op'] == 'u'
# MongoDB documents use the `_id` field as the primary key.
# We are not using this field at this point, and we need to
# remove it to prevent problems with schema validation.
# See https://github.com/bigchaindb/bigchaindb/issues/992
if is_insert and (self.operation & ChangeFeed.INSERT):
record['o'].pop('_id', None)
self.outqueue.put(record['o'])
elif is_delete and (self.operation & ChangeFeed.DELETE):
# on delete it only returns the id of the document
self.outqueue.put(record['o'])
elif is_update and (self.operation & ChangeFeed.UPDATE):
# the oplog entry for updates only returns the update
# operations to apply to the document and not the
# document itself. So here we first read the document
# and then return it.
doc = self.connection.conn[dbname][table].find_one(
{'_id': record['o2']['_id']},
{'_id': False}
)
self.outqueue.put(doc)
logger.debug('Record in changefeed: %s:%s', table, record['op'])
@register_changefeed(MongoDBConnection)
def get_changefeed(connection, table, operation, *, prefeed=None):
"""Return a MongoDB changefeed.
Returns:
An instance of
:class:`~bigchaindb.backend.mongodb.MongoDBChangeFeed`.
"""
return MongoDBChangeFeed(table, operation, prefeed=prefeed,
connection=connection)
_FEED_STOP = False
"""If it's True then the changefeed will return when there are no more items.
"""
def run_changefeed(conn, table, last_ts):
"""Encapsulate operational logic of tailing changefeed from MongoDB
"""
while True:
try:
# XXX: hack to force reconnection, in case the connection
# is lost while waiting on the cursor. See #1154.
conn._conn = None
namespace = conn.dbname + '.' + table
query = conn.query().local.oplog.rs.find(
{'ns': namespace, 'ts': {'$gt': last_ts}},
{'o._id': False},
cursor_type=pymongo.CursorType.TAILABLE_AWAIT
)
cursor = conn.run(query)
logger.debug('Tailing oplog at %s/%s', namespace, last_ts)
while cursor.alive:
try:
record = cursor.next()
yield record
last_ts = record['ts']
except StopIteration:
if _FEED_STOP:
return
except (BackendError, pymongo.errors.ConnectionFailure):
logger.exception('Lost connection while tailing oplog, retrying')
time.sleep(1)
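To show how the pieces fit together, a hedged sketch of obtaining a changefeed node; ``get_changefeed`` dispatches to ``MongoDBChangeFeed`` for a ``MongoDBConnection``, and the table name is illustrative.

from bigchaindb.backend import connect, get_changefeed
from bigchaindb.backend.changefeed import ChangeFeed

connection = connect(backend='mongodb')
# run_forever() will put each matching oplog record on the node's outqueue.
feed = get_changefeed(connection, 'backlog',
                      ChangeFeed.INSERT | ChangeFeed.UPDATE)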

View File

@ -0,0 +1,267 @@
import time
import logging
from ssl import CERT_REQUIRED
import pymongo
import bigchaindb
from bigchaindb.utils import Lazy
from bigchaindb.common.exceptions import ConfigurationError
from bigchaindb.backend.exceptions import (DuplicateKeyError,
OperationError,
ConnectionError)
from bigchaindb.backend.connection import Connection
logger = logging.getLogger(__name__)
class MongoDBConnection(Connection):
def __init__(self, replicaset=None, ssl=None, login=None, password=None,
ca_cert=None, certfile=None, keyfile=None,
keyfile_passphrase=None, crlfile=None, **kwargs):
"""Create a new Connection instance.
Args:
replicaset (str, optional): the name of the replica set to
connect to.
**kwargs: arbitrary keyword arguments provided by the
configuration's ``database`` settings
"""
super().__init__(**kwargs)
self.replicaset = replicaset or bigchaindb.config['database'].get('replicaset')
self.ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
self.login = login or bigchaindb.config['database'].get('login')
self.password = password or bigchaindb.config['database'].get('password')
self.ca_cert = ca_cert or bigchaindb.config['database'].get('ca_cert', None)
self.certfile = certfile or bigchaindb.config['database'].get('certfile', None)
self.keyfile = keyfile or bigchaindb.config['database'].get('keyfile', None)
self.keyfile_passphrase = keyfile_passphrase or bigchaindb.config['database'].get('keyfile_passphrase', None)
self.crlfile = crlfile or bigchaindb.config['database'].get('crlfile', None)
@property
def db(self):
return self.conn[self.dbname]
def query(self):
return Lazy()
def collection(self, name):
"""Return a lazy object that can be used to compose a query.
Args:
name (str): the name of the collection to query.
"""
return self.query()[self.dbname][name]
def run(self, query):
try:
try:
return query.run(self.conn)
except pymongo.errors.AutoReconnect as exc:
logger.warning('Lost connection to the database, '
'retrying query.')
return query.run(self.conn)
except pymongo.errors.AutoReconnect as exc:
raise ConnectionError from exc
except pymongo.errors.DuplicateKeyError as exc:
raise DuplicateKeyError from exc
except pymongo.errors.OperationFailure as exc:
raise OperationError from exc
def _connect(self):
"""Try to connect to the database.
Raises:
:exc:`~ConnectionError`: If the connection to the database
fails.
:exc:`~AuthenticationError`: If there is an OperationFailure due to
an authentication failure after connecting to the database.
:exc:`~ConfigurationError`: If there is a ConfigurationError while
connecting to the database.
"""
try:
if self.replicaset:
# We should only return a connection if the replica set is
# initialized. initialize_replica_set checks whether the replica
# set is already initialized; if not, it initializes it.
initialize_replica_set(self.host,
self.port,
self.connection_timeout,
self.dbname,
self.ssl,
self.login,
self.password,
self.ca_cert,
self.certfile,
self.keyfile,
self.keyfile_passphrase,
self.crlfile)
# FYI: the connection process might raise a
# `ServerSelectionTimeoutError`, that is a subclass of
# `ConnectionFailure`.
# The presence of ca_cert, certfile, keyfile, crlfile implies the
# use of certificates for TLS connectivity.
if self.ca_cert is None or self.certfile is None or \
self.keyfile is None or self.crlfile is None:
client = pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout,
ssl=self.ssl,
**MONGO_OPTS)
if self.login is not None and self.password is not None:
client[self.dbname].authenticate(self.login, self.password)
else:
logger.info('Connecting to MongoDB over TLS/SSL...')
client = pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout,
ssl=self.ssl,
ssl_ca_certs=self.ca_cert,
ssl_certfile=self.certfile,
ssl_keyfile=self.keyfile,
ssl_pem_passphrase=self.keyfile_passphrase,
ssl_crlfile=self.crlfile,
ssl_cert_reqs=CERT_REQUIRED,
**MONGO_OPTS)
if self.login is not None:
client[self.dbname].authenticate(self.login,
mechanism='MONGODB-X509')
return client
# `initialize_replica_set` might raise `ConnectionFailure`,
# `OperationFailure` or `ConfigurationError`.
except (pymongo.errors.ConnectionFailure,
pymongo.errors.OperationFailure) as exc:
logger.info('Exception in _connect(): {}'.format(exc))
raise ConnectionError(str(exc)) from exc
except pymongo.errors.ConfigurationError as exc:
raise ConfigurationError from exc
MONGO_OPTS = {
'socketTimeoutMS': 20000,
}
def initialize_replica_set(host, port, connection_timeout, dbname, ssl, login,
password, ca_cert, certfile, keyfile,
keyfile_passphrase, crlfile):
"""Initialize a replica set. If already initialized skip."""
# Setup a MongoDB connection
# The reason we do this instead of `backend.connect` is that
# `backend.connect` will connect you to a replica set but this fails if
# you try to connect to a replica set that is not yet initialized
try:
# The presence of ca_cert, certfile, keyfile, crlfile implies the
# use of certificates for TLS connectivity.
if ca_cert is None or certfile is None or keyfile is None or \
crlfile is None:
conn = pymongo.MongoClient(host,
port,
serverselectiontimeoutms=connection_timeout,
ssl=ssl,
**MONGO_OPTS)
if login is not None and password is not None:
conn[dbname].authenticate(login, password)
else:
logger.info('Connecting to MongoDB over TLS/SSL...')
conn = pymongo.MongoClient(host,
port,
serverselectiontimeoutms=connection_timeout,
ssl=ssl,
ssl_ca_certs=ca_cert,
ssl_certfile=certfile,
ssl_keyfile=keyfile,
ssl_pem_passphrase=keyfile_passphrase,
ssl_crlfile=crlfile,
ssl_cert_reqs=CERT_REQUIRED,
**MONGO_OPTS)
if login is not None:
logger.info('Authenticating to the database...')
conn[dbname].authenticate(login, mechanism='MONGODB-X509')
except (pymongo.errors.ConnectionFailure,
pymongo.errors.OperationFailure) as exc:
logger.info('Exception in initialize_replica_set(): {}'.format(exc))
raise ConnectionError(str(exc)) from exc
except pymongo.errors.ConfigurationError as exc:
raise ConfigurationError from exc
_check_replica_set(conn)
host = '{}:{}'.format(bigchaindb.config['database']['host'],
bigchaindb.config['database']['port'])
config = {'_id': bigchaindb.config['database']['replicaset'],
'members': [{'_id': 0, 'host': host}]}
try:
conn.admin.command('replSetInitiate', config)
except pymongo.errors.OperationFailure as exc_info:
if exc_info.details['codeName'] == 'AlreadyInitialized':
return
raise
else:
_wait_for_replica_set_initialization(conn)
logger.info('Initialized replica set')
finally:
if conn is not None:
logger.info('Closing initial connection to MongoDB')
conn.close()
def _check_replica_set(conn):
"""Checks if the replSet option was enabled either through the command
line option or config file and if it matches the one provided by
bigchaindb configuration.
Note:
The setting we are looking for will have a different name depending
if it was set by the config file (`replSetName`) or by command
line arguments (`replSet`).
Raise:
:exc:`~ConfigurationError`: If mongod was not started with the
replSet option.
"""
options = conn.admin.command('getCmdLineOpts')
try:
repl_opts = options['parsed']['replication']
repl_set_name = repl_opts.get('replSetName', repl_opts.get('replSet'))
except KeyError:
raise ConfigurationError('mongod was not started with'
' the replSet option.')
bdb_repl_set_name = bigchaindb.config['database']['replicaset']
if repl_set_name != bdb_repl_set_name:
raise ConfigurationError('The replicaset configuration of '
'bigchaindb (`{}`) needs to match '
'the replica set name from MongoDB'
' (`{}`)'.format(bdb_repl_set_name,
repl_set_name))
def _wait_for_replica_set_initialization(conn):
"""Wait for a replica set to finish initialization.
If a replica set is being initialized for the first time, it takes some
time. Nodes need to discover each other and an election needs to take
place. During this time the database is not writable, so we need to wait
before continuing with the rest of the initialization.
"""
# I did not find a better way to do this for now.
# To check if the database is ready we will poll the mongodb logs until
# we find the line that says the database is ready
logger.info('Waiting for mongodb replica set initialization')
while True:
logs = conn.admin.command('getLog', 'rs')['log']
if any('database writes are now permitted' in line for line in logs):
return
time.sleep(0.1)
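For reference, a minimal sketch of the ``replSetInitiate`` document built above; the replica set name and host are illustrative, and the client is created directly rather than via ``backend.connect`` for the reason noted in the code.

import pymongo

conn = pymongo.MongoClient('localhost', 27017)   # direct client, as above
config = {'_id': 'bigchain-rs',                  # illustrative replica set name
          'members': [{'_id': 0, 'host': 'localhost:27017'}]}
# Raises OperationFailure with codeName 'AlreadyInitialized' if the set
# already exists, which initialize_replica_set() catches and ignores.
conn.admin.command('replSetInitiate', config)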

View File

@ -0,0 +1,389 @@
"""Query implementation for MongoDB"""
from time import time
from pymongo import ReturnDocument
from bigchaindb import backend
from bigchaindb.backend.mongodb.changefeed import run_changefeed
from bigchaindb.common.exceptions import CyclicBlockchainError
from bigchaindb.common.transaction import Transaction
from bigchaindb.backend.exceptions import DuplicateKeyError, OperationError
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.mongodb.connection import MongoDBConnection
register_query = module_dispatch_registrar(backend.query)
@register_query(MongoDBConnection)
def write_transaction(conn, signed_transaction):
try:
return conn.run(
conn.collection('backlog')
.insert_one(signed_transaction))
except DuplicateKeyError:
return
@register_query(MongoDBConnection)
def update_transaction(conn, transaction_id, doc):
# with mongodb we need to add update operators to the doc
doc = {'$set': doc}
return conn.run(
conn.collection('backlog')
.find_one_and_update(
{'id': transaction_id},
doc,
return_document=ReturnDocument.AFTER))
@register_query(MongoDBConnection)
def delete_transaction(conn, *transaction_id):
return conn.run(
conn.collection('backlog')
.delete_many({'id': {'$in': transaction_id}}))
@register_query(MongoDBConnection)
def get_stale_transactions(conn, reassign_delay):
return conn.run(
conn.collection('backlog')
.find({'assignment_timestamp': {'$lt': time() - reassign_delay}},
projection={'_id': False}))
@register_query(MongoDBConnection)
def get_transaction_from_block(conn, transaction_id, block_id):
try:
return conn.run(
conn.collection('bigchain')
.aggregate([
{'$match': {'id': block_id}},
{'$project': {
'block.transactions': {
'$filter': {
'input': '$block.transactions',
'as': 'transaction',
'cond': {
'$eq': ['$$transaction.id', transaction_id]
}
}
}
}}])
.next()['block']['transactions']
.pop())
except (StopIteration, IndexError):
# StopIteration is raised if the block was not found
# IndexError is raised if the block is found but no transactions
# match
return
@register_query(MongoDBConnection)
def get_transaction_from_backlog(conn, transaction_id):
return conn.run(
conn.collection('backlog')
.find_one({'id': transaction_id},
projection={'_id': False,
'assignee': False,
'assignment_timestamp': False}))
@register_query(MongoDBConnection)
def get_blocks_status_from_transaction(conn, transaction_id):
return conn.run(
conn.collection('bigchain')
.find({'block.transactions.id': transaction_id},
projection=['id', 'block.voters']))
@register_query(MongoDBConnection)
def get_txids_filtered(conn, asset_id, operation=None):
match_create = {
'block.transactions.operation': 'CREATE',
'block.transactions.id': asset_id
}
match_transfer = {
'block.transactions.operation': 'TRANSFER',
'block.transactions.asset.id': asset_id
}
if operation == Transaction.CREATE:
match = match_create
elif operation == Transaction.TRANSFER:
match = match_transfer
else:
match = {'$or': [match_create, match_transfer]}
pipeline = [
{'$match': match},
{'$unwind': '$block.transactions'},
{'$match': match},
{'$project': {'block.transactions.id': True}}
]
cursor = conn.run(
conn.collection('bigchain')
.aggregate(pipeline))
return (elem['block']['transactions']['id'] for elem in cursor)
# TODO: This doesn't seem to be used anywhere
@register_query(MongoDBConnection)
def get_asset_by_id(conn, asset_id):
cursor = conn.run(
conn.collection('bigchain')
.aggregate([
{'$match': {
'block.transactions.id': asset_id,
'block.transactions.operation': 'CREATE'
}},
{'$unwind': '$block.transactions'},
{'$match': {
'block.transactions.id': asset_id,
'block.transactions.operation': 'CREATE'
}},
{'$project': {'block.transactions.asset': True}}
]))
# We need to access some nested fields before returning, so let's use a
# generator to avoid having to read all records on the cursor at this point.
return (elem['block']['transactions'] for elem in cursor)
@register_query(MongoDBConnection)
def get_spent(conn, transaction_id, output):
cursor = conn.run(
conn.collection('bigchain').aggregate([
{'$match': {
'block.transactions.inputs': {
'$elemMatch': {
'fulfills.transaction_id': transaction_id,
'fulfills.output_index': output,
},
},
}},
{'$unwind': '$block.transactions'},
{'$match': {
'block.transactions.inputs': {
'$elemMatch': {
'fulfills.transaction_id': transaction_id,
'fulfills.output_index': output,
},
},
}},
]))
# We need to access some nested fields before returning, so let's use a
# generator to avoid having to read all records on the cursor at this point.
return (elem['block']['transactions'] for elem in cursor)
@register_query(MongoDBConnection)
def get_spending_transactions(conn, inputs):
cursor = conn.run(
conn.collection('bigchain').aggregate([
{'$match': {
'block.transactions.inputs.fulfills': {
'$in': inputs,
},
}},
{'$unwind': '$block.transactions'},
{'$match': {
'block.transactions.inputs.fulfills': {
'$in': inputs,
},
}},
]))
return ((b['id'], b['block']['transactions']) for b in cursor)
@register_query(MongoDBConnection)
def get_owned_ids(conn, owner):
cursor = conn.run(
conn.collection('bigchain').aggregate([
{'$match': {'block.transactions.outputs.public_keys': owner}},
{'$unwind': '$block.transactions'},
{'$match': {'block.transactions.outputs.public_keys': owner}}
]))
return ((b['id'], b['block']['transactions']) for b in cursor)
@register_query(MongoDBConnection)
def get_votes_by_block_id(conn, block_id):
return conn.run(
conn.collection('votes')
.find({'vote.voting_for_block': block_id},
projection={'_id': False}))
@register_query(MongoDBConnection)
def get_votes_for_blocks_by_voter(conn, block_ids, node_pubkey):
return conn.run(
conn.collection('votes')
.find({'vote.voting_for_block': {'$in': block_ids},
'node_pubkey': node_pubkey},
projection={'_id': False}))
@register_query(MongoDBConnection)
def get_votes_by_block_id_and_voter(conn, block_id, node_pubkey):
return conn.run(
conn.collection('votes')
.find({'vote.voting_for_block': block_id,
'node_pubkey': node_pubkey},
projection={'_id': False}))
@register_query(MongoDBConnection)
def write_block(conn, block_dict):
return conn.run(
conn.collection('bigchain')
.insert_one(block_dict))
@register_query(MongoDBConnection)
def get_block(conn, block_id):
return conn.run(
conn.collection('bigchain')
.find_one({'id': block_id},
projection={'_id': False}))
@register_query(MongoDBConnection)
def write_assets(conn, assets):
try:
# unordered means that all the inserts will be attempted instead of
# stopping after the first error.
return conn.run(
conn.collection('assets')
.insert_many(assets, ordered=False))
# This can happen if we try to write the same asset multiple times.
# One case is when we write the same transaction into multiple blocks due
# to invalid blocks.
# The actual mongodb exception is a BulkWriteError due to a duplicated key
# in one of the inserts.
except OperationError:
return
@register_query(MongoDBConnection)
def write_metadata(conn, metadata):
try:
return conn.run(
conn.collection('metadata')
.insert_many(metadata, ordered=False))
except OperationError:
return
@register_query(MongoDBConnection)
def get_assets(conn, asset_ids):
return conn.run(
conn.collection('assets')
.find({'id': {'$in': asset_ids}},
projection={'_id': False}))
@register_query(MongoDBConnection)
def get_metadata(conn, txn_ids):
return conn.run(
conn.collection('metadata')
.find({'id': {'$in': txn_ids}},
projection={'_id': False}))
@register_query(MongoDBConnection)
def count_blocks(conn):
return conn.run(
conn.collection('bigchain')
.count())
@register_query(MongoDBConnection)
def count_backlog(conn):
return conn.run(
conn.collection('backlog')
.count())
@register_query(MongoDBConnection)
def write_vote(conn, vote):
conn.run(conn.collection('votes').insert_one(vote))
vote.pop('_id')
return vote
@register_query(MongoDBConnection)
def get_genesis_block(conn):
return conn.run(
conn.collection('bigchain')
.find_one(
{'block.transactions.0.operation': 'GENESIS'},
{'_id': False}
))
@register_query(MongoDBConnection)
def get_last_voted_block_id(conn, node_pubkey):
last_voted = conn.run(
conn.collection('votes')
.find({'node_pubkey': node_pubkey},
sort=[('vote.timestamp', -1)]))
# pymongo seems to return a cursor even if there are no results
# so we actually need to check the count
if last_voted.count() == 0:
return get_genesis_block(conn)['id']
mapping = {v['vote']['previous_block']: v['vote']['voting_for_block']
for v in last_voted}
last_block_id = list(mapping.values())[0]
explored = set()
while True:
try:
if last_block_id in explored:
raise CyclicBlockchainError()
explored.add(last_block_id)
last_block_id = mapping[last_block_id]
except KeyError:
break
return last_block_id
@register_query(MongoDBConnection)
def get_new_blocks_feed(conn, start_block_id):
namespace = conn.dbname + '.bigchain'
match = {'o.id': start_block_id, 'op': 'i', 'ns': namespace}
# Necessary to search in descending order, since tests may write the same block id several times.
query = conn.query().local.oplog.rs.find(match).sort('$natural', -1).next()['ts']
last_ts = conn.run(query)
feed = run_changefeed(conn, 'bigchain', last_ts)
return (evt['o'] for evt in feed if evt['op'] == 'i')
@register_query(MongoDBConnection)
def text_search(conn, search, *, language='english', case_sensitive=False,
diacritic_sensitive=False, text_score=False, limit=0, table='assets'):
cursor = conn.run(
conn.collection(table)
.find({'$text': {
'$search': search,
'$language': language,
'$caseSensitive': case_sensitive,
'$diacriticSensitive': diacritic_sensitive}},
{'score': {'$meta': 'textScore'}, '_id': False})
.sort([('score', {'$meta': 'textScore'})])
.limit(limit))
if text_score:
return cursor
return (_remove_text_score(obj) for obj in cursor)
def _remove_text_score(asset):
asset.pop('score', None)
return asset
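A small usage note on the ``text_score`` flag above: when it is falsy, ``_remove_text_score`` pops the projected ``score`` field off each result. A hedged sketch follows, with an illustrative search term.

from bigchaindb.backend import connect, query

conn = connect(backend='mongodb')
# text_score=True keeps MongoDB's textScore projection on each match.
for match in query.text_search(conn, 'bicycle', text_score=True, limit=5):
    print(match['score'], match)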

View File

@ -0,0 +1,138 @@
"""Utils to initialize and drop the database."""
import logging
from pymongo import ASCENDING, DESCENDING, TEXT
from bigchaindb import backend
from bigchaindb.common import exceptions
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.mongodb.connection import MongoDBConnection
logger = logging.getLogger(__name__)
register_schema = module_dispatch_registrar(backend.schema)
@register_schema(MongoDBConnection)
def create_database(conn, dbname):
if dbname in conn.conn.database_names():
raise exceptions.DatabaseAlreadyExists('Database `{}` already exists'
.format(dbname))
logger.info('Create database `%s`.', dbname)
# TODO: read and write concerns can be declared here
conn.conn.get_database(dbname)
@register_schema(MongoDBConnection)
def create_tables(conn, dbname):
for table_name in ['bigchain', 'backlog', 'votes', 'assets', 'metadata']:
logger.info('Create `%s` table.', table_name)
# create the table
# TODO: read and write concerns can be declared here
conn.conn[dbname].create_collection(table_name)
@register_schema(MongoDBConnection)
def create_indexes(conn, dbname):
create_bigchain_secondary_index(conn, dbname)
create_backlog_secondary_index(conn, dbname)
create_votes_secondary_index(conn, dbname)
create_assets_secondary_index(conn, dbname)
create_metadata_secondary_index(conn, dbname)
@register_schema(MongoDBConnection)
def drop_database(conn, dbname):
conn.conn.drop_database(dbname)
def create_bigchain_secondary_index(conn, dbname):
logger.info('Create `bigchain` secondary index.')
# secondary index on block id which should be unique
conn.conn[dbname]['bigchain'].create_index('id',
name='block_id',
unique=True)
# to order blocks by timestamp
conn.conn[dbname]['bigchain'].create_index([('block.timestamp',
ASCENDING)],
name='block_timestamp')
# to query the bigchain for a transaction id, this field is unique
conn.conn[dbname]['bigchain'].create_index('block.transactions.id',
name='transaction_id')
# secondary index for asset uuid, this field is unique
conn.conn[dbname]['bigchain']\
.create_index('block.transactions.asset.id', name='asset_id')
# secondary index on the public keys of outputs
conn.conn[dbname]['bigchain']\
.create_index('block.transactions.outputs.public_keys',
name='outputs')
# secondary index on inputs/transaction links (transaction_id, output)
conn.conn[dbname]['bigchain']\
.create_index([
('block.transactions.inputs.fulfills.transaction_id', ASCENDING),
('block.transactions.inputs.fulfills.output_index', ASCENDING),
], name='inputs')
def create_backlog_secondary_index(conn, dbname):
logger.info('Create `backlog` secondary index.')
# secondary index on the transaction id with a uniqueness constraint
# to make sure there are no duplicated transactions in the backlog
conn.conn[dbname]['backlog'].create_index('id',
name='transaction_id',
unique=True)
# compound index to read transactions from the backlog per assignee
conn.conn[dbname]['backlog']\
.create_index([('assignee', ASCENDING),
('assignment_timestamp', DESCENDING)],
name='assignee__transaction_timestamp')
def create_votes_secondary_index(conn, dbname):
logger.info('Create `votes` secondary index.')
# is the first index redundant then?
# compound index to order votes by block id and node
conn.conn[dbname]['votes'].create_index([('vote.voting_for_block',
ASCENDING),
('node_pubkey',
ASCENDING)],
name='block_and_voter',
unique=True)
def create_assets_secondary_index(conn, dbname):
logger.info('Create `assets` secondary index.')
# unique index on the id of the asset.
# the id is the txid of the transaction that created the asset
conn.conn[dbname]['assets'].create_index('id',
name='asset_id',
unique=True)
# full text search index
conn.conn[dbname]['assets'].create_index([('$**', TEXT)], name='text')
def create_metadata_secondary_index(conn, dbname):
logger.info('Create `metadata` secondary index.')
# unique index on the id of the metadata.
# the id is the txid of the transaction for which the metadata
# was specified
conn.conn[dbname]['metadata'].create_index('id',
name='transaction_id',
unique=True)
# full text search index
conn.conn[dbname]['metadata'].create_index([('$**', TEXT)], name='text')

View File

@ -1,14 +1,25 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Query interfaces for backends."""
from functools import singledispatch
from bigchaindb.backend.exceptions import OperationError
VALIDATOR_UPDATE_ID = 'a_unique_id_string'
@singledispatch
def write_transaction(connection, signed_transaction):
"""Write a transaction to the backlog table.
Args:
signed_transaction (dict): a signed transaction.
Returns:
The result of the operation.
"""
raise NotImplementedError
@singledispatch
def store_asset(connection, asset):
@ -52,9 +63,16 @@ def store_metadatas(connection, metadata):
raise NotImplementedError
@singledispatch
def store_transaction(connection, signed_transaction):
"""Same as write_transaction."""
raise NotImplementedError
@singledispatch
def store_transactions(connection, signed_transactions):
"""Store the list of transactions."""
"""Store list of transactions."""
raise NotImplementedError
@ -101,6 +119,110 @@ def get_asset(connection, asset_id):
raise NotImplementedError
@singledispatch
def update_transaction(connection, transaction_id, doc):
"""Update a transaction in the backlog table.
Args:
transaction_id (str): the id of the transaction.
doc (dict): the values to update.
Returns:
The result of the operation.
"""
raise NotImplementedError
@singledispatch
def delete_transaction(connection, *transaction_id):
"""Delete a transaction from the backlog.
Args:
*transaction_id (str): the transaction(s) to delete.
Returns:
The database response.
"""
raise NotImplementedError
@singledispatch
def get_stale_transactions(connection, reassign_delay):
"""Get a cursor of stale transactions.
Transactions are considered stale if they have been assigned a node,
but are still in the backlog after some amount of time specified in the
configuration.
Args:
reassign_delay (int): threshold (in seconds) to mark a transaction stale.
Returns:
A cursor of transactions.
"""
raise NotImplementedError
@singledispatch
def get_transaction_from_block(connection, transaction_id, block_id):
"""Get a transaction from a specific block.
Args:
transaction_id (str): the id of the transaction.
block_id (str): the id of the block.
Returns:
The matching transaction.
"""
raise NotImplementedError
@singledispatch
def get_transaction_from_backlog(connection, transaction_id):
"""Get a transaction from backlog.
Args:
transaction_id (str): the id of the transaction.
Returns:
The matching transaction.
"""
raise NotImplementedError
@singledispatch
def get_blocks_status_from_transaction(connection, transaction_id):
"""Retrieve block election information given a secondary index and value.
Args:
value: a value to search (e.g. transaction id string, payload hash string)
index (str): name of a secondary index, e.g. 'transaction_id'
Returns:
:obj:`list` of :obj:`dict`: A list of blocks with with only election information
"""
raise NotImplementedError
@singledispatch
def get_asset_by_id(connection, asset_id):
"""Returns the asset associated with an asset_id.
Args:
asset_id (str): The asset id.
Returns:
A database cursor over the matching asset.
"""
raise NotImplementedError
@singledispatch
def get_spent(connection, transaction_id, condition_id):
"""Check if a `txid` was already used as an input.
@ -148,6 +270,63 @@ def get_owned_ids(connection, owner):
raise NotImplementedError
@singledispatch
def get_votes_by_block_id(connection, block_id):
"""Get all the votes casted for a specific block.
Args:
block_id (str): the block id to use.
Returns:
A cursor for the matching votes.
"""
raise NotImplementedError
@singledispatch
def get_votes_by_block_id_and_voter(connection, block_id, node_pubkey):
"""Get all the votes casted for a specific block by a specific voter.
Args:
block_id (str): the block id to use.
node_pubkey (str): base58 encoded public key
Returns:
A cursor for the matching votes.
"""
raise NotImplementedError
@singledispatch
def get_votes_for_blocks_by_voter(connection, block_ids, pubkey):
"""Return votes for many block_ids
Args:
block_ids (set): block_ids
pubkey (str): public key of voting node
Returns:
A cursor of votes matching given block_ids and public key
"""
raise NotImplementedError
@singledispatch
def write_block(connection, block):
"""Write a block to the bigchain table.
Args:
block (dict): the block to write.
Returns:
The database response.
"""
raise NotImplementedError
@singledispatch
def get_block(connection, block_id):
"""Get a block from the bigchain table.
@ -176,6 +355,46 @@ def get_block_with_transaction(connection, txid):
raise NotImplementedError
@singledispatch
def write_assets(connection, assets):
"""Write a list of assets to the assets table.
Args:
assets (list): a list of assets to write.
Returns:
The database response.
"""
raise NotImplementedError
@singledispatch
def write_metadata(connection, metadata):
"""Write a list of metadata to the metadata table.
Args:
metadata (list): a list of metadata to write.
Returns:
The database response.
"""
raise NotImplementedError
@singledispatch
def get_assets(connection, asset_ids):
"""Get a list of assets from the assets table.
Args:
asset_ids (list): a list of ids for the assets to be retrieved from
the database.
Returns:
assets (list): the list of returned assets.
"""
raise NotImplementedError
@singledispatch
def get_metadata(connection, transaction_ids):
"""Get a list of metadata from the metadata table.
@ -191,14 +410,64 @@ def get_metadata(connection, transaction_ids):
@singledispatch
def get_assets(connection, asset_ids):
"""Get a list of assets from the assets table.
Args:
asset_ids (list): a list of ids for the assets to be retrieved from
the database.
def count_blocks(connection):
"""Count the number of blocks in the bigchain table.
Returns:
assets (list): the list of returned assets.
The number of blocks.
"""
raise NotImplementedError
@singledispatch
def count_backlog(connection):
"""Count the number of transactions in the backlog table.
Returns:
The number of transactions in the backlog.
"""
raise NotImplementedError
@singledispatch
def write_vote(connection, vote):
"""Write a vote to the votes table.
Args:
vote (dict): the vote to write.
Returns:
The database response.
"""
raise NotImplementedError
@singledispatch
def get_genesis_block(connection):
"""Get the genesis block.
Returns:
The genesis block
"""
raise NotImplementedError
@singledispatch
def get_last_voted_block_id(connection, node_pubkey):
"""Get the last voted block for a specific node.
Args:
node_pubkey (str): base58 encoded public key.
Returns:
The id of the last block the node has voted on. If the node didn't cast
any vote then the genesis block id is returned.
"""
raise NotImplementedError
@ -214,6 +483,20 @@ def get_txids_filtered(connection, asset_id, operation=None):
raise NotImplementedError
@singledispatch
def get_new_blocks_feed(connection, start_block_id):
"""Return a generator that yields change events of the blocks feed
Args:
start_block_id (str): ID of block to resume from
Returns:
Generator of change events
"""
raise NotImplementedError
@singledispatch
def text_search(conn, search, *, language='english', case_sensitive=False,
diacritic_sensitive=False, text_score=False, limit=0, table=None):
@ -249,7 +532,7 @@ def text_search(conn, search, *, language='english', case_sensitive=False,
@singledispatch
def get_latest_block(conn):
"""Get the latest commited block i.e. block with largest height"""
"""Get the latest commited block i.e. block with largest height """
raise NotImplementedError
@ -268,6 +551,13 @@ def store_block(conn, block):
raise NotImplementedError
@singledispatch
def delete_zombie_transactions(conn):
"""Delete transactions not included in any block"""
raise NotImplementedError
@singledispatch
def store_unspent_outputs(connection, unspent_outputs):
"""Store unspent outputs in ``utxo_set`` table."""
@ -275,6 +565,13 @@ def store_unspent_outputs(connection, unspent_outputs):
raise NotImplementedError
@singledispatch
def delete_latest_block(conn):
"""Delete the latest block along with its transactions"""
raise NotImplementedError
@singledispatch
def delete_unspent_outputs(connection, unspent_outputs):
"""Delete unspent outputs in ``utxo_set`` table."""
@ -314,117 +611,21 @@ def get_unspent_outputs(connection, *, query=None):
@singledispatch
def store_pre_commit_state(connection, state):
"""Store pre-commit state.
Args:
state (dict): pre-commit state.
Returns:
The result of the operation.
"""
def store_validator_update(conn, validator_update):
"""Store a update for the validator set """
raise NotImplementedError
@singledispatch
def get_pre_commit_state(connection):
"""Get pre-commit state.
Returns:
Document representing the pre-commit state.
"""
def get_validator_update(conn):
"""Get validator updates which are not synced"""
raise NotImplementedError
@singledispatch
def store_validator_set(conn, validator_update):
"""Store updated validator set"""
def delete_validator_update(conn, id):
"""Set the sync status for validator update documents"""
raise NotImplementedError
@singledispatch
def delete_validator_set(conn, height):
"""Delete the validator set at the given height."""
raise NotImplementedError
@singledispatch
def store_election(conn, election_id, height, is_concluded):
"""Store election record"""
raise NotImplementedError
@singledispatch
def store_elections(conn, elections):
"""Store election records in bulk"""
raise NotImplementedError
@singledispatch
def delete_elections(conn, height):
"""Delete all election records at the given height"""
raise NotImplementedError
@singledispatch
def get_validator_set(conn, height):
"""Get validator set for a given `height`, if `height` is not specified
then return the latest validator set
"""
raise NotImplementedError
@singledispatch
def get_election(conn, election_id):
"""Return the election record
"""
raise NotImplementedError
@singledispatch
def get_asset_tokens_for_public_key(connection, asset_id, public_key):
"""Retrieve a list of tokens of type `asset_id` that are owned by the `public_key`.
Args:
asset_id (str): Id of the token.
public_key (str): base58 encoded public key
Returns:
An iterator of transactions that list the given owner in their output conditions.
"""
raise NotImplementedError
@singledispatch
def store_abci_chain(conn, height, chain_id, is_synced=True):
"""Create or update an ABCI chain at the given height.
Usually invoked at the beginning of the ABCI communications (height=0)
or when the ABCI client (like Tendermint) is migrated (any height).
Args:
is_synced: True if the chain is known by both ABCI client and server
"""
raise NotImplementedError
@singledispatch
def delete_abci_chain(conn, height):
"""Delete the ABCI chain at the given height."""
raise NotImplementedError
@singledispatch
def get_latest_abci_chain(conn):
"""Returns the ABCI chain stored at the biggest height, if any,
None otherwise.
"""
raise NotImplementedError
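All of the interfaces in this module follow the same pattern: a `functools.singledispatch` stub that raises `NotImplementedError` until a backend registers an implementation keyed on its connection class. A minimal, self-contained sketch of that dispatch mechanism (`MyConnection` and the dummy body are hypothetical; the real backends register via `module_dispatch_registrar`, shown further down in this diff):

```python
from functools import singledispatch

@singledispatch
def get_asset(connection, asset_id):
    """Generic stub: raises until a backend registers itself."""
    raise NotImplementedError

class MyConnection:
    """Hypothetical stand-in for e.g. RethinkDBConnection."""

@get_asset.register(MyConnection)
def _get_asset_my_backend(connection, asset_id):
    # A real backend would run a database query here.
    return {'id': asset_id, 'data': None}

# Dispatch happens on the type of the first argument.
print(get_asset(MyConnection(), 'abc123'))
```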

View File

@ -0,0 +1,22 @@
"""RethinkDB backend implementation.
Contains a RethinkDB-specific implementation of the
:mod:`~bigchaindb.backend.changefeed`, :mod:`~bigchaindb.backend.query`, and
:mod:`~bigchaindb.backend.schema` interfaces.
You can configure BigchainDB to use RethinkDB as its database backend by
either setting ``database.backend`` to ``'rethinkdb'`` in your configuration
file, or setting the ``BIGCHAINDB_DATABASE_BACKEND`` environment variable to
``'rethinkdb'``.
If configured to use RethinkDB, BigchainDB will automatically return instances
of :class:`~bigchaindb.backend.rethinkdb.RethinkDBConnection` for
:func:`~bigchaindb.backend.connection.connect` and dispatch calls of the
generic backend interfaces to the implementations in this module.
"""
# Register the single dispatched modules on import.
from bigchaindb.backend.rethinkdb import admin, changefeed, schema, query # noqa
# RethinkDBConnection should always be accessed via
# ``bigchaindb.backend.connect()``.
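As the module docstring says, the backend is chosen through configuration. A minimal sketch of selecting RethinkDB via the environment variable before connecting (assumes a reachable RethinkDB server and the package's default configuration loading):

```python
import os

# Equivalent to setting database.backend = 'rethinkdb' in the config file.
os.environ['BIGCHAINDB_DATABASE_BACKEND'] = 'rethinkdb'

from bigchaindb.backend import connect

# With this configuration, connect() returns a RethinkDBConnection.
conn = connect()
```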

View File

@ -0,0 +1,165 @@
"""Database configuration functions."""
import logging
import rethinkdb as r
from bigchaindb.backend import admin
from bigchaindb.backend.schema import TABLES
from bigchaindb.backend.exceptions import OperationError
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
logger = logging.getLogger(__name__)
register_admin = module_dispatch_registrar(admin)
@register_admin(RethinkDBConnection)
def get_config(connection, *, table):
"""Get the configuration of the given table.
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
table (str): The name of the table to get the configuration for.
Returns:
dict: The configuration of the given table
"""
return connection.run(r.table(table).config())
@register_admin(RethinkDBConnection)
def reconfigure(connection, *, table, shards, replicas,
primary_replica_tag=None, dry_run=False,
nonvoting_replica_tags=None):
"""Reconfigures the given table.
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
table (str): The name of the table to reconfigure.
shards (int): The number of shards, an integer from 1-64.
replicas (:obj:`int` | :obj:`dict`):
* If replicas is an integer, it specifies the number of
replicas per shard. Specifying more replicas than there
are servers will return an error.
* If replicas is a dictionary, it specifies key-value pairs
of server tags and the number of replicas to assign to
those servers::
{'africa': 2, 'asia': 4, 'europe': 2, ...}
primary_replica_tag (str): The primary server specified by its
server tag. Required if ``replicas`` is a dictionary. The
tag must be in the ``replicas`` dictionary. This must not be
specified if ``replicas`` is an integer. Defaults to
``None``.
dry_run (bool): If ``True`` the generated configuration will not
be applied to the table, only returned. Defaults to
``False``.
nonvoting_replica_tags (:obj:`list` of :obj:`str`): Replicas
with these server tags will be added to the
``nonvoting_replicas`` list of the resulting configuration.
Defaults to ``None``.
Returns:
dict: A dictionary with possibly three keys:
* ``reconfigured``: the number of tables reconfigured. This
will be ``0`` if ``dry_run`` is ``True``.
* ``config_changes``: a list of new and old table
configuration values.
* ``status_changes``: a list of new and old table status
values.
For more information please consult RethinkDB's
documentation `ReQL command: reconfigure
<https://rethinkdb.com/api/python/reconfigure/>`_.
Raises:
OperationError: If the reconfiguration fails due to a
RethinkDB :exc:`ReqlOpFailedError` or
:exc:`ReqlQueryLogicError`.
"""
params = {
'shards': shards,
'replicas': replicas,
'dry_run': dry_run,
}
if primary_replica_tag:
params.update(
primary_replica_tag=primary_replica_tag,
nonvoting_replica_tags=nonvoting_replica_tags,
)
try:
return connection.run(r.table(table).reconfigure(**params))
except (r.ReqlOpFailedError, r.ReqlQueryLogicError) as e:
raise OperationError('Failed to reconfigure tables.') from e
@register_admin(RethinkDBConnection)
def set_shards(connection, *, shards, dry_run=False):
"""Sets the shards for the tables
:const:`~bigchaindb.backend.schema.TABLES`.
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
shards (int): The number of shards, an integer from 1-64.
dry_run (bool): If ``True`` the generated configuration will not
be applied to the table, only returned. Defaults to
``False``.
Returns:
dict: A dictionary with the configuration and status changes.
For more details please see :func:`.reconfigure`.
"""
changes = {}
for table in TABLES:
replicas = len(
get_config(connection, table=table)['shards'][0]['replicas'])
change = reconfigure(
connection,
table=table,
shards=shards,
replicas=replicas,
dry_run=dry_run,
)
changes[table] = change
return changes
@register_admin(RethinkDBConnection)
def set_replicas(connection, *, replicas, dry_run=False):
"""Sets the replicas for the tables
:const:`~bigchaindb.backend.schema.TABLES`.
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
replicas (int): The number of replicas per shard. Specifying
more replicas than there are servers will return an error.
dry_run (bool): If ``True`` the generated configuration will not
be applied to the table, only returned. Defaults to
``False``.
Returns:
dict: A dictionary with the configuration and status changes.
For more details please see :func:`.reconfigure`.
"""
changes = {}
for table in TABLES:
shards = len(get_config(connection, table=table)['shards'])
change = reconfigure(
connection,
table=table,
shards=shards,
replicas=replicas,
dry_run=dry_run,
)
changes[table] = change
return changes
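A short usage sketch of the admin helpers above, previewing a resharding with `dry_run` before applying it (assumes the RethinkDB backend is configured and reachable):

```python
from bigchaindb.backend import admin, connect

conn = connect()

# Preview the new configuration without applying it...
planned = admin.set_shards(conn, shards=2, dry_run=True)
for table, change in planned.items():
    print(table, change.get('config_changes'))

# ...then apply it for real.
admin.set_shards(conn, shards=2)
```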

View File

@ -0,0 +1,59 @@
import time
import logging
import rethinkdb as r
from bigchaindb import backend
from bigchaindb.backend.exceptions import BackendError
from bigchaindb.backend.changefeed import ChangeFeed
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
logger = logging.getLogger(__name__)
register_changefeed = module_dispatch_registrar(backend.changefeed)
class RethinkDBChangeFeed(ChangeFeed):
"""This class wraps a RethinkDB changefeed as a multipipes Node."""
def run_forever(self):
for element in self.prefeed:
self.outqueue.put(element)
for change in run_changefeed(self.connection, self.table):
is_insert = change['old_val'] is None
is_delete = change['new_val'] is None
is_update = not is_insert and not is_delete
if is_insert and (self.operation & ChangeFeed.INSERT):
self.outqueue.put(change['new_val'])
elif is_delete and (self.operation & ChangeFeed.DELETE):
self.outqueue.put(change['old_val'])
elif is_update and (self.operation & ChangeFeed.UPDATE):
self.outqueue.put(change['new_val'])
def run_changefeed(connection, table):
"""Encapsulate operational logic of tailing changefeed from RethinkDB
"""
while True:
try:
for change in connection.run(r.table(table).changes()):
yield change
break
except (BackendError, r.ReqlDriverError) as exc:
logger.exception('Error connecting to the database, retrying')
time.sleep(1)
@register_changefeed(RethinkDBConnection)
def get_changefeed(connection, table, operation, *, prefeed=None):
"""Return a RethinkDB changefeed.
Returns:
An instance of
:class:`~bigchaindb.backend.rethinkdb.RethinkDBChangeFeed`.
"""
return RethinkDBChangeFeed(table, operation, prefeed=prefeed,
connection=connection)
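A sketch of tailing the raw changefeed with the `run_changefeed` helper above; each change document carries `old_val`/`new_val`, so inserts and deletes can be told apart exactly as `RethinkDBChangeFeed.run_forever` does (assumes a configured RethinkDB backend):

```python
from bigchaindb.backend import connect
from bigchaindb.backend.rethinkdb.changefeed import run_changefeed

conn = connect()

# Blocks forever, yielding {'old_val': ..., 'new_val': ...} documents.
for change in run_changefeed(conn, 'backlog'):
    if change['old_val'] is None:
        print('insert:', change['new_val']['id'])
    elif change['new_val'] is None:
        print('delete:', change['old_val']['id'])
```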

View File

@ -0,0 +1,46 @@
import rethinkdb as r
from bigchaindb.backend.connection import Connection
from bigchaindb.backend.exceptions import ConnectionError, OperationError
class RethinkDBConnection(Connection):
"""This class is a proxy to run queries against the database, it is:
- lazy, since it creates a connection only when needed
- resilient, because before raising exceptions it tries
more times to run the query or open a connection.
"""
def run(self, query):
"""Run a RethinkDB query.
Args:
query: the RethinkDB query.
Raises:
:exc:`~bigchaindb.backend.exceptions.OperationError`: If the
query fails with a :exc:`rethinkdb.ReqlDriverError`.
"""
try:
return query.run(self.conn)
except r.ReqlDriverError as exc:
raise OperationError from exc
def _connect(self):
"""Set a connection to RethinkDB.
The connection is available via :attr:`~.RethinkDBConnection.conn`.
Raises:
:exc:`~bigchaindb.backend.exceptions.ConnectionError`: If the
connection attempt fails or times out.
"""
try:
return r.connect(host=self.host,
port=self.port,
db=self.dbname,
timeout=self.connection_timeout)
except (r.ReqlDriverError, r.ReqlTimeoutError) as exc:
raise ConnectionError from exc
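A usage sketch of the proxy above; the constructor arguments come from the base `Connection` class, which is not part of this diff, so their names here are assumptions:

```python
import rethinkdb as r
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection

# host/port/dbname are handled by the base Connection class (assumed names).
conn = RethinkDBConnection(host='localhost', port=28015, dbname='bigchain')

# The connection is lazy: the first run() call opens the socket.
block_count = conn.run(r.table('bigchain').count())
print(block_count)
```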

View File

@ -0,0 +1,312 @@
from itertools import chain
import logging as logger
from time import time
import rethinkdb as r
from bigchaindb import backend, utils
from bigchaindb.backend.rethinkdb import changefeed
from bigchaindb.common import exceptions
from bigchaindb.common.transaction import Transaction
from bigchaindb.common.utils import serialize
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
logger = logger.getLogger(__name__)
READ_MODE = 'majority'
WRITE_DURABILITY = 'hard'
register_query = module_dispatch_registrar(backend.query)
@register_query(RethinkDBConnection)
def write_transaction(connection, signed_transaction):
return connection.run(
r.table('backlog')
.insert(signed_transaction, durability=WRITE_DURABILITY))
@register_query(RethinkDBConnection)
def update_transaction(connection, transaction_id, doc):
return connection.run(
r.table('backlog')
.get(transaction_id)
.update(doc))
@register_query(RethinkDBConnection)
def delete_transaction(connection, *transaction_id):
return connection.run(
r.table('backlog')
.get_all(*transaction_id)
.delete(durability=WRITE_DURABILITY))
@register_query(RethinkDBConnection)
def get_stale_transactions(connection, reassign_delay):
return connection.run(
r.table('backlog')
.filter(lambda tx: time() - tx['assignment_timestamp'] > reassign_delay))
@register_query(RethinkDBConnection)
def get_transaction_from_block(connection, transaction_id, block_id):
return connection.run(
r.table('bigchain', read_mode=READ_MODE)
.get(block_id)
.get_field('block')
.get_field('transactions')
.filter(lambda tx: tx['id'] == transaction_id))[0]
@register_query(RethinkDBConnection)
def get_transaction_from_backlog(connection, transaction_id):
return connection.run(
r.table('backlog')
.get(transaction_id)
.without('assignee', 'assignment_timestamp')
.default(None))
@register_query(RethinkDBConnection)
def get_blocks_status_from_transaction(connection, transaction_id):
return connection.run(
r.table('bigchain', read_mode=READ_MODE)
.get_all(transaction_id, index='transaction_id')
.pluck('votes', 'id', {'block': ['voters']}))
@register_query(RethinkDBConnection)
def get_txids_filtered(connection, asset_id, operation=None):
# Here we only want to return the transaction ids, since the
# transactions themselves will be retrieved later on, with status validation.
parts = []
if operation in (Transaction.CREATE, None):
# First find the asset's CREATE transaction
parts.append(connection.run(
_get_asset_create_tx_query(asset_id).get_field('id')))
if operation in (Transaction.TRANSFER, None):
# Then find any TRANSFER transactions related to the asset
parts.append(connection.run(
r.table('bigchain')
.get_all(asset_id, index='asset_id')
.concat_map(lambda block: block['block']['transactions'])
.filter(lambda transaction: transaction['asset']['id'] == asset_id)
.get_field('id')))
return chain(*parts)
@register_query(RethinkDBConnection)
def get_asset_by_id(connection, asset_id):
return connection.run(_get_asset_create_tx_query(asset_id).pluck('asset'))
def _get_asset_create_tx_query(asset_id):
return r.table('bigchain', read_mode=READ_MODE) \
.get_all(asset_id, index='transaction_id') \
.concat_map(lambda block: block['block']['transactions']) \
.filter(lambda transaction: transaction['id'] == asset_id)
@register_query(RethinkDBConnection)
def get_spent(connection, transaction_id, output):
return connection.run(
r.table('bigchain', read_mode=READ_MODE)
.get_all([transaction_id, output], index='inputs')
.concat_map(lambda doc: doc['block']['transactions'])
.filter(lambda transaction: transaction['inputs'].contains(
lambda input_: input_['fulfills'] == {
'transaction_id': transaction_id, 'output_index': output})))
@register_query(RethinkDBConnection)
def get_owned_ids(connection, owner):
query = (r.table('bigchain', read_mode=READ_MODE)
.get_all(owner, index='outputs')
.distinct()
.concat_map(unwind_block_transactions)
.filter(lambda doc: doc['tx']['outputs'].contains(
lambda c: c['public_keys'].contains(owner))))
cursor = connection.run(query)
return ((b['id'], b['tx']) for b in cursor)
@register_query(RethinkDBConnection)
def get_votes_by_block_id(connection, block_id):
return connection.run(
r.table('votes', read_mode=READ_MODE)
.between([block_id, r.minval], [block_id, r.maxval], index='block_and_voter')
.without('id'))
@register_query(RethinkDBConnection)
def get_votes_by_block_id_and_voter(connection, block_id, node_pubkey):
return connection.run(
r.table('votes')
.get_all([block_id, node_pubkey], index='block_and_voter')
.without('id'))
@register_query(RethinkDBConnection)
def write_block(connection, block_dict):
return connection.run(
r.table('bigchain')
.insert(r.json(serialize(block_dict)), durability=WRITE_DURABILITY))
@register_query(RethinkDBConnection)
def get_block(connection, block_id):
return connection.run(r.table('bigchain').get(block_id))
@register_query(RethinkDBConnection)
def write_assets(connection, assets):
return connection.run(
r.table('assets')
.insert(assets, durability=WRITE_DURABILITY))
@register_query(RethinkDBConnection)
def write_metadata(connection, metadata):
return connection.run(
r.table('metadata')
.insert(metadata, durability=WRITE_DURABILITY))
@register_query(RethinkDBConnection)
def get_assets(connection, asset_ids):
return connection.run(
r.table('assets', read_mode=READ_MODE)
.get_all(*asset_ids))
@register_query(RethinkDBConnection)
def get_metadata(connection, txn_ids):
return connection.run(
r.table('metadata', read_mode=READ_MODE)
.get_all(*txn_ids))
@register_query(RethinkDBConnection)
def count_blocks(connection):
return connection.run(
r.table('bigchain', read_mode=READ_MODE)
.count())
@register_query(RethinkDBConnection)
def count_backlog(connection):
return connection.run(
r.table('backlog', read_mode=READ_MODE)
.count())
@register_query(RethinkDBConnection)
def write_vote(connection, vote):
return connection.run(
r.table('votes')
.insert(vote))
@register_query(RethinkDBConnection)
def get_genesis_block(connection):
return connection.run(
r.table('bigchain', read_mode=READ_MODE)
.filter(utils.is_genesis_block)
.nth(0))
@register_query(RethinkDBConnection)
def get_last_voted_block_id(connection, node_pubkey):
try:
# get the latest value for the vote timestamp (over all votes)
max_timestamp = connection.run(
r.table('votes', read_mode=READ_MODE)
.filter(r.row['node_pubkey'] == node_pubkey)
.max(r.row['vote']['timestamp']))['vote']['timestamp']
last_voted = list(connection.run(
r.table('votes', read_mode=READ_MODE)
.filter(r.row['vote']['timestamp'] == max_timestamp)
.filter(r.row['node_pubkey'] == node_pubkey)))
except r.ReqlNonExistenceError:
# No vote was cast yet, so fall back to the genesis block id.
return get_genesis_block(connection)['id']
# Now the fun starts. Since the resolution of the timestamp is one second,
# we might have more than one vote with the same timestamp. If that is the
# case, we need to rebuild the chain over the retrieved blocks to find the
# last one.
# Given a block_id, mapping returns the id of the block pointing at it.
mapping = {v['vote']['previous_block']: v['vote']['voting_for_block']
for v in last_voted}
# Since we follow the chain backwards, we can start from a random
# point of the chain and "move up" from it.
last_block_id = list(mapping.values())[0]
# We must be sure to break the infinite loop. This happens when:
# - the block we are currently iterating is the one we are looking for.
# This will trigger a KeyError, breaking the loop
# - we are visiting again a node we already explored, hence there is
# a loop. This might happen if a vote points both `previous_block`
# and `voting_for_block` to the same `block_id`
explored = set()
while True:
try:
if last_block_id in explored:
raise exceptions.CyclicBlockchainError()
explored.add(last_block_id)
last_block_id = mapping[last_block_id]
except KeyError:
break
return last_block_id
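The backward walk above is easier to see with toy data; a self-contained rerun of the same loop over a three-block chain:

```python
# Each vote maps previous_block -> voting_for_block; following the map
# until a KeyError finds the newest block, and `explored` breaks cycles.
mapping = {'b1': 'b2', 'b2': 'b3'}         # b3 is the last voted block
explored = set()
last_block_id = list(mapping.values())[0]  # start anywhere, e.g. 'b2'
while True:
    try:
        if last_block_id in explored:
            raise RuntimeError('cycle in the vote chain')
        explored.add(last_block_id)
        last_block_id = mapping[last_block_id]
    except KeyError:
        break
assert last_block_id == 'b3'
```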
@register_query(RethinkDBConnection)
def get_new_blocks_feed(connection, start_block_id): # pragma: no cover
logger.warning('RethinkDB changefeed unable to resume from given block: %s',
start_block_id)
# In order to get blocks in the correct order, it may be acceptable to
# look in the votes table to see what order other nodes have used.
for change in changefeed.run_changefeed(connection, 'bigchain'):
yield change['new_val']
@register_query(RethinkDBConnection)
def get_votes_for_blocks_by_voter(connection, block_ids, node_pubkey):
return connection.run(
r.table('votes')
.filter(lambda row: r.expr(block_ids).contains(row['vote']['voting_for_block']))
.filter(lambda row: row['node_pubkey'] == node_pubkey))
def unwind_block_transactions(block):
"""Yield a block for each transaction in given block"""
return block['block']['transactions'].map(lambda tx: block.merge({'tx': tx}))
@register_query(RethinkDBConnection)
def get_spending_transactions(connection, links):
query = (
r.table('bigchain')
.get_all(*[(l['transaction_id'], l['output_index']) for l in links],
index='inputs')
.concat_map(unwind_block_transactions)
# filter transactions spending output
.filter(lambda doc: r.expr(links).set_intersection(
doc['tx']['inputs'].map(lambda i: i['fulfills'])))
)
cursor = connection.run(query)
return ((b['id'], b['tx']) for b in cursor)
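With the implementations above registered, callers go through the generic interfaces and dispatch lands here automatically. A short sketch (the owner key is a placeholder):

```python
from bigchaindb.backend import connect, query

conn = connect()  # a RethinkDBConnection under this configuration

# Dispatches to the get_owned_ids implementation registered above.
owner = 'EdDSA-base58-public-key-goes-here'  # placeholder value
for block_id, tx in query.get_owned_ids(conn, owner):
    print(block_id, tx['id'])
```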

View File

@ -0,0 +1,130 @@
import logging
import rethinkdb as r
from bigchaindb import backend
from bigchaindb.common import exceptions
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
logger = logging.getLogger(__name__)
register_schema = module_dispatch_registrar(backend.schema)
@register_schema(RethinkDBConnection)
def create_database(connection, dbname):
if connection.run(r.db_list().contains(dbname)):
raise exceptions.DatabaseAlreadyExists('Database `{}` already exists'.format(dbname))
logger.info('Create database `%s`.', dbname)
connection.run(r.db_create(dbname))
@register_schema(RethinkDBConnection)
def create_tables(connection, dbname):
for table_name in ['bigchain', 'backlog', 'votes', 'assets', 'metadata']:
logger.info('Create `%s` table.', table_name)
connection.run(r.db(dbname).table_create(table_name))
@register_schema(RethinkDBConnection)
def create_indexes(connection, dbname):
create_bigchain_secondary_index(connection, dbname)
create_backlog_secondary_index(connection, dbname)
create_votes_secondary_index(connection, dbname)
@register_schema(RethinkDBConnection)
def drop_database(connection, dbname):
try:
logger.info('Drop database `%s`', dbname)
connection.run(r.db_drop(dbname))
logger.info('Done.')
except r.ReqlOpFailedError:
raise exceptions.DatabaseDoesNotExist('Database `{}` does not exist'.format(dbname))
def create_bigchain_secondary_index(connection, dbname):
logger.info('Create `bigchain` secondary index.')
# to order blocks by timestamp
connection.run(
r.db(dbname)
.table('bigchain')
.index_create('block_timestamp', r.row['block']['timestamp']))
# to query the bigchain for a transaction id
connection.run(
r.db(dbname)
.table('bigchain')
.index_create('transaction_id', r.row['block']['transactions']['id'], multi=True))
# secondary index for asset links (in TRANSFER transactions)
connection.run(
r.db(dbname)
.table('bigchain')
.index_create('asset_id', r.row['block']['transactions']['asset']['id'], multi=True))
# secondary index on the public keys of outputs
# the last reduce operation returns a flattened list of public_keys;
# without it we would need to match the public_keys list exactly.
# For instance, querying for `pk1` would not match documents with
# `public_keys: [pk1, pk2, pk3]`
connection.run(
r.db(dbname)
.table('bigchain')
.index_create('outputs',
r.row['block']['transactions']
.concat_map(lambda tx: tx['outputs']['public_keys'])
.reduce(lambda l, r: l + r), multi=True))
# secondary index on inputs/transaction links (transaction_id, output)
connection.run(
r.db(dbname)
.table('bigchain')
.index_create('inputs',
r.row['block']['transactions']
.concat_map(lambda tx: tx['inputs']['fulfills'])
.with_fields('transaction_id', 'output_index')
.map(lambda fulfills: [fulfills['transaction_id'],
fulfills['output_index']]),
multi=True))
# wait for rethinkdb to finish creating secondary indexes
connection.run(
r.db(dbname)
.table('bigchain')
.index_wait())
def create_backlog_secondary_index(connection, dbname):
logger.info('Create `backlog` secondary index.')
# compound index to read transactions from the backlog per assignee
connection.run(
r.db(dbname)
.table('backlog')
.index_create('assignee__transaction_timestamp', [r.row['assignee'], r.row['assignment_timestamp']]))
# wait for rethinkdb to finish creating secondary indexes
connection.run(
r.db(dbname)
.table('backlog')
.index_wait())
def create_votes_secondary_index(connection, dbname):
logger.info('Create `votes` secondary index.')
# compound index to order votes by block id and node
connection.run(
r.db(dbname)
.table('votes')
.index_create('block_and_voter', [r.row['vote']['voting_for_block'], r.row['node_pubkey']]))
# wait for rethinkdb to finish creating secondary indexes
connection.run(
r.db(dbname)
.table('votes')
.index_wait())
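The comment on the `outputs` index (flattening `public_keys` with `reduce`) has a direct plain-Python analogue, which may make the point clearer:

```python
# Without flattening, the index would hold whole lists such as
# ['pk1', 'pk2'], and a lookup for 'pk1' alone would never match.
transactions = [{'outputs': [{'public_keys': ['pk1', 'pk2']}]},
                {'outputs': [{'public_keys': ['pk3']}]}]

# concat_map over outputs plus reduce(l + r) boils down to flattening:
flattened = [pk for tx in transactions
             for output in tx['outputs']
             for pk in output['public_keys']]
assert flattened == ['pk1', 'pk2', 'pk3']  # 'pk1' is individually matchable
```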

View File

@ -1,9 +1,15 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Database creation and schema-providing interfaces for backends.
"""Database creation and schema-providing interfaces for backends."""
Attributes:
TABLES (tuple): The three standard tables BigchainDB relies on:
* ``backlog`` for incoming transactions waiting to be put into
a block.
* ``bigchain`` for blocks.
* ``votes`` to store votes for each block by each federation
node.
"""
from functools import singledispatch
import logging
@ -11,14 +17,11 @@ import logging
import bigchaindb
from bigchaindb.backend.connection import connect
from bigchaindb.common.exceptions import ValidationError
from bigchaindb.common.utils import validate_all_values_for_key_in_obj, validate_all_values_for_key_in_list
from bigchaindb.common.utils import validate_all_values_for_key
logger = logging.getLogger(__name__)
# Tables/collections that every backend database must create
TABLES = ('transactions', 'blocks', 'assets', 'metadata',
'validators', 'elections', 'pre_commit', 'utxos', 'abci_chains')
TABLES = ('bigchain', 'backlog', 'votes', 'assets', 'metadata')
VALID_LANGUAGES = ('danish', 'dutch', 'english', 'finnish', 'french', 'german',
'hungarian', 'italian', 'norwegian', 'portuguese', 'romanian',
'russian', 'spanish', 'swedish', 'turkish', 'none',
@ -32,6 +35,10 @@ def create_database(connection, dbname):
Args:
dbname (str): the name of the database to create.
Raises:
:exc:`~DatabaseAlreadyExists`: If the given :attr:`dbname` already
exists as a database.
"""
raise NotImplementedError
@ -48,6 +55,17 @@ def create_tables(connection, dbname):
raise NotImplementedError
@singledispatch
def create_indexes(connection, dbname):
"""Create the indexes to be used by BigchainDB.
Args:
dbname (str): the name of the database to create indexes for.
"""
raise NotImplementedError
@singledispatch
def drop_database(connection, dbname):
"""Drop the database used by BigchainDB.
@ -76,6 +94,10 @@ def init_database(connection=None, dbname=None):
dbname (str): the name of the database to create.
Defaults to the database name given in the BigchainDB
configuration.
Raises:
:exc:`~DatabaseAlreadyExists`: If the given :attr:`dbname` already
exists as a database.
"""
connection = connection or connect()
@ -83,6 +105,7 @@ def init_database(connection=None, dbname=None):
create_database(connection, dbname)
create_tables(connection, dbname)
create_indexes(connection, dbname)
def validate_language_key(obj, key):
@ -99,12 +122,10 @@ def validate_language_key(obj, key):
"""
backend = bigchaindb.config['database']['backend']
if backend == 'localmongodb':
if backend == 'mongodb':
data = obj.get(key, {})
if isinstance(data, dict):
validate_all_values_for_key_in_obj(data, 'language', validate_language)
elif isinstance(data, list):
validate_all_values_for_key_in_list(data, 'language', validate_language)
validate_all_values_for_key(data, 'language', validate_language)
def validate_language(value):

View File

@ -1,11 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import bigchaindb
class ModuleDispatchRegistrationError(Exception):
"""Raised when there is a problem registering dispatched functions for a
module
@ -24,16 +16,6 @@ def module_dispatch_registrar(module):
('`{module}` does not contain a single-dispatchable '
'function named `{func}`. The module being registered '
'was not implemented correctly!').format(
func=func_name, module=module.__name__)) from ex
func=func_name, module=module.__name__)) from ex
return wrapper
return dispatch_wrapper
def get_bigchaindb_config_value(key, default_value=None):
return bigchaindb.config['database'].get(key, default_value)
def get_bigchaindb_config_value_or_key_error(key):
return bigchaindb.config['database'][key]

View File

@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Implementation of the `bigchaindb` command,
the command-line interface (CLI) for BigchainDB Server.
"""
@ -14,24 +9,18 @@ import copy
import json
import sys
from bigchaindb.core import rollback
from bigchaindb.migrations.chain_migration_election import ChainMigrationElection
from bigchaindb.utils import load_node_key
from bigchaindb.common.transaction_mode_types import BROADCAST_TX_COMMIT
from bigchaindb.common.exceptions import (DatabaseDoesNotExist,
ValidationError)
from bigchaindb.elections.vote import Vote
from bigchaindb.common.exceptions import (DatabaseAlreadyExists,
DatabaseDoesNotExist,
MultipleValidatorOperationError)
import bigchaindb
from bigchaindb import (backend, ValidatorElection,
BigchainDB)
from bigchaindb import backend
from bigchaindb.backend import schema
from bigchaindb.backend import query
from bigchaindb.commands import utils
from bigchaindb.commands.utils import (configure_bigchaindb,
input_on_stderr)
from bigchaindb.log import setup_logging
from bigchaindb.tendermint_utils import public_key_from_base64
from bigchaindb.commands.election_types import elections
from bigchaindb.version import __tm_supported_versions__
from bigchaindb.commands.utils import (
configure_bigchaindb, start_logging_process, input_on_stderr)
from bigchaindb.backend.query import VALIDATOR_UPDATE_ID
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
@ -51,6 +40,8 @@ def run_show_config(args):
# configure the system.
config = copy.deepcopy(bigchaindb.config)
del config['CONFIGURED']
private_key = config['keypair']['private']
config['keypair']['private'] = 'x' * 45 if private_key else None
print(json.dumps(config, indent=4, sort_keys=True))
@ -91,9 +82,9 @@ def run_configure(args):
val = conf['database'][key]
conf['database'][key] = input_on_stderr('Database {}? (default `{}`): '.format(key, val), val)
for key in ('host', 'port'):
val = conf['tendermint'][key]
conf['tendermint'][key] = input_on_stderr('Tendermint {}? (default `{}`)'.format(key, val), val)
val = conf['backlog_reassign_delay']
conf['backlog_reassign_delay'] = input_on_stderr(
'Stale transaction reassignment delay (in seconds)? (default `{}`): '.format(val), val)
if config_path != '-':
bigchaindb.config_utils.write_config(conf, config_path)
@ -104,144 +95,25 @@ def run_configure(args):
@configure_bigchaindb
def run_election(args):
"""Initiate and manage elections"""
def run_upsert_validator(args):
"""Store validators which should be synced with Tendermint"""
b = BigchainDB()
# Call the function specified by args.action, as defined above
globals()[f'run_election_{args.action}'](args, b)
def run_election_new(args, bigchain):
election_type = args.election_type.replace('-', '_')
globals()[f'run_election_new_{election_type}'](args, bigchain)
def create_new_election(sk, bigchain, election_class, data):
b = bigchaindb.Bigchain()
validator = {'pub_key': {'type': 'ed25519',
'data': args.public_key},
'power': args.power}
validator_update = {'validator': validator,
'update_id': VALIDATOR_UPDATE_ID}
try:
key = load_node_key(sk)
voters = election_class.recipients(bigchain)
election = election_class.generate([key.public_key],
voters,
data, None).sign([key.private_key])
election.validate(bigchain)
except ValidationError as e:
logger.error(e)
return False
except FileNotFoundError as fd_404:
logger.error(fd_404)
return False
resp = bigchain.write_transaction(election, BROADCAST_TX_COMMIT)
if resp == (202, ''):
logger.info('[SUCCESS] Submitted proposal with id: {}'.format(election.id))
return election.id
else:
logger.error('Failed to commit election proposal')
return False
def run_election_new_upsert_validator(args, bigchain):
"""Initiates an election to add/update/remove a validator to an existing BigchainDB network
:param args: dict
args = {
'public_key': the public key of the proposed peer, (str)
'power': the proposed validator power for the new peer, (str)
'node_id': the node_id of the new peer (str)
'sk': the path to the private key of the node calling the election (str)
}
:param bigchain: an instance of BigchainDB
:return: election_id or `False` in case of failure
"""
new_validator = {
'public_key': {'value': public_key_from_base64(args.public_key),
'type': 'ed25519-base16'},
'power': args.power,
'node_id': args.node_id
}
return create_new_election(args.sk, bigchain, ValidatorElection, new_validator)
def run_election_new_chain_migration(args, bigchain):
"""Initiates an election to halt block production
:param args: dict
args = {
'sk': the path to the private key of the node calling the election (str)
}
:param bigchain: an instance of BigchainDB
:return: election_id or `False` in case of failure
"""
return create_new_election(args.sk, bigchain, ChainMigrationElection, {})
def run_election_approve(args, bigchain):
"""Approve an election
:param args: dict
args = {
'election_id': the election_id of the election (str)
'sk': the path to the private key of the signer (str)
}
:param bigchain: an instance of BigchainDB
:return: success log message or `False` in case of error
"""
key = load_node_key(args.sk)
tx = bigchain.get_transaction(args.election_id)
voting_powers = [v.amount for v in tx.outputs if key.public_key in v.public_keys]
if len(voting_powers) > 0:
voting_power = voting_powers[0]
else:
logger.error('The key you provided does not match any of the eligible voters in this election.')
return False
inputs = [i for i in tx.to_inputs() if key.public_key in i.owners_before]
election_pub_key = ValidatorElection.to_public_key(tx.id)
approval = Vote.generate(inputs,
[([election_pub_key], voting_power)],
tx.id).sign([key.private_key])
approval.validate(bigchain)
resp = bigchain.write_transaction(approval, BROADCAST_TX_COMMIT)
if resp == (202, ''):
logger.info('[SUCCESS] Your vote has been submitted')
return approval.id
else:
logger.error('Failed to commit vote')
return False
def run_election_show(args, bigchain):
"""Retrieves information about an election
:param args: dict
args = {
'election_id': the transaction_id for an election (str)
}
:param bigchain: an instance of BigchainDB
"""
election = bigchain.get_transaction(args.election_id)
if not election:
logger.error(f'No election found with election_id {args.election_id}')
return
response = election.show_election(bigchain)
logger.info(response)
return response
query.store_validator_update(b.connection, validator_update)
except MultipleValidatorOperationError:
logger.error('A validator update is pending to be applied. '
'Please re-try after the current update has '
'been processed.')
def _run_init():
bdb = bigchaindb.BigchainDB()
bdb = bigchaindb.Bigchain()
schema.init_database(connection=bdb.connection)
@ -249,7 +121,27 @@ def _run_init():
@configure_bigchaindb
def run_init(args):
"""Initialize the database"""
_run_init()
# TODO Provide mechanism to:
# 1. prompt the user to inquire whether they wish to drop the db
# 2. force the init, (e.g., via -f flag)
try:
_run_init()
except DatabaseAlreadyExists:
print('The database already exists.', file=sys.stderr)
print('If you wish to re-initialize it, first drop it.', file=sys.stderr)
def run_recover(b):
query.delete_zombie_transactions(b.connection)
tendermint_height = b.get_latest_block_height_from_tendermint()
block = b.get_latest_block()
if block:
while block['height'] > tendermint_height:
logger.info('BigchainDB is ahead of tendermint, removing block %s', block['height'])
query.delete_latest_block(b.connection)
block = b.get_latest_block()
@configure_bigchaindb
@ -263,42 +155,31 @@ def run_drop(args):
return
conn = backend.connect()
dbname = bigchaindb.config['database']['name']
try:
schema.drop_database(conn, dbname)
except DatabaseDoesNotExist:
print("Cannot drop '{name}'. The database does not exist.".format(name=dbname), file=sys.stderr)
def run_recover(b):
rollback(b)
@configure_bigchaindb
@start_logging_process
def run_start(args):
"""Start the processes to run the node"""
# Configure Logging
setup_logging()
logger.info('BigchainDB Version %s', bigchaindb.__version__)
run_recover(bigchaindb.lib.BigchainDB())
if not args.skip_initialize_database:
logger.info('Initializing database')
_run_init()
# run_recover(BigchainDB())
try:
if not args.skip_initialize_database:
logger.info('Initializing database')
_run_init()
except DatabaseAlreadyExists:
pass
logger.info('Starting BigchainDB main process.')
from bigchaindb.start import start
start(args)
def run_tendermint_version(args):
"""Show the supported Tendermint version(s)"""
supported_tm_ver = {
'description': 'BigchainDB supports the following Tendermint version(s)',
'tendermint': __tm_supported_versions__,
}
print(json.dumps(supported_tm_ver, indent=4, sort_keys=True))
from bigchaindb.tendermint.commands import start
start()
def create_parser():
@ -325,41 +206,16 @@ def create_parser():
help='The backend to use. It can only be '
'"localmongodb", currently.')
# parser for managing elections
election_parser = subparsers.add_parser('election',
help='Manage elections.')
validator_parser = subparsers.add_parser('upsert-validator',
help='Add/update/delete a validator')
election_subparser = election_parser.add_subparsers(title='Action',
dest='action')
validator_parser.add_argument('public_key',
help='Public key of the validator.')
new_election_parser = election_subparser.add_parser('new',
help='Calls a new election.')
new_election_subparser = new_election_parser.add_subparsers(title='Election_Type',
dest='election_type')
# Parser factory for each type of new election, so we get a bunch of commands that look like this:
# election new <some_election_type> <args>...
for name, data in elections.items():
args = data['args']
generic_parser = new_election_subparser.add_parser(name, help=data['help'])
for arg, kwargs in args.items():
generic_parser.add_argument(arg, **kwargs)
approve_election_parser = election_subparser.add_parser('approve',
help='Approve the election.')
approve_election_parser.add_argument('election_id',
help='The election_id of the election.')
approve_election_parser.add_argument('--private-key',
dest='sk',
required=True,
help='Path to the private key of the election initiator.')
show_election_parser = election_subparser.add_parser('show',
help='Provides information about an election.')
show_election_parser.add_argument('election_id',
help='The transaction id of the election you wish to query.')
validator_parser.add_argument('power',
type=int,
help='Voting power of the validator. '
'Setting it to 0 will delete the validator.')
# parsers for showing/exporting config values
subparsers.add_parser('show-config',
@ -382,15 +238,6 @@ def create_parser():
action='store_true',
help='Skip database initialization')
subparsers.add_parser('tendermint-version',
help='Show the Tendermint supported versions')
start_parser.add_argument('--experimental-parallel-validation',
dest='experimental_parallel_validation',
default=False,
action='store_true',
help='💀 EXPERIMENTAL: parallelize validation for better throughput 💀')
return parser

View File

@ -1,31 +0,0 @@
elections = {
'upsert-validator': {
'help': 'Propose a change to the validator set',
'args': {
'public_key': {
'help': 'Public key of the validator to be added/updated/removed.'
},
'power': {
'type': int,
'help': 'The proposed power for the validator. Setting to 0 will remove the validator.'},
'node_id': {
'help': 'The node_id of the validator.'
},
'--private-key': {
'dest': 'sk',
'required': True,
'help': 'Path to the private key of the election initiator.'
}
}
},
'chain-migration': {
'help': 'Call for a halt to block production to allow for a version change across breaking changes.',
'args': {
'--private-key': {
'dest': 'sk',
'required': True,
'help': 'Path to the private key of the election initiator.'
}
}
}
}

View File

@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Utility functions and basic common arguments
for ``argparse.ArgumentParser``.
"""
@ -15,6 +10,7 @@ import sys
import bigchaindb
import bigchaindb.config_utils
from bigchaindb.log.setup import setup_logging
from bigchaindb.version import __version__
@ -51,6 +47,32 @@ def configure_bigchaindb(command):
return configure
def start_logging_process(command):
"""Decorator to start the logging subscriber process.
Args:
command: The command to decorate.
Returns:
The command wrapper function.
.. important::
Configuration, if needed, should be applied before invoking this
decorator, as starting the subscriber process for logging will
configure the root logger for the child process based on the
state of :obj:`bigchaindb.config` at the moment this decorator
is invoked.
"""
@functools.wraps(command)
def start_logging(args):
from bigchaindb import config
setup_logging(user_log_config=config.get('log'))
command(args)
return start_logging
def _convert(value, default=None, convert=None):
def convert_bool(value):
if value.lower() in ('true', 't', 'yes', 'y'):
@ -148,7 +170,7 @@ base_parser.add_argument('-c', '--config',
# the environment variables provided to configure the logger.
base_parser.add_argument('-l', '--log-level',
type=str.upper, # convert to uppercase for comparison to choices
choices=['DEBUG', 'BENCHMARK', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Log level')
base_parser.add_argument('-y', '--yes', '--yes-please',

View File

@ -1,16 +1,7 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
# Separate all crypto code so that we can easily test several implementations
from collections import namedtuple
try:
from hashlib import sha3_256
except ImportError:
from sha3 import sha3_256
import sha3
from cryptoconditions import crypto
@ -19,7 +10,7 @@ CryptoKeypair = namedtuple('CryptoKeypair', ('private_key', 'public_key'))
def hash_data(data):
"""Hash the provided data using SHA3-256"""
return sha3_256(data.encode()).hexdigest()
return sha3.sha3_256(data.encode()).hexdigest()
def generate_key_pair():
@ -39,17 +30,3 @@ def generate_key_pair():
PrivateKey = crypto.Ed25519SigningKey
PublicKey = crypto.Ed25519VerifyingKey
def key_pair_from_ed25519_key(hex_private_key):
"""Generate base58 encode public-private key pair from a hex encoded private key"""
priv_key = crypto.Ed25519SigningKey(bytes.fromhex(hex_private_key)[:32], encoding='bytes')
public_key = priv_key.get_verifying_key()
return CryptoKeypair(private_key=priv_key.encode(encoding='base58').decode('utf-8'),
public_key=public_key.encode(encoding='base58').decode('utf-8'))
def public_key_from_ed25519_key(hex_public_key):
"""Generate base58 public key from hex encoded public key"""
public_key = crypto.Ed25519VerifyingKey(bytes.fromhex(hex_public_key), encoding='bytes')
return public_key.encode(encoding='base58').decode('utf-8')
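A small usage sketch of the helpers above (assuming, as in the released package, that `generate_key_pair` returns a `CryptoKeypair` of base58-encoded strings):

```python
from bigchaindb.common.crypto import generate_key_pair, hash_data

keypair = generate_key_pair()
print(keypair.private_key, keypair.public_key)  # base58-encoded strings

# hash_data returns the hex digest of a SHA3-256 hash: 32 bytes, 64 hex chars.
digest = hash_data('hello world')
assert len(digest) == 64
```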

View File

@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Custom exceptions used in the `bigchaindb` package.
"""
from bigchaindb.exceptions import BigchainDBError
@ -12,6 +7,10 @@ class ConfigurationError(BigchainDBError):
"""Raised when there is a problem with server configuration"""
class DatabaseAlreadyExists(BigchainDBError):
"""Raised when trying to create the database but the db is already there"""
class DatabaseDoesNotExist(BigchainDBError):
"""Raised when trying to delete the database but the db is not there"""
@ -24,6 +23,10 @@ class CyclicBlockchainError(BigchainDBError):
"""Raised when there is a cycle in the blockchain"""
class KeypairNotFoundException(BigchainDBError):
"""Raised if operation cannot proceed because the keypair was not given"""
class KeypairMismatchException(BigchainDBError):
"""Raised if the private key(s) provided for signing don't match any of the
current owner(s)
@ -67,6 +70,20 @@ class InvalidSignature(ValidationError):
"""
class ImproperVoteError(ValidationError):
"""Raised if a vote is not constructed correctly, or signed incorrectly"""
class MultipleVotesError(ValidationError):
"""Raised if a voter has voted more than once"""
class TransactionNotInValidBlock(ValidationError):
"""Raised when a transfer transaction is attempting to fulfill the
outputs of a transaction that is in an invalid or undecided block
"""
class AssetIdMismatch(ValidationError):
"""Raised when multiple transaction inputs related to different assets"""
@ -83,6 +100,10 @@ class TransactionOwnerError(ValidationError):
"""Raised if a user tries to transfer a transaction they don't own"""
class SybilError(ValidationError):
"""If a block or vote comes from an unidentifiable node"""
class DuplicateTransaction(ValidationError):
"""Raised if a duplicated transaction is found"""
@ -91,25 +112,9 @@ class ThresholdTooDeep(ValidationError):
"""Raised if threshold condition is too deep"""
class GenesisBlockAlreadyExistsError(ValidationError):
"""Raised when trying to create the already existing genesis block"""
class MultipleValidatorOperationError(ValidationError):
"""Raised when a validator update pending but new request is submited"""
class MultipleInputsError(ValidationError):
"""Raised if there were multiple inputs when only one was expected"""
class InvalidProposer(ValidationError):
"""Raised if the public key is not a part of the validator set"""
class UnequalValidatorSet(ValidationError):
"""Raised if the validator sets differ"""
class InvalidPowerChange(ValidationError):
"""Raised if proposed power change in validator set is >=1/3 total power"""
class InvalidPublicKey(ValidationError):
"""Raised if public key doesn't match the encoding type"""

View File

@ -1,58 +0,0 @@
import functools
import codecs
from functools import lru_cache
class HDict(dict):
def __hash__(self):
return hash(codecs.decode(self['id'], 'hex'))
@lru_cache(maxsize=16384)
def from_dict(func, *args, **kwargs):
return func(*args, **kwargs)
def memoize_from_dict(func):
@functools.wraps(func)
def memoized_func(*args, **kwargs):
if args[1].get('id', None):
args = list(args)
args[1] = HDict(args[1])
new_args = tuple(args)
return from_dict(func, *new_args, **kwargs)
else:
return func(*args, **kwargs)
return memoized_func
class ToDictWrapper():
def __init__(self, tx):
self.tx = tx
def __eq__(self, other):
return self.tx.id == other.tx.id
def __hash__(self):
return hash(self.tx.id)
@lru_cache(maxsize=16384)
def to_dict(func, tx_wrapped):
return func(tx_wrapped.tx)
def memoize_to_dict(func):
@functools.wraps(func)
def memoized_func(*args, **kwargs):
if args[0].id:
return to_dict(func, ToDictWrapper(args[0]))
else:
return func(*args, **kwargs)
return memoized_func
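A sketch of how the decorators above are meant to be applied (the `Tx` class is hypothetical; the import path assumes the module lives at `bigchaindb.common.memoize`):

```python
from bigchaindb.common.memoize import memoize_from_dict

class Tx:
    """Hypothetical transaction class using the cache above."""
    def __init__(self, tx_id):
        self.id = tx_id

    @classmethod
    @memoize_from_dict
    def from_dict(cls, tx_dict):
        print('building', tx_dict['id'])  # printed only on a cache miss
        return cls(tx_dict['id'])

t1 = Tx.from_dict({'id': 'ab'})  # note: HDict hashes the id as hex bytes
t2 = Tx.from_dict({'id': 'ab'})  # served from the lru_cache
assert t1 is t2
```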

View File

@ -1,10 +1,3 @@
<!---
Copyright © 2020 Interplanetary Database Association e.V.,
BigchainDB and IPDB software contributors.
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
# Introduction
This directory contains the schemas for the different JSON documents BigchainDB uses.
@ -20,18 +13,14 @@ The aim is to provide:
## Sources
The files defining the JSON Schema for transactions (`transaction_*.yaml`)
are based on the [BigchainDB Transactions Specs](https://github.com/bigchaindb/BEPs/tree/master/tx-specs).
are based on the [IPDB Transaction Spec](https://github.com/ipdb/ipdb-tx-spec).
If you want to add a new transaction version,
you must write a spec for it first.
you must add it to the IPDB Transaction Spec first.
(You can't change the JSON Schema files for old versions.
Those were used to validate old transactions
and are needed to re-check those transactions.)
There used to be a file defining the JSON Schema for votes, named `vote.yaml`.
It was used by BigchainDB version 1.3.0 and earlier.
If you want a copy of the latest `vote.yaml` file,
then you can get it from the version 1.3.0 release on GitHub, at
[https://github.com/bigchaindb/bigchaindb/blob/v1.3.0/bigchaindb/common/schema/vote.yaml](https://github.com/bigchaindb/bigchaindb/blob/v1.3.0/bigchaindb/common/schema/vote.yaml).
The file defining the JSON Schema for votes (`vote.yaml`) is BigchainDB-specific.
## Learn about JSON Schema

View File

@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Schema validation related functions and data"""
import os.path
import logging
@ -10,6 +5,7 @@ import logging
import jsonschema
import yaml
import rapidjson
import rapidjson_schema
from bigchaindb.common.exceptions import SchemaValidationError
@ -17,12 +13,12 @@ from bigchaindb.common.exceptions import SchemaValidationError
logger = logging.getLogger(__name__)
def _load_schema(name, path=__file__):
def _load_schema(name):
"""Load a schema from disk"""
path = os.path.join(os.path.dirname(path), name + '.yaml')
path = os.path.join(os.path.dirname(__file__), name + '.yaml')
with open(path) as handle:
schema = yaml.safe_load(handle)
fast_schema = rapidjson.Validator(rapidjson.dumps(schema))
fast_schema = rapidjson_schema.loads(rapidjson.dumps(schema))
return path, (schema, fast_schema)
@ -34,14 +30,7 @@ _, TX_SCHEMA_CREATE = _load_schema('transaction_create_' +
TX_SCHEMA_VERSION)
_, TX_SCHEMA_TRANSFER = _load_schema('transaction_transfer_' +
TX_SCHEMA_VERSION)
_, TX_SCHEMA_VALIDATOR_ELECTION = _load_schema('transaction_validator_election_' +
TX_SCHEMA_VERSION)
_, TX_SCHEMA_CHAIN_MIGRATION_ELECTION = _load_schema('transaction_chain_migration_election_' +
TX_SCHEMA_VERSION)
_, TX_SCHEMA_VOTE = _load_schema('transaction_vote_' + TX_SCHEMA_VERSION)
VOTE_SCHEMA_PATH, VOTE_SCHEMA = _load_schema('vote')
def _validate_schema(schema, body):
@ -59,7 +48,7 @@ def _validate_schema(schema, body):
# a helpful error message.
try:
schema[1](rapidjson.dumps(body))
schema[1].validate(rapidjson.dumps(body))
except ValueError as exc:
try:
jsonschema.validate(body, schema[0])
@ -80,3 +69,8 @@ def validate_transaction_schema(tx):
_validate_schema(TX_SCHEMA_TRANSFER, tx)
else:
_validate_schema(TX_SCHEMA_CREATE, tx)
def validate_vote_schema(vote):
"""Validate a vote dict"""
_validate_schema(VOTE_SCHEMA, vote)
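A usage sketch of the validators above; an obviously incomplete transaction is rejected with a `SchemaValidationError`:

```python
from bigchaindb.common.exceptions import SchemaValidationError
from bigchaindb.common.schema import validate_transaction_schema

try:
    validate_transaction_schema({'operation': 'CREATE'})  # missing fields
except SchemaValidationError as exc:
    print('rejected:', exc)
```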

View File

@ -1,45 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
---
"$schema": "http://json-schema.org/draft-04/schema#"
type: object
title: Chain Migration Election Schema - Propose a halt in block production to allow for a version change
required:
- operation
- asset
- outputs
properties:
operation:
type: string
value: "CHAIN_MIGRATION_ELECTION"
asset:
additionalProperties: false
properties:
data:
additionalProperties: false
properties:
seed:
type: string
required:
- data
outputs:
type: array
items:
"$ref": "#/definitions/output"
definitions:
output:
type: object
properties:
condition:
type: object
required:
- uri
properties:
uri:
type: string
pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
(fpt=ed25519-sha-256(&)?|cost=[0-9]+(&)?|\
subtypes=ed25519-sha-256(&)?){2,3}$"
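For illustration, the `operation` and `asset` portion of a transaction shaped to satisfy this schema might look like the following (the seed value is a placeholder):

```python
# Placeholder fragment matching the chain-migration election schema above.
chain_migration_fragment = {
    'operation': 'CHAIN_MIGRATION_ELECTION',
    'asset': {'data': {'seed': 'some-random-seed'}},
    'outputs': [],  # one output per eligible voter (omitted here)
}
```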
View File
@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
---
"$schema": "http://json-schema.org/draft-04/schema#"
type: object
View File
@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
---
"$schema": "http://json-schema.org/draft-04/schema#"
type: object
View File
@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
---
"$schema": "http://json-schema.org/draft-04/schema#"
type: object
View File
@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
---
"$schema": "http://json-schema.org/draft-04/schema#"
type: object
View File
@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
---
"$schema": "http://json-schema.org/draft-04/schema#"
type: object
View File
@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
---
"$schema": "http://json-schema.org/draft-04/schema#"
type: object
@ -63,9 +58,6 @@ definitions:
enum:
- CREATE
- TRANSFER
- VALIDATOR_ELECTION
- CHAIN_MIGRATION_ELECTION
- VOTE
asset:
type: object
additionalProperties: false
View File
@ -1,68 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
---
"$schema": "http://json-schema.org/draft-04/schema#"
type: object
title: Validator Election Schema - Propose a change to validator set
required:
- operation
- asset
- outputs
properties:
operation:
type: string
value: "VALIDATOR_ELECTION"
asset:
additionalProperties: false
properties:
data:
additionalProperties: false
properties:
node_id:
type: string
seed:
type: string
public_key:
type: object
additionalProperties: false
required:
- value
- type
properties:
value:
type: string
type:
type: string
enum:
- ed25519-base16
- ed25519-base32
- ed25519-base64
power:
"$ref": "#/definitions/positiveInteger"
required:
- node_id
- public_key
- power
required:
- data
outputs:
type: array
items:
"$ref": "#/definitions/output"
definitions:
output:
type: object
properties:
condition:
type: object
required:
- uri
properties:
uri:
type: string
pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
(fpt=ed25519-sha-256(&)?|cost=[0-9]+(&)?|\
subtypes=ed25519-sha-256(&)?){2,3}$"
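For illustration, an asset payload shaped to satisfy this schema (all values are placeholders):

```python
# Placeholder asset payload for the validator-election schema above.
validator_election_asset = {
    'data': {
        'node_id': 'node-id-placeholder',
        'public_key': {
            'value': 'base64-encoded-key-placeholder',
            'type': 'ed25519-base64',
        },
        'power': 10,            # proposed voting power
        'seed': 'optional-seed',
    },
}
```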
View File
@ -1,34 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
---
"$schema": "http://json-schema.org/draft-04/schema#"
type: object
title: Vote Schema - Vote on an election
required:
- operation
- outputs
properties:
operation:
type: string
value: "VOTE"
outputs:
type: array
items:
"$ref": "#/definitions/output"
definitions:
output:
type: object
properties:
condition:
type: object
required:
- uri
properties:
uri:
type: string
pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
(fpt=ed25519-sha-256(&)?|cost=[0-9]+(&)?|\
subtypes=ed25519-sha-256(&)?){2,3}$"
View File
@ -0,0 +1,44 @@
---
"$schema": "http://json-schema.org/draft-04/schema#"
id: "http://www.bigchaindb.com/schema/vote.json"
type: object
additionalProperties: false
title: Vote Schema
required:
- node_pubkey
- signature
- vote
properties:
node_pubkey:
type: "string"
pattern: "[1-9a-zA-Z^OIl]{43,44}"
signature:
type: "string"
pattern: "[1-9a-zA-Z^OIl]{86,88}"
vote:
type: "object"
additionalProperties: false
required:
- invalid_reason
- is_block_valid
- previous_block
- voting_for_block
- timestamp
properties:
previous_block:
"$ref": "#/definitions/sha3_hexdigest"
voting_for_block:
"$ref": "#/definitions/sha3_hexdigest"
is_block_valid:
type: "boolean"
invalid_reason:
anyOf:
- type: "string"
- type: "null"
timestamp:
type: "string"
pattern: "[0-9]{10}"
definitions:
sha3_hexdigest:
pattern: "[0-9a-f]{64}"
type: string
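For illustration, a document shaped to satisfy this schema (placeholder values, not a real signature or real block hashes):

```python
# Placeholder vote document matching the schema above.
vote = {
    'node_pubkey': 'B' * 44,           # base58-style public key
    'signature': 'S' * 86,             # base58-style signature
    'vote': {
        'voting_for_block': 'a' * 64,  # sha3-256 hexdigest
        'previous_block': 'b' * 64,    # sha3-256 hexdigest
        'is_block_valid': True,
        'invalid_reason': None,
        'timestamp': '1500000000',     # 10-digit unix timestamp
    },
}
```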
View File
@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Transaction related models to parse and construct transaction
payloads.
@ -13,26 +8,20 @@ Attributes:
"""
from collections import namedtuple
from copy import deepcopy
from functools import reduce, lru_cache
import rapidjson
from functools import reduce
import base58
from cryptoconditions import Fulfillment, ThresholdSha256, Ed25519Sha256
from cryptoconditions.exceptions import (
ParsingError, ASN1DecodeError, ASN1EncodeError, UnsupportedTypeError)
try:
from hashlib import sha3_256
except ImportError:
from sha3 import sha3_256
from sha3 import sha3_256
from bigchaindb.common.crypto import PrivateKey, hash_data
from bigchaindb.common.exceptions import (KeypairMismatchException,
InputDoesNotExist, DoubleSpend,
InvalidHash, InvalidSignature,
AmountError, AssetIdMismatch,
ThresholdTooDeep)
from bigchaindb.common.utils import serialize
from .memoize import memoize_from_dict, memoize_to_dict
UnspentOutput = namedtuple(
@ -78,7 +67,7 @@ class Input(object):
if fulfills is not None and not isinstance(fulfills, TransactionLink):
raise TypeError('`fulfills` must be a TransactionLink instance')
if not isinstance(owners_before, list):
raise TypeError('`owners_before` must be a list instance')
raise TypeError('`owners_after` must be a list instance')
self.fulfillment = fulfillment
self.fulfills = fulfills
@ -88,11 +77,6 @@ class Input(object):
# TODO: If `other !== Fulfillment` return `False`
return self.to_dict() == other.to_dict()
# NOTE: This function is used to provide a unique key for a given
# Input to supplement memoization
def __hash__(self):
return hash((self.fulfillment, self.fulfills))
def to_dict(self):
"""Transforms the object to a Python dictionary.
@ -105,7 +89,7 @@ class Input(object):
"""
try:
fulfillment = self.fulfillment.serialize_uri()
except (TypeError, AttributeError, ASN1EncodeError, ASN1DecodeError):
except (TypeError, AttributeError, ASN1EncodeError):
fulfillment = _fulfillment_to_details(self.fulfillment)
try:
@ -172,7 +156,7 @@ def _fulfillment_to_details(fulfillment):
if fulfillment.type_name == 'ed25519-sha-256':
return {
'type': 'ed25519-sha-256',
'public_key': base58.b58encode(fulfillment.public_key).decode(),
'public_key': base58.b58encode(fulfillment.public_key),
}
if fulfillment.type_name == 'threshold-sha-256':
@ -511,7 +495,7 @@ class Transaction(object):
VERSION = '2.0'
def __init__(self, operation, asset, inputs=None, outputs=None,
metadata=None, version=None, hash_id=None, tx_dict=None):
metadata=None, version=None, hash_id=None):
"""The constructor allows to create a customizable Transaction.
Note:
@ -531,7 +515,7 @@ class Transaction(object):
version (string): Defines the version number of a Transaction.
hash_id (string): Hash id of the transaction.
"""
if operation not in self.ALLOWED_OPERATIONS:
if operation not in Transaction.ALLOWED_OPERATIONS:
allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS)
raise ValueError('`operation` must be one of {}'
.format(allowed_ops))
@ -539,14 +523,14 @@ class Transaction(object):
# Asset payloads for 'CREATE' operations must be None or
# dicts holding a `data` property. Asset payloads for 'TRANSFER'
# operations must be dicts holding an `id` property.
if (operation == self.CREATE and
if (operation == Transaction.CREATE and
asset is not None and not (isinstance(asset, dict) and 'data' in asset)):
raise TypeError(('`asset` must be None or a dict holding a `data` '
" property instance for '{}' Transactions".format(operation)))
elif (operation == self.TRANSFER and
elif (operation == Transaction.TRANSFER and
not (isinstance(asset, dict) and 'id' in asset)):
raise TypeError(('`asset` must be a dict holding an `id` property '
'for \'TRANSFER\' Transactions'))
"for 'TRANSFER' Transactions".format(operation)))
if outputs and not isinstance(outputs, list):
raise TypeError('`outputs` must be a list instance or None')
@ -564,7 +548,6 @@ class Transaction(object):
self.outputs = outputs or []
self.metadata = metadata
self._id = hash_id
self.tx_dict = tx_dict
@property
def unspent_outputs(self):
@ -572,9 +555,9 @@ class Transaction(object):
structure containing relevant information for storing them in
a UTXO set, and performing validation.
"""
if self.operation == self.CREATE:
if self.operation == Transaction.CREATE:
self._asset_id = self._id
elif self.operation == self.TRANSFER:
elif self.operation == Transaction.TRANSFER:
self._asset_id = self.asset['id']
return (UnspentOutput(
transaction_id=self._id,
@ -586,7 +569,7 @@ class Transaction(object):
@property
def spent_outputs(self):
"""Tuple of :obj:`dict`: Inputs of this transaction. Each input
"""tuple of :obj:`dict`: Inputs of this transaction. Each input
is represented as a dictionary containing a transaction id and
output index.
"""
@ -602,38 +585,6 @@ class Transaction(object):
def _hash(self):
self._id = hash_data(self.serialized)
@classmethod
def validate_create(cls, tx_signers, recipients, asset, metadata):
if not isinstance(tx_signers, list):
raise TypeError('`tx_signers` must be a list instance')
if not isinstance(recipients, list):
raise TypeError('`recipients` must be a list instance')
if len(tx_signers) == 0:
raise ValueError('`tx_signers` list cannot be empty')
if len(recipients) == 0:
raise ValueError('`recipients` list cannot be empty')
if not (asset is None or isinstance(asset, dict)):
raise TypeError('`asset` must be a dict or None')
if not (metadata is None or isinstance(metadata, dict)):
raise TypeError('`metadata` must be a dict or None')
inputs = []
outputs = []
# generate_outputs
for recipient in recipients:
if not isinstance(recipient, tuple) or len(recipient) != 2:
raise ValueError(('Each `recipient` in the list must be a'
' tuple of `([<list of public keys>],'
' <amount>)`'))
pub_keys, amount = recipient
outputs.append(Output.generate(pub_keys, amount))
# generate inputs
inputs.append(Input.generate(tx_signers))
return (inputs, outputs)
@classmethod
def create(cls, tx_signers, recipients, metadata=None, asset=None):
"""A simple way to generate a `CREATE` transaction.
@ -662,22 +613,21 @@ class Transaction(object):
Returns:
:class:`~bigchaindb.common.transaction.Transaction`
"""
(inputs, outputs) = cls.validate_create(tx_signers, recipients, asset, metadata)
return cls(cls.CREATE, {'data': asset}, inputs, outputs, metadata)
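A usage sketch for `create` (key generation via `bigchaindb.common.crypto`; the asset and metadata values are illustrative):

```python
# Hypothetical CREATE-transaction sketch.
from bigchaindb.common.crypto import generate_key_pair
from bigchaindb.common.transaction import Transaction

alice_priv, alice_pub = generate_key_pair()
tx = Transaction.create([alice_pub], [([alice_pub], 1)],
                        asset={'serial': '123'}, metadata={'note': 'hi'})
tx = tx.sign([alice_priv])   # fulfil the input with alice's private key
assert tx.inputs_valid()
```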
@classmethod
def validate_transfer(cls, inputs, recipients, asset_id, metadata):
if not isinstance(inputs, list):
raise TypeError('`inputs` must be a list instance')
if len(inputs) == 0:
raise ValueError('`inputs` must contain at least one item')
if not isinstance(tx_signers, list):
raise TypeError('`tx_signers` must be a list instance')
if not isinstance(recipients, list):
raise TypeError('`recipients` must be a list instance')
if len(tx_signers) == 0:
raise ValueError('`tx_signers` list cannot be empty')
if len(recipients) == 0:
raise ValueError('`recipients` list cannot be empty')
if not (asset is None or isinstance(asset, dict)):
raise TypeError('`asset` must be a dict or None')
inputs = []
outputs = []
# generate_outputs
for recipient in recipients:
if not isinstance(recipient, tuple) or len(recipient) != 2:
raise ValueError(('Each `recipient` in the list must be a'
@ -686,10 +636,10 @@ class Transaction(object):
pub_keys, amount = recipient
outputs.append(Output.generate(pub_keys, amount))
if not isinstance(asset_id, str):
raise TypeError('`asset_id` must be a string')
# generate inputs
inputs.append(Input.generate(tx_signers))
return (deepcopy(inputs), outputs)
return cls(cls.CREATE, {'data': asset}, inputs, outputs, metadata)
@classmethod
def transfer(cls, inputs, recipients, asset_id, metadata=None):
@ -730,7 +680,28 @@ class Transaction(object):
Returns:
:class:`~bigchaindb.common.transaction.Transaction`
"""
(inputs, outputs) = cls.validate_transfer(inputs, recipients, asset_id, metadata)
if not isinstance(inputs, list):
raise TypeError('`inputs` must be a list instance')
if len(inputs) == 0:
raise ValueError('`inputs` must contain at least one item')
if not isinstance(recipients, list):
raise TypeError('`recipients` must be a list instance')
if len(recipients) == 0:
raise ValueError('`recipients` list cannot be empty')
outputs = []
for recipient in recipients:
if not isinstance(recipient, tuple) or len(recipient) != 2:
raise ValueError(('Each `recipient` in the list must be a'
' tuple of `([<list of public keys>],'
' <amount>)`'))
pub_keys, amount = recipient
outputs.append(Output.generate(pub_keys, amount))
if not isinstance(asset_id, str):
raise TypeError('`asset_id` must be a string')
inputs = deepcopy(inputs)
return cls(cls.TRANSFER, {'id': asset_id}, inputs, outputs, metadata)
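A matching sketch for `transfer`, continuing the `create` example above (bob's keys are again illustrative):

```python
# Hypothetical TRANSFER-transaction sketch, spending the CREATE tx above.
bob_priv, bob_pub = generate_key_pair()
transfer = Transaction.transfer(tx.to_inputs(),   # spend tx's outputs
                                [([bob_pub], 1)],
                                asset_id=tx.id)
transfer = transfer.sign([alice_priv])            # alice owns the inputs
```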
def __eq__(self, other):
@ -869,9 +840,8 @@ class Transaction(object):
return cls._sign_threshold_signature_fulfillment(input_, message,
key_pairs)
else:
raise ValueError(
'Fulfillment couldn\'t be matched to '
'Cryptocondition fulfillment type.')
raise ValueError("Fulfillment couldn't be matched to "
'Cryptocondition fulfillment type.')
@classmethod
def _sign_simple_signature_fulfillment(cls, input_, message, key_pairs):
@ -969,14 +939,14 @@ class Transaction(object):
Returns:
bool: If all Inputs are valid.
"""
if self.operation == self.CREATE:
if self.operation == Transaction.CREATE:
# NOTE: Since in the case of a `CREATE`-transaction we do not have
# to check for outputs, we're just submitting dummy
# values to the actual method. This simplifies its logic
# greatly, as we do not have to check against `None` values.
return self._inputs_valid(['dummyvalue'
for _ in self.inputs])
elif self.operation == self.TRANSFER:
elif self.operation == Transaction.TRANSFER:
return self._inputs_valid([output.fulfillment.condition_uri
for output in outputs])
else:
@ -1003,7 +973,7 @@ class Transaction(object):
raise ValueError('Inputs and '
'output_condition_uris must have the same count')
tx_dict = self.tx_dict if self.tx_dict else self.to_dict()
tx_dict = self.to_dict()
tx_dict = Transaction._remove_signatures(tx_dict)
tx_dict['id'] = None
tx_serialized = Transaction._to_str(tx_dict)
@ -1016,8 +986,8 @@ class Transaction(object):
return all(validate(i, cond)
for i, cond in enumerate(output_condition_uris))
@lru_cache(maxsize=16384)
def _input_valid(self, input_, operation, message, output_condition_uri=None):
@staticmethod
def _input_valid(input_, operation, message, output_condition_uri=None):
"""Validates a single Input against a single Output.
Note:
@ -1042,7 +1012,7 @@ class Transaction(object):
ParsingError, ASN1DecodeError, ASN1EncodeError):
return False
if operation == self.CREATE:
if operation == Transaction.CREATE:
# NOTE: In the case of a `CREATE` transaction, the
# output is always valid.
output_valid = True
@ -1062,11 +1032,6 @@ class Transaction(object):
ffill_valid = parsed_ffill.validate(message=message.digest())
return output_valid and ffill_valid
# This function is required by `lru_cache` to create a key for memoization
def __hash__(self):
return hash(self.id)
@memoize_to_dict
def to_dict(self):
"""Transforms the object to a Python dictionary.
@ -1126,8 +1091,8 @@ class Transaction(object):
tx = Transaction._remove_signatures(self.to_dict())
return Transaction._to_str(tx)
@classmethod
def get_asset_id(cls, transactions):
@staticmethod
def get_asset_id(transactions):
"""Get the asset id from a list of :class:`~.Transactions`.
This is useful when we want to check if the multiple inputs of a
@ -1151,7 +1116,7 @@ class Transaction(object):
transactions = [transactions]
# create a set of the transactions' asset ids
asset_ids = {tx.id if tx.operation == tx.CREATE
asset_ids = {tx.id if tx.operation == Transaction.CREATE
else tx.asset['id']
for tx in transactions}
@ -1169,9 +1134,7 @@ class Transaction(object):
tx_body (dict): The Transaction to be transformed.
"""
# NOTE: Remove reference to avoid side effects
# tx_body = deepcopy(tx_body)
tx_body = rapidjson.loads(rapidjson.dumps(tx_body))
tx_body = deepcopy(tx_body)
try:
proposed_tx_id = tx_body['id']
except KeyError:
@ -1188,8 +1151,7 @@ class Transaction(object):
raise InvalidHash(err_msg.format(proposed_tx_id))
@classmethod
@memoize_from_dict
def from_dict(cls, tx, skip_schema_validation=True):
def from_dict(cls, tx):
"""Transforms a Python dictionary to a Transaction object.
Args:
@ -1198,133 +1160,7 @@ class Transaction(object):
Returns:
:class:`~bigchaindb.common.transaction.Transaction`
"""
operation = tx.get('operation', Transaction.CREATE) if isinstance(tx, dict) else Transaction.CREATE
cls = Transaction.resolve_class(operation)
if not skip_schema_validation:
cls.validate_id(tx)
cls.validate_schema(tx)
inputs = [Input.from_dict(input_) for input_ in tx['inputs']]
outputs = [Output.from_dict(output) for output in tx['outputs']]
return cls(tx['operation'], tx['asset'], inputs, outputs,
tx['metadata'], tx['version'], hash_id=tx['id'], tx_dict=tx)
@classmethod
def from_db(cls, bigchain, tx_dict_list):
"""Helper method that reconstructs a transaction dict that was returned
from the database. It checks what asset_id to retrieve, retrieves the
asset from the asset table and reconstructs the transaction.
Args:
bigchain (:class:`~bigchaindb.tendermint.BigchainDB`): An instance
of BigchainDB used to perform database queries.
tx_dict_list (:list:`dict` or :obj:`dict`): The transaction dict or
list of transaction dict as returned from the database.
Returns:
:class:`~Transaction`
"""
return_list = True
if isinstance(tx_dict_list, dict):
tx_dict_list = [tx_dict_list]
return_list = False
tx_map = {}
tx_ids = []
for tx in tx_dict_list:
tx.update({'metadata': None})
tx_map[tx['id']] = tx
tx_ids.append(tx['id'])
assets = list(bigchain.get_assets(tx_ids))
for asset in assets:
if asset is not None:
tx = tx_map[asset['id']]
del asset['id']
tx['asset'] = asset
tx_ids = list(tx_map.keys())
metadata_list = list(bigchain.get_metadata(tx_ids))
for metadata in metadata_list:
tx = tx_map[metadata['id']]
tx.update({'metadata': metadata.get('metadata')})
if return_list:
tx_list = []
for tx_id, tx in tx_map.items():
tx_list.append(cls.from_dict(tx))
return tx_list
else:
tx = list(tx_map.values())[0]
return cls.from_dict(tx)
type_registry = {}
@staticmethod
def register_type(tx_type, tx_class):
Transaction.type_registry[tx_type] = tx_class
@staticmethod
def resolve_class(operation):
"""For the given `tx` based on the `operation` key return its implementation class"""
create_txn_class = Transaction.type_registry.get(Transaction.CREATE)
return Transaction.type_registry.get(operation, create_txn_class)
@classmethod
def validate_schema(cls, tx):
pass
def validate_transfer_inputs(self, bigchain, current_transactions=[]):
# store the inputs so that we can check if the asset ids match
input_txs = []
input_conditions = []
for input_ in self.inputs:
input_txid = input_.fulfills.txid
input_tx = bigchain.get_transaction(input_txid)
if input_tx is None:
for ctxn in current_transactions:
if ctxn.id == input_txid:
input_tx = ctxn
if input_tx is None:
raise InputDoesNotExist("input `{}` doesn't exist"
.format(input_txid))
spent = bigchain.get_spent(input_txid, input_.fulfills.output,
current_transactions)
if spent:
raise DoubleSpend('input `{}` was already spent'
.format(input_txid))
output = input_tx.outputs[input_.fulfills.output]
input_conditions.append(output)
input_txs.append(input_tx)
# Validate that all inputs are distinct
links = [i.fulfills.to_uri() for i in self.inputs]
if len(links) != len(set(links)):
raise DoubleSpend('tx "{}" spends inputs twice'.format(self.id))
# validate asset id
asset_id = self.get_asset_id(input_txs)
if asset_id != self.asset['id']:
raise AssetIdMismatch(('The asset id of the input does not'
' match the asset id of the'
' transaction'))
input_amount = sum([input_condition.amount for input_condition in input_conditions])
output_amount = sum([output_condition.amount for output_condition in self.outputs])
if output_amount != input_amount:
raise AmountError(('The amount used in the inputs `{}`'
' needs to be same as the amount used'
' in the outputs `{}`')
.format(input_amount, output_amount))
if not self.inputs_valid(input_conditions):
raise InvalidSignature('Transaction signature is invalid.')
return True
tx['metadata'], tx['version'], hash_id=tx['id'])
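A short round-trip sketch for the (de)serialization methods above, continuing the earlier examples:

```python
# to_dict()/from_dict() round trip; transaction ids are preserved.
tx_dict = tx.to_dict()                 # plain dict, ready for JSON encoding
same_tx = Transaction.from_dict(tx_dict)
assert same_tx.id == tx.id
```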
View File
@ -1,8 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
BROADCAST_TX_COMMIT = 'broadcast_tx_commit'
BROADCAST_TX_ASYNC = 'broadcast_tx_async'
BROADCAST_TX_SYNC = 'broadcast_tx_sync'
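These constants name the Tendermint broadcast modes. The sketch below shows how a transaction could be submitted through the Tendermint JSON-RPC endpoint using one of them; the endpoint, port, and base64 encoding follow Tendermint/BigchainDB defaults but are assumptions here:

```python
# Hypothetical submission of a transaction via Tendermint RPC.
import base64
import json

import requests

tx = {'example': 'tx'}  # placeholder transaction dict
payload = {
    'method': 'broadcast_tx_commit',  # or broadcast_tx_sync / broadcast_tx_async
    'jsonrpc': '2.0',
    'params': [base64.b64encode(json.dumps(tx).encode()).decode()],
    'id': '1',
}
requests.post('http://localhost:26657', json=payload)
```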
View File
@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import time
import re
import rapidjson
@ -74,23 +69,13 @@ def validate_txn_obj(obj_name, obj, key, validation_fun):
"""
backend = bigchaindb.config['database']['backend']
if backend == 'localmongodb':
if backend == 'mongodb':
data = obj.get(key, {})
if isinstance(data, dict):
validate_all_keys_in_obj(obj_name, data, validation_fun)
elif isinstance(data, list):
validate_all_items_in_list(obj_name, data, validation_fun)
validate_all_keys(obj_name, data, validation_fun)
def validate_all_items_in_list(obj_name, data, validation_fun):
for item in data:
if isinstance(item, dict):
validate_all_keys_in_obj(obj_name, item, validation_fun)
elif isinstance(item, list):
validate_all_items_in_list(obj_name, item, validation_fun)
def validate_all_keys_in_obj(obj_name, obj, validation_fun):
def validate_all_keys(obj_name, obj, validation_fun):
"""Validate all (nested) keys in `obj` by using `validation_fun`.
Args:
@ -108,12 +93,10 @@ def validate_all_keys_in_obj(obj_name, obj, validation_fun):
for key, value in obj.items():
validation_fun(obj_name, key)
if isinstance(value, dict):
validate_all_keys_in_obj(obj_name, value, validation_fun)
elif isinstance(value, list):
validate_all_items_in_list(obj_name, value, validation_fun)
validate_all_keys(obj_name, value, validation_fun)
def validate_all_values_for_key_in_obj(obj, key, validation_fun):
def validate_all_values_for_key(obj, key, validation_fun):
"""Validate value for all (nested) occurrence of `key` in `obj`
using `validation_fun`.
@ -130,17 +113,7 @@ def validate_all_values_for_key_in_obj(obj, key, validation_fun):
if vkey == key:
validation_fun(value)
elif isinstance(value, dict):
validate_all_values_for_key_in_obj(value, key, validation_fun)
elif isinstance(value, list):
validate_all_values_for_key_in_list(value, key, validation_fun)
def validate_all_values_for_key_in_list(input_list, key, validation_fun):
for item in input_list:
if isinstance(item, dict):
validate_all_values_for_key_in_obj(item, key, validation_fun)
elif isinstance(item, list):
validate_all_values_for_key_in_list(item, key, validation_fun)
validate_all_values_for_key(value, key, validation_fun)
def validate_key(obj_name, key):
View File
@ -1,8 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Utils for reading and setting configuration settings.
The value of each BigchainDB Server configuration setting is
@ -20,7 +15,7 @@ import os
import copy
import json
import logging
import collections.abc
import collections
from functools import lru_cache
from pkg_resources import iter_entry_points, ResolutionError
@ -29,7 +24,7 @@ from bigchaindb.common import exceptions
import bigchaindb
from bigchaindb.validation import BaseValidationRules
from bigchaindb.consensus import BaseConsensusRules
# TODO: move this to a proper configuration file for logging
logging.getLogger('requests').setLevel(logging.WARNING)
@ -52,7 +47,7 @@ def map_leafs(func, mapping):
path = []
for key, val in mapping.items():
if isinstance(val, collections.abc.Mapping):
if isinstance(val, collections.Mapping):
_inner(val, path + [key])
else:
mapping[key] = func(val, path=path+[key])
@ -80,7 +75,7 @@ def update(d, u):
mapping: An updated version of d (updated by u).
"""
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
if isinstance(v, collections.Mapping):
r = update(d.get(k, {}), v)
d[k] = r
else:
@ -113,7 +108,7 @@ def file_config(filename=None):
'Failed to parse the JSON configuration from `{}`, {}'.format(filename, err)
)
logger.info('Configuration loaded from `{}`'.format(filename))
logger.info('Configuration loaded from `{}`'.format(filename))
return config
@ -133,7 +128,6 @@ def env_config(config):
def load_from_env(value, path):
var_name = CONFIG_SEP.join([CONFIG_PREFIX] + list(map(lambda s: s.upper(), path)))
return os.environ.get(var_name, value)
return map_leafs(load_from_env, config)
@ -253,44 +247,46 @@ def autoconfigure(filename=None, config=None, force=False):
# override configuration with env variables
newconfig = env_config(newconfig)
if config:
newconfig = update(newconfig, config)
set_config(newconfig) # sets bigchaindb.config
@lru_cache()
def load_validation_plugin(name=None):
"""Find and load the chosen validation plugin.
def load_consensus_plugin(name=None):
"""Find and load the chosen consensus plugin.
Args:
name (string): the name of the entry_point, as advertised in the
setup.py of the providing package.
Returns:
an uninstantiated subclass of ``bigchaindb.validation.AbstractValidationRules``
an uninstantiated subclass of ``bigchaindb.consensus.AbstractConsensusRules``
"""
if not name:
return BaseValidationRules
return BaseConsensusRules
# TODO: This will return the first plugin with group `bigchaindb.validation`
# TODO: This will return the first plugin with group `bigchaindb.consensus`
# and name `name` in the active WorkingSet.
# We should probably support Requirements specs in the config, e.g.
# validation_plugin: 'my-plugin-package==0.0.1;default'
# consensus_plugin: 'my-plugin-package==0.0.1;default'
plugin = None
for entry_point in iter_entry_points('bigchaindb.validation', name):
for entry_point in iter_entry_points('bigchaindb.consensus', name):
plugin = entry_point.load()
# No matching entry_point found
if not plugin:
raise ResolutionError(
'No plugin found in group `bigchaindb.validation` with name `{}`'.
'No plugin found in group `bigchaindb.consensus` with name `{}`'.
format(name))
# Is this strictness desirable?
# It will probably reduce developer headaches in the wild.
if not issubclass(plugin, (BaseValidationRules,)):
if not issubclass(plugin, (BaseConsensusRules,)):
raise TypeError('object of type "{}" does not implement `bigchaindb.'
'validation.BaseValidationRules`'.format(type(plugin)))
'consensus.BaseConsensusRules`'.format(type(plugin)))
return plugin
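For illustration, a third-party package could advertise such a plugin through an entry point in its `setup.py` (package, module, and class names are made up):

```python
# setup.py of a hypothetical consensus-plugin package.
from setuptools import setup

setup(
    name='my-bigchaindb-plugin',
    version='0.0.1',
    py_modules=['my_plugin'],
    entry_points={
        'bigchaindb.consensus': [
            'my-plugin = my_plugin:MyConsensusRules',
        ],
    },
)
```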
bigchaindb/consensus.py
View File
@ -0,0 +1,25 @@
from bigchaindb.voting import Voting
class BaseConsensusRules():
"""Base consensus rules for Bigchain.
A consensus plugin must expose a class inheriting from this one via an entry_point.
All methods listed below must be implemented.
"""
voting = Voting
@staticmethod
def validate_transaction(bigchain, transaction):
"""See :meth:`bigchaindb.models.Transaction.validate`
for documentation.
"""
return transaction.validate(bigchain)
@staticmethod
def validate_block(bigchain, block):
"""See :meth:`bigchaindb.models.Block.validate` for documentation."""
return block.validate(bigchain)
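A sketch of a custom rules class built on the base class above (the extra metadata check is an invented example):

```python
# Hypothetical consensus plugin deriving from BaseConsensusRules.
from bigchaindb.consensus import BaseConsensusRules


class StrictConsensusRules(BaseConsensusRules):

    @staticmethod
    def validate_transaction(bigchain, transaction):
        # Run the default validation first, then an extra rule.
        transaction = transaction.validate(bigchain)
        if transaction.metadata and 'forbidden' in transaction.metadata:
            raise ValueError('transaction carries forbidden metadata')
        return transaction
```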
View File
@ -1,271 +1,679 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import random
from time import time
"""This module contains all the goodness to integrate BigchainDB
with Tendermint.
"""
import logging
import sys
from bigchaindb import exceptions as core_exceptions
from bigchaindb.common import crypto, exceptions
from bigchaindb.common.utils import gen_timestamp, serialize
from abci.application import BaseApplication
from abci import CodeTypeOk
import bigchaindb
from bigchaindb import BigchainDB
from bigchaindb.elections.election import Election
from bigchaindb.version import __tm_supported_versions__
from bigchaindb.utils import tendermint_version_is_compatible
from bigchaindb.tendermint_utils import (decode_transaction,
calculate_hash)
from bigchaindb.lib import Block
import bigchaindb.upsert_validator.validator_utils as vutils
from bigchaindb.events import EventTypes, Event
from bigchaindb import backend, config_utils, fastquery
from bigchaindb.consensus import BaseConsensusRules
from bigchaindb.models import Block, Transaction
CodeTypeError = 1
logger = logging.getLogger(__name__)
class Bigchain(object):
"""Bigchain API
class App(BaseApplication):
"""Bridge between BigchainDB and Tendermint.
The role of this class is to expose the BigchainDB
transaction logic to Tendermint Core.
Create, read, sign, write transactions to the database
"""
def __init__(self, abci, bigchaindb=None, events_queue=None,):
super().__init__(abci)
self.events_queue = events_queue
self.bigchaindb = bigchaindb or BigchainDB()
self.block_txn_ids = []
self.block_txn_hash = ''
self.block_transactions = []
self.validators = None
self.new_height = None
self.chain = self.bigchaindb.get_latest_abci_chain()
BLOCK_INVALID = 'invalid'
"""return if a block has been voted invalid"""
def log_abci_migration_error(self, chain_id, validators):
logger.error('An ABCI chain migration is in process. '
'Download the new ABCI client and configure it with '
f'chain_id={chain_id} and validators={validators}.')
BLOCK_VALID = TX_VALID = 'valid'
"""return if a block is valid, or tx is in valid block"""
def abort_if_abci_chain_is_not_synced(self):
if self.chain is None or self.chain['is_synced']:
return
BLOCK_UNDECIDED = TX_UNDECIDED = 'undecided'
"""return if block is undecided, or tx is in undecided block"""
validators = self.bigchaindb.get_validators()
self.log_abci_migration_error(self.chain['chain_id'], validators)
sys.exit(1)
TX_IN_BACKLOG = 'backlog'
"""return if transaction is in backlog"""
def init_chain(self, genesis):
"""Initialize chain upon genesis or a migration"""
def __init__(self, public_key=None, private_key=None, keyring=[], connection=None, backlog_reassign_delay=None):
"""Initialize the Bigchain instance
app_hash = ''
height = 0
known_chain = self.bigchaindb.get_latest_abci_chain()
if known_chain is not None:
chain_id = known_chain['chain_id']
if known_chain['is_synced']:
msg = (f'Got invalid InitChain ABCI request ({genesis}) - '
f'the chain {chain_id} is already synced.')
logger.error(msg)
sys.exit(1)
if chain_id != genesis.chain_id:
validators = self.bigchaindb.get_validators()
self.log_abci_migration_error(chain_id, validators)
sys.exit(1)
# set migration values for app hash and height
block = self.bigchaindb.get_latest_block()
app_hash = '' if block is None else block['app_hash']
height = 0 if block is None else block['height'] + 1
known_validators = self.bigchaindb.get_validators()
validator_set = [vutils.decode_validator(v)
for v in genesis.validators]
if known_validators and known_validators != validator_set:
self.log_abci_migration_error(known_chain['chain_id'],
known_validators)
sys.exit(1)
block = Block(app_hash=app_hash, height=height, transactions=[])
self.bigchaindb.store_block(block._asdict())
self.bigchaindb.store_validator_set(height + 1, validator_set)
abci_chain_height = 0 if known_chain is None else known_chain['height']
self.bigchaindb.store_abci_chain(abci_chain_height,
genesis.chain_id, True)
self.chain = {'height': abci_chain_height, 'is_synced': True,
'chain_id': genesis.chain_id}
return self.abci.ResponseInitChain()
def info(self, request):
"""Return height of the latest committed block."""
self.abort_if_abci_chain_is_not_synced()
# Check if BigchainDB supports the Tendermint version
if not (hasattr(request, 'version') and tendermint_version_is_compatible(request.version)):
logger.error(f'Unsupported Tendermint version: {getattr(request, "version", "no version")}.'
f' Currently, BigchainDB only supports {__tm_supported_versions__}. Exiting!')
sys.exit(1)
logger.info(f"Tendermint version: {request.version}")
r = self.abci.ResponseInfo()
block = self.bigchaindb.get_latest_block()
if block:
chain_shift = 0 if self.chain is None else self.chain['height']
r.last_block_height = block['height'] - chain_shift
r.last_block_app_hash = block['app_hash'].encode('utf-8')
else:
r.last_block_height = 0
r.last_block_app_hash = b''
return r
def check_tx(self, raw_transaction):
"""Validate the transaction before entry into
the mempool.
A Bigchain instance has several configuration parameters (e.g. host).
If a parameter value is passed as an argument to the Bigchain
__init__ method, then that is the value it will have.
Otherwise, the parameter value will come from an environment variable.
If that environment variable isn't set, then the value
will come from the local configuration file. And if that variable
isn't in the local configuration file, then the parameter will have
its default value (defined in bigchaindb.__init__).
Args:
raw_transaction: a raw transaction (in bytes).
public_key (str): the base58 encoded public key for the ED25519 curve.
private_key (str): the base58 encoded private key for the ED25519 curve.
keyring (list[str]): list of base58 encoded public keys of the federation nodes.
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
"""
self.abort_if_abci_chain_is_not_synced()
config_utils.autoconfigure()
logger.debug('check_tx: %s', raw_transaction)
transaction = decode_transaction(raw_transaction)
if self.bigchaindb.is_valid_transaction(transaction):
logger.debug('check_tx: VALID')
return self.abci.ResponseCheckTx(code=CodeTypeOk)
self.me = public_key or bigchaindb.config['keypair']['public']
self.me_private = private_key or bigchaindb.config['keypair']['private']
self.nodes_except_me = keyring or bigchaindb.config['keyring']
if backlog_reassign_delay is None:
backlog_reassign_delay = bigchaindb.config['backlog_reassign_delay']
self.backlog_reassign_delay = backlog_reassign_delay
consensusPlugin = bigchaindb.config.get('consensus_plugin')
if consensusPlugin:
self.consensus = config_utils.load_consensus_plugin(consensusPlugin)
else:
logger.debug('check_tx: INVALID')
return self.abci.ResponseCheckTx(code=CodeTypeError)
self.consensus = BaseConsensusRules
def begin_block(self, req_begin_block):
"""Initialize list of transaction.
Args:
req_begin_block: block object which contains block header
and block hash.
"""
self.abort_if_abci_chain_is_not_synced()
self.connection = connection if connection else backend.connect(**bigchaindb.config['database'])
# if not self.me:
# raise exceptions.KeypairNotFoundException()
chain_shift = 0 if self.chain is None else self.chain['height']
logger.debug('BEGIN BLOCK, height:%s, num_txs:%s',
req_begin_block.header.height + chain_shift,
req_begin_block.header.num_txs)
federation = property(lambda self: set(self.nodes_except_me + [self.me]))
""" Set of federation member public keys """
self.block_txn_ids = []
self.block_transactions = []
return self.abci.ResponseBeginBlock()
def write_transaction(self, signed_transaction):
"""Write the transaction to bigchain.
def deliver_tx(self, raw_transaction):
"""Validate the transaction before mutating the state.
When first writing a transaction to the bigchain the transaction will be kept in a backlog until
it has been validated by the nodes of the federation.
Args:
raw_transaction: a raw transaction (in bytes).
signed_transaction (Transaction): transaction with the `signature` included.
Returns:
dict: database response
"""
signed_transaction = signed_transaction.to_dict()
self.abort_if_abci_chain_is_not_synced()
logger.debug('deliver_tx: %s', raw_transaction)
transaction = self.bigchaindb.is_valid_transaction(
decode_transaction(raw_transaction), self.block_transactions)
if not transaction:
logger.debug('deliver_tx: INVALID')
return self.abci.ResponseDeliverTx(code=CodeTypeError)
# we will assign this transaction to `one` node. This way we make sure that there are no duplicate
# transactions on the bigchain
if self.nodes_except_me:
assignee = random.choice(self.nodes_except_me)
else:
logger.debug('storing tx')
self.block_txn_ids.append(transaction.id)
self.block_transactions.append(transaction)
return self.abci.ResponseDeliverTx(code=CodeTypeOk)
# I am the only node
assignee = self.me
def end_block(self, request_end_block):
"""Calculate block hash using transaction ids and previous block
hash to be stored in the next block.
signed_transaction.update({'assignee': assignee})
signed_transaction.update({'assignment_timestamp': time()})
# write to the backlog
return backend.query.write_transaction(self.connection, signed_transaction)
def reassign_transaction(self, transaction):
"""Assign a transaction to a new node
Args:
height (int): new height of the chain.
transaction (dict): assigned transaction
Returns:
dict: database response or None if no reassignment is possible
"""
self.abort_if_abci_chain_is_not_synced()
other_nodes = tuple(
self.federation.difference([transaction['assignee']])
)
new_assignee = random.choice(other_nodes) if other_nodes else self.me
chain_shift = 0 if self.chain is None else self.chain['height']
return backend.query.update_transaction(
self.connection, transaction['id'],
{'assignee': new_assignee, 'assignment_timestamp': time()})
height = request_end_block.height + chain_shift
self.new_height = height
def delete_transaction(self, *transaction_id):
"""Delete a transaction from the backlog.
# store pre-commit state to recover in case there is a crash during
# `end_block` or `commit`
logger.debug(f'Updating pre-commit state: {self.new_height}')
pre_commit_state = dict(height=self.new_height,
transactions=self.block_txn_ids)
self.bigchaindb.store_pre_commit_state(pre_commit_state)
Args:
*transaction_id (str): the transaction(s) to delete
block_txn_hash = calculate_hash(self.block_txn_ids)
block = self.bigchaindb.get_latest_block()
Returns:
The database response.
"""
if self.block_txn_ids:
self.block_txn_hash = calculate_hash([block['app_hash'], block_txn_hash])
return backend.query.delete_transaction(self.connection, *transaction_id)
def get_stale_transactions(self):
"""Get a cursor of stale transactions.
Transactions are considered stale if they have been assigned a node, but are still in the
backlog after some amount of time specified in the configuration
"""
return backend.query.get_stale_transactions(self.connection, self.backlog_reassign_delay)
def validate_transaction(self, transaction):
"""Validate a transaction.
Args:
transaction (Transaction): transaction to validate.
Returns:
The transaction if the transaction is valid else it raises an
exception describing the reason why the transaction is invalid.
"""
return self.consensus.validate_transaction(self, transaction)
def is_new_transaction(self, txid, exclude_block_id=None):
"""Return True if the transaction does not exist in any
VALID or UNDECIDED block. Return False otherwise.
Args:
txid (str): Transaction ID
exclude_block_id (str): Exclude block from search
"""
block_statuses = self.get_blocks_status_containing_tx(txid)
block_statuses.pop(exclude_block_id, None)
for status in block_statuses.values():
if status != self.BLOCK_INVALID:
return False
return True
def get_block(self, block_id, include_status=False):
"""Get the block with the specified `block_id` (and optionally its status)
Returns the block corresponding to `block_id` or None if no match is
found.
Args:
block_id (str): transaction id of the transaction to get
include_status (bool): also return the status of the block
the return value is then a tuple: (block, status)
"""
# get block from database
block_dict = backend.query.get_block(self.connection, block_id)
# get the asset ids from the block
if block_dict:
asset_ids = Block.get_asset_ids(block_dict)
txn_ids = Block.get_txn_ids(block_dict)
# get the assets from the database
assets = self.get_assets(asset_ids)
# get the metadata from the database
metadata = self.get_metadata(txn_ids)
# add the assets to the block transactions
block_dict = Block.couple_assets(block_dict, assets)
# add the metadata to the block transactions
block_dict = Block.couple_metadata(block_dict, metadata)
status = None
if include_status:
if block_dict:
status = self.block_election_status(block_dict)
return block_dict, status
else:
self.block_txn_hash = block['app_hash']
return block_dict
validator_update = Election.process_block(self.bigchaindb,
self.new_height,
self.block_transactions)
def get_transaction(self, txid, include_status=False):
"""Get the transaction with the specified `txid` (and optionally its status)
return self.abci.ResponseEndBlock(validator_updates=validator_update)
This query begins by looking in the bigchain table for all blocks containing
a transaction with the specified `txid`. If one of those blocks is valid, it
returns the matching transaction from that block. Else if some of those
blocks are undecided, it returns a matching transaction from one of them. If
the transaction was found in invalid blocks only, or in no blocks, then this
query looks for a matching transaction in the backlog table, and if it finds
one there, it returns that.
def commit(self):
"""Store the new height and along with block hash."""
Args:
txid (str): transaction id of the transaction to get
include_status (bool): also return the status of the transaction
the return value is then a tuple: (tx, status)
self.abort_if_abci_chain_is_not_synced()
Returns:
A :class:`~.models.Transaction` instance if the transaction
was found in a valid block, an undecided block, or the backlog table,
otherwise ``None``.
If :attr:`include_status` is ``True``, also returns the
transaction's status if the transaction was found.
"""
data = self.block_txn_hash.encode('utf-8')
response, tx_status = None, None
# register a new block only when new transactions are received
if self.block_txn_ids:
self.bigchaindb.store_bulk_transactions(self.block_transactions)
blocks_validity_status = self.get_blocks_status_containing_tx(txid)
check_backlog = True
block = Block(app_hash=self.block_txn_hash,
height=self.new_height,
transactions=self.block_txn_ids)
# NOTE: storing the block should be the last operation during commit
# this effects crash recovery. Refer BEP#8 for details
self.bigchaindb.store_block(block._asdict())
if blocks_validity_status:
# Disregard invalid blocks, and return if there are no valid or undecided blocks
blocks_validity_status = {
_id: status for _id, status in blocks_validity_status.items()
if status != Bigchain.BLOCK_INVALID
}
if blocks_validity_status:
logger.debug('Commit-ing new block with hash: apphash=%s ,'
'height=%s, txn ids=%s', data, self.new_height,
self.block_txn_ids)
# The transaction _was_ found in an undecided or valid block,
# so there's no need to look in the backlog table
check_backlog = False
if self.events_queue:
event = Event(EventTypes.BLOCK_VALID, {
'height': self.new_height,
'transactions': self.block_transactions
})
self.events_queue.put(event)
tx_status = self.TX_UNDECIDED
# If the transaction is in a valid or any undecided block, return it. Does not check
# if transactions in undecided blocks are consistent, but selects the valid block
# before undecided ones
for target_block_id in blocks_validity_status:
if blocks_validity_status[target_block_id] == Bigchain.BLOCK_VALID:
tx_status = self.TX_VALID
break
return self.abci.ResponseCommit(data=data)
# Query the transaction in the target block and return
response = backend.query.get_transaction_from_block(self.connection, txid, target_block_id)
if check_backlog:
response = backend.query.get_transaction_from_backlog(self.connection, txid)
def rollback(b):
pre_commit = b.get_pre_commit_state()
if response:
tx_status = self.TX_IN_BACKLOG
if pre_commit is None:
# the pre_commit record is first stored in the first `end_block`
return
if response:
if tx_status == self.TX_IN_BACKLOG:
response = Transaction.from_dict(response)
else:
# If we are reading from the bigchain collection the asset is
# not in the transaction so we need to fetch the asset and
# reconstruct the transaction.
response = Transaction.from_db(self, response)
latest_block = b.get_latest_block()
if latest_block is None:
logger.error('Found precommit state but no blocks!')
sys.exit(1)
if include_status:
return response, tx_status
else:
return response
# NOTE: the pre-commit state is always at most 1 block ahead of the committed state
if latest_block['height'] < pre_commit['height']:
Election.rollback(b, pre_commit['height'], pre_commit['transactions'])
b.delete_transactions(pre_commit['transactions'])
def get_status(self, txid):
"""Retrieve the status of a transaction with `txid` from bigchain.
Args:
txid (str): transaction id of the transaction to query
Returns:
(string): transaction status ('valid', 'undecided',
or 'backlog'). If no transaction with that `txid` was found, it
returns `None`
"""
_, status = self.get_transaction(txid, include_status=True)
return status
def get_blocks_status_containing_tx(self, txid):
"""Retrieve block ids and statuses related to a transaction
Transactions may occur in multiple blocks, but no more than one valid block.
Args:
txid (str): transaction id of the transaction to query
Returns:
A dict of blocks containing the transaction,
e.g. {block_id_1: 'valid', block_id_2: 'invalid' ...}, or None
"""
# First, get information on all blocks which contain this transaction
blocks = backend.query.get_blocks_status_from_transaction(self.connection, txid)
if blocks:
# Determine the election status of each block
blocks_validity_status = {
block['id']: self.block_election_status(block)
for block in blocks
}
# NOTE: If there are multiple valid blocks with this transaction,
# something has gone wrong
if list(blocks_validity_status.values()).count(Bigchain.BLOCK_VALID) > 1:
block_ids = str([
block for block in blocks_validity_status
if blocks_validity_status[block] == Bigchain.BLOCK_VALID
])
raise core_exceptions.CriticalDoubleInclusion(
'Transaction {tx} is present in '
'multiple valid blocks: {block_ids}'
.format(tx=txid, block_ids=block_ids))
return blocks_validity_status
else:
return None
def get_asset_by_id(self, asset_id):
"""Returns the asset associated with an asset_id.
Args:
asset_id (str): The asset id.
Returns:
dict if the asset exists else None.
"""
cursor = backend.query.get_asset_by_id(self.connection, asset_id)
cursor = list(cursor)
if cursor:
return cursor[0]['asset']
def get_spent(self, txid, output):
"""Check if a `txid` was already used as an input.
A transaction can be used as an input for another transaction. Bigchain
needs to make sure that a given `(txid, output)` is only used once.
This method will check if the `(txid, output)` has already been
spent in a transaction that is in either the `VALID`, `UNDECIDED` or
`BACKLOG` state.
Args:
txid (str): The id of the transaction
output (num): the index of the output in the respective transaction
Returns:
The transaction (Transaction) that used the `(txid, output)` as an
input else `None`
Raises:
CriticalDoubleSpend: If the given `(txid, output)` was spent in
more than one valid transaction.
"""
# checks if an input was already spent
# checks if the bigchain has any transaction with input {'txid': ...,
# 'output': ...}
transactions = list(backend.query.get_spent(self.connection, txid,
output))
# a transaction_id should have been spent at most one time
# determine if these valid transactions appear in more than one valid
# block
num_valid_transactions = 0
non_invalid_transactions = []
for transaction in transactions:
# ignore transactions in invalid blocks
# FIXME: Isn't there a faster solution than doing I/O again?
txn, status = self.get_transaction(transaction['id'],
include_status=True)
if status == self.TX_VALID:
num_valid_transactions += 1
# `txid` can only have been spent in at most one valid block.
if num_valid_transactions > 1:
raise core_exceptions.CriticalDoubleSpend(
'`{}` was spent more than once. There is a problem'
' with the chain'.format(txid))
# if it's not an invalid transaction
if status is not None:
transaction.update({'metadata': txn.metadata})
non_invalid_transactions.append(transaction)
if non_invalid_transactions:
return Transaction.from_dict(non_invalid_transactions[0])
# Either no transaction was returned spending the `(txid, output)` as
# input or the returned transactions are not valid.
def get_owned_ids(self, owner):
"""Retrieve a list of ``txid`` s that can be used as inputs.
Args:
owner (str): base58 encoded public key.
Returns:
:obj:`list` of TransactionLink: list of ``txid`` s and ``output`` s
pointing to another transaction's condition
"""
return self.get_outputs_filtered(owner, spent=False)
@property
def fastquery(self):
return fastquery.FastQuery(self.connection, self.me)
def get_outputs_filtered(self, owner, spent=None):
"""Get a list of output links filtered on some criteria
Args:
owner (str): base58 encoded public_key.
spent (bool): If ``True`` return only the spent outputs. If
``False`` return only unspent outputs. If spent is
not specified (``None``) return all outputs.
Returns:
:obj:`list` of TransactionLink: list of ``txid`` s and ``output`` s
pointing to another transaction's condition
"""
outputs = self.fastquery.get_outputs_by_public_key(owner)
if spent is None:
return outputs
elif spent is True:
return self.fastquery.filter_unspent_outputs(outputs)
elif spent is False:
return self.fastquery.filter_spent_outputs(outputs)
def get_transactions_filtered(self, asset_id, operation=None):
"""Get a list of transactions filtered on some criteria
"""
txids = backend.query.get_txids_filtered(self.connection, asset_id,
operation)
for txid in txids:
tx, status = self.get_transaction(txid, True)
if status == self.TX_VALID:
yield tx
def create_block(self, validated_transactions):
"""Creates a block given a list of `validated_transactions`.
Note that this method does not validate the transactions. Transactions
should be validated before calling create_block.
Args:
validated_transactions (list(Transaction)): list of validated
transactions.
Returns:
Block: created block.
"""
# Prevent the creation of empty blocks
if not validated_transactions:
raise exceptions.OperationError('Empty block creation is not '
'allowed')
voters = list(self.federation)
block = Block(validated_transactions, self.me, gen_timestamp(), voters)
block = block.sign(self.me_private)
return block
# TODO: check that the votings structure is correctly constructed
def validate_block(self, block):
"""Validate a block.
Args:
block (Block): block to validate.
Returns:
The block if the block is valid else it raises an exception
describing the reason why the block is invalid.
"""
return self.consensus.validate_block(self, block)
def has_previous_vote(self, block_id):
"""Check for previous votes from this node
Args:
block_id (str): the id of the block to check
Returns:
bool: :const:`True` if this block already has a
valid vote from this node, :const:`False` otherwise.
"""
votes = list(backend.query.get_votes_by_block_id_and_voter(self.connection, block_id, self.me))
el, _ = self.consensus.voting.partition_eligible_votes(votes, [self.me])
return bool(el)
def write_block(self, block):
"""Write a block to bigchain.
Args:
block (Block): block to write to bigchain.
"""
# Decouple assets from block
assets, block_dict = block.decouple_assets()
metadatas, block_dict = block.decouple_metadata(block_dict)
# write the assets
if assets:
self.write_assets(assets)
if metadatas:
self.write_metadata(metadatas)
# write the block
return backend.query.write_block(self.connection, block_dict)
def prepare_genesis_block(self):
"""Prepare a genesis block."""
metadata = {'message': 'Hello World from the BigchainDB'}
transaction = Transaction.create([self.me], [([self.me], 1)],
metadata=metadata)
# NOTE: The transaction model doesn't expose an API to generate a
# GENESIS transaction, as this is literally the only usage.
transaction.operation = 'GENESIS'
transaction = transaction.sign([self.me_private])
# create the block
return self.create_block([transaction])
def create_genesis_block(self):
"""Create the genesis block
Block created when bigchain is first initialized. This method is not atomic; there might be concurrency
problems if multiple instances try to write the genesis block when the BigchainDB Federation is started,
but that is a highly unlikely scenario.
"""
# 1. create one transaction
# 2. create the block with one transaction
# 3. write the block to the bigchain
blocks_count = backend.query.count_blocks(self.connection)
if blocks_count:
raise exceptions.GenesisBlockAlreadyExistsError('Cannot create the Genesis block')
block = self.prepare_genesis_block()
self.write_block(block)
return block
def vote(self, block_id, previous_block_id, decision, invalid_reason=None):
"""Create a signed vote for a block given the
:attr:`previous_block_id` and the :attr:`decision` (valid/invalid).
Args:
block_id (str): The id of the block to vote on.
previous_block_id (str): The id of the previous block.
decision (bool): Whether the block is valid or invalid.
invalid_reason (Optional[str]): Reason the block is invalid
"""
if block_id == previous_block_id:
raise exceptions.CyclicBlockchainError()
vote = {
'voting_for_block': block_id,
'previous_block': previous_block_id,
'is_block_valid': decision,
'invalid_reason': invalid_reason,
'timestamp': gen_timestamp()
}
vote_data = serialize(vote)
signature = crypto.PrivateKey(self.me_private).sign(vote_data.encode())
vote_signed = {
'node_pubkey': self.me,
'signature': signature.decode(),
'vote': vote
}
return vote_signed
def write_vote(self, vote):
"""Write the vote to the database."""
return backend.query.write_vote(self.connection, vote)
def get_last_voted_block(self):
"""Returns the last block that this node voted on."""
last_block_id = backend.query.get_last_voted_block_id(self.connection,
self.me)
return Block.from_dict(self.get_block(last_block_id))
def block_election(self, block):
if type(block) != dict:
block = block.to_dict()
votes = list(backend.query.get_votes_by_block_id(self.connection,
block['id']))
return self.consensus.voting.block_election(block, votes,
self.federation)
def block_election_status(self, block):
"""Tally the votes on a block, and return the status:
valid, invalid, or undecided.
"""
return self.block_election(block)['status']
def get_assets(self, asset_ids):
"""Return a list of assets that match the asset_ids
Args:
asset_ids (:obj:`list` of :obj:`str`): A list of asset_ids to
retrieve from the database.
Returns:
list: The list of assets returned from the database.
"""
return backend.query.get_assets(self.connection, asset_ids)
def get_metadata(self, txn_ids):
"""Return a list of metadata that match the transaction ids (txn_ids)
Args:
txn_ids (:obj:`list` of :obj:`str`): A list of txn_ids to
retrieve from the database.
Returns:
list: The list of metadata returned from the database.
"""
return backend.query.get_metadata(self.connection, txn_ids)
def write_assets(self, assets):
"""Writes a list of assets into the database.
Args:
assets (:obj:`list` of :obj:`dict`): A list of assets to write to
the database.
"""
return backend.query.write_assets(self.connection, assets)
def write_metadata(self, metadata):
"""Writes a list of metadata into the database.
Args:
metadata (:obj:`list` of :obj:`dict`): A list of metadata to write to
the database.
"""
return backend.query.write_metadata(self.connection, metadata)
def text_search(self, search, *, limit=0, table='assets'):
"""Return an iterator of assets that match the text search
Args:
search (str): Text search string to query the text index
limit (int, optional): Limit the number of returned documents.
Returns:
iter: An iterator of assets that match the text search.
"""
objects = backend.query.text_search(self.connection, search, limit=limit,
table=table)
# TODO: This is not efficient. There may be a more efficient way to
# query by storing block ids with the assets and using fastquery.
# See https://github.com/bigchaindb/bigchaindb/issues/1496
for obj in objects:
tx, status = self.get_transaction(obj['id'], True)
if status == self.TX_VALID:
yield obj
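# --- Illustrative usage sketch (not part of the original file) ---
# A minimal walk-through of the voting flow defined above, assuming a
# configured backend and federation. `Bigchain` stands for the class these
# methods belong to; `incoming_block` is a hypothetical block to vote on.
#
#     b = Bigchain()
#     last_voted = b.get_last_voted_block()
#     vote = b.vote(incoming_block.id, last_voted.id, decision=True)
#     b.write_vote(vote)
#     b.block_election_status(incoming_block)  # 'valid'/'invalid'/'undecided'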

View File

@ -1,355 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
from collections import OrderedDict
import base58
from uuid import uuid4
from bigchaindb import backend
from bigchaindb.elections.vote import Vote
from bigchaindb.common.exceptions import (InvalidSignature,
MultipleInputsError,
InvalidProposer,
UnequalValidatorSet,
DuplicateTransaction)
from bigchaindb.tendermint_utils import key_from_base64, public_key_to_base64
from bigchaindb.common.crypto import (public_key_from_ed25519_key)
from bigchaindb.common.transaction import Transaction
from bigchaindb.common.schema import (_validate_schema,
TX_SCHEMA_COMMON,
TX_SCHEMA_CREATE)
class Election(Transaction):
"""Represents election transactions.
To implement a custom election, create a class deriving from this one
with OPERATION set to the election operation, ALLOWED_OPERATIONS
set to (OPERATION,), CREATE set to OPERATION.
"""
OPERATION = None
# Custom validation schema
TX_SCHEMA_CUSTOM = None
# Election Statuses:
ONGOING = 'ongoing'
CONCLUDED = 'concluded'
INCONCLUSIVE = 'inconclusive'
# Vote ratio to approve an election
ELECTION_THRESHOLD = 2 / 3
@classmethod
def get_validator_change(cls, bigchain):
"""Return the validator set from the most recent approved block
:return: {
'height': <block_height>,
'validators': <validator_set>
}
"""
latest_block = bigchain.get_latest_block()
if latest_block is None:
return None
return bigchain.get_validator_change(latest_block['height'])
@classmethod
def get_validators(cls, bigchain, height=None):
"""Return a dictionary of validators with key as `public_key` and
value as the `voting_power`
"""
validators = {}
for validator in bigchain.get_validators(height):
# NOTE: we assume that Tendermint encodes public key in base64
public_key = public_key_from_ed25519_key(key_from_base64(validator['public_key']['value']))
validators[public_key] = validator['voting_power']
return validators
@classmethod
def recipients(cls, bigchain):
"""Convert validator dictionary to a recipient list for `Transaction`"""
recipients = []
for public_key, voting_power in cls.get_validators(bigchain).items():
recipients.append(([public_key], voting_power))
return recipients
@classmethod
def is_same_topology(cls, current_topology, election_topology):
voters = {}
for voter in election_topology:
if len(voter.public_keys) > 1:
return False
[public_key] = voter.public_keys
voting_power = voter.amount
voters[public_key] = voting_power
# Check whether the voters and their votes are the same as the
# validators and their voting power in the network
return current_topology == voters
def validate(self, bigchain, current_transactions=[]):
"""Validate election transaction
NOTE:
* A valid election is initiated by an existing validator.
* A valid election is one where voters are validators and votes are
allocated according to the voting power of each validator node.
Args:
:param bigchain: (BigchainDB) an instantiated bigchaindb.lib.BigchainDB object.
:param current_transactions: (list) A list of transactions to be validated along with the election
Returns:
Election: an Election object or an object of a derived Election subclass.
Raises:
ValidationError: If the election is invalid
"""
input_conditions = []
duplicates = any(txn for txn in current_transactions if txn.id == self.id)
if bigchain.is_committed(self.id) or duplicates:
raise DuplicateTransaction('transaction `{}` already exists'
.format(self.id))
if not self.inputs_valid(input_conditions):
raise InvalidSignature('Transaction signature is invalid.')
current_validators = self.get_validators(bigchain)
# NOTE: Proposer should be a single node
if len(self.inputs) != 1 or len(self.inputs[0].owners_before) != 1:
raise MultipleInputsError('`tx_signers` must be a list instance of length one')
# NOTE: Check if the proposer is a validator.
[election_initiator_node_pub_key] = self.inputs[0].owners_before
if election_initiator_node_pub_key not in current_validators.keys():
raise InvalidProposer('Public key is not a part of the validator set')
# NOTE: Check if all validators have been assigned votes equal to their voting power
if not self.is_same_topology(current_validators, self.outputs):
raise UnequalValidatorSet('Validator set must be exactly the same as the outputs of the election')
return self
@classmethod
def generate(cls, initiator, voters, election_data, metadata=None):
# Break symmetry in case we need to call an election with the same properties twice
uuid = uuid4()
election_data['seed'] = str(uuid)
(inputs, outputs) = cls.validate_create(initiator, voters, election_data, metadata)
election = cls(cls.OPERATION, {'data': election_data}, inputs, outputs, metadata)
cls.validate_schema(election.to_dict())
return election
@classmethod
def validate_schema(cls, tx):
"""Validate the election transaction. Since `ELECTION` extends `CREATE` transaction, all the validations for
`CREATE` transaction should be inherited
"""
_validate_schema(TX_SCHEMA_COMMON, tx)
_validate_schema(TX_SCHEMA_CREATE, tx)
if cls.TX_SCHEMA_CUSTOM:
_validate_schema(cls.TX_SCHEMA_CUSTOM, tx)
@classmethod
def create(cls, tx_signers, recipients, metadata=None, asset=None):
raise NotImplementedError
@classmethod
def transfer(cls, tx_signers, recipients, metadata=None, asset=None):
raise NotImplementedError
@classmethod
def to_public_key(cls, election_id):
return base58.b58encode(bytes.fromhex(election_id)).decode()
@classmethod
def count_votes(cls, election_pk, transactions, getter=getattr):
votes = 0
for txn in transactions:
if getter(txn, 'operation') == Vote.OPERATION:
for output in getter(txn, 'outputs'):
# NOTE: We enforce that a valid vote for an election id has only
# election_pk in the output public keys; including any other public
# key along with election_pk makes the vote invalid.
if len(getter(output, 'public_keys')) == 1 and [election_pk] == getter(output, 'public_keys'):
votes = votes + int(getter(output, 'amount'))
return votes
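# --- Illustrative sketch (not part of the original method) ---
# count_votes() over plain dicts (getter=dict.get), with a hypothetical
# election public key 'E': only outputs whose public_keys equal ['E']
# are counted.
#
#     txns = [{'operation': 'VOTE',
#              'outputs': [{'public_keys': ['E'], 'amount': '3'}]}]
#     Election.count_votes('E', txns, dict.get)  # -> 3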
def get_commited_votes(self, bigchain, election_pk=None):
if election_pk is None:
election_pk = self.to_public_key(self.id)
txns = list(backend.query.get_asset_tokens_for_public_key(bigchain.connection,
self.id,
election_pk))
return self.count_votes(election_pk, txns, dict.get)
def has_concluded(self, bigchain, current_votes=[]):
"""Check if the election can be concluded or not.
* Elections can only be concluded if the validator set has not changed
since the election was initiated.
* Elections can be concluded only if the current votes form a supermajority.
Custom elections may override this function and introduce additional checks.
"""
if self.has_validator_set_changed(bigchain):
return False
election_pk = self.to_public_key(self.id)
votes_committed = self.get_commited_votes(bigchain, election_pk)
votes_current = self.count_votes(election_pk, current_votes)
total_votes = sum(output.amount for output in self.outputs)
if (votes_committed < self.ELECTION_THRESHOLD * total_votes) and \
(votes_committed + votes_current >= self.ELECTION_THRESHOLD * total_votes):
return True
return False
def get_status(self, bigchain):
election = self.get_election(self.id, bigchain)
if election and election['is_concluded']:
return self.CONCLUDED
return self.INCONCLUSIVE if self.has_validator_set_changed(bigchain) else self.ONGOING
def has_validator_set_changed(self, bigchain):
latest_change = self.get_validator_change(bigchain)
if latest_change is None:
return False
latest_change_height = latest_change['height']
election = self.get_election(self.id, bigchain)
return latest_change_height > election['height']
def get_election(self, election_id, bigchain):
return bigchain.get_election(election_id)
def store(self, bigchain, height, is_concluded):
bigchain.store_election(self.id, height, is_concluded)
def show_election(self, bigchain):
data = self.asset['data']
if 'public_key' in data.keys():
data['public_key'] = public_key_to_base64(data['public_key']['value'])
response = ''
for k, v in data.items():
if k != 'seed':
response += f'{k}={v}\n'
response += f'status={self.get_status(bigchain)}'
return response
@classmethod
def _get_initiated_elections(cls, height, txns):
elections = []
for tx in txns:
if not isinstance(tx, Election):
continue
elections.append({'election_id': tx.id, 'height': height,
'is_concluded': False})
return elections
@classmethod
def _get_votes(cls, txns):
elections = OrderedDict()
for tx in txns:
if not isinstance(tx, Vote):
continue
election_id = tx.asset['id']
if election_id not in elections:
elections[election_id] = []
elections[election_id].append(tx)
return elections
@classmethod
def process_block(cls, bigchain, new_height, txns):
"""Looks for election and vote transactions inside the block, records
and processes elections.
Every election is recorded in the database.
Every vote has a chance to conclude the corresponding election. When
an election is concluded, the corresponding database record is
marked as such.
Elections and votes are processed in the order in which they
appear in the block. Elections are concluded in the order of
appearance of their first votes in the block.
For every election concluded in the block, calls its `on_approval`
method. The returned value of the last `on_approval`, if any,
is a validator set update to be applied in one of the following blocks.
`on_approval` methods are implemented by elections of particular type.
The method may contain side effects but should be idempotent. To account
for other concluded elections, if required, the method should
rely on the database state.
"""
# elections initiated in this block
initiated_elections = cls._get_initiated_elections(new_height, txns)
if initiated_elections:
bigchain.store_elections(initiated_elections)
# elections voted for in this block and their votes
elections = cls._get_votes(txns)
validator_update = None
for election_id, votes in elections.items():
election = bigchain.get_transaction(election_id)
if election is None:
continue
if not election.has_concluded(bigchain, votes):
continue
validator_update = election.on_approval(bigchain, new_height)
election.store(bigchain, new_height, is_concluded=True)
return [validator_update] if validator_update else []
@classmethod
def rollback(cls, bigchain, new_height, txn_ids):
"""Looks for election and vote transactions inside the block and
cleans up the database artifacts possibly created in `process_blocks`.
Part of the `end_block`/`commit` crash recovery.
"""
# delete election records for elections initiated at this height and
# elections concluded at this height
bigchain.delete_elections(new_height)
txns = [bigchain.get_transaction(tx_id) for tx_id in txn_ids]
elections = cls._get_votes(txns)
for election_id in elections:
election = bigchain.get_transaction(election_id)
election.on_rollback(bigchain, new_height)
def on_approval(self, bigchain, new_height):
"""Override to update the database state according to the
election rules. Consider the current database state to account for
other concluded elections, if required.
"""
raise NotImplementedError
def on_rollback(self, bigchain, new_height):
"""Override to clean up the database artifacts possibly created
in `on_approval`. Part of the `end_block`/`commit` crash recovery.
"""
raise NotImplementedError
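# --- Illustrative sketch (not part of the original file) ---
# A minimal custom election following the recipe in the Election docstring:
# derive, set OPERATION/CREATE/ALLOWED_OPERATIONS, and implement the two
# approval hooks. The operation name below is hypothetical.

class ExampleElection(Election):
    OPERATION = 'EXAMPLE_ELECTION'
    CREATE = OPERATION
    ALLOWED_OPERATIONS = (OPERATION,)
    TX_SCHEMA_CUSTOM = None  # no schema beyond TX_SCHEMA_COMMON/TX_SCHEMA_CREATE

    def on_approval(self, bigchain, new_height):
        return None  # no validator set update to apply

    def on_rollback(self, bigchain, new_height):
        pass  # nothing to clean up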

View File

@ -1,64 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
from bigchaindb.common.transaction import Transaction
from bigchaindb.common.schema import (_validate_schema,
TX_SCHEMA_COMMON,
TX_SCHEMA_TRANSFER,
TX_SCHEMA_VOTE)
class Vote(Transaction):
OPERATION = 'VOTE'
# NOTE: This class inherits the TRANSFER txn type. The `TRANSFER` property is
# overridden to re-use methods from the parent class
TRANSFER = OPERATION
ALLOWED_OPERATIONS = (OPERATION,)
# Custom validation schema
TX_SCHEMA_CUSTOM = TX_SCHEMA_VOTE
def validate(self, bigchain, current_transactions=[]):
"""Validate election vote transaction
NOTE: There are no additional validity conditions on casting votes, i.e.
a vote is just a valid TRANSFER transaction.
For more details refer to BEP-21: https://github.com/bigchaindb/BEPs/tree/master/21
Args:
bigchain (BigchainDB): an instantiated bigchaindb.lib.BigchainDB object.
Returns:
Vote: a Vote object
Raises:
ValidationError: If the election vote is invalid
"""
self.validate_transfer_inputs(bigchain, current_transactions)
return self
@classmethod
def generate(cls, inputs, recipients, election_id, metadata=None):
(inputs, outputs) = cls.validate_transfer(inputs, recipients, election_id, metadata)
election_vote = cls(cls.OPERATION, {'id': election_id}, inputs, outputs, metadata)
cls.validate_schema(election_vote.to_dict())
return election_vote
@classmethod
def validate_schema(cls, tx):
"""Validate the validator election vote transaction. Since `VOTE` extends `TRANSFER`
transaction, all the validations for `CREATE` transaction should be inherited
"""
_validate_schema(TX_SCHEMA_COMMON, tx)
_validate_schema(TX_SCHEMA_TRANSFER, tx)
_validate_schema(cls.TX_SCHEMA_CUSTOM, tx)
@classmethod
def create(cls, tx_signers, recipients, metadata=None, asset=None):
raise NotImplementedError
@classmethod
def transfer(cls, tx_signers, recipients, metadata=None, asset=None):
raise NotImplementedError
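# --- Illustrative usage sketch (not part of the original file) ---
# Casting a vote spends the voter's election output back to the election
# public key. All names below are hypothetical placeholders:
#
#     election_pk = Election.to_public_key(election.id)
#     inputs = [i for i in election.to_inputs()
#               if voter_pk in i.owners_before]
#     vote_tx = Vote.generate(inputs, [([election_pk], voting_power)],
#                             election.id).sign([voter_private_key])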

View File

@ -1,9 +1,3 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
from queue import Empty
from collections import defaultdict
from multiprocessing import Queue
@ -47,7 +41,6 @@ class Exchange:
def __init__(self):
self.publisher_queue = Queue()
self.started_queue = Queue()
# Map <event_types -> queues>
self.queues = defaultdict(list)
@ -67,16 +60,7 @@ class Exchange:
Returns:
a :class:`multiprocessing.Queue`.
Raises:
RuntimeError if called after `run`
"""
try:
self.started_queue.get(timeout=1)
raise RuntimeError('Cannot create a new subscriber queue while Exchange is running.')
except Empty:
pass
if event_types is None:
event_types = EventTypes.ALL
@ -99,7 +83,6 @@ class Exchange:
def run(self):
"""Start the exchange"""
self.started_queue.put('STARTED')
while True:
event = self.publisher_queue.get()
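# --- Illustrative usage sketch (not part of the original file) ---
# The started_queue guard above means subscriber queues must be requested
# *before* the exchange starts; `EventTypes` is the event-type enum used
# by the events module:
#
#     exchange = Exchange()
#     events_queue = exchange.get_subscriber_queue(EventTypes.ALL)
#     Process(target=exchange.run).start()
#     # any further get_subscriber_queue() call now raises RuntimeError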

View File

@ -1,12 +1,14 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
class BigchainDBError(Exception):
"""Base class for BigchainDB exceptions."""
class CriticalDoubleSpend(BigchainDBError):
"""Data integrity error that requires attention"""
class CriticalDoubleInclusion(BigchainDBError):
"""Data integrity error that requires attention"""
class CriticalDuplicateVote(BigchainDBError):
"""Data integrity error that requires attention"""

View File

@ -1,22 +1,49 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
from bigchaindb.utils import condition_details_has_owner
from bigchaindb.backend import query
from bigchaindb.common.transaction import TransactionLink
class FastQuery():
"""Database queries that join on block results from a single node."""
class FastQuery:
"""Database queries that join on block results from a single node.
def __init__(self, connection):
* Votes are not validated for security (security is a replication concern)
* Votes come from only one node, and as such, non-byzantine fault tolerance
is reduced.
Previously, to consider the status of a block, all votes for that block
were retrieved and the election results were counted. This meant that a
faulty node may still have been able to obtain a correct election result.
However, from the point of view of a client, it is still necessary to
query multiple nodes to guard against getting an incorrect response from
a byzantine node.
"""
def __init__(self, connection, me):
self.connection = connection
self.me = me
def filter_valid_block_ids(self, block_ids, include_undecided=False):
"""Given block ids, return only the ones that are valid."""
block_ids = list(set(block_ids))
votes = query.get_votes_for_blocks_by_voter(
self.connection, block_ids, self.me)
votes = {vote['vote']['voting_for_block']: vote['vote']['is_block_valid']
for vote in votes}
return [block_id for block_id in block_ids
if votes.get(block_id, include_undecided)]
def filter_valid_items(self, items, block_id_key=lambda b: b[0]):
"""Given items with block ids, return only the ones that are valid or undecided.
"""
items = list(items)
block_ids = map(block_id_key, items)
valid_block_ids = set(self.filter_valid_block_ids(block_ids, True))
return [b for b in items if block_id_key(b) in valid_block_ids]
def get_outputs_by_public_key(self, public_key):
"""Get outputs for a public key"""
txs = list(query.get_owned_ids(self.connection, public_key))
res = list(query.get_owned_ids(self.connection, public_key))
txs = [tx for _, tx in self.filter_valid_items(res)]
return [TransactionLink(tx['id'], index)
for tx in txs
for index, output in enumerate(tx['outputs'])
@ -30,7 +57,8 @@ class FastQuery():
outputs: list of TransactionLink
"""
links = [o.to_dict() for o in outputs]
txs = list(query.get_spending_transactions(self.connection, links))
res = query.get_spending_transactions(self.connection, links)
txs = [tx for _, tx in self.filter_valid_items(res)]
spends = {TransactionLink.from_dict(input_['fulfills'])
for tx in txs
for input_ in tx['inputs']}
@ -43,7 +71,8 @@ class FastQuery():
outputs: list of TransactionLink
"""
links = [o.to_dict() for o in outputs]
txs = list(query.get_spending_transactions(self.connection, links))
res = query.get_spending_transactions(self.connection, links)
txs = [tx for _, tx in self.filter_valid_items(res)]
spends = {TransactionLink.from_dict(input_['fulfills'])
for tx in txs
for input_ in tx['inputs']}
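# --- Illustrative sketch (not part of the original file) ---
# filter_valid_items() keeps items whose block id voted valid (or has no
# vote yet, since include_undecided=True is passed). Ids are hypothetical:
#
#     fq = FastQuery(connection, me)
#     items = [('block-a', {'id': 'tx1'}), ('block-b', {'id': 'tx2'})]
#     fq.filter_valid_items(items)  # default block_id_key: item[0]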

View File

@ -1,514 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Module containing main contact points with Tendermint and
MongoDB.
"""
import logging
from collections import namedtuple
from uuid import uuid4
import rapidjson
try:
from hashlib import sha3_256
except ImportError:
# NOTE: needed for Python < 3.6
from sha3 import sha3_256
import requests
import bigchaindb
from bigchaindb import backend, config_utils, fastquery
from bigchaindb.models import Transaction
from bigchaindb.common.exceptions import (SchemaValidationError,
ValidationError,
DoubleSpend)
from bigchaindb.common.transaction_mode_types import (BROADCAST_TX_COMMIT,
BROADCAST_TX_ASYNC,
BROADCAST_TX_SYNC)
from bigchaindb.tendermint_utils import encode_transaction, merkleroot
from bigchaindb import exceptions as core_exceptions
from bigchaindb.validation import BaseValidationRules
logger = logging.getLogger(__name__)
class BigchainDB(object):
"""Bigchain API
Create, read, sign, write transactions to the database
"""
def __init__(self, connection=None):
"""Initialize the Bigchain instance
A Bigchain instance has several configuration parameters (e.g. host).
If a parameter value is passed as an argument to the Bigchain
__init__ method, then that is the value it will have.
Otherwise, the parameter value will come from an environment variable.
If that environment variable isn't set, then the value
will come from the local configuration file. And if that variable
isn't in the local configuration file, then the parameter will have
its default value (defined in bigchaindb.__init__).
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
"""
config_utils.autoconfigure()
self.mode_commit = BROADCAST_TX_COMMIT
self.mode_list = (BROADCAST_TX_ASYNC,
BROADCAST_TX_SYNC,
self.mode_commit)
self.tendermint_host = bigchaindb.config['tendermint']['host']
self.tendermint_port = bigchaindb.config['tendermint']['port']
self.endpoint = 'http://{}:{}/'.format(self.tendermint_host, self.tendermint_port)
validationPlugin = bigchaindb.config.get('validation_plugin')
if validationPlugin:
self.validation = config_utils.load_validation_plugin(validationPlugin)
else:
self.validation = BaseValidationRules
self.connection = connection if connection else backend.connect(**bigchaindb.config['database'])
def post_transaction(self, transaction, mode):
"""Submit a valid transaction to the mempool."""
if not mode or mode not in self.mode_list:
raise ValidationError('Mode must be one of the following {}.'
.format(', '.join(self.mode_list)))
tx_dict = transaction.tx_dict if transaction.tx_dict else transaction.to_dict()
payload = {
'method': mode,
'jsonrpc': '2.0',
'params': [encode_transaction(tx_dict)],
'id': str(uuid4())
}
# TODO: handle connection errors!
return requests.post(self.endpoint, json=payload)
def write_transaction(self, transaction, mode):
# This method offers backward compatibility with the Web API.
"""Submit a valid transaction to the mempool."""
response = self.post_transaction(transaction, mode)
return self._process_post_response(response.json(), mode)
def _process_post_response(self, response, mode):
logger.debug(response)
error = response.get('error')
if error:
status_code = 500
message = error.get('message', 'Internal Error')
data = error.get('data', '')
if 'Tx already exists in cache' in data:
status_code = 400
return (status_code, message + ' - ' + data)
result = response['result']
if mode == self.mode_commit:
check_tx_code = result.get('check_tx', {}).get('code', 0)
deliver_tx_code = result.get('deliver_tx', {}).get('code', 0)
error_code = check_tx_code or deliver_tx_code
else:
error_code = result.get('code', 0)
if error_code:
return (500, 'Transaction validation failed')
return (202, '')
def store_bulk_transactions(self, transactions):
txns = []
assets = []
txn_metadatas = []
for t in transactions:
transaction = t.tx_dict if t.tx_dict else rapidjson.loads(rapidjson.dumps(t.to_dict()))
if transaction['operation'] == t.CREATE:
asset = transaction.pop('asset')
asset['id'] = transaction['id']
assets.append(asset)
metadata = transaction.pop('metadata')
txn_metadatas.append({'id': transaction['id'],
'metadata': metadata})
txns.append(transaction)
backend.query.store_metadatas(self.connection, txn_metadatas)
if assets:
backend.query.store_assets(self.connection, assets)
return backend.query.store_transactions(self.connection, txns)
def delete_transactions(self, txs):
return backend.query.delete_transactions(self.connection, txs)
def update_utxoset(self, transaction):
"""Update the UTXO set given ``transaction``. That is, remove
the outputs that the given ``transaction`` spends, and add the
outputs that the given ``transaction`` creates.
Args:
transaction (:obj:`~bigchaindb.models.Transaction`): A new
transaction incoming into the system for which the UTXO
set needs to be updated.
"""
spent_outputs = [
spent_output for spent_output in transaction.spent_outputs
]
if spent_outputs:
self.delete_unspent_outputs(*spent_outputs)
self.store_unspent_outputs(
*[utxo._asdict() for utxo in transaction.unspent_outputs]
)
def store_unspent_outputs(self, *unspent_outputs):
"""Store the given ``unspent_outputs`` (utxos).
Args:
*unspent_outputs (:obj:`tuple` of :obj:`dict`): Variable
length tuple or list of unspent outputs.
"""
if unspent_outputs:
return backend.query.store_unspent_outputs(
self.connection, *unspent_outputs)
def get_utxoset_merkle_root(self):
"""Returns the merkle root of the utxoset. This implies that
the utxoset is first put into a merkle tree.
For now, the merkle tree and its root will be computed each
time. This obviously is not efficient and a better approach
that limits the repetition of the same computation when
unnecessary should be sought. For instance, future optimizations
could simply re-compute the branches of the tree that were
affected by a change.
The transaction hash (id) and output index should be sufficient
to uniquely identify a utxo, and consequently only that
information from a utxo record is needed to compute the merkle
root. Hence, each node of the merkle tree should contain the
tuple (txid, output_index).
.. important:: The leaves of the tree will need to be sorted in
some kind of lexicographical order.
Returns:
str: Merkle root in hexadecimal form.
"""
utxoset = backend.query.get_unspent_outputs(self.connection)
# TODO Once ready, use the already pre-computed utxo_hash field.
# See common/transactions.py for details.
hashes = [
sha3_256(
'{}{}'.format(utxo['transaction_id'], utxo['output_index']).encode()
).digest() for utxo in utxoset
]
# TODO Notice the sorted call!
return merkleroot(sorted(hashes))
def get_unspent_outputs(self):
"""Get the utxoset.
Returns:
generator of unspent_outputs.
"""
cursor = backend.query.get_unspent_outputs(self.connection)
return (record for record in cursor)
def delete_unspent_outputs(self, *unspent_outputs):
"""Deletes the given ``unspent_outputs`` (utxos).
Args:
*unspent_outputs (:obj:`tuple` of :obj:`dict`): Variable
length tuple or list of unspent outputs.
"""
if unspent_outputs:
return backend.query.delete_unspent_outputs(
self.connection, *unspent_outputs)
def is_committed(self, transaction_id):
transaction = backend.query.get_transaction(self.connection, transaction_id)
return bool(transaction)
def get_transaction(self, transaction_id):
transaction = backend.query.get_transaction(self.connection, transaction_id)
if transaction:
asset = backend.query.get_asset(self.connection, transaction_id)
metadata = backend.query.get_metadata(self.connection, [transaction_id])
if asset:
transaction['asset'] = asset
if 'metadata' not in transaction:
metadata = metadata[0] if metadata else None
if metadata:
metadata = metadata.get('metadata')
transaction.update({'metadata': metadata})
transaction = Transaction.from_dict(transaction)
return transaction
def get_transactions(self, txn_ids):
return backend.query.get_transactions(self.connection, txn_ids)
def get_transactions_filtered(self, asset_id, operation=None, last_tx=None):
"""Get a list of transactions filtered on some criteria
"""
txids = backend.query.get_txids_filtered(self.connection, asset_id,
operation, last_tx)
for txid in txids:
yield self.get_transaction(txid)
def get_outputs_filtered(self, owner, spent=None):
"""Get a list of output links filtered on some criteria
Args:
owner (str): base58 encoded public_key.
spent (bool): If ``True`` return only the spent outputs. If
``False`` return only unspent outputs. If spent is
not specified (``None``) return all outputs.
Returns:
:obj:`list` of TransactionLink: list of ``txid`` s and ``output`` s
pointing to another transaction's condition
"""
outputs = self.fastquery.get_outputs_by_public_key(owner)
if spent is None:
return outputs
elif spent is True:
return self.fastquery.filter_unspent_outputs(outputs)
elif spent is False:
return self.fastquery.filter_spent_outputs(outputs)
def get_spent(self, txid, output, current_transactions=[]):
transactions = backend.query.get_spent(self.connection, txid,
output)
transactions = list(transactions) if transactions else []
if len(transactions) > 1:
raise core_exceptions.CriticalDoubleSpend(
'`{}` was spent more than once. There is a problem'
' with the chain'.format(txid))
current_spent_transactions = []
for ctxn in current_transactions:
for ctxn_input in ctxn.inputs:
if ctxn_input.fulfills and\
ctxn_input.fulfills.txid == txid and\
ctxn_input.fulfills.output == output:
current_spent_transactions.append(ctxn)
transaction = None
if len(transactions) + len(current_spent_transactions) > 1:
raise DoubleSpend('tx "{}" spends inputs twice'.format(txid))
elif transactions:
transaction = Transaction.from_db(self, transactions[0])
elif current_spent_transactions:
transaction = current_spent_transactions[0]
return transaction
def store_block(self, block):
"""Create a new block."""
return backend.query.store_block(self.connection, block)
def get_latest_block(self):
"""Get the block with largest height."""
return backend.query.get_latest_block(self.connection)
def get_block(self, block_id):
"""Get the block with the specified `block_id`.
Returns the block corresponding to `block_id` or None if no match is
found.
Args:
block_id (int): block id of the block to get.
"""
block = backend.query.get_block(self.connection, block_id)
latest_block = self.get_latest_block()
latest_block_height = latest_block['height'] if latest_block else 0
if not block and block_id > latest_block_height:
return
result = {'height': block_id,
'transactions': []}
if block:
transactions = backend.query.get_transactions(self.connection, block['transactions'])
result['transactions'] = [t.to_dict() for t in Transaction.from_db(self, transactions)]
return result
def get_block_containing_tx(self, txid):
"""Retrieve the list of blocks (block ids) containing a
transaction with transaction id `txid`
Args:
txid (str): transaction id of the transaction to query
Returns:
Block id list (list(int))
"""
blocks = list(backend.query.get_block_with_transaction(self.connection, txid))
if len(blocks) > 1:
logger.critical('Transaction id %s exists in multiple blocks', txid)
return [block['height'] for block in blocks]
def validate_transaction(self, tx, current_transactions=[]):
"""Validate a transaction against the current status of the database."""
transaction = tx
# CLEANUP: The conditional below checks for transaction in dict format.
# It would be better to only have a single format for the transaction
# throughout the code base.
if isinstance(transaction, dict):
try:
transaction = Transaction.from_dict(tx)
except SchemaValidationError as e:
logger.warning('Invalid transaction schema: %s', e.__cause__.message)
return False
except ValidationError as e:
logger.warning('Invalid transaction (%s): %s', type(e).__name__, e)
return False
return transaction.validate(self, current_transactions)
def is_valid_transaction(self, tx, current_transactions=[]):
# NOTE: the function returns the Transaction object in case
# the transaction is valid
try:
return self.validate_transaction(tx, current_transactions)
except ValidationError as e:
logger.warning('Invalid transaction (%s): %s', type(e).__name__, e)
return False
def text_search(self, search, *, limit=0, table='assets'):
"""Return an iterator of assets that match the text search
Args:
search (str): Text search string to query the text index
limit (int, optional): Limit the number of returned documents.
Returns:
iter: An iterator of assets that match the text search.
"""
return backend.query.text_search(self.connection, search, limit=limit,
table=table)
def get_assets(self, asset_ids):
"""Return a list of assets that match the asset_ids
Args:
asset_ids (:obj:`list` of :obj:`str`): A list of asset_ids to
retrieve from the database.
Returns:
list: The list of assets returned from the database.
"""
return backend.query.get_assets(self.connection, asset_ids)
def get_metadata(self, txn_ids):
"""Return a list of metadata that match the transaction ids (txn_ids)
Args:
txn_ids (:obj:`list` of :obj:`str`): A list of txn_ids to
retrieve from the database.
Returns:
list: The list of metadata returned from the database.
"""
return backend.query.get_metadata(self.connection, txn_ids)
@property
def fastquery(self):
return fastquery.FastQuery(self.connection)
def get_validator_change(self, height=None):
return backend.query.get_validator_set(self.connection, height)
def get_validators(self, height=None):
result = self.get_validator_change(height)
return [] if result is None else result['validators']
def get_election(self, election_id):
return backend.query.get_election(self.connection, election_id)
def get_pre_commit_state(self):
return backend.query.get_pre_commit_state(self.connection)
def store_pre_commit_state(self, state):
return backend.query.store_pre_commit_state(self.connection, state)
def store_validator_set(self, height, validators):
"""Store validator set at a given `height`.
NOTE: If the validator set already exists at that `height` then an
exception will be raised.
"""
return backend.query.store_validator_set(self.connection, {'height': height,
'validators': validators})
def delete_validator_set(self, height):
return backend.query.delete_validator_set(self.connection, height)
def store_abci_chain(self, height, chain_id, is_synced=True):
return backend.query.store_abci_chain(self.connection, height,
chain_id, is_synced)
def delete_abci_chain(self, height):
return backend.query.delete_abci_chain(self.connection, height)
def get_latest_abci_chain(self):
return backend.query.get_latest_abci_chain(self.connection)
def migrate_abci_chain(self):
"""Generate and record a new ABCI chain ID. New blocks are not
accepted until we receive an InitChain ABCI request with
the matching chain ID and validator set.
Chain ID is generated based on the current chain and height.
`chain-X` => `chain-X-migrated-at-height-5`.
`chain-X-migrated-at-height-5` => `chain-X-migrated-at-height-21`.
If there is no known chain (we are at genesis), the function returns.
"""
latest_chain = self.get_latest_abci_chain()
if latest_chain is None:
return
block = self.get_latest_block()
suffix = '-migrated-at-height-'
chain_id = latest_chain['chain_id']
block_height_str = str(block['height'])
new_chain_id = chain_id.split(suffix)[0] + suffix + block_height_str
self.store_abci_chain(block['height'] + 1, new_chain_id, False)
def store_election(self, election_id, height, is_concluded):
return backend.query.store_election(self.connection, election_id,
height, is_concluded)
def store_elections(self, elections):
return backend.query.store_elections(self.connection, elections)
def delete_elections(self, height):
return backend.query.delete_elections(self.connection, height)
Block = namedtuple('Block', ('app_hash', 'height', 'transactions'))
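# --- Illustrative sketch (not part of the original file) ---
# The chain-id renaming scheme used by migrate_abci_chain(), isolated as a
# hypothetical pure helper for clarity:

def _migrated_chain_id(chain_id, height, suffix='-migrated-at-height-'):
    """'chain-X' -> 'chain-X-migrated-at-height-5';
    'chain-X-migrated-at-height-5' -> 'chain-X-migrated-at-height-21'."""
    return chain_id.split(suffix)[0] + suffix + str(height)

assert _migrated_chain_id('chain-X', 5) == 'chain-X-migrated-at-height-5'
assert (_migrated_chain_id('chain-X-migrated-at-height-5', 21)
        == 'chain-X-migrated-at-height-21')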

View File

@ -1,130 +0,0 @@
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import bigchaindb
import logging
from bigchaindb.common.exceptions import ConfigurationError
from logging.config import dictConfig as set_logging_config
import os
DEFAULT_LOG_DIR = os.getcwd()
DEFAULT_LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
'class': 'logging.Formatter',
'format': ('[%(asctime)s] [%(levelname)s] (%(name)s) '
'%(message)s (%(processName)-10s - pid: %(process)d)'),
'datefmt': '%Y-%m-%d %H:%M:%S',
},
'file': {
'class': 'logging.Formatter',
'format': ('[%(asctime)s] [%(levelname)s] (%(name)s) '
'%(message)s (%(processName)-10s - pid: %(process)d)'),
'datefmt': '%Y-%m-%d %H:%M:%S',
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'console',
'level': logging.INFO,
},
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(DEFAULT_LOG_DIR, 'bigchaindb.log'),
'mode': 'w',
'maxBytes': 209715200,
'backupCount': 5,
'formatter': 'file',
'level': logging.INFO,
},
'errors': {
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(DEFAULT_LOG_DIR, 'bigchaindb-errors.log'),
'mode': 'w',
'maxBytes': 209715200,
'backupCount': 5,
'formatter': 'file',
'level': logging.ERROR,
}
},
'loggers': {},
'root': {
'level': logging.DEBUG,
'handlers': ['console', 'file', 'errors'],
},
}
def _normalize_log_level(level):
try:
return level.upper()
except AttributeError as exc:
raise ConfigurationError('Log level must be a string!') from exc
def setup_logging():
"""Function to configure log hadlers.
.. important::
Configuration, if needed, should be applied before invoking this
function, as it configures the root logger for the process based on
the state of :obj:`bigchaindb.config` at the moment it is invoked.
"""
logging_configs = DEFAULT_LOGGING_CONFIG
new_logging_configs = bigchaindb.config['log']
if 'file' in new_logging_configs:
filename = new_logging_configs['file']
logging_configs['handlers']['file']['filename'] = filename
if 'error_file' in new_logging_configs:
error_filename = new_logging_configs['error_file']
logging_configs['handlers']['errors']['filename'] = error_filename
if 'level_console' in new_logging_configs:
level = _normalize_log_level(new_logging_configs['level_console'])
logging_configs['handlers']['console']['level'] = level
if 'level_logfile' in new_logging_configs:
level = _normalize_log_level(new_logging_configs['level_logfile'])
logging_configs['handlers']['file']['level'] = level
if 'fmt_console' in new_logging_configs:
fmt = new_logging_configs['fmt_console']
logging_configs['formatters']['console']['format'] = fmt
if 'fmt_logfile' in new_logging_configs:
fmt = new_logging_configs['fmt_logfile']
logging_configs['formatters']['file']['format'] = fmt
if 'datefmt_console' in new_logging_configs:
fmt = new_logging_configs['datefmt_console']
logging_configs['formatters']['console']['datefmt'] = fmt
if 'datefmt_logfile' in new_logging_configs:
fmt = new_logging_configs['datefmt_logfile']
logging_configs['formatters']['file']['datefmt'] = fmt
log_levels = new_logging_configs.get('granular_levels', {})
for logger_name, level in log_levels.items():
level = _normalize_log_level(level)
try:
logging_configs['loggers'][logger_name]['level'] = level
except KeyError:
logging_configs['loggers'][logger_name] = {'level': level}
set_logging_config(logging_configs)
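# --- Illustrative usage sketch (not part of the original file) ---
# A user log config exercising the overrides handled above; the keys mirror
# the `if` chain in setup_logging(), the values are hypothetical:
#
#     bigchaindb.config['log'] = {
#         'file': '/var/log/bigchaindb/bigchaindb.log',
#         'level_console': 'warning',
#         'fmt_console': '%(asctime)s %(levelname)s %(message)s',
#         'granular_levels': {'bigchaindb.core': 'debug'},
#     }
#     setup_logging()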

68
bigchaindb/log/configs.py Normal file
View File

@ -0,0 +1,68 @@
import logging
from logging.handlers import DEFAULT_TCP_LOGGING_PORT
from os.path import expanduser, join
DEFAULT_SOCKET_LOGGING_HOST = 'localhost'
DEFAULT_SOCKET_LOGGING_PORT = DEFAULT_TCP_LOGGING_PORT
DEFAULT_SOCKET_LOGGING_ADDR = (DEFAULT_SOCKET_LOGGING_HOST,
DEFAULT_SOCKET_LOGGING_PORT)
DEFAULT_LOG_DIR = expanduser('~')
PUBLISHER_LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': logging.DEBUG,
},
}
SUBSCRIBER_LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
'class': 'logging.Formatter',
'format': ('[%(asctime)s] [%(levelname)s] (%(name)s) '
'%(message)s (%(processName)-10s - pid: %(process)d)'),
'datefmt': '%Y-%m-%d %H:%M:%S',
},
'file': {
'class': 'logging.Formatter',
'format': ('[%(asctime)s] [%(levelname)s] (%(name)s) '
'%(message)s (%(processName)-10s - pid: %(process)d)'),
'datefmt': '%Y-%m-%d %H:%M:%S',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'console',
'level': logging.INFO,
},
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'filename': join(DEFAULT_LOG_DIR, 'bigchaindb.log'),
'mode': 'w',
'maxBytes': 209715200,
'backupCount': 5,
'formatter': 'file',
'level': logging.INFO,
},
'errors': {
'class': 'logging.handlers.RotatingFileHandler',
'filename': join(DEFAULT_LOG_DIR, 'bigchaindb-errors.log'),
'mode': 'w',
'maxBytes': 209715200,
'backupCount': 5,
'formatter': 'file',
'level': logging.ERROR,
},
},
'loggers': {},
'root': {
'level': logging.DEBUG,
'handlers': ['console', 'file', 'errors'],
'port': DEFAULT_SOCKET_LOGGING_PORT
},
}

36
bigchaindb/log/loggers.py Normal file
View File

@ -0,0 +1,36 @@
import logging.handlers
from gunicorn.glogging import Logger
from .configs import DEFAULT_SOCKET_LOGGING_HOST, DEFAULT_SOCKET_LOGGING_PORT
class HttpServerLogger(Logger):
"""Custom logger class for ``gunicorn`` logs.
Meant for internal usage only, to set the ``logger_class``
configuration setting on gunicorn.
"""
def setup(self, cfg):
"""Setup the gunicorn access and error loggers. This overrides
the parent method. Its main goal is to simply pipe all the logs to
the TCP socket used throughout BigchainDB.
Args:
cfg (:obj:`gunicorn.config.Config`): Gunicorn configuration
object. *Ignored*.
"""
log_cfg = self.cfg.env_orig.get('custom_log_config', {})
self.log_port = log_cfg.get('port', DEFAULT_SOCKET_LOGGING_PORT)
self._set_socklog_handler(self.error_log)
self._set_socklog_handler(self.access_log)
def _set_socklog_handler(self, log):
socket_handler = logging.handlers.SocketHandler(
DEFAULT_SOCKET_LOGGING_HOST, self.log_port)
socket_handler._gunicorn = True
log.addHandler(socket_handler)
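# --- Illustrative usage sketch (not part of the original file) ---
# Pointing gunicorn at this logger class; the option names follow
# gunicorn's settings mechanism, the port value is hypothetical:
#
#     options = {
#         'logger_class': 'bigchaindb.log.loggers.HttpServerLogger',
#         'custom_log_config': {'port': 9020},
#     }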

187
bigchaindb/log/setup.py Normal file
View File

@ -0,0 +1,187 @@
"""Setup logging."""
from copy import deepcopy
import logging
from logging.config import dictConfig
import logging.handlers
import pickle
from socketserver import StreamRequestHandler, ThreadingTCPServer
import struct
import sys
from .configs import (
DEFAULT_SOCKET_LOGGING_HOST,
DEFAULT_SOCKET_LOGGING_PORT,
PUBLISHER_LOGGING_CONFIG,
SUBSCRIBER_LOGGING_CONFIG,
)
from bigchaindb.utils import Process
from bigchaindb.common.exceptions import ConfigurationError
def _normalize_log_level(level):
try:
return level.upper()
except AttributeError as exc:
raise ConfigurationError('Log level must be a string!') from exc
def setup_pub_logger(logging_port=None):
logging_port = logging_port or DEFAULT_SOCKET_LOGGING_PORT
dictConfig(PUBLISHER_LOGGING_CONFIG)
socket_handler = logging.handlers.SocketHandler(
DEFAULT_SOCKET_LOGGING_HOST, logging_port)
socket_handler.setLevel(logging.DEBUG)
logger = logging.getLogger()
logger.addHandler(socket_handler)
def setup_sub_logger(*, user_log_config=None):
kwargs = {}
log_port = user_log_config.get('port') if user_log_config is not None else None
if log_port is not None:
kwargs['port'] = log_port
server = LogRecordSocketServer(**kwargs)
with server:
server_proc = Process(
name='logging_server',
target=server.serve_forever,
kwargs={'log_config': user_log_config},
)
server_proc.start()
def setup_logging(*, user_log_config=None):
port = user_log_config.get('port') if user_log_config is not None else None
setup_pub_logger(logging_port=port)
setup_sub_logger(user_log_config=user_log_config)
def create_subscriber_logging_config(*, user_log_config=None): # noqa: C901
sub_log_config = deepcopy(SUBSCRIBER_LOGGING_CONFIG)
if not user_log_config:
return sub_log_config
if 'file' in user_log_config:
filename = user_log_config['file']
sub_log_config['handlers']['file']['filename'] = filename
if 'error_file' in user_log_config:
error_filename = user_log_config['error_file']
sub_log_config['handlers']['errors']['filename'] = error_filename
if 'level_console' in user_log_config:
level = _normalize_log_level(user_log_config['level_console'])
sub_log_config['handlers']['console']['level'] = level
if 'level_logfile' in user_log_config:
level = _normalize_log_level(user_log_config['level_logfile'])
sub_log_config['handlers']['file']['level'] = level
if 'fmt_console' in user_log_config:
fmt = user_log_config['fmt_console']
sub_log_config['formatters']['console']['format'] = fmt
if 'fmt_logfile' in user_log_config:
fmt = user_log_config['fmt_logfile']
sub_log_config['formatters']['file']['format'] = fmt
if 'datefmt_console' in user_log_config:
fmt = user_log_config['datefmt_console']
sub_log_config['formatters']['console']['datefmt'] = fmt
if 'datefmt_logfile' in user_log_config:
fmt = user_log_config['datefmt_logfile']
sub_log_config['formatters']['file']['datefmt'] = fmt
log_levels = user_log_config.get('granular_levels', {})
for logger_name, level in log_levels.items():
level = _normalize_log_level(level)
try:
sub_log_config['loggers'][logger_name]['level'] = level
except KeyError:
sub_log_config['loggers'][logger_name] = {'level': level}
return sub_log_config
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request.
This basically logs the record using whatever logging policy is
configured locally.
"""
def handle(self):
"""Handle multiple requests - each expected to be a 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally.
"""
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack('>L', chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unpickle(chunk)
record = logging.makeLogRecord(obj)
self.handle_log_record(record)
def unpickle(self, data):
try:
return pickle.loads(data)
except (pickle.UnpicklingError,
AttributeError, EOFError, TypeError) as exc:
return {
'msg': '({}) Log handling error: un-pickling failed!'.format(
exc.__class__.__name__),
'exc_info': exc.args,
'level': logging.ERROR,
'func': self.unpickle.__name__,
}
def handle_log_record(self, record):
logger = logging.getLogger(record.name)
logger.handle(record)
class LogRecordSocketServer(ThreadingTCPServer):
"""Simple TCP socket-based logging server.
"""
allow_reuse_address = True
def __init__(self,
host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
super().__init__((host, port), handler)
def serve_forever(self, *, poll_interval=0.5, log_config=None):
sub_logging_config = create_subscriber_logging_config(
user_log_config=log_config)
dictConfig(sub_logging_config)
try:
super().serve_forever(poll_interval=poll_interval)
except KeyboardInterrupt:
pass
# NOTE: Because the context manager is only available
# from 3.6 and up, we add it for lower versions.
if sys.version_info < (3, 6):
def __enter__(self):
return self
def __exit__(self, *args):
self.server_close()
LogRecordSocketServer.__enter__ = __enter__
LogRecordSocketServer.__exit__ = __exit__
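# --- Illustrative sketch (not part of the original file) ---
# The wire format LogRecordStreamHandler.handle() expects is the stdlib
# SocketHandler framing: a 4-byte big-endian length, then a pickled dict
# of LogRecord attributes.

import logging
import pickle
import struct

record = logging.LogRecord('demo', logging.INFO, __file__, 0,
                           'hello from a publisher', None, None)
payload = pickle.dumps(record.__dict__)
frame = struct.pack('>L', len(payload)) + payload  # bytes as seen on the socket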

View File

@ -1,48 +0,0 @@
import json
from bigchaindb.common.schema import TX_SCHEMA_CHAIN_MIGRATION_ELECTION
from bigchaindb.elections.election import Election
class ChainMigrationElection(Election):
OPERATION = 'CHAIN_MIGRATION_ELECTION'
CREATE = OPERATION
ALLOWED_OPERATIONS = (OPERATION,)
TX_SCHEMA_CUSTOM = TX_SCHEMA_CHAIN_MIGRATION_ELECTION
def has_concluded(self, bigchaindb, *args, **kwargs):
chain = bigchaindb.get_latest_abci_chain()
if chain is not None and not chain['is_synced']:
# do not conclude the migration election if
# there is another migration in progress
return False
return super().has_concluded(bigchaindb, *args, **kwargs)
def on_approval(self, bigchain, *args, **kwargs):
bigchain.migrate_abci_chain()
def show_election(self, bigchain):
output = super().show_election(bigchain)
chain = bigchain.get_latest_abci_chain()
if chain is None or chain['is_synced']:
return output
output += f'\nchain_id={chain["chain_id"]}'
block = bigchain.get_latest_block()
output += f'\napp_hash={block["app_hash"]}'
validators = [
{
'pub_key': {
'type': 'tendermint/PubKeyEd25519',
'value': k,
},
'power': v,
} for k, v in self.get_validators(bigchain).items()
]
output += f'\nvalidators={json.dumps(validators, indent=4)}'
return output
def on_rollback(self, bigchain, new_height):
bigchain.delete_abci_chain(new_height)

Some files were not shown because too many files have changed in this diff.