mirror of
https://github.com/bigchaindb/bigchaindb.git
synced 2024-10-13 13:34:05 +00:00
Compare commits
179 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
3c89d306ef | ||
![]() |
ad83ef1d35 | ||
![]() |
0961aa6b26 | ||
![]() |
6a6a19207c | ||
![]() |
caf31647e0 | ||
![]() |
8630e74f77 | ||
![]() |
5166fa6b46 | ||
![]() |
22cf5f26c3 | ||
![]() |
62ae66fef7 | ||
![]() |
d77ab60651 | ||
![]() |
af2517b2ae | ||
![]() |
8895d50f5b | ||
![]() |
40a92756f9 | ||
![]() |
41f7534f3a | ||
![]() |
26cb560bb1 | ||
![]() |
2b0babb95b | ||
![]() |
b272fb342a | ||
![]() |
e86666b6b3 | ||
![]() |
d9dfa98819 | ||
![]() |
4a008e51e3 | ||
![]() |
2accca9ff1 | ||
![]() |
186cd87444 | ||
![]() |
9e99c024d3 | ||
![]() |
09391351a4 | ||
![]() |
f795301f90 | ||
![]() |
7df59994e9 | ||
![]() |
86472157db | ||
![]() |
b54c31539f | ||
![]() |
ecd6e9cc46 | ||
![]() |
f12d44ff82 | ||
![]() |
9bcefdf3e2 | ||
![]() |
16a9888d1e | ||
![]() |
645d457b7b | ||
![]() |
2975c372c8 | ||
![]() |
df23bec320 | ||
![]() |
c801c833fc | ||
![]() |
a5452169b9 | ||
![]() |
a24c0f429b | ||
![]() |
835fdfcf59 | ||
![]() |
20a59a9640 | ||
![]() |
f0dc9986aa | ||
![]() |
6bed80cff3 | ||
![]() |
7781f5aae8 | ||
![]() |
eb2f8ddc73 | ||
![]() |
d8f51d8999 | ||
![]() |
3b72181b3d | ||
![]() |
ca012ae8b6 | ||
![]() |
1a9c5d47e6 | ||
![]() |
edcd59e235 | ||
![]() |
11817b9590 | ||
![]() |
d9725d483b | ||
![]() |
f0df5bd2e0 | ||
![]() |
f8191b0d8e | ||
![]() |
dcfe23f292 | ||
![]() |
5de2fef284 | ||
![]() |
a62cc4e101 | ||
![]() |
288c2ecd2a | ||
![]() |
fe467fddbd | ||
![]() |
0b935c09c7 | ||
![]() |
1bd5845a83 | ||
![]() |
d85b9df615 | ||
![]() |
ff906b0abe | ||
![]() |
20e6c6a686 | ||
![]() |
73afc38697 | ||
![]() |
c5fea574b5 | ||
![]() |
ee7bd938dc | ||
![]() |
e2b3c78d9c | ||
![]() |
f1353a3db9 | ||
![]() |
c1993c3787 | ||
![]() |
5c1511b479 | ||
![]() |
3da13eda3d | ||
![]() |
0c4d6ec906 | ||
![]() |
f4d6ccbacf | ||
![]() |
009cbe4e11 | ||
![]() |
54ac7adaa9 | ||
![]() |
4f01dd4997 | ||
![]() |
72d7986a58 | ||
![]() |
78dafce146 | ||
![]() |
bedb1945a9 | ||
![]() |
cb74cb43d2 | ||
![]() |
40ea961d4a | ||
![]() |
24ca0b32a9 | ||
![]() |
39be7a2fdf | ||
![]() |
eb139fba00 | ||
![]() |
407571ddf4 | ||
![]() |
2f6bbaeb4b | ||
![]() |
126e90e732 | ||
![]() |
cf6fa6b553 | ||
![]() |
528ba07c68 | ||
![]() |
eb0e387b18 | ||
![]() |
16355748dd | ||
![]() |
e3e95836d0 | ||
![]() |
8e5f770707 | ||
![]() |
e6893632dc | ||
![]() |
3cb0f8e2ab | ||
![]() |
bd39076522 | ||
![]() |
754730a045 | ||
![]() |
5394054132 | ||
![]() |
61ce427e1d | ||
![]() |
c68fab6c31 | ||
![]() |
f55f22144f | ||
![]() |
6a9064196a | ||
![]() |
241aae335d | ||
![]() |
901b6b9d02 | ||
![]() |
4b54e702f8 | ||
![]() |
0f41869bea | ||
![]() |
35e35ecd57 | ||
![]() |
8a7650c13a | ||
![]() |
5a440843b6 | ||
![]() |
b33e3808a6 | ||
![]() |
0fe749d830 | ||
![]() |
c79848d66a | ||
![]() |
dd84d4eb6f | ||
![]() |
cbfbfa8fc4 | ||
![]() |
699494613f | ||
![]() |
2656302c60 | ||
![]() |
9bf09324df | ||
![]() |
f6bee3b63e | ||
![]() |
bee9468bd0 | ||
![]() |
af2b5424c0 | ||
![]() |
cb22557771 | ||
![]() |
c72c7a4626 | ||
![]() |
f8bb29535a | ||
![]() |
230a5b2d69 | ||
![]() |
fe0a4c494b | ||
![]() |
8e55b11da2 | ||
![]() |
80b6881797 | ||
![]() |
905b1a5141 | ||
![]() |
cf1f253019 | ||
![]() |
96932793b1 | ||
![]() |
2d1f670eec | ||
![]() |
cb418265b6 | ||
![]() |
cfc2c5900b | ||
![]() |
7a0b474d11 | ||
![]() |
3cf368aab7 | ||
![]() |
3f7b521809 | ||
![]() |
407b771185 | ||
![]() |
8e97c753eb | ||
![]() |
dfadbff60f | ||
![]() |
d78ff75225 | ||
![]() |
41a2687b9b | ||
![]() |
6fdcaf44a7 | ||
![]() |
d31ab9fb40 | ||
![]() |
a16d561f54 | ||
![]() |
e2aafb9cf9 | ||
![]() |
045136f9a6 | ||
![]() |
90f2fdfc23 | ||
![]() |
55a9151c14 | ||
![]() |
acc60bcce1 | ||
![]() |
4815e75855 | ||
![]() |
01dba7e883 | ||
![]() |
a3dce723be | ||
![]() |
bd49a3804f | ||
![]() |
423820bcda | ||
![]() |
0142e98dba | ||
![]() |
abdd23f5a6 | ||
![]() |
3092beb995 | ||
![]() |
bbf5310ac8 | ||
![]() |
3760824261 | ||
![]() |
3011548317 | ||
![]() |
4636a48918 | ||
![]() |
205e2cf3fd | ||
![]() |
4806b81577 | ||
![]() |
65b6040e6b | ||
![]() |
ab41b463d8 | ||
![]() |
44be8f572f | ||
![]() |
54b81d3ae8 | ||
![]() |
66fd001311 | ||
![]() |
bdfa059046 | ||
![]() |
9b4273a987 | ||
![]() |
32b64ccc2a | ||
![]() |
01dbb20248 | ||
![]() |
2e9a9b1121 | ||
![]() |
e0676306b7 | ||
![]() |
8090a35676 | ||
![]() |
d25d806cd8 | ||
![]() |
66b243a2b4 | ||
![]() |
1a74afa9cd | ||
![]() |
c2e61ae8c1 |
@ -1,4 +1,9 @@
|
||||
#!/bin/bash
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
|
||||
set -e -x
|
||||
|
||||
|
@ -1,4 +1,9 @@
|
||||
#!/bin/bash
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
|
||||
set -e -x
|
||||
|
||||
|
@ -1,4 +1,9 @@
|
||||
#!/bin/bash
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
|
||||
if [[ -z ${TOXENV} ]]; then
|
||||
sudo apt-get update
|
||||
|
@ -1,4 +1,9 @@
|
||||
#!/bin/bash
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
|
||||
set -e -x
|
||||
|
||||
|
@ -1,4 +1,9 @@
|
||||
#!/bin/bash
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
|
||||
set -e -x
|
||||
|
||||
@ -8,9 +13,6 @@ if [[ -n ${TOXENV} ]]; then
|
||||
pip install --upgrade tox
|
||||
elif [[ ${BIGCHAINDB_CI_ABCI} == 'enable' ]]; then
|
||||
docker-compose build --no-cache --build-arg abci_status=enable bigchaindb
|
||||
elif [[ $TRAVIS_PYTHON_VERSION == 3.5 ]]; then
|
||||
docker-compose build --build-arg python_version=3.5 --no-cache bigchaindb
|
||||
pip install --upgrade codecov
|
||||
elif [[ $BIGCHAINDB_INTEGRATION_TEST == 'enable' ]]; then
|
||||
docker-compose build bigchaindb python-driver
|
||||
else
|
||||
|
@ -1,4 +1,9 @@
|
||||
#!/bin/bash
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
|
||||
set -e -x
|
||||
|
||||
|
7
.github/CONTRIBUTING.md
vendored
7
.github/CONTRIBUTING.md
vendored
@ -1,3 +1,10 @@
|
||||
<!---
|
||||
Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
BigchainDB and IPDB software contributors.
|
||||
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
--->
|
||||
|
||||
# How to Contribute to the BigchainDB Project
|
||||
|
||||
There are many ways you can contribute to the BigchainDB project, some very easy and others more involved.
|
||||
|
30
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
30
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Describe the bug**
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
**To Reproduce**
|
||||
Steps to reproduce the behavior:
|
||||
|
||||
**Expected behavior**
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
**Logs or terminal output**
|
||||
If applicable, add add textual content to help explain your problem.
|
||||
|
||||
**Desktop (please complete the following information):**
|
||||
- Distribution: [e.g. Ubuntu 18.04]
|
||||
- Bigchaindb version:
|
||||
- Tendermint version:
|
||||
- Mongodb version:
|
||||
- Python full version: [e.g. Python 3.6.6]
|
||||
|
||||
**Additional context**
|
||||
Add any other context about the problem here.
|
2
.gitignore
vendored
2
.gitignore
vendored
@ -95,3 +95,5 @@ network/*/data
|
||||
|
||||
# Docs that are fetched at build time
|
||||
docs/contributing/source/cross-project-policies/*.md
|
||||
|
||||
.DS_Store
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
repos:
|
||||
- repo: git://github.com/pre-commit/pre-commit-hooks
|
||||
sha: v1.1.1
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
build:
|
||||
image: latest
|
||||
|
||||
|
45
.travis.yml
45
.travis.yml
@ -1,6 +1,11 @@
|
||||
# Copyright © 2020, 2021 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
sudo: required
|
||||
|
||||
dist: trusty
|
||||
dist: focal
|
||||
|
||||
services:
|
||||
- docker
|
||||
@ -9,26 +14,19 @@ language: python
|
||||
cache: pip
|
||||
|
||||
python:
|
||||
- 3.5
|
||||
- 3.6
|
||||
- 3.7
|
||||
- 3.8
|
||||
|
||||
env:
|
||||
global:
|
||||
- DOCKER_COMPOSE_VERSION=1.19.0
|
||||
- DOCKER_COMPOSE_VERSION=1.29.2
|
||||
matrix:
|
||||
- TOXENV=flake8
|
||||
- TOXENV=docsroot
|
||||
- TOXENV=docsserver
|
||||
|
||||
matrix:
|
||||
fast_finish: true
|
||||
exclude:
|
||||
- python: 3.5
|
||||
env: TOXENV=flake8
|
||||
- python: 3.5
|
||||
env: TOXENV=docsroot
|
||||
- python: 3.5
|
||||
env: TOXENV=docsserver
|
||||
include:
|
||||
- python: 3.6
|
||||
env:
|
||||
@ -42,6 +40,31 @@ matrix:
|
||||
- python: 3.6
|
||||
env:
|
||||
- BIGCHAINDB_ACCEPTANCE_TEST=enable
|
||||
- python: 3.7
|
||||
env:
|
||||
- BIGCHAINDB_DATABASE_BACKEND=localmongodb
|
||||
- BIGCHAINDB_DATABASE_SSL=
|
||||
- python: 3.7
|
||||
env:
|
||||
- BIGCHAINDB_DATABASE_BACKEND=localmongodb
|
||||
- BIGCHAINDB_DATABASE_SSL=
|
||||
- BIGCHAINDB_CI_ABCI=enable
|
||||
- python: 3.7
|
||||
env:
|
||||
- BIGCHAINDB_ACCEPTANCE_TEST=enable
|
||||
- python: 3.8
|
||||
env:
|
||||
- BIGCHAINDB_DATABASE_BACKEND=localmongodb
|
||||
- BIGCHAINDB_DATABASE_SSL=
|
||||
- python: 3.8
|
||||
env:
|
||||
- BIGCHAINDB_DATABASE_BACKEND=localmongodb
|
||||
- BIGCHAINDB_DATABASE_SSL=
|
||||
- BIGCHAINDB_CI_ABCI=enable
|
||||
- python: 3.8
|
||||
env:
|
||||
- BIGCHAINDB_ACCEPTANCE_TEST=enable
|
||||
|
||||
|
||||
before_install: sudo .ci/travis-before-install.sh
|
||||
|
||||
|
156
CHANGELOG.md
156
CHANGELOG.md
@ -1,3 +1,10 @@
|
||||
<!---
|
||||
Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
BigchainDB and IPDB software contributors.
|
||||
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
--->
|
||||
|
||||
# Change Log (Release Notes)
|
||||
|
||||
All _notable_ changes to this project will be documented in this file (`CHANGELOG.md`).
|
||||
@ -18,6 +25,153 @@ For reference, the possible headings are:
|
||||
* **Known Issues**
|
||||
* **Notes**
|
||||
|
||||
## [2.2.2] - 2020-08-12
|
||||
|
||||
### Security
|
||||
|
||||
Several dependencies updated including Flask that had vulnerability.
|
||||
|
||||
### Fixed
|
||||
|
||||
* Updated priv_validator key format in stack script (#2707)
|
||||
|
||||
### External Contributors
|
||||
|
||||
* @aostrun - [#2708](https://github.com/bigchaindb/bigchaindb/pull/2708)
|
||||
|
||||
## [2.2.1] - 2020-04-14
|
||||
|
||||
### Fixed
|
||||
|
||||
Gevent library API update is incompatible with bigchaindb-abci 1.0.1 version.
|
||||
Updated bigchaindb-abci.
|
||||
|
||||
## [2.2.0] - 2020-02-20
|
||||
|
||||
### Added
|
||||
|
||||
Support for multiple ABCI versions.
|
||||
|
||||
## [2.1.0] - 2019-11-06
|
||||
|
||||
### Added
|
||||
|
||||
Option for last transaction retrieval added.
|
||||
|
||||
## [2.0] - 2019-09-26
|
||||
|
||||
### Changed
|
||||
|
||||
Migrated from Tendermint 0.22.8 to 0.31.5.
|
||||
|
||||
## [2.0 Beta 9] - 2018-11-27
|
||||
|
||||
### Changed
|
||||
|
||||
Removed support for TLSv1 and TLSv1.1 in all NGINX config files. Kept support for TLSv1.2 and added support for TLSv1.3. [Pull Request #2601](https://github.com/bigchaindb/bigchaindb/pull/2601)
|
||||
|
||||
### Fixed
|
||||
|
||||
Fixed two issues with schema validation. Pull requests [#2606](https://github.com/bigchaindb/bigchaindb/pull/2606) & [#2607](https://github.com/bigchaindb/bigchaindb/pull/2607)
|
||||
|
||||
### External Contributors
|
||||
|
||||
[@gamjapark](https://github.com/gamjapark) and team translated all the [BigchainDB root docs](https://docs.bigchaindb.com/en/latest/korean/index.html) into Korean. [Pull Request #2603](https://github.com/bigchaindb/bigchaindb/pull/2603)
|
||||
|
||||
## [2.0 Beta 8] - 2018-11-03
|
||||
|
||||
### Changed
|
||||
|
||||
* Revised the [Simple Deployment Template](http://docs.bigchaindb.com/projects/server/en/latest/simple-deployment-template/index.html) in the docs. Added NGINX to the mix. Pull Requests [#2578](https://github.com/bigchaindb/bigchaindb/pull/2578) and [#2579](https://github.com/bigchaindb/bigchaindb/pull/2579)
|
||||
* Revised `nginx/nginx.conf` to enable CORS. [Pull Request #2580](https://github.com/bigchaindb/bigchaindb/pull/2580)
|
||||
|
||||
### Fixed
|
||||
|
||||
* Fixed a typo in the Kubernetes ConfigMap template. [Pull Request #2583](https://github.com/bigchaindb/bigchaindb/pull/2583)
|
||||
|
||||
### External Contributors
|
||||
|
||||
[@gamjapark](https://github.com/gamjapark) translated the main `README.md` file into Korean. [Pull Request #2592](https://github.com/bigchaindb/bigchaindb/pull/2592)
|
||||
|
||||
## [2.0 Beta 7] - 2018-09-28
|
||||
|
||||
Tag name: v2.0.0b7
|
||||
|
||||
### Added
|
||||
|
||||
Completed the implementation of chain-migration elections (BEP-42). Pull requests [#2553](https://github.com/bigchaindb/bigchaindb/pull/2553), [#2556](https://github.com/bigchaindb/bigchaindb/pull/2556), [#2558](https://github.com/bigchaindb/bigchaindb/pull/2558), [#2563](https://github.com/bigchaindb/bigchaindb/pull/2563) and [#2566](https://github.com/bigchaindb/bigchaindb/pull/2566)
|
||||
|
||||
### Changed
|
||||
|
||||
* Code that used the Python driver's (deprecated) transactions.send() method now uses its transactions.send_commit() method instead. [Pull request #2547](https://github.com/bigchaindb/bigchaindb/pull/2547)
|
||||
* Code that implied pluggable "consensus" now implies pluggable transaction "validation" (a more accurate word). [Pull request #2561](https://github.com/bigchaindb/bigchaindb/pull/2561)
|
||||
|
||||
### Removed
|
||||
|
||||
Benchmark logs. [Pull request #2565](https://github.com/bigchaindb/bigchaindb/pull/2565)
|
||||
|
||||
### Fixed
|
||||
|
||||
A bug caused by an incorrect MongoDB query. [Pull request #2567](https://github.com/bigchaindb/bigchaindb/pull/2567)
|
||||
|
||||
### Notes
|
||||
|
||||
There's now better documentation about logs, log rotation, and the `server.bind` config setting. Pull requests [#2546](https://github.com/bigchaindb/bigchaindb/pull/2546) and [#2575](https://github.com/bigchaindb/bigchaindb/pull/2575)
|
||||
|
||||
## [2.0 Beta 6] - 2018-09-17
|
||||
|
||||
Tag name: v2.0.0b6
|
||||
|
||||
### Added
|
||||
|
||||
* [New documentation about privacy and handling private data](https://docs.bigchaindb.com/en/latest/private-data.html). [Pull request #2437](https://github.com/bigchaindb/bigchaindb/pull/2437)
|
||||
* New documentation about log rotation. Also rotate Tendermint logs if started using Monit. [Pull request #2528](https://github.com/bigchaindb/bigchaindb/pull/2528)
|
||||
* Began implementing one of the migration strategies outlined in [BEP-42](https://github.com/bigchaindb/BEPs/tree/master/42). That involved creating a more general-purpose election process and commands. Pull requests [#2488](https://github.com/bigchaindb/bigchaindb/pull/2488), [#2495](https://github.com/bigchaindb/bigchaindb/pull/2495), [#2498](https://github.com/bigchaindb/bigchaindb/pull/2498), [#2515](https://github.com/bigchaindb/bigchaindb/pull/2515), [#2535](https://github.com/bigchaindb/bigchaindb/pull/2535)
|
||||
* Used memoization to avoid doing some validation checks multiple times. [Pull request #2490](https://github.com/bigchaindb/bigchaindb/pull/2490)
|
||||
* Created an all-in-one Docker image containing BigchainDB Server, Tendermint and MongoDB. It was created for a particular user and is not recommended for production use unless you really know what you're doing. [Pull request #2424](https://github.com/bigchaindb/bigchaindb/pull/2424)
|
||||
|
||||
### Changed
|
||||
|
||||
* The supported versions of Tendermint are now hard-wired into BigchainDB Server: it checks to see what version the connected Tendermint has, and if it's not compatible, BigchainDB Server exits with an error message. [Pull request #2541](https://github.com/bigchaindb/bigchaindb/pull/2541)
|
||||
* The docs no longer say to install the highest version of Tendermint: they say to install a specific version. [Pull request #2524](https://github.com/bigchaindb/bigchaindb/pull/2524)
|
||||
* The setup docs include more recommended settings for `config.toml`. [Pull request #2516](https://github.com/bigchaindb/bigchaindb/pull/2516)
|
||||
* The process to add, remove or update the voting power of a validator at run time (using the `bigchaindb upsert-validator` subcommands) was completely changed and is now fully working. See [issue #2372](https://github.com/bigchaindb/bigchaindb/issues/2372) and all the pull requests it references. Pull requests [#2439](https://github.com/bigchaindb/bigchaindb/pull/2439) and [#2440](https://github.com/bigchaindb/bigchaindb/pull/2440)
|
||||
* The license on the documentation was changed from CC-BY-SA-4 to CC-BY-4. [Pull request #2427](https://github.com/bigchaindb/bigchaindb/pull/2427)
|
||||
* Re-activated and/or updated some unit tests that had been deacivated during the migration to Tendermint. Pull requests [#2390](https://github.com/bigchaindb/bigchaindb/pull/2390), [#2415](https://github.com/bigchaindb/bigchaindb/pull/2415), [#2452](https://github.com/bigchaindb/bigchaindb/pull/24), [#2456](https://github.com/bigchaindb/bigchaindb/pull/2456)
|
||||
* Updated RapidJSON to a newer, faster version. [Pull request #2470](https://github.com/bigchaindb/bigchaindb/pull/2470)
|
||||
* The Java driver is now officially supported. [Pull request #2478](https://github.com/bigchaindb/bigchaindb/pull/2478)
|
||||
* The MongoDB indexes on transaction id and block height were changed to be [unique indexes](https://docs.mongodb.com/manual/core/index-unique/). [Pull request #2492](https://github.com/bigchaindb/bigchaindb/pull/2492)
|
||||
* Updated the required `cryptoconditions` package to a newer one. [Pull request #2494](https://github.com/bigchaindb/bigchaindb/pull/2494)
|
||||
|
||||
### Removed
|
||||
|
||||
* Removed some old code and tests. Pull requests
|
||||
[#2374](https://github.com/bigchaindb/bigchaindb/pull/2374),
|
||||
[#2452](https://github.com/bigchaindb/bigchaindb/pull/2452),
|
||||
[#2474](https://github.com/bigchaindb/bigchaindb/pull/2474),
|
||||
[#2476](https://github.com/bigchaindb/bigchaindb/pull/2476),
|
||||
[#2491](https://github.com/bigchaindb/bigchaindb/pull/2491)
|
||||
|
||||
### Fixed
|
||||
|
||||
* Fixed the Events API so that it only sends valid transactions to subscribers. Also changed how it works internally, so now it is more reliable. [Pull request #2529](https://github.com/bigchaindb/bigchaindb/pull/2529)
|
||||
* Fixed a bug where MongoDB database initialization would abort if a collection already existed. [Pull request #2520](https://github.com/bigchaindb/bigchaindb/pull/2520)
|
||||
* Fixed a unit test that was failing randomly. [Pull request #2423](https://github.com/bigchaindb/bigchaindb/pull/2423)
|
||||
* Fixed the validator curl port. [Pull request #2447](https://github.com/bigchaindb/bigchaindb/pull/2447)
|
||||
* Fixed an error in the docs about the HTTP POST /transactions endpoint. [Pull request #2481](https://github.com/bigchaindb/bigchaindb/pull/2481)
|
||||
* Fixed a unit test that could loop forever. [Pull requqest #2486](https://github.com/bigchaindb/bigchaindb/pull/2486)
|
||||
* Fixed a bug when validating a CREATE + TRANSFER. [Pull request #2487](https://github.com/bigchaindb/bigchaindb/pull/2487)
|
||||
* Fixed the HTTP response when posting a transaction in commit mode. [Pull request #2510](https://github.com/bigchaindb/bigchaindb/pull/2510)
|
||||
* Fixed a crash that happened when attempting to restart BigchainDB at Tendermint block height 1. [Pull request#2519](https://github.com/bigchaindb/bigchaindb/pull/2519)
|
||||
|
||||
### External Contributors
|
||||
|
||||
@danacr - [Pull request #2447](https://github.com/bigchaindb/bigchaindb/pull/2447)
|
||||
|
||||
### Notes
|
||||
|
||||
The docs section titled "Production Deployment Template" was renamed to "Kubernetes Deployment Template" and we no longer consider it the go-to deployment template. The "Simple Deployment Template" is simpler, easier to understand, and less expensive (unless you are with an organization that already has a big Kubernetes cluster).
|
||||
|
||||
## [2.0 Beta 5] - 2018-08-01
|
||||
|
||||
Tag name: v2.0.0b5
|
||||
@ -183,7 +337,7 @@ Re-enabled multi-threading. [Pull request #2258](https://github.com/bigchaindb/b
|
||||
|
||||
### Known Issues
|
||||
|
||||
Tendermint changed how it responds to a request to store data (via the [Tendermint Broadcast API](http://tendermint.readthedocs.io/projects/tools/en/master/using-tendermint.html#broadcast-api)) between version 0.12 and 0.19.2. We started modifying the code of BigchainDB Server to account for those changes in responses (in [pull request #2239](https://github.com/bigchaindb/bigchaindb/pull/2239)), but we found that there's a difference between what the Tendermint documentation _says_ about those responses and how Tendermint actually responds. We need to determine Tendermint's intent before we can finalize that pull request.
|
||||
Tendermint changed how it responds to a request to store data (via the [Tendermint Broadcast API](https://tendermint.com/docs/tendermint-core/using-tendermint.html#broadcast-api)) between version 0.12 and 0.19.2. We started modifying the code of BigchainDB Server to account for those changes in responses (in [pull request #2239](https://github.com/bigchaindb/bigchaindb/pull/2239)), but we found that there's a difference between what the Tendermint documentation _says_ about those responses and how Tendermint actually responds. We need to determine Tendermint's intent before we can finalize that pull request.
|
||||
|
||||
### Notes
|
||||
|
||||
|
@ -1,3 +1,10 @@
|
||||
<!---
|
||||
Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
BigchainDB and IPDB software contributors.
|
||||
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
--->
|
||||
|
||||
# Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project, and in the interest of
|
||||
@ -35,7 +42,7 @@ This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior directed at yourself or another community member may be
|
||||
reported by contacting a project maintainer at [conduct@bigchaindb.com](mailto:conduct@bigchaindb.com). All
|
||||
reported by contacting a project maintainer at [contact@bigchaindb.com](mailto:contact@bigchaindb.com). All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is appropriate to the circumstances. Maintainers are
|
||||
obligated to maintain confidentiality with regard to the reporter of an
|
||||
|
@ -1,13 +1,12 @@
|
||||
FROM python:3.6
|
||||
LABEL maintainer "dev@bigchaindb.com"
|
||||
LABEL maintainer "contact@ipdb.global"
|
||||
RUN mkdir -p /usr/src/app
|
||||
COPY . /usr/src/app/
|
||||
WORKDIR /usr/src/app
|
||||
RUN apt-get -qq update \
|
||||
&& apt-get -y upgrade \
|
||||
&& apt-get install -y jq \
|
||||
&& pip install --no-cache-dir --process-dependency-links . \
|
||||
&& pip install --no-cache-dir . \
|
||||
&& pip install . \
|
||||
&& apt-get autoremove \
|
||||
&& apt-get clean
|
||||
|
||||
|
51
Dockerfile-all-in-one
Normal file
51
Dockerfile-all-in-one
Normal file
@ -0,0 +1,51 @@
|
||||
FROM alpine:3.9
|
||||
LABEL maintainer "contact@ipdb.global"
|
||||
|
||||
ARG TM_VERSION=v0.31.5
|
||||
RUN mkdir -p /usr/src/app
|
||||
ENV HOME /root
|
||||
COPY . /usr/src/app/
|
||||
WORKDIR /usr/src/app
|
||||
|
||||
RUN apk --update add sudo bash \
|
||||
&& apk --update add python3 openssl ca-certificates git \
|
||||
&& apk --update add --virtual build-dependencies python3-dev \
|
||||
libffi-dev openssl-dev build-base jq \
|
||||
&& apk add --no-cache libstdc++ dpkg gnupg \
|
||||
&& pip3 install --upgrade pip cffi \
|
||||
&& pip install -e . \
|
||||
&& apk del build-dependencies \
|
||||
&& rm -f /var/cache/apk/*
|
||||
|
||||
# Install mongodb and monit
|
||||
RUN apk --update add mongodb monit
|
||||
|
||||
# Install Tendermint
|
||||
RUN wget https://github.com/tendermint/tendermint/releases/download/${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip \
|
||||
&& unzip tendermint_${TM_VERSION}_linux_amd64.zip \
|
||||
&& mv tendermint /usr/local/bin/ \
|
||||
&& rm tendermint_${TM_VERSION}_linux_amd64.zip
|
||||
|
||||
ENV TMHOME=/tendermint
|
||||
|
||||
# Set permissions required for mongodb
|
||||
RUN mkdir -p /data/db /data/configdb \
|
||||
&& chown -R mongodb:mongodb /data/db /data/configdb
|
||||
|
||||
# BigchainDB enviroment variables
|
||||
ENV BIGCHAINDB_DATABASE_PORT 27017
|
||||
ENV BIGCHAINDB_DATABASE_BACKEND localmongodb
|
||||
ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984
|
||||
ENV BIGCHAINDB_WSSERVER_HOST 0.0.0.0
|
||||
ENV BIGCHAINDB_WSSERVER_SCHEME ws
|
||||
|
||||
ENV BIGCHAINDB_WSSERVER_ADVERTISED_HOST 0.0.0.0
|
||||
ENV BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME ws
|
||||
ENV BIGCHAINDB_TENDERMINT_PORT 26657
|
||||
|
||||
VOLUME /data/db /data/configdb /tendermint
|
||||
|
||||
EXPOSE 27017 28017 9984 9985 26656 26657 26658
|
||||
|
||||
WORKDIR $HOME
|
||||
ENTRYPOINT ["/usr/src/app/pkg/scripts/all-in-one.bash"]
|
@ -1,15 +1,15 @@
|
||||
FROM alpine:latest
|
||||
LABEL maintainer "dev@bigchaindb.com"
|
||||
LABEL maintainer "contact@ipdb.global"
|
||||
RUN mkdir -p /usr/src/app
|
||||
COPY . /usr/src/app/
|
||||
WORKDIR /usr/src/app
|
||||
RUN apk --update add sudo \
|
||||
&& apk --update add python3 openssl ca-certificates git\
|
||||
&& apk --update add python3 py-pip openssl ca-certificates git\
|
||||
&& apk --update add --virtual build-dependencies python3-dev \
|
||||
libffi-dev openssl-dev build-base \
|
||||
&& apk add --no-cache libstdc++ \
|
||||
&& pip3 install --upgrade pip cffi \
|
||||
&& pip install --no-cache-dir --process-dependency-links -e .[dev] \
|
||||
&& pip install -e . \
|
||||
&& apk del build-dependencies \
|
||||
&& rm -f /var/cache/apk/*
|
||||
# When developing with Python in a docker container, we are using PYTHONBUFFERED
|
||||
|
@ -1,11 +1,10 @@
|
||||
ARG python_version=3.6
|
||||
FROM python:${python_version}
|
||||
LABEL maintainer "dev@bigchaindb.com"
|
||||
LABEL maintainer "contact@ipdb.global"
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y vim \
|
||||
&& apt-get install -y git \
|
||||
&& pip install -U pip \
|
||||
&& pip install pynacl \
|
||||
&& apt-get autoremove \
|
||||
&& apt-get clean
|
||||
|
||||
@ -32,6 +31,5 @@ ENV BIGCHAINDB_CI_ABCI ${abci_status}
|
||||
RUN mkdir -p /usr/src/app
|
||||
COPY . /usr/src/app/
|
||||
WORKDIR /usr/src/app
|
||||
RUN pip install --no-cache-dir --process-dependency-links -e .[dev]
|
||||
RUN pip install -e .[dev]
|
||||
RUN bigchaindb -y configure
|
||||
|
||||
|
@ -1,80 +0,0 @@
|
||||
# How to Handle Pull Requests
|
||||
|
||||
This document is for whoever has the ability to merge pull requests in the Git repositories associated with BigchainDB.
|
||||
|
||||
If the pull request is from an employee of BigchainDB GmbH, then you can ignore this document.
|
||||
|
||||
If the pull request is from someone who is _not_ an employee of BigchainDB, then:
|
||||
|
||||
A. Have they agreed to the Individual Contributor Agreement in the past? There's a list of them in [a Google Spreadsheet that's accessible to all bigchaindb.com accounts](https://docs.google.com/spreadsheets/d/1VhekO6lgk1ZPx8dSjriucy4UinaU9pIdPQ5JXKcbD_Y/edit?usp=sharing). If yes, then you can merge the PR and ignore the rest of this document.
|
||||
|
||||
B. Do they belong to a company or organization which agreed to the Entity Contributor Agreement in the past, and will they be contributing on behalf of that company or organization? (See the Google Spreadsheet link in A.) If yes, then you can merge the PR and ignore the rest of this document.
|
||||
|
||||
C. Did they make a pull request to one of the bigchaindb repositories on GitHub (e.g. bigchaindb/bigchaindb)? If you're not sure, or you can't find one, then respond with an email of the form:
|
||||
|
||||
Dear [NAME OF PERSON WHO AGREED TO THE CLA]
|
||||
|
||||
According to the email copied below, you agreed to the BigchainDB Contributor License Agreement (CLA).
|
||||
|
||||
Did you intend to do that? If no, then feel free to ignore this email and we'll pretend it never happened.
|
||||
|
||||
If you did intend to do that, then do you intend to make a pull request in a BigchainDB repository? Maybe you already did? If so, can you please point me to the pull request in question?
|
||||
|
||||
Sincerely,
|
||||
[INSERT YOUR NAME HERE]
|
||||
|
||||
D. Otherwise, go to the pull request in question and post a comment using this template:
|
||||
|
||||
Hi @nameofuser
|
||||
|
||||
Before we can merge this pull request, we need you or your organization to agree to one of our contributor agreements. One of the big concerns for people using and developing open source software is that someone who contributed to the code might claim the code infringes on their copyright or patent. To guard against this, we ask all our contributors to sign a Contributor License Agreement. This gives us the right to use the code contributed and any patents the contribution relies on. It also gives us and our users comfort that they won't be sued for using open source software. We know it's a hassle, but it makes the project more reliable in the long run. Thank you for your understanding and your contribution!
|
||||
|
||||
If you are contributing on behalf of yourself (and not on behalf of your employer or another organization you are part of) then you should:
|
||||
|
||||
1. Go to: https://www.bigchaindb.com/cla/
|
||||
2. Read the Individual Contributor Agreement
|
||||
3. Fill in the form "For Individuals"
|
||||
4. Check the box to agree
|
||||
5. Click the SEND button
|
||||
|
||||
If you're contributing as an employee, and/or you want all employees of your employing organization to be covered by our contributor agreement, then someone in your organization with the authority to enter agreements on behalf of all employees must do the following:
|
||||
|
||||
1. Go to: https://www.bigchaindb.com/cla/
|
||||
2. Read the Entity Contributor Agreement
|
||||
3. Fill in the form "For Organizations”
|
||||
4. Check the box to agree
|
||||
5. Click the SEND button
|
||||
|
||||
We will email you (or your employer) with further instructions.
|
||||
|
||||
(END OF COMMENT)
|
||||
|
||||
Once they click SEND, we (BigchainDB) will get an email with the information in the form. (Troy gets those emails for sure, I'm not sure who else.) The next step is to send an email to the email address submitted in the form, saying something like (where the stuff in [square brackets] should be replaced):
|
||||
|
||||
Hi [NAME],
|
||||
|
||||
The next step is for you to copy the following block of text into the comments of Pull Request #[NN] on GitHub:
|
||||
|
||||
BEGIN BLOCK
|
||||
|
||||
This is to confirm that I agreed to and accepted the BigchainDB [Entity/Individual] Contributor Agreement at https://www.bigchaindb.com/cla/ and to represent and warrant that I have authority to do so.
|
||||
|
||||
[Insert long random string here. One good source of those is https://www.grc.com/passwords.htm ]
|
||||
|
||||
END BLOCK
|
||||
|
||||
(END OF EMAIL)
|
||||
|
||||
The next step is to wait for them to copy that comment into the comments of the indicated pull request. Once they do so, it's safe to merge the pull request.
|
||||
|
||||
## How to Handle CLA Agreement Emails with No Associated Pull Request
|
||||
|
||||
Reply with an email like this:
|
||||
|
||||
Hi [First Name],
|
||||
|
||||
Today I got an email (copied below) to tell me that you agreed to the BigchainDB Contributor License Agreement. Did you intend to do that?
|
||||
|
||||
If no, then you can ignore this email.
|
||||
|
||||
If yes, then there's another step to connect your email address with your GitHub account. To do that, you must first create a pull request in one of the BigchainDB repositories on GitHub. Once you've done that, please reply to this email with a link to the pull request. Then I'll send you a special block of text to paste into the comments on that pull request.
|
@ -2,10 +2,12 @@
|
||||
|
||||
## Copyrights
|
||||
|
||||
Except as noted in the **Exceptions** section below, for all code and documentation in this repository, BigchainDB GmbH ("We") either:
|
||||
For all the code and documentation in this repository, the copyright is owned by one or more of the following:
|
||||
|
||||
1. owns the copyright, or
|
||||
2. owns the right to sublicense it under any license (because all external contributors must agree to a Contributor License Agreement).
|
||||
- BigchainDB GmbH
|
||||
- A BigchainDB contributor who agreed to a BigchainDB Contributor License Agreement (CLA) with BigchainDB GmbH. (See [BEP-16](https://github.com/bigchaindb/BEPs/tree/master/16).)
|
||||
- A BigchainDB contributor who signed off on the Developer Certificate of Origin (DCO) for all their contributions. (See [BEP-24](https://github.com/bigchaindb/BEPs/tree/master/24).)
|
||||
- (Rarely, see the **Exceptions Section** below) A third pary who licensed the code in question under an open source license.
|
||||
|
||||
## Code Licenses
|
||||
|
||||
|
2
Makefile
2
Makefile
@ -88,8 +88,6 @@ cov: check-deps ## Check code coverage and open the result in the browser
|
||||
|
||||
doc: check-deps ## Generate HTML documentation and open it in the browser
|
||||
@$(DC) run --rm --no-deps bdocs make -C docs/root html
|
||||
@$(DC) run --rm --no-deps bdocs make -C docs/server html
|
||||
@$(DC) run --rm --no-deps bdocs make -C docs/contributing html
|
||||
$(BROWSER) docs/root/build/html/index.html
|
||||
|
||||
doc-acceptance: check-deps ## Create documentation for acceptance tests
|
||||
|
@ -1,3 +1,10 @@
|
||||
<!---
|
||||
Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
BigchainDB and IPDB software contributors.
|
||||
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
--->
|
||||
|
||||
# Python Style Guide
|
||||
|
||||
This guide starts out with our general Python coding style guidelines and ends with a section on how we write & run (Python) tests.
|
||||
@ -85,6 +92,6 @@ flake8 --max-line-length 119 bigchaindb/
|
||||
|
||||
## Writing and Running (Python) Tests
|
||||
|
||||
The content of this section was moved to [`bigchaindb/tests/README.md`](./tests/README.md).
|
||||
The content of this section was moved to [`bigchaindb/tests/README.md`](https://github.com/bigchaindb/bigchaindb/blob/master/tests/README.md).
|
||||
|
||||
Note: We automatically run all tests on all pull requests (using Travis CI), so you should definitely run all tests locally before you submit a pull request. See the above-linked README file for instructions.
|
||||
|
12
README.md
12
README.md
@ -1,13 +1,19 @@
|
||||
<!---
|
||||
Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
BigchainDB and IPDB software contributors.
|
||||
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
--->
|
||||
|
||||
<!--- There is no shield to get the latest version
|
||||
(including pre-release versions) from PyPI,
|
||||
so show the latest GitHub release instead.
|
||||
--->
|
||||
<!--- Codecov isn't working for us lately, so comment it out for now:
|
||||
|
||||
[](https://codecov.io/github/bigchaindb/bigchaindb?branch=master)
|
||||
--->
|
||||
[](https://github.com/bigchaindb/bigchaindb/releases)
|
||||
[](https://pypi.org/project/BigchainDB/)
|
||||
[](https://travis-ci.org/bigchaindb/bigchaindb)
|
||||
[](https://travis-ci.com/bigchaindb/bigchaindb)
|
||||
[](https://docs.bigchaindb.com/projects/server/en/latest/)
|
||||
[](https://gitter.im/bigchaindb/bigchaindb?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
|
||||
|
77
README_cn.md
Normal file
77
README_cn.md
Normal file
@ -0,0 +1,77 @@
|
||||
<!---
|
||||
Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
BigchainDB and IPDB software contributors.
|
||||
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
--->
|
||||
|
||||
<!--- There is no shield to get the latest version
|
||||
(including pre-release versions) from PyPI,
|
||||
so show the latest GitHub release instead.
|
||||
--->
|
||||
|
||||
[](https://codecov.io/github/bigchaindb/bigchaindb?branch=master)
|
||||
[](https://github.com/bigchaindb/bigchaindb/releases)
|
||||
[](https://pypi.org/project/BigchainDB/)
|
||||
[](https://travis-ci.com/bigchaindb/bigchaindb)
|
||||
[](https://docs.bigchaindb.com/projects/server/en/latest/)
|
||||
[](https://gitter.im/bigchaindb/bigchaindb?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
|
||||
# BigchainDB 服务器
|
||||
|
||||
BigchainDB 是区块链数据库. 这是 _BigchainDB 服务器_ 的仓库.
|
||||
|
||||
## 基础知识
|
||||
|
||||
* [尝试快速开始](https://docs.bigchaindb.com/projects/server/en/latest/quickstart.html)
|
||||
* [阅读 BigchainDB 2.0 白皮书](https://www.bigchaindb.com/whitepaper/)
|
||||
* [查阅漫游指南](https://www.bigchaindb.com/developers/guide/)
|
||||
|
||||
## 运行和测试 `master` 分支的 BigchainDB 服务器
|
||||
|
||||
运行和测试最新版本的 BigchainDB 服务器非常简单. 确认你有安装最新版本的 [Docker Compose](https://docs.docker.com/compose/install/). 当你准备好了, 打开一个终端并运行:
|
||||
|
||||
```text
|
||||
git clone https://github.com/bigchaindb/bigchaindb.git
|
||||
cd bigchaindb
|
||||
make run
|
||||
```
|
||||
|
||||
BigchainDB 应该可以通过 `http://localhost:9984/` 访问.
|
||||
|
||||
这里也有一些其他的命令你可以运行:
|
||||
|
||||
* `make start`: 通过源码和守护进程的方式运行 BigchainDB (通过 `make stop` 停止).
|
||||
* `make stop`: 停止运行 BigchainDB.
|
||||
* `make logs`: 附在日志上.
|
||||
* `make test`: 运行所有单元和验收测试.
|
||||
* `make test-unit-watch`: 运行所有测试并等待. 每次更改代码时都会再次运行测试.
|
||||
* `make cov`: 检查代码覆盖率并在浏览器中打开结果.
|
||||
* `make doc`: 生成 HTML 文档并在浏览器中打开它.
|
||||
* `make clean`: 删除所有构建, 测试, 覆盖和 Python 生成物.
|
||||
* `make reset`: 停止并移除所有容器. 警告: 您将丢失存储在 BigchainDB 中的所有数据.
|
||||
|
||||
查看所有可用命令, 请运行 `make`.
|
||||
|
||||
## 一般人员链接
|
||||
|
||||
* [BigchainDB.com](https://www.bigchaindb.com/) - BigchainDB 主网站, 包括新闻订阅
|
||||
* [路线图](https://github.com/bigchaindb/org/blob/master/ROADMAP.md)
|
||||
* [博客](https://medium.com/the-bigchaindb-blog)
|
||||
* [推特](https://twitter.com/BigchainDB)
|
||||
|
||||
## 开发人员链接
|
||||
|
||||
* [所有的 BigchainDB 文档](https://docs.bigchaindb.com/en/latest/)
|
||||
* [BigchainDB 服务器 文档](https://docs.bigchaindb.com/projects/server/en/latest/index.html)
|
||||
* [CONTRIBUTING.md](.github/CONTRIBUTING.md) - how to contribute
|
||||
* [社区指南](CODE_OF_CONDUCT.md)
|
||||
* [公开问题](https://github.com/bigchaindb/bigchaindb/issues)
|
||||
* [公开的 pull request](https://github.com/bigchaindb/bigchaindb/pulls)
|
||||
* [Gitter 聊天室](https://gitter.im/bigchaindb/bigchaindb)
|
||||
|
||||
## 法律声明
|
||||
|
||||
* [许可](LICENSES.md) - 开源代码 & 开源内容
|
||||
* [印记](https://www.bigchaindb.com/imprint/)
|
||||
* [联系我们](https://www.bigchaindb.com/contact/)
|
65
README_kor.md
Normal file
65
README_kor.md
Normal file
@ -0,0 +1,65 @@
|
||||
[](https://codecov.io/github/bigchaindb/bigchaindb?branch=master)
|
||||
[](https://github.com/bigchaindb/bigchaindb/releases)
|
||||
[](https://pypi.org/project/BigchainDB/)
|
||||
[](https://travis-ci.org/bigchaindb/bigchaindb)
|
||||
[](https://docs.bigchaindb.com/projects/server/en/latest/)
|
||||
[](https://gitter.im/bigchaindb/bigchaindb?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
|
||||
# BigchainDB 서버
|
||||
|
||||
BigchaingDB는 블록체인 데이터베이스입니다. 이 저장소는 _BigchaingDB 서버_를 위한 저장소입니다.
|
||||
|
||||
### 기본 사항
|
||||
|
||||
* [빠른 시작 사용해보기](https://docs.bigchaindb.com/projects/server/en/latest/quickstart.html)
|
||||
* [BigchainDB 2.0 백서 읽기](https://www.bigchaindb.com/whitepaper/)
|
||||
* [BigchainDB에 대한 _Hitchiker's Guide_를 확인십시오.](https://www.bigchaindb.com/developers/guide/)
|
||||
|
||||
### `master` Branch에서 BigchainDB 서버 실행 및 테스트
|
||||
|
||||
BigchaingDB 서버의 최신 버전을 실행하고 테스트하는 것은 어렵지 않습니다. [Docker Compose](https://docs.docker.com/compose/install/)의 최신 버전이 설치되어 있는지 확인하십시오. 준비가 되었다면, 터미널에서 다음을 실행하십시오.
|
||||
|
||||
```text
|
||||
git clone https://github.com/bigchaindb/bigchaindb.git
|
||||
cd bigchaindb
|
||||
make run
|
||||
```
|
||||
|
||||
이제 BigchainDB는 `http://localhost:9984/`에 연결되어야 합니다.
|
||||
|
||||
또한, 실행시키기 위한 다른 명령어들도 있습니다.
|
||||
|
||||
* `make start` : 소스로부터 BigchainDB를 실행하고 데몬화합니다. \(이는 `make stop` 을 하면 중지합니다.\)
|
||||
* `make stop` : BigchainDB를 중지합니다.
|
||||
* `make logs` : 로그에 첨부합니다.
|
||||
* `make text` : 모든 유닛과 허가 테스트를 실행합니다.
|
||||
* `make test-unit-watch` : 모든 테스트를 수행하고 기다립니다. 코드를 변경할 때마다 테스트는 다시 실행될 것입니다.
|
||||
* `make cov` : 코드 커버리지를 확인하고 브라우저에서 결과를 엽니다.
|
||||
* `make doc` : HTML 문서를 만들고, 브라우저에서 엽니다.
|
||||
* `make clean` : 모든 빌드와 테스트, 커버리지 및 파이썬 아티팩트를 제거합니다.
|
||||
* `make reset` : 모든 컨테이너들을 중지하고 제거합니다. 경고 : BigchainDB에 저장된 모든 데이터를 잃을 수 있습니다.
|
||||
|
||||
사용 가능한 모든 명령어를 보기 위해서는 `make` 를 실행하십시오.
|
||||
|
||||
### 모두를 위한 링크들
|
||||
|
||||
* [BigchainDB.com ](https://www.bigchaindb.com/)- 뉴스 레터 가입을 포함하는 BigchainDB 주요 웹 사이트
|
||||
* [로드맵](https://github.com/bigchaindb/org/blob/master/ROADMAP.md)
|
||||
* [블로그](https://medium.com/the-bigchaindb-blog)
|
||||
* [트위터](https://twitter.com/BigchainDB)
|
||||
|
||||
### 개발자들을 위한 링크들
|
||||
|
||||
* [모든 BigchainDB 문서](https://docs.bigchaindb.com/en/latest/)
|
||||
* [BigchainDB 서버 문서](https://docs.bigchaindb.com/projects/server/en/latest/index.html)
|
||||
* [CONTRIBUTING.md](https://github.com/bigchaindb/bigchaindb/blob/master/.github/CONTRIBUTING.md) - 기여를 하는 방법
|
||||
* [커뮤니티 가이드라인](https://github.com/bigchaindb/bigchaindb/blob/master/CODE_OF_CONDUCT.md)
|
||||
* [이슈 작성](https://github.com/bigchaindb/bigchaindb/issues)
|
||||
* [pull request 하기](https://github.com/bigchaindb/bigchaindb/pulls)
|
||||
* [Gitter 채팅방](https://gitter.im/bigchaindb/bigchaindb)
|
||||
|
||||
### 합법
|
||||
|
||||
* [라이선스](https://github.com/bigchaindb/bigchaindb/blob/master/LICENSES.md) - 오픈 소스 & 오픈 콘텐츠
|
||||
* [발행](https://www.bigchaindb.com/imprint/)
|
||||
* [연락처](https://www.bigchaindb.com/contact/)
|
@ -1,3 +1,10 @@
|
||||
<!---
|
||||
Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
BigchainDB and IPDB software contributors.
|
||||
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
--->
|
||||
|
||||
# Our Release Process
|
||||
|
||||
## Notes
|
||||
@ -39,32 +46,39 @@ The following steps are what we do to release a new version of _BigchainDB Serve
|
||||
- In `bigchaindb/version.py`:
|
||||
- update `__version__` to e.g. `0.9.0` (with no `.dev` on the end)
|
||||
- update `__short_version__` to e.g. `0.9` (with no `.dev` on the end)
|
||||
- In the docs about installing BigchainDB (and Tendermint), and in the associated scripts, recommend/install a version of Tendermint that _actually works_ with the soon-to-be-released version of BigchainDB. You can find all such references by doing a search for the previously-recommended version number, such as `0.31.5`.
|
||||
- In `setup.py`, _maybe_ update the development status item in the `classifiers` list. For example, one allowed value is `"Development Status :: 5 - Production/Stable"`. The [allowed values are listed at pypi.python.org](https://pypi.python.org/pypi?%3Aaction=list_classifiers).
|
||||
|
||||
1. **Wait for all the tests to pass!**
|
||||
1. Merge the pull request into the `master` branch.
|
||||
1. Go to the [bigchaindb/bigchaindb Releases page on GitHub](https://github.com/bigchaindb/bigchaindb/releases)
|
||||
2. **Wait for all the tests to pass!**
|
||||
3. Merge the pull request into the `master` branch.
|
||||
4. Go to the [bigchaindb/bigchaindb Releases page on GitHub](https://github.com/bigchaindb/bigchaindb/releases)
|
||||
and click the "Draft a new release" button.
|
||||
1. Fill in the details:
|
||||
5. Fill in the details:
|
||||
- **Tag version:** version number preceded by `v`, e.g. `v0.9.1`
|
||||
- **Target:** the last commit that was just merged. In other words, that commit will get a Git tag with the value given for tag version above.
|
||||
- **Title:** Same as tag version above, e.g `v0.9.1`
|
||||
- **Description:** The body of the changelog entry (Added, Changed, etc.)
|
||||
1. Click "Publish release" to publish the release on GitHub.
|
||||
1. On your local computer, make sure you're on the `master` branch and that it's up-to-date with the `master` branch in the bigchaindb/bigchaindb repository (e.g. `git pull upstream master`). We're going to use that to push a new `bigchaindb` package to PyPI.
|
||||
1. Make sure you have a `~/.pypirc` file containing credentials for PyPI.
|
||||
1. Do `make release` to build and publish the new `bigchaindb` package on PyPI.
|
||||
1. [Log in to readthedocs.org](https://readthedocs.org/accounts/login/) and go to the **BigchainDB Server** project, then:
|
||||
6. Click "Publish release" to publish the release on GitHub.
|
||||
7. On your local computer, make sure you're on the `master` branch and that it's up-to-date with the `master` branch in the bigchaindb/bigchaindb repository (e.g. `git pull upstream master`). We're going to use that to push a new `bigchaindb` package to PyPI.
|
||||
8. Make sure you have a `~/.pypirc` file containing credentials for PyPI.
|
||||
9. Do `make release` to build and publish the new `bigchaindb` package on PyPI. For this step you need to have `twine` installed. If you get an error like `Makefile:135: recipe for target 'clean-pyc' failed` then try doing
|
||||
```text
|
||||
sudo chown -R $(whoami):$(whoami) .
|
||||
```
|
||||
10. [Log in to readthedocs.org](https://readthedocs.org/accounts/login/) and go to the **BigchainDB Server** project, then:
|
||||
- Click on "Builds", select "latest" from the drop-down menu, then click the "Build Version:" button.
|
||||
- Wait for the build of "latest" to finish. This can take a few minutes.
|
||||
- Go to Admin --> Advanced Settings
|
||||
and make sure that "Default branch:" (i.e. what "latest" points to)
|
||||
is set to the new release's tag, e.g. `v0.9.1`.
|
||||
(Don't miss the `v` in front.)
|
||||
(It won't be an option if you didn't wait for the build of "latest" to finish.)
|
||||
Then scroll to the bottom and click "Save".
|
||||
- Go to Admin --> Versions
|
||||
and under **Choose Active Versions**, do these things:
|
||||
1. Make sure that the new version's tag is "Active" and "Public"
|
||||
1. Make sure the **stable** branch is _not_ active.
|
||||
1. Scroll to the bottom of the page and click the "Submit" button.
|
||||
1. Go to [Docker Hub](https://hub.docker.com/) and sign in, then:
|
||||
2. Make sure the **stable** branch is _not_ active.
|
||||
3. Scroll to the bottom of the page and click "Save".
|
||||
11. Go to [Docker Hub](https://hub.docker.com/) and sign in, then:
|
||||
- Click on "Organizations"
|
||||
- Click on "bigchaindb"
|
||||
- Click on "bigchaindb/bigchaindb"
|
||||
|
@ -1,3 +1,10 @@
|
||||
<!---
|
||||
Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
BigchainDB and IPDB software contributors.
|
||||
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
--->
|
||||
|
||||
# BigchainDB Roadmap
|
||||
|
||||
We moved the BigchainDB Roadmap to the bigchaindb/org repository; see:
|
||||
|
@ -1,3 +1,10 @@
|
||||
<!---
|
||||
Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
BigchainDB and IPDB software contributors.
|
||||
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
--->
|
||||
|
||||
# Acceptance test suite
|
||||
This directory contains the acceptance test suite for BigchainDB.
|
||||
|
||||
|
@ -5,5 +5,5 @@ RUN pip install --upgrade \
|
||||
pycco \
|
||||
websocket-client~=0.47.0 \
|
||||
pytest~=3.0 \
|
||||
bigchaindb-driver==0.5.0 \
|
||||
blns
|
||||
bigchaindb-driver~=0.6.2 \
|
||||
blns
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
# # Basic Acceptance Test
|
||||
# Here we check that the primitives of the system behave as expected.
|
||||
# As you will see, this script tests basic stuff like:
|
||||
@ -36,7 +41,6 @@ def test_basic():
|
||||
# The two keypairs will be called—drum roll—Alice and Bob.
|
||||
alice, bob = generate_keypair(), generate_keypair()
|
||||
|
||||
|
||||
# ## Alice registers her bike in BigchainDB
|
||||
# Alice has a nice bike, and here she creates the "digital twin"
|
||||
# of her bike.
|
||||
@ -57,9 +61,8 @@ def test_basic():
|
||||
# a variable with a short and easy name
|
||||
bike_id = fulfilled_creation_tx['id']
|
||||
|
||||
|
||||
# Now she is ready to send it to the BigchainDB Network.
|
||||
sent_transfer_tx = bdb.transactions.send(fulfilled_creation_tx)
|
||||
sent_transfer_tx = bdb.transactions.send_commit(fulfilled_creation_tx)
|
||||
|
||||
# And just to be 100% sure, she also checks if she can retrieve
|
||||
# it from the BigchainDB node.
|
||||
@ -69,7 +72,6 @@ def test_basic():
|
||||
assert len(bdb.outputs.get(alice.public_key, spent=False)) == 1
|
||||
assert bdb.outputs.get(alice.public_key)[0]['transaction_id'] == bike_id
|
||||
|
||||
|
||||
# ## Alice transfers her bike to Bob
|
||||
# After registering her bike, Alice is ready to transfer it to Bob.
|
||||
# She needs to create a new `TRANSFER` transaction.
|
||||
@ -106,7 +108,7 @@ def test_basic():
|
||||
private_keys=alice.private_key)
|
||||
|
||||
# She finally sends the transaction to a BigchainDB node.
|
||||
sent_transfer_tx = bdb.transactions.send(fulfilled_transfer_tx)
|
||||
sent_transfer_tx = bdb.transactions.send_commit(fulfilled_transfer_tx)
|
||||
|
||||
# And just to be 100% sure, she also checks if she can retrieve
|
||||
# it from the BigchainDB node.
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
# # Divisible assets integration testing
|
||||
# This test checks if we can successfully divide assets.
|
||||
# The script tests various things like:
|
||||
@ -70,7 +75,7 @@ def test_divisible_assets():
|
||||
prepared_token_tx,
|
||||
private_keys=alice.private_key)
|
||||
|
||||
bdb.transactions.send(fulfilled_token_tx, mode='commit')
|
||||
bdb.transactions.send_commit(fulfilled_token_tx)
|
||||
|
||||
# We store the `id` of the transaction to use it later on.
|
||||
bike_token_id = fulfilled_token_tx['id']
|
||||
@ -112,8 +117,7 @@ def test_divisible_assets():
|
||||
prepared_transfer_tx,
|
||||
private_keys=bob.private_key)
|
||||
|
||||
sent_transfer_tx = bdb.transactions.send(fulfilled_transfer_tx,
|
||||
mode='commit')
|
||||
sent_transfer_tx = bdb.transactions.send_commit(fulfilled_transfer_tx)
|
||||
|
||||
# First, Bob checks if the transaction was successful.
|
||||
assert bdb.transactions.retrieve(
|
||||
@ -163,7 +167,7 @@ def test_divisible_assets():
|
||||
# Remember Bob, last time you spent 3 tokens already,
|
||||
# so you only have 7 left.
|
||||
with pytest.raises(BadRequest) as error:
|
||||
bdb.transactions.send(fulfilled_transfer_tx, mode='commit')
|
||||
bdb.transactions.send_commit(fulfilled_transfer_tx)
|
||||
|
||||
# Now Bob gets an error saying that the amount he wanted to spent is
|
||||
# higher than the amount of tokens he has left.
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
# # Double Spend testing
|
||||
# This test challenge the system with double spends.
|
||||
|
||||
@ -26,7 +31,7 @@ def test_double_create():
|
||||
|
||||
def send_and_queue(tx):
|
||||
try:
|
||||
bdb.transactions.send(tx)
|
||||
bdb.transactions.send_commit(tx)
|
||||
results.put('OK')
|
||||
except bigchaindb_driver.exceptions.TransportError as e:
|
||||
results.put('FAIL')
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
# # Multiple owners integration testing
|
||||
# This test checks if we can successfully create and transfer a transaction
|
||||
# with multiple owners.
|
||||
@ -60,7 +65,7 @@ def test_multiple_owners():
|
||||
prepared_dw_tx,
|
||||
private_keys=[alice.private_key, bob.private_key])
|
||||
|
||||
bdb.transactions.send(fulfilled_dw_tx, mode='commit')
|
||||
bdb.transactions.send_commit(fulfilled_dw_tx)
|
||||
|
||||
# We store the `id` of the transaction to use it later on.
|
||||
dw_id = fulfilled_dw_tx['id']
|
||||
@ -105,8 +110,7 @@ def test_multiple_owners():
|
||||
prepared_transfer_tx,
|
||||
private_keys=[alice.private_key, bob.private_key])
|
||||
|
||||
sent_transfer_tx = bdb.transactions.send(fulfilled_transfer_tx,
|
||||
mode='commit')
|
||||
sent_transfer_tx = bdb.transactions.send_commit(fulfilled_transfer_tx)
|
||||
|
||||
# They check if the transaction was successful.
|
||||
assert bdb.transactions.retrieve(
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
# ## Testing potentially hazardous strings
|
||||
# This test uses a library of `naughty` strings (code injections, weird unicode chars., etc.) as both keys and values.
|
||||
# We look for either a successful tx, or in the case that we use a naughty string as a key, and it violates some key
|
||||
@ -8,7 +13,8 @@
|
||||
# env variables.
|
||||
import os
|
||||
|
||||
# Since the naughty strings get encoded and decoded in odd ways, we'll use a regex to sweep those details under the rug.
|
||||
# Since the naughty strings get encoded and decoded in odd ways,
|
||||
# we'll use a regex to sweep those details under the rug.
|
||||
import re
|
||||
|
||||
# We'll use a nice library of naughty strings...
|
||||
@ -49,7 +55,7 @@ def send_naughty_tx(asset, metadata):
|
||||
|
||||
# The fulfilled tx gets sent to the BDB network
|
||||
try:
|
||||
sent_transaction = bdb.transactions.send(fulfilled_transaction)
|
||||
sent_transaction = bdb.transactions.send_commit(fulfilled_transaction)
|
||||
except BadRequest as e:
|
||||
sent_transaction = e
|
||||
|
||||
@ -60,8 +66,12 @@ def send_naughty_tx(asset, metadata):
|
||||
# Then she expects a nicely formatted error code
|
||||
status_code = sent_transaction.status_code
|
||||
error = sent_transaction.error
|
||||
regex = '\{"message":"Invalid transaction \\(ValidationError\\): Invalid key name .* in asset object. ' \
|
||||
'The key name cannot contain characters .* or null characters","status":400\}\n'
|
||||
regex = (
|
||||
r'\{\s*\n*'
|
||||
r'\s*"message":\s*"Invalid transaction \(ValidationError\):\s*'
|
||||
r'Invalid key name.*The key name cannot contain characters.*\n*'
|
||||
r'\s*"status":\s*400\n*'
|
||||
r'\s*\}\n*')
|
||||
assert status_code == 400
|
||||
assert re.fullmatch(regex, error), sent_transaction
|
||||
# Otherwise, she expects to see her transaction in the database
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
# # Stream Acceptance Test
|
||||
# This test checks if the event stream works correctly. The basic idea of this
|
||||
# test is to generate some random **valid** transaction, send them to a
|
||||
@ -96,7 +101,7 @@ def test_stream():
|
||||
# transactions to be in the shared queue: this is a two phase test,
|
||||
# first we send a bunch of transactions, then we check if they are
|
||||
# valid (and, in this case, they should).
|
||||
bdb.transactions.send(tx, mode='async')
|
||||
bdb.transactions.send_async(tx)
|
||||
|
||||
# The `id` of every sent transaction is then stored in a list.
|
||||
sent.append(tx['id'])
|
||||
|
@ -1,3 +1,10 @@
|
||||
<!---
|
||||
Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
BigchainDB and IPDB software contributors.
|
||||
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
--->
|
||||
|
||||
# Overview
|
||||
|
||||
A high-level description of the files and subdirectories of BigchainDB.
|
||||
@ -12,9 +19,9 @@ The `BigchainDB` class is defined here. Most node-level operations and database
|
||||
|
||||
`Block`, `Transaction`, and `Asset` classes are defined here. The classes mirror the block and transaction structure from the [documentation](https://docs.bigchaindb.com/projects/server/en/latest/data-models/index.html), but also include methods for validation and signing.
|
||||
|
||||
### [`consensus.py`](./consensus.py)
|
||||
### [`validation.py`](./validation.py)
|
||||
|
||||
Base class for consensus methods (verification of votes, blocks, and transactions). The actual logic is mostly found in `transaction` and `block` models, defined in [`models.py`](./models.py).
|
||||
Base class for validation methods (verification of votes, blocks, and transactions). The actual logic is mostly found in `transaction` and `block` models, defined in [`models.py`](./models.py).
|
||||
|
||||
### [`processes.py`](./processes.py)
|
||||
|
||||
|
@ -1,8 +1,14 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
import copy
|
||||
import logging
|
||||
|
||||
from bigchaindb.log import DEFAULT_LOGGING_CONFIG as log_config
|
||||
from bigchaindb.lib import BigchainDB # noqa
|
||||
from bigchaindb.migrations.chain_migration_election import ChainMigrationElection
|
||||
from bigchaindb.version import __version__ # noqa
|
||||
from bigchaindb.core import App # noqa
|
||||
|
||||
@ -65,6 +71,7 @@ config = {
|
||||
'tendermint': {
|
||||
'host': 'localhost',
|
||||
'port': 26657,
|
||||
'version': 'v0.31.5', # look for __tm_supported_versions__
|
||||
},
|
||||
# FIXME: hardcoding to localmongodb for now
|
||||
'database': _database_map['localmongodb'],
|
||||
@ -90,7 +97,10 @@ _config = copy.deepcopy(config)
|
||||
from bigchaindb.common.transaction import Transaction # noqa
|
||||
from bigchaindb import models # noqa
|
||||
from bigchaindb.upsert_validator import ValidatorElection # noqa
|
||||
from bigchaindb.elections.vote import Vote # noqa
|
||||
|
||||
Transaction.register_type(Transaction.CREATE, models.Transaction)
|
||||
Transaction.register_type(Transaction.TRANSFER, models.Transaction)
|
||||
Transaction.register_type(ValidatorElection.VALIDATOR_ELECTION, ValidatorElection)
|
||||
Transaction.register_type(ValidatorElection.OPERATION, ValidatorElection)
|
||||
Transaction.register_type(ChainMigrationElection.OPERATION, ChainMigrationElection)
|
||||
Transaction.register_type(Vote.OPERATION, Vote)
|
||||
|
@ -1,3 +1,10 @@
|
||||
<!---
|
||||
Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
BigchainDB and IPDB software contributors.
|
||||
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
--->
|
||||
|
||||
# Backend Interfaces
|
||||
|
||||
## Structure
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""Generic backend database interfaces expected by BigchainDB.
|
||||
|
||||
The interfaces in this module allow BigchainDB to be agnostic about its
|
||||
|
@ -1,11 +1,16 @@
|
||||
from itertools import repeat
|
||||
from importlib import import_module
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
import logging
|
||||
from importlib import import_module
|
||||
from itertools import repeat
|
||||
|
||||
import bigchaindb
|
||||
from bigchaindb.common.exceptions import ConfigurationError
|
||||
from bigchaindb.backend.exceptions import ConnectionError
|
||||
|
||||
from bigchaindb.backend.utils import get_bigchaindb_config_value, get_bigchaindb_config_value_or_key_error
|
||||
from bigchaindb.common.exceptions import ConfigurationError
|
||||
|
||||
BACKENDS = {
|
||||
'localmongodb': 'bigchaindb.backend.localmongodb.connection.LocalMongoDBConnection',
|
||||
@ -43,10 +48,10 @@ def connect(backend=None, host=None, port=None, name=None, max_tries=None,
|
||||
Authentication failure after connecting to the database.
|
||||
"""
|
||||
|
||||
backend = backend or bigchaindb.config['database']['backend']
|
||||
host = host or bigchaindb.config['database']['host']
|
||||
port = port or bigchaindb.config['database']['port']
|
||||
dbname = name or bigchaindb.config['database']['name']
|
||||
backend = backend or get_bigchaindb_config_value_or_key_error('backend')
|
||||
host = host or get_bigchaindb_config_value_or_key_error('host')
|
||||
port = port or get_bigchaindb_config_value_or_key_error('port')
|
||||
dbname = name or get_bigchaindb_config_value_or_key_error('name')
|
||||
# Not sure how to handle this here. This setting is only relevant for
|
||||
# mongodb.
|
||||
# I added **kwargs for both RethinkDBConnection and MongoDBConnection
|
||||
@ -56,15 +61,15 @@ def connect(backend=None, host=None, port=None, name=None, max_tries=None,
|
||||
# UPD: RethinkDBConnection is not here anymore cause we no longer support RethinkDB.
|
||||
# The problem described above might be reconsidered next time we introduce a backend,
|
||||
# if it ever happens.
|
||||
replicaset = replicaset or bigchaindb.config['database'].get('replicaset')
|
||||
ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
|
||||
login = login or bigchaindb.config['database'].get('login')
|
||||
password = password or bigchaindb.config['database'].get('password')
|
||||
ca_cert = ca_cert or bigchaindb.config['database'].get('ca_cert', None)
|
||||
certfile = certfile or bigchaindb.config['database'].get('certfile', None)
|
||||
keyfile = keyfile or bigchaindb.config['database'].get('keyfile', None)
|
||||
keyfile_passphrase = keyfile_passphrase or bigchaindb.config['database'].get('keyfile_passphrase', None)
|
||||
crlfile = crlfile or bigchaindb.config['database'].get('crlfile', None)
|
||||
replicaset = replicaset or get_bigchaindb_config_value('replicaset')
|
||||
ssl = ssl if ssl is not None else get_bigchaindb_config_value('ssl', False)
|
||||
login = login or get_bigchaindb_config_value('login')
|
||||
password = password or get_bigchaindb_config_value('password')
|
||||
ca_cert = ca_cert or get_bigchaindb_config_value('ca_cert')
|
||||
certfile = certfile or get_bigchaindb_config_value('certfile')
|
||||
keyfile = keyfile or get_bigchaindb_config_value('keyfile')
|
||||
keyfile_passphrase = keyfile_passphrase or get_bigchaindb_config_value('keyfile_passphrase', None)
|
||||
crlfile = crlfile or get_bigchaindb_config_value('crlfile')
|
||||
|
||||
try:
|
||||
module_name, _, class_name = BACKENDS[backend].rpartition('.')
|
||||
@ -113,7 +118,7 @@ class Connection:
|
||||
self.host = host or dbconf['host']
|
||||
self.port = port or dbconf['port']
|
||||
self.dbname = dbname or dbconf['name']
|
||||
self.connection_timeout = connection_timeout if connection_timeout is not None\
|
||||
self.connection_timeout = connection_timeout if connection_timeout is not None \
|
||||
else dbconf['connection_timeout']
|
||||
self.max_tries = max_tries if max_tries is not None else dbconf['max_tries']
|
||||
self.max_tries_counter = range(self.max_tries) if self.max_tries != 0 else repeat(0)
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
from bigchaindb.exceptions import BigchainDBError
|
||||
|
||||
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""MongoDB backend implementation.
|
||||
|
||||
Contains a MongoDB-specific implementation of the
|
||||
|
@ -1,16 +1,20 @@
|
||||
import time
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
import logging
|
||||
from ssl import CERT_REQUIRED
|
||||
|
||||
import pymongo
|
||||
|
||||
import bigchaindb
|
||||
from bigchaindb.utils import Lazy
|
||||
from bigchaindb.common.exceptions import ConfigurationError
|
||||
from bigchaindb.backend.connection import Connection
|
||||
from bigchaindb.backend.exceptions import (DuplicateKeyError,
|
||||
OperationError,
|
||||
ConnectionError)
|
||||
from bigchaindb.backend.connection import Connection
|
||||
from bigchaindb.backend.utils import get_bigchaindb_config_value
|
||||
from bigchaindb.common.exceptions import ConfigurationError
|
||||
from bigchaindb.utils import Lazy
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -30,15 +34,15 @@ class LocalMongoDBConnection(Connection):
|
||||
"""
|
||||
|
||||
super().__init__(**kwargs)
|
||||
self.replicaset = replicaset or bigchaindb.config['database'].get('replicaset')
|
||||
self.ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
|
||||
self.login = login or bigchaindb.config['database'].get('login')
|
||||
self.password = password or bigchaindb.config['database'].get('password')
|
||||
self.ca_cert = ca_cert or bigchaindb.config['database'].get('ca_cert', None)
|
||||
self.certfile = certfile or bigchaindb.config['database'].get('certfile', None)
|
||||
self.keyfile = keyfile or bigchaindb.config['database'].get('keyfile', None)
|
||||
self.keyfile_passphrase = keyfile_passphrase or bigchaindb.config['database'].get('keyfile_passphrase', None)
|
||||
self.crlfile = crlfile or bigchaindb.config['database'].get('crlfile', None)
|
||||
self.replicaset = replicaset or get_bigchaindb_config_value('replicaset')
|
||||
self.ssl = ssl if ssl is not None else get_bigchaindb_config_value('ssl', False)
|
||||
self.login = login or get_bigchaindb_config_value('login')
|
||||
self.password = password or get_bigchaindb_config_value('password')
|
||||
self.ca_cert = ca_cert or get_bigchaindb_config_value('ca_cert')
|
||||
self.certfile = certfile or get_bigchaindb_config_value('certfile')
|
||||
self.keyfile = keyfile or get_bigchaindb_config_value('keyfile')
|
||||
self.keyfile_passphrase = keyfile_passphrase or get_bigchaindb_config_value('keyfile_passphrase')
|
||||
self.crlfile = crlfile or get_bigchaindb_config_value('crlfile')
|
||||
|
||||
@property
|
||||
def db(self):
|
||||
@ -59,7 +63,7 @@ class LocalMongoDBConnection(Connection):
|
||||
try:
|
||||
try:
|
||||
return query.run(self.conn)
|
||||
except pymongo.errors.AutoReconnect as exc:
|
||||
except pymongo.errors.AutoReconnect:
|
||||
logger.warning('Lost connection to the database, '
|
||||
'retrying query.')
|
||||
return query.run(self.conn)
|
||||
@ -84,23 +88,6 @@ class LocalMongoDBConnection(Connection):
|
||||
"""
|
||||
|
||||
try:
|
||||
if self.replicaset:
|
||||
# we should only return a connection if the replica set is
|
||||
# initialized. initialize_replica_set will check if the
|
||||
# replica set is initialized else it will initialize it.
|
||||
initialize_replica_set(self.host,
|
||||
self.port,
|
||||
self.connection_timeout,
|
||||
self.dbname,
|
||||
self.ssl,
|
||||
self.login,
|
||||
self.password,
|
||||
self.ca_cert,
|
||||
self.certfile,
|
||||
self.keyfile,
|
||||
self.keyfile_passphrase,
|
||||
self.crlfile)
|
||||
|
||||
# FYI: the connection process might raise a
|
||||
# `ServerSelectionTimeoutError`, that is a subclass of
|
||||
# `ConnectionFailure`.
|
||||
@ -136,8 +123,6 @@ class LocalMongoDBConnection(Connection):
|
||||
|
||||
return client
|
||||
|
||||
# `initialize_replica_set` might raise `ConnectionFailure`,
|
||||
# `OperationFailure` or `ConfigurationError`.
|
||||
except (pymongo.errors.ConnectionFailure,
|
||||
pymongo.errors.OperationFailure) as exc:
|
||||
logger.info('Exception in _connect(): {}'.format(exc))
|
||||
@ -149,120 +134,3 @@ class LocalMongoDBConnection(Connection):
|
||||
MONGO_OPTS = {
|
||||
'socketTimeoutMS': 20000,
|
||||
}
|
||||
|
||||
|
||||
def initialize_replica_set(host, port, connection_timeout, dbname, ssl, login,
|
||||
password, ca_cert, certfile, keyfile,
|
||||
keyfile_passphrase, crlfile):
|
||||
"""Initialize a replica set. If already initialized skip."""
|
||||
|
||||
# Setup a MongoDB connection
|
||||
# The reason we do this instead of `backend.connect` is that
|
||||
# `backend.connect` will connect you to a replica set but this fails if
|
||||
# you try to connect to a replica set that is not yet initialized
|
||||
try:
|
||||
# The presence of ca_cert, certfile, keyfile, crlfile implies the
|
||||
# use of certificates for TLS connectivity.
|
||||
if ca_cert is None or certfile is None or keyfile is None or \
|
||||
crlfile is None:
|
||||
conn = pymongo.MongoClient(host,
|
||||
port,
|
||||
serverselectiontimeoutms=connection_timeout,
|
||||
ssl=ssl,
|
||||
**MONGO_OPTS)
|
||||
if login is not None and password is not None:
|
||||
conn[dbname].authenticate(login, password)
|
||||
else:
|
||||
logger.info('Connecting to MongoDB over TLS/SSL...')
|
||||
conn = pymongo.MongoClient(host,
|
||||
port,
|
||||
serverselectiontimeoutms=connection_timeout,
|
||||
ssl=ssl,
|
||||
ssl_ca_certs=ca_cert,
|
||||
ssl_certfile=certfile,
|
||||
ssl_keyfile=keyfile,
|
||||
ssl_pem_passphrase=keyfile_passphrase,
|
||||
ssl_crlfile=crlfile,
|
||||
ssl_cert_reqs=CERT_REQUIRED,
|
||||
**MONGO_OPTS)
|
||||
if login is not None:
|
||||
logger.info('Authenticating to the database...')
|
||||
conn[dbname].authenticate(login, mechanism='MONGODB-X509')
|
||||
|
||||
except (pymongo.errors.ConnectionFailure,
|
||||
pymongo.errors.OperationFailure) as exc:
|
||||
logger.info('Exception in _connect(): {}'.format(exc))
|
||||
raise ConnectionError(str(exc)) from exc
|
||||
except pymongo.errors.ConfigurationError as exc:
|
||||
raise ConfigurationError from exc
|
||||
|
||||
_check_replica_set(conn)
|
||||
host = '{}:{}'.format(bigchaindb.config['database']['host'],
|
||||
bigchaindb.config['database']['port'])
|
||||
config = {'_id': bigchaindb.config['database']['replicaset'],
|
||||
'members': [{'_id': 0, 'host': host}]}
|
||||
|
||||
try:
|
||||
conn.admin.command('replSetInitiate', config)
|
||||
except pymongo.errors.OperationFailure as exc_info:
|
||||
if exc_info.details['codeName'] == 'AlreadyInitialized':
|
||||
return
|
||||
raise
|
||||
else:
|
||||
_wait_for_replica_set_initialization(conn)
|
||||
logger.info('Initialized replica set')
|
||||
finally:
|
||||
if conn is not None:
|
||||
logger.info('Closing initial connection to MongoDB')
|
||||
conn.close()
|
||||
|
||||
|
||||
def _check_replica_set(conn):
|
||||
"""Checks if the replSet option was enabled either through the command
|
||||
line option or config file and if it matches the one provided by
|
||||
bigchaindb configuration.
|
||||
|
||||
Note:
|
||||
The setting we are looking for will have a different name depending
|
||||
if it was set by the config file (`replSetName`) or by command
|
||||
line arguments (`replSet`).
|
||||
|
||||
Raise:
|
||||
:exc:`~ConfigurationError`: If mongod was not started with the
|
||||
replSet option.
|
||||
"""
|
||||
options = conn.admin.command('getCmdLineOpts')
|
||||
try:
|
||||
repl_opts = options['parsed']['replication']
|
||||
repl_set_name = repl_opts.get('replSetName', repl_opts.get('replSet'))
|
||||
except KeyError:
|
||||
raise ConfigurationError('mongod was not started with'
|
||||
' the replSet option.')
|
||||
|
||||
bdb_repl_set_name = bigchaindb.config['database']['replicaset']
|
||||
if repl_set_name != bdb_repl_set_name:
|
||||
raise ConfigurationError('The replicaset configuration of '
|
||||
'bigchaindb (`{}`) needs to match '
|
||||
'the replica set name from MongoDB'
|
||||
' (`{}`)'.format(bdb_repl_set_name,
|
||||
repl_set_name))
|
||||
|
||||
|
||||
def _wait_for_replica_set_initialization(conn):
|
||||
"""Wait for a replica set to finish initialization.
|
||||
|
||||
If a replica set is being initialized for the first time it takes some
|
||||
time. Nodes need to discover each other and an election needs to take
|
||||
place. During this time the database is not writable so we need to wait
|
||||
before continuing with the rest of the initialization
|
||||
"""
|
||||
|
||||
# I did not find a better way to do this for now.
|
||||
# To check if the database is ready we will poll the mongodb logs until
|
||||
# we find the line that says the database is ready
|
||||
logger.info('Waiting for mongodb replica set initialization')
|
||||
while True:
|
||||
logs = conn.admin.command('getLog', 'rs')['log']
|
||||
if any('database writes are now permitted' in line for line in logs):
|
||||
return
|
||||
time.sleep(0.1)
|
||||
|
@ -1,28 +1,21 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""Query implementation for MongoDB"""
|
||||
|
||||
from pymongo import DESCENDING
|
||||
|
||||
from bigchaindb import backend
|
||||
from bigchaindb.backend.exceptions import DuplicateKeyError
|
||||
from bigchaindb.common.exceptions import MultipleValidatorOperationError
|
||||
from bigchaindb.backend.utils import module_dispatch_registrar
|
||||
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
|
||||
from bigchaindb.common.transaction import Transaction
|
||||
from bigchaindb.backend.query import VALIDATOR_UPDATE_ID
|
||||
|
||||
register_query = module_dispatch_registrar(backend.query)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def store_transaction(conn, signed_transaction):
|
||||
try:
|
||||
return conn.run(
|
||||
conn.collection('transactions')
|
||||
.insert_one(signed_transaction))
|
||||
except DuplicateKeyError:
|
||||
pass
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def store_transactions(conn, signed_transactions):
|
||||
return conn.run(conn.collection('transactions')
|
||||
@ -99,9 +92,10 @@ def get_assets(conn, asset_ids):
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_spent(conn, transaction_id, output):
|
||||
query = {'inputs.fulfills': {
|
||||
'transaction_id': transaction_id,
|
||||
'output_index': output}}
|
||||
query = {'inputs':
|
||||
{'$elemMatch':
|
||||
{'$and': [{'fulfills.transaction_id': transaction_id},
|
||||
{'fulfills.output_index': output}]}}}
|
||||
|
||||
return conn.run(
|
||||
conn.collection('transactions')
|
||||
@ -112,7 +106,8 @@ def get_spent(conn, transaction_id, output):
|
||||
def get_latest_block(conn):
|
||||
return conn.run(
|
||||
conn.collection('blocks')
|
||||
.find_one(sort=[('height', DESCENDING)]))
|
||||
.find_one(projection={'_id': False},
|
||||
sort=[('height', DESCENDING)]))
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
@ -126,29 +121,19 @@ def store_block(conn, block):
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_txids_filtered(conn, asset_id, operation=None):
|
||||
match_create = {
|
||||
'operation': 'CREATE',
|
||||
'id': asset_id
|
||||
}
|
||||
match_transfer = {
|
||||
'operation': 'TRANSFER',
|
||||
'asset.id': asset_id
|
||||
}
|
||||
def get_txids_filtered(conn, asset_id, operation=None, last_tx=None):
|
||||
|
||||
if operation == Transaction.CREATE:
|
||||
match = match_create
|
||||
elif operation == Transaction.TRANSFER:
|
||||
match = match_transfer
|
||||
else:
|
||||
match = {'$or': [match_create, match_transfer]}
|
||||
match = {
|
||||
Transaction.CREATE: {'operation': 'CREATE', 'id': asset_id},
|
||||
Transaction.TRANSFER: {'operation': 'TRANSFER', 'asset.id': asset_id},
|
||||
None: {'$or': [{'asset.id': asset_id}, {'id': asset_id}]},
|
||||
}[operation]
|
||||
|
||||
cursor = conn.run(conn.collection('transactions').find(match))
|
||||
|
||||
if last_tx:
|
||||
cursor = cursor.sort([('$natural', DESCENDING)]).limit(1)
|
||||
|
||||
pipeline = [
|
||||
{'$match': match}
|
||||
]
|
||||
cursor = conn.run(
|
||||
conn.collection('transactions')
|
||||
.aggregate(pipeline))
|
||||
return (elem['id'] for elem in cursor)
|
||||
|
||||
|
||||
@ -189,15 +174,18 @@ def get_owned_ids(conn, owner):
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_spending_transactions(conn, inputs):
|
||||
transaction_ids = [i['transaction_id'] for i in inputs]
|
||||
output_indexes = [i['output_index'] for i in inputs]
|
||||
query = {'inputs':
|
||||
{'$elemMatch':
|
||||
{'$and':
|
||||
[
|
||||
{'fulfills.transaction_id': {'$in': transaction_ids}},
|
||||
{'fulfills.output_index': {'$in': output_indexes}}
|
||||
]}}}
|
||||
|
||||
cursor = conn.run(
|
||||
conn.collection('transactions').aggregate([
|
||||
{'$match': {
|
||||
'inputs.fulfills': {
|
||||
'$in': inputs,
|
||||
},
|
||||
}},
|
||||
{'$project': {'_id': False}}
|
||||
]))
|
||||
conn.collection('transactions').find(query, {'_id': False}))
|
||||
return cursor
|
||||
|
||||
|
||||
@ -243,7 +231,7 @@ def store_unspent_outputs(conn, *unspent_outputs):
|
||||
def delete_unspent_outputs(conn, *unspent_outputs):
|
||||
if unspent_outputs:
|
||||
return conn.run(
|
||||
conn.collection('utxos').remove({
|
||||
conn.collection('utxos').delete_many({
|
||||
'$or': [{
|
||||
'$and': [
|
||||
{'transaction_id': unspent_output['transaction_id']},
|
||||
@ -264,40 +252,126 @@ def get_unspent_outputs(conn, *, query=None):
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def store_pre_commit_state(conn, state):
|
||||
commit_id = state['commit_id']
|
||||
return conn.run(
|
||||
conn.collection('pre_commit')
|
||||
.update({'commit_id': commit_id}, state, upsert=True)
|
||||
.replace_one({}, state, upsert=True)
|
||||
)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_pre_commit_state(conn, commit_id):
|
||||
return conn.run(conn.collection('pre_commit')
|
||||
.find_one({'commit_id': commit_id},
|
||||
projection={'_id': False}))
|
||||
def get_pre_commit_state(conn):
|
||||
return conn.run(conn.collection('pre_commit').find_one())
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def store_validator_update(conn, validator_update):
|
||||
try:
|
||||
return conn.run(
|
||||
conn.collection('validators')
|
||||
.insert_one(validator_update))
|
||||
except DuplicateKeyError:
|
||||
raise MultipleValidatorOperationError('Validator update already exists')
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_validator_update(conn, update_id=VALIDATOR_UPDATE_ID):
|
||||
def store_validator_set(conn, validators_update):
|
||||
height = validators_update['height']
|
||||
return conn.run(
|
||||
conn.collection('validators')
|
||||
.find_one({'update_id': update_id}, projection={'_id': False}))
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def delete_validator_update(conn, update_id=VALIDATOR_UPDATE_ID):
|
||||
return conn.run(
|
||||
conn.collection('validators')
|
||||
.delete_one({'update_id': update_id})
|
||||
conn.collection('validators').replace_one(
|
||||
{'height': height},
|
||||
validators_update,
|
||||
upsert=True
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def delete_validator_set(conn, height):
|
||||
return conn.run(
|
||||
conn.collection('validators').delete_many({'height': height})
|
||||
)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def store_election(conn, election_id, height, is_concluded):
|
||||
return conn.run(
|
||||
conn.collection('elections').replace_one(
|
||||
{'election_id': election_id,
|
||||
'height': height},
|
||||
{'election_id': election_id,
|
||||
'height': height,
|
||||
'is_concluded': is_concluded},
|
||||
upsert=True,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def store_elections(conn, elections):
|
||||
return conn.run(
|
||||
conn.collection('elections').insert_many(elections)
|
||||
)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def delete_elections(conn, height):
|
||||
return conn.run(
|
||||
conn.collection('elections').delete_many({'height': height})
|
||||
)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_validator_set(conn, height=None):
|
||||
query = {}
|
||||
if height is not None:
|
||||
query = {'height': {'$lte': height}}
|
||||
|
||||
cursor = conn.run(
|
||||
conn.collection('validators')
|
||||
.find(query, projection={'_id': False})
|
||||
.sort([('height', DESCENDING)])
|
||||
.limit(1)
|
||||
)
|
||||
|
||||
return next(cursor, None)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_election(conn, election_id):
|
||||
query = {'election_id': election_id}
|
||||
|
||||
return conn.run(
|
||||
conn.collection('elections')
|
||||
.find_one(query, projection={'_id': False},
|
||||
sort=[('height', DESCENDING)])
|
||||
)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_asset_tokens_for_public_key(conn, asset_id, public_key):
|
||||
query = {'outputs.public_keys': [public_key],
|
||||
'asset.id': asset_id}
|
||||
|
||||
cursor = conn.run(
|
||||
conn.collection('transactions').aggregate([
|
||||
{'$match': query},
|
||||
{'$project': {'_id': False}}
|
||||
]))
|
||||
return cursor
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def store_abci_chain(conn, height, chain_id, is_synced=True):
|
||||
return conn.run(
|
||||
conn.collection('abci_chains').replace_one(
|
||||
{'height': height},
|
||||
{'height': height, 'chain_id': chain_id,
|
||||
'is_synced': is_synced},
|
||||
upsert=True,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def delete_abci_chain(conn, height):
|
||||
return conn.run(
|
||||
conn.collection('abci_chains').delete_many({'height': height})
|
||||
)
|
||||
|
||||
|
||||
@register_query(LocalMongoDBConnection)
|
||||
def get_latest_abci_chain(conn):
|
||||
return conn.run(
|
||||
conn.collection('abci_chains')
|
||||
.find_one(projection={'_id': False}, sort=[('height', DESCENDING)])
|
||||
)
|
||||
|
@ -1,11 +1,16 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""Utils to initialize and drop the database."""
|
||||
|
||||
import logging
|
||||
|
||||
from pymongo import ASCENDING, DESCENDING, TEXT
|
||||
from pymongo.errors import CollectionInvalid
|
||||
|
||||
from bigchaindb import backend
|
||||
from bigchaindb.common import exceptions
|
||||
from bigchaindb.backend.utils import module_dispatch_registrar
|
||||
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
|
||||
|
||||
@ -14,12 +19,48 @@ logger = logging.getLogger(__name__)
|
||||
register_schema = module_dispatch_registrar(backend.schema)
|
||||
|
||||
|
||||
INDEXES = {
|
||||
'transactions': [
|
||||
('id', dict(unique=True, name='transaction_id')),
|
||||
('asset.id', dict(name='asset_id')),
|
||||
('outputs.public_keys', dict(name='outputs')),
|
||||
([('inputs.fulfills.transaction_id', ASCENDING),
|
||||
('inputs.fulfills.output_index', ASCENDING)], dict(name='inputs')),
|
||||
],
|
||||
'assets': [
|
||||
('id', dict(name='asset_id', unique=True)),
|
||||
([('$**', TEXT)], dict(name='text')),
|
||||
],
|
||||
'blocks': [
|
||||
([('height', DESCENDING)], dict(name='height', unique=True)),
|
||||
],
|
||||
'metadata': [
|
||||
('id', dict(name='transaction_id', unique=True)),
|
||||
([('$**', TEXT)], dict(name='text')),
|
||||
],
|
||||
'utxos': [
|
||||
([('transaction_id', ASCENDING),
|
||||
('output_index', ASCENDING)], dict(name='utxo', unique=True)),
|
||||
],
|
||||
'pre_commit': [
|
||||
('height', dict(name='height', unique=True)),
|
||||
],
|
||||
'elections': [
|
||||
([('height', DESCENDING), ('election_id', ASCENDING)],
|
||||
dict(name='election_id_height', unique=True)),
|
||||
],
|
||||
'validators': [
|
||||
('height', dict(name='height', unique=True)),
|
||||
],
|
||||
'abci_chains': [
|
||||
('height', dict(name='height', unique=True)),
|
||||
('chain_id', dict(name='chain_id', unique=True)),
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
@register_schema(LocalMongoDBConnection)
|
||||
def create_database(conn, dbname):
|
||||
if dbname in conn.conn.database_names():
|
||||
raise exceptions.DatabaseAlreadyExists('Database `{}` already exists'
|
||||
.format(dbname))
|
||||
|
||||
logger.info('Create database `%s`.', dbname)
|
||||
# TODO: read and write concerns can be declared here
|
||||
conn.conn.get_database(dbname)
|
||||
@ -27,105 +68,23 @@ def create_database(conn, dbname):
|
||||
|
||||
@register_schema(LocalMongoDBConnection)
|
||||
def create_tables(conn, dbname):
|
||||
for table_name in ['transactions', 'utxos', 'assets', 'blocks', 'metadata',
|
||||
'validators', 'pre_commit']:
|
||||
logger.info('Create `%s` table.', table_name)
|
||||
for table_name in backend.schema.TABLES:
|
||||
# create the table
|
||||
# TODO: read and write concerns can be declared here
|
||||
conn.conn[dbname].create_collection(table_name)
|
||||
try:
|
||||
logger.info(f'Create `{table_name}` table.')
|
||||
conn.conn[dbname].create_collection(table_name)
|
||||
except CollectionInvalid:
|
||||
logger.info(f'Collection {table_name} already exists.')
|
||||
create_indexes(conn, dbname, table_name, INDEXES[table_name])
|
||||
|
||||
|
||||
@register_schema(LocalMongoDBConnection)
|
||||
def create_indexes(conn, dbname):
|
||||
create_transactions_secondary_index(conn, dbname)
|
||||
create_assets_secondary_index(conn, dbname)
|
||||
create_blocks_secondary_index(conn, dbname)
|
||||
create_metadata_secondary_index(conn, dbname)
|
||||
create_utxos_secondary_index(conn, dbname)
|
||||
create_pre_commit_secondary_index(conn, dbname)
|
||||
create_validators_secondary_index(conn, dbname)
|
||||
def create_indexes(conn, dbname, collection, indexes):
|
||||
logger.info(f'Ensure secondary indexes for `{collection}`.')
|
||||
for fields, kwargs in indexes:
|
||||
conn.conn[dbname][collection].create_index(fields, **kwargs)
|
||||
|
||||
|
||||
@register_schema(LocalMongoDBConnection)
|
||||
def drop_database(conn, dbname):
|
||||
conn.conn.drop_database(dbname)
|
||||
|
||||
|
||||
def create_transactions_secondary_index(conn, dbname):
|
||||
logger.info('Create `transactions` secondary index.')
|
||||
|
||||
# to query the transactions for a transaction id, this field is unique
|
||||
conn.conn[dbname]['transactions'].create_index('id',
|
||||
name='transaction_id')
|
||||
|
||||
# secondary index for asset uuid, this field is unique
|
||||
conn.conn[dbname]['transactions']\
|
||||
.create_index('asset.id', name='asset_id')
|
||||
|
||||
# secondary index on the public keys of outputs
|
||||
conn.conn[dbname]['transactions']\
|
||||
.create_index('outputs.public_keys',
|
||||
name='outputs')
|
||||
|
||||
# secondary index on inputs/transaction links (transaction_id, output)
|
||||
conn.conn[dbname]['transactions']\
|
||||
.create_index([
|
||||
('inputs.fulfills.transaction_id', ASCENDING),
|
||||
('inputs.fulfills.output_index', ASCENDING),
|
||||
], name='inputs')
|
||||
|
||||
|
||||
def create_assets_secondary_index(conn, dbname):
|
||||
logger.info('Create `assets` secondary index.')
|
||||
|
||||
# unique index on the id of the asset.
|
||||
# the id is the txid of the transaction that created the asset
|
||||
conn.conn[dbname]['assets'].create_index('id',
|
||||
name='asset_id',
|
||||
unique=True)
|
||||
|
||||
# full text search index
|
||||
conn.conn[dbname]['assets'].create_index([('$**', TEXT)], name='text')
|
||||
|
||||
|
||||
def create_blocks_secondary_index(conn, dbname):
|
||||
conn.conn[dbname]['blocks']\
|
||||
.create_index([('height', DESCENDING)], name='height')
|
||||
|
||||
|
||||
def create_metadata_secondary_index(conn, dbname):
|
||||
logger.info('Create `assets` secondary index.')
|
||||
|
||||
# the id is the txid of the transaction where metadata was defined
|
||||
conn.conn[dbname]['metadata'].create_index('id',
|
||||
name='transaction_id',
|
||||
unique=True)
|
||||
|
||||
# full text search index
|
||||
conn.conn[dbname]['metadata'].create_index([('$**', TEXT)], name='text')
|
||||
|
||||
|
||||
def create_utxos_secondary_index(conn, dbname):
|
||||
logger.info('Create `utxos` secondary index.')
|
||||
|
||||
conn.conn[dbname]['utxos'].create_index(
|
||||
[('transaction_id', ASCENDING), ('output_index', ASCENDING)],
|
||||
name='utxo',
|
||||
unique=True,
|
||||
)
|
||||
|
||||
|
||||
def create_pre_commit_secondary_index(conn, dbname):
|
||||
logger.info('Create `pre_commit` secondary index.')
|
||||
|
||||
conn.conn[dbname]['pre_commit'].create_index('commit_id',
|
||||
name='pre_commit_id',
|
||||
unique=True)
|
||||
|
||||
|
||||
def create_validators_secondary_index(conn, dbname):
|
||||
logger.info('Create `validators` secondary index.')
|
||||
|
||||
conn.conn[dbname]['validators'].create_index('update_id',
|
||||
name='update_id',
|
||||
unique=True,)
|
||||
|
@ -1,26 +1,14 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""Query interfaces for backends."""
|
||||
|
||||
from functools import singledispatch
|
||||
|
||||
from bigchaindb.backend.exceptions import OperationError
|
||||
|
||||
VALIDATOR_UPDATE_ID = 'a_unique_id_string'
|
||||
PRE_COMMIT_ID = 'a_unique_id_string'
|
||||
|
||||
|
||||
@singledispatch
|
||||
def store_transaction(connection, signed_transaction):
|
||||
"""Write a transaction to the backlog table.
|
||||
|
||||
Args:
|
||||
signed_transaction (dict): a signed transaction.
|
||||
|
||||
Returns:
|
||||
The result of the operation.
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def store_asset(connection, asset):
|
||||
@ -326,12 +314,11 @@ def get_unspent_outputs(connection, *, query=None):
|
||||
|
||||
|
||||
@singledispatch
|
||||
def store_pre_commit_state(connection, commit_id, state):
|
||||
"""Store pre-commit state in a document with `id` as `commit_id`.
|
||||
def store_pre_commit_state(connection, state):
|
||||
"""Store pre-commit state.
|
||||
|
||||
Args:
|
||||
commit_id (string): `id` of document where `state` should be stored.
|
||||
state (dict): commit state.
|
||||
state (dict): pre-commit state.
|
||||
|
||||
Returns:
|
||||
The result of the operation.
|
||||
@ -341,35 +328,103 @@ def store_pre_commit_state(connection, commit_id, state):
|
||||
|
||||
|
||||
@singledispatch
|
||||
def store_validator_update(conn, validator_update):
|
||||
"""Store a update for the validator set"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def get_pre_commit_state(connection, commit_id):
|
||||
"""Get pre-commit state where `id` is `commit_id`.
|
||||
|
||||
Args:
|
||||
commit_id (string): `id` of document where `state` should be stored.
|
||||
def get_pre_commit_state(connection):
|
||||
"""Get pre-commit state.
|
||||
|
||||
Returns:
|
||||
Document with `id` as `commit_id`
|
||||
Document representing the pre-commit state.
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def get_validator_update(conn):
|
||||
"""Get validator updates which are not synced"""
|
||||
def store_validator_set(conn, validator_update):
|
||||
"""Store updated validator set"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def delete_validator_update(conn, id):
|
||||
"""Set the sync status for validator update documents"""
|
||||
def delete_validator_set(conn, height):
|
||||
"""Delete the validator set at the given height."""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def store_election(conn, election_id, height, is_concluded):
|
||||
"""Store election record"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def store_elections(conn, elections):
|
||||
"""Store election records in bulk"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def delete_elections(conn, height):
|
||||
"""Delete all election records at the given height"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def get_validator_set(conn, height):
|
||||
"""Get validator set for a given `height`, if `height` is not specified
|
||||
then return the latest validator set
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def get_election(conn, election_id):
|
||||
"""Return the election record
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def get_asset_tokens_for_public_key(connection, asset_id, public_key):
|
||||
"""Retrieve a list of tokens of type `asset_id` that are owned by the `public_key`.
|
||||
Args:
|
||||
asset_id (str): Id of the token.
|
||||
public_key (str): base58 encoded public key
|
||||
Returns:
|
||||
Iterator of transaction that list given owner in conditions.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def store_abci_chain(conn, height, chain_id, is_synced=True):
|
||||
"""Create or update an ABCI chain at the given height.
|
||||
Usually invoked in the beginning of the ABCI communications (height=0)
|
||||
or when ABCI client (like Tendermint) is migrated (any height).
|
||||
|
||||
Args:
|
||||
is_synced: True if the chain is known by both ABCI client and server
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def delete_abci_chain(conn, height):
|
||||
"""Delete the ABCI chain at the given height."""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def get_latest_abci_chain(conn):
|
||||
"""Returns the ABCI chain stored at the biggest height, if any,
|
||||
None otherwise.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
@ -1,13 +1,9 @@
|
||||
"""Database creation and schema-providing interfaces for backends.
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
Attributes:
|
||||
TABLES (tuple): The three standard tables BigchainDB relies on:
|
||||
|
||||
* ``backlog`` for incoming transactions awaiting to be put into
|
||||
a block.
|
||||
* ``bigchain`` for blocks.
|
||||
|
||||
"""
|
||||
"""Database creation and schema-providing interfaces for backends."""
|
||||
|
||||
from functools import singledispatch
|
||||
import logging
|
||||
@ -15,11 +11,14 @@ import logging
|
||||
import bigchaindb
|
||||
from bigchaindb.backend.connection import connect
|
||||
from bigchaindb.common.exceptions import ValidationError
|
||||
from bigchaindb.common.utils import validate_all_values_for_key
|
||||
from bigchaindb.common.utils import validate_all_values_for_key_in_obj, validate_all_values_for_key_in_list
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
TABLES = ('bigchain', 'backlog', 'assets', 'metadata')
|
||||
# Tables/collections that every backend database must create
|
||||
TABLES = ('transactions', 'blocks', 'assets', 'metadata',
|
||||
'validators', 'elections', 'pre_commit', 'utxos', 'abci_chains')
|
||||
|
||||
VALID_LANGUAGES = ('danish', 'dutch', 'english', 'finnish', 'french', 'german',
|
||||
'hungarian', 'italian', 'norwegian', 'portuguese', 'romanian',
|
||||
'russian', 'spanish', 'swedish', 'turkish', 'none',
|
||||
@ -33,10 +32,6 @@ def create_database(connection, dbname):
|
||||
|
||||
Args:
|
||||
dbname (str): the name of the database to create.
|
||||
|
||||
Raises:
|
||||
:exc:`~DatabaseAlreadyExists`: If the given :attr:`dbname` already
|
||||
exists as a database.
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
@ -53,17 +48,6 @@ def create_tables(connection, dbname):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def create_indexes(connection, dbname):
|
||||
"""Create the indexes to be used by BigchainDB.
|
||||
|
||||
Args:
|
||||
dbname (str): the name of the database to create indexes for.
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@singledispatch
|
||||
def drop_database(connection, dbname):
|
||||
"""Drop the database used by BigchainDB.
|
||||
@ -92,10 +76,6 @@ def init_database(connection=None, dbname=None):
|
||||
dbname (str): the name of the database to create.
|
||||
Defaults to the database name given in the BigchainDB
|
||||
configuration.
|
||||
|
||||
Raises:
|
||||
:exc:`~DatabaseAlreadyExists`: If the given :attr:`dbname` already
|
||||
exists as a database.
|
||||
"""
|
||||
|
||||
connection = connection or connect()
|
||||
@ -103,7 +83,6 @@ def init_database(connection=None, dbname=None):
|
||||
|
||||
create_database(connection, dbname)
|
||||
create_tables(connection, dbname)
|
||||
create_indexes(connection, dbname)
|
||||
|
||||
|
||||
def validate_language_key(obj, key):
|
||||
@ -123,7 +102,9 @@ def validate_language_key(obj, key):
|
||||
if backend == 'localmongodb':
|
||||
data = obj.get(key, {})
|
||||
if isinstance(data, dict):
|
||||
validate_all_values_for_key(data, 'language', validate_language)
|
||||
validate_all_values_for_key_in_obj(data, 'language', validate_language)
|
||||
elif isinstance(data, list):
|
||||
validate_all_values_for_key_in_list(data, 'language', validate_language)
|
||||
|
||||
|
||||
def validate_language(value):
|
||||
|
@ -1,3 +1,11 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
import bigchaindb
|
||||
|
||||
|
||||
class ModuleDispatchRegistrationError(Exception):
|
||||
"""Raised when there is a problem registering dispatched functions for a
|
||||
module
|
||||
@ -16,6 +24,16 @@ def module_dispatch_registrar(module):
|
||||
('`{module}` does not contain a single-dispatchable '
|
||||
'function named `{func}`. The module being registered '
|
||||
'was not implemented correctly!').format(
|
||||
func=func_name, module=module.__name__)) from ex
|
||||
func=func_name, module=module.__name__)) from ex
|
||||
|
||||
return wrapper
|
||||
|
||||
return dispatch_wrapper
|
||||
|
||||
|
||||
def get_bigchaindb_config_value(key, default_value=None):
|
||||
return bigchaindb.config['database'].get(key, default_value)
|
||||
|
||||
|
||||
def get_bigchaindb_config_value_or_key_error(key):
|
||||
return bigchaindb.config['database'][key]
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""Implementation of the `bigchaindb` command,
|
||||
the command-line interface (CLI) for BigchainDB Server.
|
||||
"""
|
||||
@ -9,19 +14,24 @@ import copy
|
||||
import json
|
||||
import sys
|
||||
|
||||
from bigchaindb.common.exceptions import (DatabaseAlreadyExists,
|
||||
DatabaseDoesNotExist,
|
||||
MultipleValidatorOperationError)
|
||||
from bigchaindb.core import rollback
|
||||
from bigchaindb.migrations.chain_migration_election import ChainMigrationElection
|
||||
from bigchaindb.utils import load_node_key
|
||||
from bigchaindb.common.transaction_mode_types import BROADCAST_TX_COMMIT
|
||||
from bigchaindb.common.exceptions import (DatabaseDoesNotExist,
|
||||
ValidationError)
|
||||
from bigchaindb.elections.vote import Vote
|
||||
import bigchaindb
|
||||
from bigchaindb import backend
|
||||
from bigchaindb import (backend, ValidatorElection,
|
||||
BigchainDB)
|
||||
from bigchaindb.backend import schema
|
||||
from bigchaindb.backend import query
|
||||
from bigchaindb.backend.query import VALIDATOR_UPDATE_ID, PRE_COMMIT_ID
|
||||
from bigchaindb.commands import utils
|
||||
from bigchaindb.commands.utils import (configure_bigchaindb,
|
||||
input_on_stderr)
|
||||
from bigchaindb.log import setup_logging
|
||||
from bigchaindb.tendermint_utils import public_key_from_base64
|
||||
from bigchaindb.commands.election_types import elections
|
||||
from bigchaindb.version import __tm_supported_versions__
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
@ -94,22 +104,140 @@ def run_configure(args):
|
||||
|
||||
|
||||
@configure_bigchaindb
|
||||
def run_upsert_validator(args):
|
||||
"""Store validators which should be synced with Tendermint"""
|
||||
def run_election(args):
|
||||
"""Initiate and manage elections"""
|
||||
|
||||
b = bigchaindb.BigchainDB()
|
||||
public_key = public_key_from_base64(args.public_key)
|
||||
validator = {'pub_key': {'type': 'ed25519',
|
||||
'data': public_key},
|
||||
'power': args.power}
|
||||
validator_update = {'validator': validator,
|
||||
'update_id': VALIDATOR_UPDATE_ID}
|
||||
b = BigchainDB()
|
||||
|
||||
# Call the function specified by args.action, as defined above
|
||||
globals()[f'run_election_{args.action}'](args, b)
|
||||
|
||||
|
||||
def run_election_new(args, bigchain):
|
||||
election_type = args.election_type.replace('-', '_')
|
||||
globals()[f'run_election_new_{election_type}'](args, bigchain)
|
||||
|
||||
|
||||
def create_new_election(sk, bigchain, election_class, data):
|
||||
try:
|
||||
query.store_validator_update(b.connection, validator_update)
|
||||
except MultipleValidatorOperationError:
|
||||
logger.error('A validator update is pending to be applied. '
|
||||
'Please re-try after the current update has '
|
||||
'been processed.')
|
||||
key = load_node_key(sk)
|
||||
voters = election_class.recipients(bigchain)
|
||||
election = election_class.generate([key.public_key],
|
||||
voters,
|
||||
data, None).sign([key.private_key])
|
||||
election.validate(bigchain)
|
||||
except ValidationError as e:
|
||||
logger.error(e)
|
||||
return False
|
||||
except FileNotFoundError as fd_404:
|
||||
logger.error(fd_404)
|
||||
return False
|
||||
|
||||
resp = bigchain.write_transaction(election, BROADCAST_TX_COMMIT)
|
||||
if resp == (202, ''):
|
||||
logger.info('[SUCCESS] Submitted proposal with id: {}'.format(election.id))
|
||||
return election.id
|
||||
else:
|
||||
logger.error('Failed to commit election proposal')
|
||||
return False
|
||||
|
||||
|
||||
def run_election_new_upsert_validator(args, bigchain):
|
||||
"""Initiates an election to add/update/remove a validator to an existing BigchainDB network
|
||||
|
||||
:param args: dict
|
||||
args = {
|
||||
'public_key': the public key of the proposed peer, (str)
|
||||
'power': the proposed validator power for the new peer, (str)
|
||||
'node_id': the node_id of the new peer (str)
|
||||
'sk': the path to the private key of the node calling the election (str)
|
||||
}
|
||||
:param bigchain: an instance of BigchainDB
|
||||
:return: election_id or `False` in case of failure
|
||||
"""
|
||||
|
||||
new_validator = {
|
||||
'public_key': {'value': public_key_from_base64(args.public_key),
|
||||
'type': 'ed25519-base16'},
|
||||
'power': args.power,
|
||||
'node_id': args.node_id
|
||||
}
|
||||
|
||||
return create_new_election(args.sk, bigchain, ValidatorElection, new_validator)
|
||||
|
||||
|
||||
def run_election_new_chain_migration(args, bigchain):
|
||||
"""Initiates an election to halt block production
|
||||
|
||||
:param args: dict
|
||||
args = {
|
||||
'sk': the path to the private key of the node calling the election (str)
|
||||
}
|
||||
:param bigchain: an instance of BigchainDB
|
||||
:return: election_id or `False` in case of failure
|
||||
"""
|
||||
|
||||
return create_new_election(args.sk, bigchain, ChainMigrationElection, {})
|
||||
|
||||
|
||||
def run_election_approve(args, bigchain):
|
||||
"""Approve an election
|
||||
|
||||
:param args: dict
|
||||
args = {
|
||||
'election_id': the election_id of the election (str)
|
||||
'sk': the path to the private key of the signer (str)
|
||||
}
|
||||
:param bigchain: an instance of BigchainDB
|
||||
:return: success log message or `False` in case of error
|
||||
"""
|
||||
|
||||
key = load_node_key(args.sk)
|
||||
tx = bigchain.get_transaction(args.election_id)
|
||||
voting_powers = [v.amount for v in tx.outputs if key.public_key in v.public_keys]
|
||||
if len(voting_powers) > 0:
|
||||
voting_power = voting_powers[0]
|
||||
else:
|
||||
logger.error('The key you provided does not match any of the eligible voters in this election.')
|
||||
return False
|
||||
|
||||
inputs = [i for i in tx.to_inputs() if key.public_key in i.owners_before]
|
||||
election_pub_key = ValidatorElection.to_public_key(tx.id)
|
||||
approval = Vote.generate(inputs,
|
||||
[([election_pub_key], voting_power)],
|
||||
tx.id).sign([key.private_key])
|
||||
approval.validate(bigchain)
|
||||
|
||||
resp = bigchain.write_transaction(approval, BROADCAST_TX_COMMIT)
|
||||
|
||||
if resp == (202, ''):
|
||||
logger.info('[SUCCESS] Your vote has been submitted')
|
||||
return approval.id
|
||||
else:
|
||||
logger.error('Failed to commit vote')
|
||||
return False
|
||||
|
||||
|
||||
def run_election_show(args, bigchain):
|
||||
"""Retrieves information about an election
|
||||
|
||||
:param args: dict
|
||||
args = {
|
||||
'election_id': the transaction_id for an election (str)
|
||||
}
|
||||
:param bigchain: an instance of BigchainDB
|
||||
"""
|
||||
|
||||
election = bigchain.get_transaction(args.election_id)
|
||||
if not election:
|
||||
logger.error(f'No election found with election_id {args.election_id}')
|
||||
return
|
||||
|
||||
response = election.show_election(bigchain)
|
||||
|
||||
logger.info(response)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
def _run_init():
|
||||
@ -121,14 +249,7 @@ def _run_init():
|
||||
@configure_bigchaindb
|
||||
def run_init(args):
|
||||
"""Initialize the database"""
|
||||
# TODO Provide mechanism to:
|
||||
# 1. prompt the user to inquire whether they wish to drop the db
|
||||
# 2. force the init, (e.g., via -f flag)
|
||||
try:
|
||||
_run_init()
|
||||
except DatabaseAlreadyExists:
|
||||
print('The database already exists.', file=sys.stderr)
|
||||
print('If you wish to re-initialize it, first drop it.', file=sys.stderr)
|
||||
_run_init()
|
||||
|
||||
|
||||
@configure_bigchaindb
|
||||
@ -142,7 +263,6 @@ def run_drop(args):
|
||||
return
|
||||
|
||||
conn = backend.connect()
|
||||
dbname = bigchaindb.config['database']['name']
|
||||
try:
|
||||
schema.drop_database(conn, dbname)
|
||||
except DatabaseDoesNotExist:
|
||||
@ -150,16 +270,7 @@ def run_drop(args):
|
||||
|
||||
|
||||
def run_recover(b):
|
||||
pre_commit = query.get_pre_commit_state(b.connection, PRE_COMMIT_ID)
|
||||
|
||||
# Initially the pre-commit collection would be empty
|
||||
if pre_commit:
|
||||
latest_block = query.get_latest_block(b.connection)
|
||||
|
||||
# NOTE: the pre-commit state can only be ahead of the commited state
|
||||
# by 1 block
|
||||
if latest_block and (latest_block['height'] < pre_commit['height']):
|
||||
query.delete_transactions(b.connection, pre_commit['transactions'])
|
||||
rollback(b)
|
||||
|
||||
|
||||
@configure_bigchaindb
|
||||
@ -172,16 +283,22 @@ def run_start(args):
|
||||
logger.info('BigchainDB Version %s', bigchaindb.__version__)
|
||||
run_recover(bigchaindb.lib.BigchainDB())
|
||||
|
||||
try:
|
||||
if not args.skip_initialize_database:
|
||||
logger.info('Initializing database')
|
||||
_run_init()
|
||||
except DatabaseAlreadyExists:
|
||||
pass
|
||||
if not args.skip_initialize_database:
|
||||
logger.info('Initializing database')
|
||||
_run_init()
|
||||
|
||||
logger.info('Starting BigchainDB main process.')
|
||||
from bigchaindb.start import start
|
||||
start()
|
||||
start(args)
|
||||
|
||||
|
||||
def run_tendermint_version(args):
|
||||
"""Show the supported Tendermint version(s)"""
|
||||
supported_tm_ver = {
|
||||
'description': 'BigchainDB supports the following Tendermint version(s)',
|
||||
'tendermint': __tm_supported_versions__,
|
||||
}
|
||||
print(json.dumps(supported_tm_ver, indent=4, sort_keys=True))
|
||||
|
||||
|
||||
def create_parser():
|
||||
@ -208,16 +325,41 @@ def create_parser():
|
||||
help='The backend to use. It can only be '
|
||||
'"localmongodb", currently.')
|
||||
|
||||
validator_parser = subparsers.add_parser('upsert-validator',
|
||||
help='Add/update/delete a validator')
|
||||
# parser for managing elections
|
||||
election_parser = subparsers.add_parser('election',
|
||||
help='Manage elections.')
|
||||
|
||||
validator_parser.add_argument('public_key',
|
||||
help='Public key of the validator.')
|
||||
election_subparser = election_parser.add_subparsers(title='Action',
|
||||
dest='action')
|
||||
|
||||
validator_parser.add_argument('power',
|
||||
type=int,
|
||||
help='Voting power of the validator. '
|
||||
'Setting it to 0 will delete the validator.')
|
||||
new_election_parser = election_subparser.add_parser('new',
|
||||
help='Calls a new election.')
|
||||
|
||||
new_election_subparser = new_election_parser.add_subparsers(title='Election_Type',
|
||||
dest='election_type')
|
||||
|
||||
# Parser factory for each type of new election, so we get a bunch of commands that look like this:
|
||||
# election new <some_election_type> <args>...
|
||||
for name, data in elections.items():
|
||||
args = data['args']
|
||||
generic_parser = new_election_subparser.add_parser(name, help=data['help'])
|
||||
for arg, kwargs in args.items():
|
||||
generic_parser.add_argument(arg, **kwargs)
|
||||
|
||||
approve_election_parser = election_subparser.add_parser('approve',
|
||||
help='Approve the election.')
|
||||
approve_election_parser.add_argument('election_id',
|
||||
help='The election_id of the election.')
|
||||
approve_election_parser.add_argument('--private-key',
|
||||
dest='sk',
|
||||
required=True,
|
||||
help='Path to the private key of the election initiator.')
|
||||
|
||||
show_election_parser = election_subparser.add_parser('show',
|
||||
help='Provides information about an election.')
|
||||
|
||||
show_election_parser.add_argument('election_id',
|
||||
help='The transaction id of the election you wish to query.')
|
||||
|
||||
# parsers for showing/exporting config values
|
||||
subparsers.add_parser('show-config',
|
||||
@ -240,6 +382,15 @@ def create_parser():
|
||||
action='store_true',
|
||||
help='Skip database initialization')
|
||||
|
||||
subparsers.add_parser('tendermint-version',
|
||||
help='Show the Tendermint supported versions')
|
||||
|
||||
start_parser.add_argument('--experimental-parallel-validation',
|
||||
dest='experimental_parallel_validation',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='💀 EXPERIMENTAL: parallelize validation for better throughput 💀')
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
|
31
bigchaindb/commands/election_types.py
Normal file
31
bigchaindb/commands/election_types.py
Normal file
@ -0,0 +1,31 @@
|
||||
elections = {
|
||||
'upsert-validator': {
|
||||
'help': 'Propose a change to the validator set',
|
||||
'args': {
|
||||
'public_key': {
|
||||
'help': 'Public key of the validator to be added/updated/removed.'
|
||||
},
|
||||
'power': {
|
||||
'type': int,
|
||||
'help': 'The proposed power for the validator. Setting to 0 will remove the validator.'},
|
||||
'node_id': {
|
||||
'help': 'The node_id of the validator.'
|
||||
},
|
||||
'--private-key': {
|
||||
'dest': 'sk',
|
||||
'required': True,
|
||||
'help': 'Path to the private key of the election initiator.'
|
||||
}
|
||||
}
|
||||
},
|
||||
'chain-migration': {
|
||||
'help': 'Call for a halt to block production to allow for a version change across breaking changes.',
|
||||
'args': {
|
||||
'--private-key': {
|
||||
'dest': 'sk',
|
||||
'required': True,
|
||||
'help': 'Path to the private key of the election initiator.'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""Utility functions and basic common arguments
|
||||
for ``argparse.ArgumentParser``.
|
||||
"""
|
||||
|
@ -1,7 +1,16 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
# Separate all crypto code so that we can easily test several implementations
|
||||
from collections import namedtuple
|
||||
|
||||
import sha3
|
||||
try:
|
||||
from hashlib import sha3_256
|
||||
except ImportError:
|
||||
from sha3 import sha3_256
|
||||
|
||||
from cryptoconditions import crypto
|
||||
|
||||
|
||||
@ -10,7 +19,7 @@ CryptoKeypair = namedtuple('CryptoKeypair', ('private_key', 'public_key'))
|
||||
|
||||
def hash_data(data):
|
||||
"""Hash the provided data using SHA3-256"""
|
||||
return sha3.sha3_256(data.encode()).hexdigest()
|
||||
return sha3_256(data.encode()).hexdigest()
|
||||
|
||||
|
||||
def generate_key_pair():
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""Custom exceptions used in the `bigchaindb` package.
|
||||
"""
|
||||
from bigchaindb.exceptions import BigchainDBError
|
||||
@ -7,10 +12,6 @@ class ConfigurationError(BigchainDBError):
|
||||
"""Raised when there is a problem with server configuration"""
|
||||
|
||||
|
||||
class DatabaseAlreadyExists(BigchainDBError):
|
||||
"""Raised when trying to create the database but the db is already there"""
|
||||
|
||||
|
||||
class DatabaseDoesNotExist(BigchainDBError):
|
||||
"""Raised when trying to delete the database but the db is not there"""
|
||||
|
||||
@ -66,12 +67,6 @@ class InvalidSignature(ValidationError):
|
||||
"""
|
||||
|
||||
|
||||
class TransactionNotInValidBlock(ValidationError):
|
||||
"""Raised when a transfer transaction is attempting to fulfill the
|
||||
outputs of a transaction that is in an invalid or undecided block
|
||||
"""
|
||||
|
||||
|
||||
class AssetIdMismatch(ValidationError):
|
||||
"""Raised when multiple transaction inputs related to different assets"""
|
||||
|
||||
@ -96,10 +91,6 @@ class ThresholdTooDeep(ValidationError):
|
||||
"""Raised if threshold condition is too deep"""
|
||||
|
||||
|
||||
class GenesisBlockAlreadyExistsError(ValidationError):
|
||||
"""Raised when trying to create the already existing genesis block"""
|
||||
|
||||
|
||||
class MultipleValidatorOperationError(ValidationError):
|
||||
"""Raised when a validator update pending but new request is submited"""
|
||||
|
||||
@ -118,3 +109,7 @@ class UnequalValidatorSet(ValidationError):
|
||||
|
||||
class InvalidPowerChange(ValidationError):
|
||||
"""Raised if proposed power change in validator set is >=1/3 total power"""
|
||||
|
||||
|
||||
class InvalidPublicKey(ValidationError):
|
||||
"""Raised if public key doesn't match the encoding type"""
|
||||
|
58
bigchaindb/common/memoize.py
Normal file
58
bigchaindb/common/memoize.py
Normal file
@ -0,0 +1,58 @@
|
||||
import functools
|
||||
import codecs
|
||||
from functools import lru_cache
|
||||
|
||||
|
||||
class HDict(dict):
|
||||
def __hash__(self):
|
||||
return hash(codecs.decode(self['id'], 'hex'))
|
||||
|
||||
|
||||
@lru_cache(maxsize=16384)
|
||||
def from_dict(func, *args, **kwargs):
|
||||
return func(*args, **kwargs)
|
||||
|
||||
|
||||
def memoize_from_dict(func):
|
||||
|
||||
@functools.wraps(func)
|
||||
def memoized_func(*args, **kwargs):
|
||||
|
||||
if args[1].get('id', None):
|
||||
args = list(args)
|
||||
args[1] = HDict(args[1])
|
||||
new_args = tuple(args)
|
||||
return from_dict(func, *new_args, **kwargs)
|
||||
else:
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return memoized_func
|
||||
|
||||
|
||||
class ToDictWrapper():
|
||||
def __init__(self, tx):
|
||||
self.tx = tx
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.tx.id == other.tx.id
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.tx.id)
|
||||
|
||||
|
||||
@lru_cache(maxsize=16384)
|
||||
def to_dict(func, tx_wrapped):
|
||||
return func(tx_wrapped.tx)
|
||||
|
||||
|
||||
def memoize_to_dict(func):
|
||||
|
||||
@functools.wraps(func)
|
||||
def memoized_func(*args, **kwargs):
|
||||
|
||||
if args[0].id:
|
||||
return to_dict(func, ToDictWrapper(args[0]))
|
||||
else:
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return memoized_func
|
@ -1,3 +1,10 @@
|
||||
<!---
|
||||
Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
BigchainDB and IPDB software contributors.
|
||||
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
--->
|
||||
|
||||
# Introduction
|
||||
|
||||
This directory contains the schemas for the different JSON documents BigchainDB uses.
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""Schema validation related functions and data"""
|
||||
import os.path
|
||||
import logging
|
||||
@ -5,7 +10,6 @@ import logging
|
||||
import jsonschema
|
||||
import yaml
|
||||
import rapidjson
|
||||
import rapidjson_schema
|
||||
|
||||
from bigchaindb.common.exceptions import SchemaValidationError
|
||||
|
||||
@ -18,7 +22,7 @@ def _load_schema(name, path=__file__):
|
||||
path = os.path.join(os.path.dirname(path), name + '.yaml')
|
||||
with open(path) as handle:
|
||||
schema = yaml.safe_load(handle)
|
||||
fast_schema = rapidjson_schema.loads(rapidjson.dumps(schema))
|
||||
fast_schema = rapidjson.Validator(rapidjson.dumps(schema))
|
||||
return path, (schema, fast_schema)
|
||||
|
||||
|
||||
@ -34,6 +38,11 @@ _, TX_SCHEMA_TRANSFER = _load_schema('transaction_transfer_' +
|
||||
_, TX_SCHEMA_VALIDATOR_ELECTION = _load_schema('transaction_validator_election_' +
|
||||
TX_SCHEMA_VERSION)
|
||||
|
||||
_, TX_SCHEMA_CHAIN_MIGRATION_ELECTION = _load_schema('transaction_chain_migration_election_' +
|
||||
TX_SCHEMA_VERSION)
|
||||
|
||||
_, TX_SCHEMA_VOTE = _load_schema('transaction_vote_' + TX_SCHEMA_VERSION)
|
||||
|
||||
|
||||
def _validate_schema(schema, body):
|
||||
"""Validate data against a schema"""
|
||||
@ -50,7 +59,7 @@ def _validate_schema(schema, body):
|
||||
# a helpful error message.
|
||||
|
||||
try:
|
||||
schema[1].validate(rapidjson.dumps(body))
|
||||
schema[1](rapidjson.dumps(body))
|
||||
except ValueError as exc:
|
||||
try:
|
||||
jsonschema.validate(body, schema[0])
|
||||
|
@ -0,0 +1,45 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
---
|
||||
"$schema": "http://json-schema.org/draft-04/schema#"
|
||||
type: object
|
||||
title: Chain Migration Election Schema - Propose a halt in block production to allow for a version change
|
||||
required:
|
||||
- operation
|
||||
- asset
|
||||
- outputs
|
||||
properties:
|
||||
operation:
|
||||
type: string
|
||||
value: "CHAIN_MIGRATION_ELECTION"
|
||||
asset:
|
||||
additionalProperties: false
|
||||
properties:
|
||||
data:
|
||||
additionalProperties: false
|
||||
properties:
|
||||
seed:
|
||||
type: string
|
||||
required:
|
||||
- data
|
||||
outputs:
|
||||
type: array
|
||||
items:
|
||||
"$ref": "#/definitions/output"
|
||||
definitions:
|
||||
output:
|
||||
type: object
|
||||
properties:
|
||||
condition:
|
||||
type: object
|
||||
required:
|
||||
- uri
|
||||
properties:
|
||||
uri:
|
||||
type: string
|
||||
pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
|
||||
(fpt=ed25519-sha-256(&)?|cost=[0-9]+(&)?|\
|
||||
subtypes=ed25519-sha-256(&)?){2,3}$"
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
---
|
||||
"$schema": "http://json-schema.org/draft-04/schema#"
|
||||
type: object
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
---
|
||||
"$schema": "http://json-schema.org/draft-04/schema#"
|
||||
type: object
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
---
|
||||
"$schema": "http://json-schema.org/draft-04/schema#"
|
||||
type: object
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
---
|
||||
"$schema": "http://json-schema.org/draft-04/schema#"
|
||||
type: object
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
---
|
||||
"$schema": "http://json-schema.org/draft-04/schema#"
|
||||
type: object
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
---
|
||||
"$schema": "http://json-schema.org/draft-04/schema#"
|
||||
type: object
|
||||
@ -59,6 +64,8 @@ definitions:
|
||||
- CREATE
|
||||
- TRANSFER
|
||||
- VALIDATOR_ELECTION
|
||||
- CHAIN_MIGRATION_ELECTION
|
||||
- VOTE
|
||||
asset:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
---
|
||||
"$schema": "http://json-schema.org/draft-04/schema#"
|
||||
type: object
|
||||
@ -18,8 +23,23 @@ properties:
|
||||
properties:
|
||||
node_id:
|
||||
type: string
|
||||
public_key:
|
||||
seed:
|
||||
type: string
|
||||
public_key:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
required:
|
||||
- value
|
||||
- type
|
||||
properties:
|
||||
value:
|
||||
type: string
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- ed25519-base16
|
||||
- ed25519-base32
|
||||
- ed25519-base64
|
||||
power:
|
||||
"$ref": "#/definitions/positiveInteger"
|
||||
required:
|
||||
|
34
bigchaindb/common/schema/transaction_vote_v2.0.yaml
Normal file
34
bigchaindb/common/schema/transaction_vote_v2.0.yaml
Normal file
@ -0,0 +1,34 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
---
|
||||
"$schema": "http://json-schema.org/draft-04/schema#"
|
||||
type: object
|
||||
title: Vote Schema - Vote on an election
|
||||
required:
|
||||
- operation
|
||||
- outputs
|
||||
properties:
|
||||
operation:
|
||||
type: string
|
||||
value: "VOTE"
|
||||
outputs:
|
||||
type: array
|
||||
items:
|
||||
"$ref": "#/definitions/output"
|
||||
definitions:
|
||||
output:
|
||||
type: object
|
||||
properties:
|
||||
condition:
|
||||
type: object
|
||||
required:
|
||||
- uri
|
||||
properties:
|
||||
uri:
|
||||
type: string
|
||||
pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
|
||||
(fpt=ed25519-sha-256(&)?|cost=[0-9]+(&)?|\
|
||||
subtypes=ed25519-sha-256(&)?){2,3}$"
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""Transaction related models to parse and construct transaction
|
||||
payloads.
|
||||
|
||||
@ -8,20 +13,26 @@ Attributes:
|
||||
"""
|
||||
from collections import namedtuple
|
||||
from copy import deepcopy
|
||||
from functools import reduce
|
||||
from functools import reduce, lru_cache
|
||||
import rapidjson
|
||||
|
||||
import base58
|
||||
from cryptoconditions import Fulfillment, ThresholdSha256, Ed25519Sha256
|
||||
from cryptoconditions.exceptions import (
|
||||
ParsingError, ASN1DecodeError, ASN1EncodeError, UnsupportedTypeError)
|
||||
from sha3 import sha3_256
|
||||
try:
|
||||
from hashlib import sha3_256
|
||||
except ImportError:
|
||||
from sha3 import sha3_256
|
||||
|
||||
from bigchaindb.common.crypto import PrivateKey, hash_data
|
||||
from bigchaindb.common.exceptions import (KeypairMismatchException,
|
||||
InputDoesNotExist, DoubleSpend,
|
||||
InvalidHash, InvalidSignature,
|
||||
AmountError, AssetIdMismatch,
|
||||
ThresholdTooDeep)
|
||||
from bigchaindb.common.utils import serialize
|
||||
from .memoize import memoize_from_dict, memoize_to_dict
|
||||
|
||||
|
||||
UnspentOutput = namedtuple(
|
||||
@ -67,7 +78,7 @@ class Input(object):
|
||||
if fulfills is not None and not isinstance(fulfills, TransactionLink):
|
||||
raise TypeError('`fulfills` must be a TransactionLink instance')
|
||||
if not isinstance(owners_before, list):
|
||||
raise TypeError('`owners_after` must be a list instance')
|
||||
raise TypeError('`owners_before` must be a list instance')
|
||||
|
||||
self.fulfillment = fulfillment
|
||||
self.fulfills = fulfills
|
||||
@ -77,6 +88,11 @@ class Input(object):
|
||||
# TODO: If `other !== Fulfillment` return `False`
|
||||
return self.to_dict() == other.to_dict()
|
||||
|
||||
# NOTE: This function is used to provide a unique key for a given
|
||||
# Input to suppliment memoization
|
||||
def __hash__(self):
|
||||
return hash((self.fulfillment, self.fulfills))
|
||||
|
||||
def to_dict(self):
|
||||
"""Transforms the object to a Python dictionary.
|
||||
|
||||
@ -89,7 +105,7 @@ class Input(object):
|
||||
"""
|
||||
try:
|
||||
fulfillment = self.fulfillment.serialize_uri()
|
||||
except (TypeError, AttributeError, ASN1EncodeError):
|
||||
except (TypeError, AttributeError, ASN1EncodeError, ASN1DecodeError):
|
||||
fulfillment = _fulfillment_to_details(self.fulfillment)
|
||||
|
||||
try:
|
||||
@ -156,7 +172,7 @@ def _fulfillment_to_details(fulfillment):
|
||||
if fulfillment.type_name == 'ed25519-sha-256':
|
||||
return {
|
||||
'type': 'ed25519-sha-256',
|
||||
'public_key': base58.b58encode(fulfillment.public_key),
|
||||
'public_key': base58.b58encode(fulfillment.public_key).decode(),
|
||||
}
|
||||
|
||||
if fulfillment.type_name == 'threshold-sha-256':
|
||||
@ -495,7 +511,7 @@ class Transaction(object):
|
||||
VERSION = '2.0'
|
||||
|
||||
def __init__(self, operation, asset, inputs=None, outputs=None,
|
||||
metadata=None, version=None, hash_id=None):
|
||||
metadata=None, version=None, hash_id=None, tx_dict=None):
|
||||
"""The constructor allows to create a customizable Transaction.
|
||||
|
||||
Note:
|
||||
@ -523,14 +539,14 @@ class Transaction(object):
|
||||
# Asset payloads for 'CREATE' operations must be None or
|
||||
# dicts holding a `data` property. Asset payloads for 'TRANSFER'
|
||||
# operations must be dicts holding an `id` property.
|
||||
if (operation == Transaction.CREATE and
|
||||
if (operation == self.CREATE and
|
||||
asset is not None and not (isinstance(asset, dict) and 'data' in asset)):
|
||||
raise TypeError(('`asset` must be None or a dict holding a `data` '
|
||||
" property instance for '{}' Transactions".format(operation)))
|
||||
elif (operation == Transaction.TRANSFER and
|
||||
elif (operation == self.TRANSFER and
|
||||
not (isinstance(asset, dict) and 'id' in asset)):
|
||||
raise TypeError(('`asset` must be a dict holding an `id` property '
|
||||
"for 'TRANSFER' Transactions".format(operation)))
|
||||
'for \'TRANSFER\' Transactions'))
|
||||
|
||||
if outputs and not isinstance(outputs, list):
|
||||
raise TypeError('`outputs` must be a list instance or None')
|
||||
@ -548,6 +564,7 @@ class Transaction(object):
|
||||
self.outputs = outputs or []
|
||||
self.metadata = metadata
|
||||
self._id = hash_id
|
||||
self.tx_dict = tx_dict
|
||||
|
||||
@property
|
||||
def unspent_outputs(self):
|
||||
@ -555,9 +572,9 @@ class Transaction(object):
|
||||
structure containing relevant information for storing them in
|
||||
a UTXO set, and performing validation.
|
||||
"""
|
||||
if self.operation == Transaction.CREATE:
|
||||
if self.operation == self.CREATE:
|
||||
self._asset_id = self._id
|
||||
elif self.operation == Transaction.TRANSFER:
|
||||
elif self.operation == self.TRANSFER:
|
||||
self._asset_id = self.asset['id']
|
||||
return (UnspentOutput(
|
||||
transaction_id=self._id,
|
||||
@ -569,7 +586,7 @@ class Transaction(object):
|
||||
|
||||
@property
|
||||
def spent_outputs(self):
|
||||
"""tuple of :obj:`dict`: Inputs of this transaction. Each input
|
||||
"""Tuple of :obj:`dict`: Inputs of this transaction. Each input
|
||||
is represented as a dictionary containing a transaction id and
|
||||
output index.
|
||||
"""
|
||||
@ -649,6 +666,31 @@ class Transaction(object):
|
||||
(inputs, outputs) = cls.validate_create(tx_signers, recipients, asset, metadata)
|
||||
return cls(cls.CREATE, {'data': asset}, inputs, outputs, metadata)
|
||||
|
||||
@classmethod
|
||||
def validate_transfer(cls, inputs, recipients, asset_id, metadata):
|
||||
if not isinstance(inputs, list):
|
||||
raise TypeError('`inputs` must be a list instance')
|
||||
if len(inputs) == 0:
|
||||
raise ValueError('`inputs` must contain at least one item')
|
||||
if not isinstance(recipients, list):
|
||||
raise TypeError('`recipients` must be a list instance')
|
||||
if len(recipients) == 0:
|
||||
raise ValueError('`recipients` list cannot be empty')
|
||||
|
||||
outputs = []
|
||||
for recipient in recipients:
|
||||
if not isinstance(recipient, tuple) or len(recipient) != 2:
|
||||
raise ValueError(('Each `recipient` in the list must be a'
|
||||
' tuple of `([<list of public keys>],'
|
||||
' <amount>)`'))
|
||||
pub_keys, amount = recipient
|
||||
outputs.append(Output.generate(pub_keys, amount))
|
||||
|
||||
if not isinstance(asset_id, str):
|
||||
raise TypeError('`asset_id` must be a string')
|
||||
|
||||
return (deepcopy(inputs), outputs)
|
||||
|
||||
@classmethod
|
||||
def transfer(cls, inputs, recipients, asset_id, metadata=None):
|
||||
"""A simple way to generate a `TRANSFER` transaction.
|
||||
@ -688,28 +730,7 @@ class Transaction(object):
|
||||
Returns:
|
||||
:class:`~bigchaindb.common.transaction.Transaction`
|
||||
"""
|
||||
if not isinstance(inputs, list):
|
||||
raise TypeError('`inputs` must be a list instance')
|
||||
if len(inputs) == 0:
|
||||
raise ValueError('`inputs` must contain at least one item')
|
||||
if not isinstance(recipients, list):
|
||||
raise TypeError('`recipients` must be a list instance')
|
||||
if len(recipients) == 0:
|
||||
raise ValueError('`recipients` list cannot be empty')
|
||||
|
||||
outputs = []
|
||||
for recipient in recipients:
|
||||
if not isinstance(recipient, tuple) or len(recipient) != 2:
|
||||
raise ValueError(('Each `recipient` in the list must be a'
|
||||
' tuple of `([<list of public keys>],'
|
||||
' <amount>)`'))
|
||||
pub_keys, amount = recipient
|
||||
outputs.append(Output.generate(pub_keys, amount))
|
||||
|
||||
if not isinstance(asset_id, str):
|
||||
raise TypeError('`asset_id` must be a string')
|
||||
|
||||
inputs = deepcopy(inputs)
|
||||
(inputs, outputs) = cls.validate_transfer(inputs, recipients, asset_id, metadata)
|
||||
return cls(cls.TRANSFER, {'id': asset_id}, inputs, outputs, metadata)
|
||||
|
||||
def __eq__(self, other):
|
||||
@ -848,8 +869,9 @@ class Transaction(object):
|
||||
return cls._sign_threshold_signature_fulfillment(input_, message,
|
||||
key_pairs)
|
||||
else:
|
||||
raise ValueError("Fulfillment couldn't be matched to "
|
||||
'Cryptocondition fulfillment type.')
|
||||
raise ValueError(
|
||||
'Fulfillment couldn\'t be matched to '
|
||||
'Cryptocondition fulfillment type.')
|
||||
|
||||
@classmethod
|
||||
def _sign_simple_signature_fulfillment(cls, input_, message, key_pairs):
|
||||
@ -954,7 +976,7 @@ class Transaction(object):
|
||||
# greatly, as we do not have to check against `None` values.
|
||||
return self._inputs_valid(['dummyvalue'
|
||||
for _ in self.inputs])
|
||||
elif self.operation == Transaction.TRANSFER:
|
||||
elif self.operation == self.TRANSFER:
|
||||
return self._inputs_valid([output.fulfillment.condition_uri
|
||||
for output in outputs])
|
||||
else:
|
||||
@ -981,7 +1003,7 @@ class Transaction(object):
|
||||
raise ValueError('Inputs and '
|
||||
'output_condition_uris must have the same count')
|
||||
|
||||
tx_dict = self.to_dict()
|
||||
tx_dict = self.tx_dict if self.tx_dict else self.to_dict()
|
||||
tx_dict = Transaction._remove_signatures(tx_dict)
|
||||
tx_dict['id'] = None
|
||||
tx_serialized = Transaction._to_str(tx_dict)
|
||||
@ -994,6 +1016,7 @@ class Transaction(object):
|
||||
return all(validate(i, cond)
|
||||
for i, cond in enumerate(output_condition_uris))
|
||||
|
||||
@lru_cache(maxsize=16384)
|
||||
def _input_valid(self, input_, operation, message, output_condition_uri=None):
|
||||
"""Validates a single Input against a single Output.
|
||||
|
||||
@ -1039,6 +1062,11 @@ class Transaction(object):
|
||||
ffill_valid = parsed_ffill.validate(message=message.digest())
|
||||
return output_valid and ffill_valid
|
||||
|
||||
# This function is required by `lru_cache` to create a key for memoization
|
||||
def __hash__(self):
|
||||
return hash(self.id)
|
||||
|
||||
@memoize_to_dict
|
||||
def to_dict(self):
|
||||
"""Transforms the object to a Python dictionary.
|
||||
|
||||
@ -1098,8 +1126,8 @@ class Transaction(object):
|
||||
tx = Transaction._remove_signatures(self.to_dict())
|
||||
return Transaction._to_str(tx)
|
||||
|
||||
@staticmethod
|
||||
def get_asset_id(transactions):
|
||||
@classmethod
|
||||
def get_asset_id(cls, transactions):
|
||||
"""Get the asset id from a list of :class:`~.Transactions`.
|
||||
|
||||
This is useful when we want to check if the multiple inputs of a
|
||||
@ -1123,7 +1151,7 @@ class Transaction(object):
|
||||
transactions = [transactions]
|
||||
|
||||
# create a set of the transactions' asset ids
|
||||
asset_ids = {tx.id if tx.operation == Transaction.CREATE
|
||||
asset_ids = {tx.id if tx.operation == tx.CREATE
|
||||
else tx.asset['id']
|
||||
for tx in transactions}
|
||||
|
||||
@ -1141,7 +1169,9 @@ class Transaction(object):
|
||||
tx_body (dict): The Transaction to be transformed.
|
||||
"""
|
||||
# NOTE: Remove reference to avoid side effects
|
||||
tx_body = deepcopy(tx_body)
|
||||
# tx_body = deepcopy(tx_body)
|
||||
tx_body = rapidjson.loads(rapidjson.dumps(tx_body))
|
||||
|
||||
try:
|
||||
proposed_tx_id = tx_body['id']
|
||||
except KeyError:
|
||||
@ -1158,6 +1188,7 @@ class Transaction(object):
|
||||
raise InvalidHash(err_msg.format(proposed_tx_id))
|
||||
|
||||
@classmethod
|
||||
@memoize_from_dict
|
||||
def from_dict(cls, tx, skip_schema_validation=True):
|
||||
"""Transforms a Python dictionary to a Transaction object.
|
||||
|
||||
@ -1169,13 +1200,15 @@ class Transaction(object):
|
||||
"""
|
||||
operation = tx.get('operation', Transaction.CREATE) if isinstance(tx, dict) else Transaction.CREATE
|
||||
cls = Transaction.resolve_class(operation)
|
||||
|
||||
if not skip_schema_validation:
|
||||
cls.validate_id(tx)
|
||||
cls.validate_schema(tx)
|
||||
|
||||
inputs = [Input.from_dict(input_) for input_ in tx['inputs']]
|
||||
outputs = [Output.from_dict(output) for output in tx['outputs']]
|
||||
return cls(tx['operation'], tx['asset'], inputs, outputs,
|
||||
tx['metadata'], tx['version'], hash_id=tx['id'])
|
||||
tx['metadata'], tx['version'], hash_id=tx['id'], tx_dict=tx)
|
||||
|
||||
@classmethod
|
||||
def from_db(cls, bigchain, tx_dict_list):
|
||||
@ -1242,3 +1275,56 @@ class Transaction(object):
|
||||
@classmethod
|
||||
def validate_schema(cls, tx):
|
||||
pass
|
||||
|
||||
def validate_transfer_inputs(self, bigchain, current_transactions=[]):
|
||||
# store the inputs so that we can check if the asset ids match
|
||||
input_txs = []
|
||||
input_conditions = []
|
||||
for input_ in self.inputs:
|
||||
input_txid = input_.fulfills.txid
|
||||
input_tx = bigchain.get_transaction(input_txid)
|
||||
|
||||
if input_tx is None:
|
||||
for ctxn in current_transactions:
|
||||
if ctxn.id == input_txid:
|
||||
input_tx = ctxn
|
||||
|
||||
if input_tx is None:
|
||||
raise InputDoesNotExist("input `{}` doesn't exist"
|
||||
.format(input_txid))
|
||||
|
||||
spent = bigchain.get_spent(input_txid, input_.fulfills.output,
|
||||
current_transactions)
|
||||
if spent:
|
||||
raise DoubleSpend('input `{}` was already spent'
|
||||
.format(input_txid))
|
||||
|
||||
output = input_tx.outputs[input_.fulfills.output]
|
||||
input_conditions.append(output)
|
||||
input_txs.append(input_tx)
|
||||
|
||||
# Validate that all inputs are distinct
|
||||
links = [i.fulfills.to_uri() for i in self.inputs]
|
||||
if len(links) != len(set(links)):
|
||||
raise DoubleSpend('tx "{}" spends inputs twice'.format(self.id))
|
||||
|
||||
# validate asset id
|
||||
asset_id = self.get_asset_id(input_txs)
|
||||
if asset_id != self.asset['id']:
|
||||
raise AssetIdMismatch(('The asset id of the input does not'
|
||||
' match the asset id of the'
|
||||
' transaction'))
|
||||
|
||||
input_amount = sum([input_condition.amount for input_condition in input_conditions])
|
||||
output_amount = sum([output_condition.amount for output_condition in self.outputs])
|
||||
|
||||
if output_amount != input_amount:
|
||||
raise AmountError(('The amount used in the inputs `{}`'
|
||||
' needs to be same as the amount used'
|
||||
' in the outputs `{}`')
|
||||
.format(input_amount, output_amount))
|
||||
|
||||
if not self.inputs_valid(input_conditions):
|
||||
raise InvalidSignature('Transaction signature is invalid.')
|
||||
|
||||
return True
|
||||
|
8
bigchaindb/common/transaction_mode_types.py
Normal file
8
bigchaindb/common/transaction_mode_types.py
Normal file
@ -0,0 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
BROADCAST_TX_COMMIT = 'broadcast_tx_commit'
|
||||
BROADCAST_TX_ASYNC = 'broadcast_tx_async'
|
||||
BROADCAST_TX_SYNC = 'broadcast_tx_sync'
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
import time
|
||||
import re
|
||||
import rapidjson
|
||||
@ -72,10 +77,20 @@ def validate_txn_obj(obj_name, obj, key, validation_fun):
|
||||
if backend == 'localmongodb':
|
||||
data = obj.get(key, {})
|
||||
if isinstance(data, dict):
|
||||
validate_all_keys(obj_name, data, validation_fun)
|
||||
validate_all_keys_in_obj(obj_name, data, validation_fun)
|
||||
elif isinstance(data, list):
|
||||
validate_all_items_in_list(obj_name, data, validation_fun)
|
||||
|
||||
|
||||
def validate_all_keys(obj_name, obj, validation_fun):
|
||||
def validate_all_items_in_list(obj_name, data, validation_fun):
|
||||
for item in data:
|
||||
if isinstance(item, dict):
|
||||
validate_all_keys_in_obj(obj_name, item, validation_fun)
|
||||
elif isinstance(item, list):
|
||||
validate_all_items_in_list(obj_name, item, validation_fun)
|
||||
|
||||
|
||||
def validate_all_keys_in_obj(obj_name, obj, validation_fun):
|
||||
"""Validate all (nested) keys in `obj` by using `validation_fun`.
|
||||
|
||||
Args:
|
||||
@ -93,10 +108,12 @@ def validate_all_keys(obj_name, obj, validation_fun):
|
||||
for key, value in obj.items():
|
||||
validation_fun(obj_name, key)
|
||||
if isinstance(value, dict):
|
||||
validate_all_keys(obj_name, value, validation_fun)
|
||||
validate_all_keys_in_obj(obj_name, value, validation_fun)
|
||||
elif isinstance(value, list):
|
||||
validate_all_items_in_list(obj_name, value, validation_fun)
|
||||
|
||||
|
||||
def validate_all_values_for_key(obj, key, validation_fun):
|
||||
def validate_all_values_for_key_in_obj(obj, key, validation_fun):
|
||||
"""Validate value for all (nested) occurrence of `key` in `obj`
|
||||
using `validation_fun`.
|
||||
|
||||
@ -113,7 +130,17 @@ def validate_all_values_for_key(obj, key, validation_fun):
|
||||
if vkey == key:
|
||||
validation_fun(value)
|
||||
elif isinstance(value, dict):
|
||||
validate_all_values_for_key(value, key, validation_fun)
|
||||
validate_all_values_for_key_in_obj(value, key, validation_fun)
|
||||
elif isinstance(value, list):
|
||||
validate_all_values_for_key_in_list(value, key, validation_fun)
|
||||
|
||||
|
||||
def validate_all_values_for_key_in_list(input_list, key, validation_fun):
|
||||
for item in input_list:
|
||||
if isinstance(item, dict):
|
||||
validate_all_values_for_key_in_obj(item, key, validation_fun)
|
||||
elif isinstance(item, list):
|
||||
validate_all_values_for_key_in_list(item, key, validation_fun)
|
||||
|
||||
|
||||
def validate_key(obj_name, key):
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""Utils for reading and setting configuration settings.
|
||||
|
||||
The value of each BigchainDB Server configuration setting is
|
||||
@ -15,7 +20,7 @@ import os
|
||||
import copy
|
||||
import json
|
||||
import logging
|
||||
import collections
|
||||
import collections.abc
|
||||
from functools import lru_cache
|
||||
|
||||
from pkg_resources import iter_entry_points, ResolutionError
|
||||
@ -24,7 +29,7 @@ from bigchaindb.common import exceptions
|
||||
|
||||
import bigchaindb
|
||||
|
||||
from bigchaindb.consensus import BaseConsensusRules
|
||||
from bigchaindb.validation import BaseValidationRules
|
||||
|
||||
# TODO: move this to a proper configuration file for logging
|
||||
logging.getLogger('requests').setLevel(logging.WARNING)
|
||||
@ -47,7 +52,7 @@ def map_leafs(func, mapping):
|
||||
path = []
|
||||
|
||||
for key, val in mapping.items():
|
||||
if isinstance(val, collections.Mapping):
|
||||
if isinstance(val, collections.abc.Mapping):
|
||||
_inner(val, path + [key])
|
||||
else:
|
||||
mapping[key] = func(val, path=path+[key])
|
||||
@ -75,7 +80,7 @@ def update(d, u):
|
||||
mapping: An updated version of d (updated by u).
|
||||
"""
|
||||
for k, v in u.items():
|
||||
if isinstance(v, collections.Mapping):
|
||||
if isinstance(v, collections.abc.Mapping):
|
||||
r = update(d.get(k, {}), v)
|
||||
d[k] = r
|
||||
else:
|
||||
@ -254,38 +259,38 @@ def autoconfigure(filename=None, config=None, force=False):
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def load_consensus_plugin(name=None):
|
||||
"""Find and load the chosen consensus plugin.
|
||||
def load_validation_plugin(name=None):
|
||||
"""Find and load the chosen validation plugin.
|
||||
|
||||
Args:
|
||||
name (string): the name of the entry_point, as advertised in the
|
||||
setup.py of the providing package.
|
||||
|
||||
Returns:
|
||||
an uninstantiated subclass of ``bigchaindb.consensus.AbstractConsensusRules``
|
||||
an uninstantiated subclass of ``bigchaindb.validation.AbstractValidationRules``
|
||||
"""
|
||||
if not name:
|
||||
return BaseConsensusRules
|
||||
return BaseValidationRules
|
||||
|
||||
# TODO: This will return the first plugin with group `bigchaindb.consensus`
|
||||
# TODO: This will return the first plugin with group `bigchaindb.validation`
|
||||
# and name `name` in the active WorkingSet.
|
||||
# We should probably support Requirements specs in the config, e.g.
|
||||
# consensus_plugin: 'my-plugin-package==0.0.1;default'
|
||||
# validation_plugin: 'my-plugin-package==0.0.1;default'
|
||||
plugin = None
|
||||
for entry_point in iter_entry_points('bigchaindb.consensus', name):
|
||||
for entry_point in iter_entry_points('bigchaindb.validation', name):
|
||||
plugin = entry_point.load()
|
||||
|
||||
# No matching entry_point found
|
||||
if not plugin:
|
||||
raise ResolutionError(
|
||||
'No plugin found in group `bigchaindb.consensus` with name `{}`'.
|
||||
'No plugin found in group `bigchaindb.validation` with name `{}`'.
|
||||
format(name))
|
||||
|
||||
# Is this strictness desireable?
|
||||
# It will probably reduce developer headaches in the wild.
|
||||
if not issubclass(plugin, (BaseConsensusRules,)):
|
||||
if not issubclass(plugin, (BaseValidationRules,)):
|
||||
raise TypeError('object of type "{}" does not implement `bigchaindb.'
|
||||
'consensus.BaseConsensusRules`'.format(type(plugin)))
|
||||
'validation.BaseValidationRules`'.format(type(plugin)))
|
||||
|
||||
return plugin
|
||||
|
||||
|
@ -1,22 +0,0 @@
|
||||
|
||||
|
||||
class BaseConsensusRules():
|
||||
"""Base consensus rules for Bigchain.
|
||||
|
||||
A consensus plugin must expose a class inheriting from this one via an entry_point.
|
||||
|
||||
All methods listed below must be implemented.
|
||||
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def validate_transaction(bigchain, transaction):
|
||||
"""See :meth:`bigchaindb.models.Transaction.validate`
|
||||
for documentation.
|
||||
"""
|
||||
return transaction.validate(bigchain)
|
||||
|
||||
@staticmethod
|
||||
def validate_block(bigchain, block):
|
||||
"""See :meth:`bigchaindb.models.Block.validate` for documentation."""
|
||||
return block.validate(bigchain)
|
@ -1,28 +1,28 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""This module contains all the goodness to integrate BigchainDB
|
||||
with Tendermint."""
|
||||
with Tendermint.
|
||||
"""
|
||||
import logging
|
||||
import sys
|
||||
|
||||
from abci.application import BaseApplication
|
||||
from abci.types_pb2 import (
|
||||
ResponseInitChain,
|
||||
ResponseInfo,
|
||||
ResponseCheckTx,
|
||||
ResponseBeginBlock,
|
||||
ResponseDeliverTx,
|
||||
ResponseEndBlock,
|
||||
ResponseCommit,
|
||||
Validator,
|
||||
PubKey
|
||||
)
|
||||
from abci import CodeTypeOk
|
||||
|
||||
from bigchaindb import BigchainDB
|
||||
from bigchaindb.elections.election import Election
|
||||
from bigchaindb.version import __tm_supported_versions__
|
||||
from bigchaindb.utils import tendermint_version_is_compatible
|
||||
from bigchaindb.tendermint_utils import (decode_transaction,
|
||||
calculate_hash)
|
||||
from bigchaindb.lib import Block, PreCommitState
|
||||
from bigchaindb.backend.query import PRE_COMMIT_ID
|
||||
from bigchaindb.lib import Block
|
||||
import bigchaindb.upsert_validator.validator_utils as vutils
|
||||
from bigchaindb.events import EventTypes, Event
|
||||
|
||||
|
||||
CodeTypeOk = 0
|
||||
CodeTypeError = 1
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -31,30 +31,96 @@ class App(BaseApplication):
|
||||
"""Bridge between BigchainDB and Tendermint.
|
||||
|
||||
The role of this class is to expose the BigchainDB
|
||||
transactional logic to the Tendermint Consensus
|
||||
State Machine."""
|
||||
transaction logic to Tendermint Core.
|
||||
"""
|
||||
|
||||
def __init__(self, bigchaindb=None):
|
||||
def __init__(self, abci, bigchaindb=None, events_queue=None,):
|
||||
super().__init__(abci)
|
||||
self.events_queue = events_queue
|
||||
self.bigchaindb = bigchaindb or BigchainDB()
|
||||
self.block_txn_ids = []
|
||||
self.block_txn_hash = ''
|
||||
self.block_transactions = []
|
||||
self.validators = None
|
||||
self.new_height = None
|
||||
self.chain = self.bigchaindb.get_latest_abci_chain()
|
||||
|
||||
def init_chain(self, validators):
|
||||
"""Initialize chain with block of height 0"""
|
||||
def log_abci_migration_error(self, chain_id, validators):
|
||||
logger.error('An ABCI chain migration is in process. '
|
||||
'Download the new ABCI client and configure it with '
|
||||
f'chain_id={chain_id} and validators={validators}.')
|
||||
|
||||
block = Block(app_hash='', height=0, transactions=[])
|
||||
def abort_if_abci_chain_is_not_synced(self):
|
||||
if self.chain is None or self.chain['is_synced']:
|
||||
return
|
||||
|
||||
validators = self.bigchaindb.get_validators()
|
||||
self.log_abci_migration_error(self.chain['chain_id'], validators)
|
||||
sys.exit(1)
|
||||
|
||||
def init_chain(self, genesis):
|
||||
"""Initialize chain upon genesis or a migration"""
|
||||
|
||||
app_hash = ''
|
||||
height = 0
|
||||
|
||||
known_chain = self.bigchaindb.get_latest_abci_chain()
|
||||
if known_chain is not None:
|
||||
chain_id = known_chain['chain_id']
|
||||
|
||||
if known_chain['is_synced']:
|
||||
msg = (f'Got invalid InitChain ABCI request ({genesis}) - '
|
||||
f'the chain {chain_id} is already synced.')
|
||||
logger.error(msg)
|
||||
sys.exit(1)
|
||||
|
||||
if chain_id != genesis.chain_id:
|
||||
validators = self.bigchaindb.get_validators()
|
||||
self.log_abci_migration_error(chain_id, validators)
|
||||
sys.exit(1)
|
||||
|
||||
# set migration values for app hash and height
|
||||
block = self.bigchaindb.get_latest_block()
|
||||
app_hash = '' if block is None else block['app_hash']
|
||||
height = 0 if block is None else block['height'] + 1
|
||||
|
||||
known_validators = self.bigchaindb.get_validators()
|
||||
validator_set = [vutils.decode_validator(v)
|
||||
for v in genesis.validators]
|
||||
|
||||
if known_validators and known_validators != validator_set:
|
||||
self.log_abci_migration_error(known_chain['chain_id'],
|
||||
known_validators)
|
||||
sys.exit(1)
|
||||
|
||||
block = Block(app_hash=app_hash, height=height, transactions=[])
|
||||
self.bigchaindb.store_block(block._asdict())
|
||||
return ResponseInitChain()
|
||||
self.bigchaindb.store_validator_set(height + 1, validator_set)
|
||||
abci_chain_height = 0 if known_chain is None else known_chain['height']
|
||||
self.bigchaindb.store_abci_chain(abci_chain_height,
|
||||
genesis.chain_id, True)
|
||||
self.chain = {'height': abci_chain_height, 'is_synced': True,
|
||||
'chain_id': genesis.chain_id}
|
||||
return self.abci.ResponseInitChain()
|
||||
|
||||
def info(self, request):
|
||||
"""Return height of the latest committed block."""
|
||||
r = ResponseInfo()
|
||||
|
||||
self.abort_if_abci_chain_is_not_synced()
|
||||
|
||||
# Check if BigchainDB supports the Tendermint version
|
||||
if not (hasattr(request, 'version') and tendermint_version_is_compatible(request.version)):
|
||||
logger.error(f'Unsupported Tendermint version: {getattr(request, "version", "no version")}.'
|
||||
f' Currently, BigchainDB only supports {__tm_supported_versions__}. Exiting!')
|
||||
sys.exit(1)
|
||||
|
||||
logger.info(f"Tendermint version: {request.version}")
|
||||
|
||||
r = self.abci.ResponseInfo()
|
||||
block = self.bigchaindb.get_latest_block()
|
||||
if block:
|
||||
r.last_block_height = block['height']
|
||||
chain_shift = 0 if self.chain is None else self.chain['height']
|
||||
r.last_block_height = block['height'] - chain_shift
|
||||
r.last_block_app_hash = block['app_hash'].encode('utf-8')
|
||||
else:
|
||||
r.last_block_height = 0
|
||||
@ -66,19 +132,19 @@ class App(BaseApplication):
|
||||
the mempool.
|
||||
|
||||
Args:
|
||||
raw_tx: a raw string (in bytes) transaction."""
|
||||
raw_tx: a raw string (in bytes) transaction.
|
||||
"""
|
||||
|
||||
self.abort_if_abci_chain_is_not_synced()
|
||||
|
||||
logger.benchmark('CHECK_TX_INIT')
|
||||
logger.debug('check_tx: %s', raw_transaction)
|
||||
transaction = decode_transaction(raw_transaction)
|
||||
if self.bigchaindb.is_valid_transaction(transaction):
|
||||
logger.debug('check_tx: VALID')
|
||||
logger.benchmark('CHECK_TX_END, tx_id:%s', transaction['id'])
|
||||
return ResponseCheckTx(code=CodeTypeOk)
|
||||
return self.abci.ResponseCheckTx(code=CodeTypeOk)
|
||||
else:
|
||||
logger.debug('check_tx: INVALID')
|
||||
logger.benchmark('CHECK_TX_END, tx_id:%s', transaction['id'])
|
||||
return ResponseCheckTx(code=CodeTypeError)
|
||||
return self.abci.ResponseCheckTx(code=CodeTypeError)
|
||||
|
||||
def begin_block(self, req_begin_block):
|
||||
"""Initialize list of transaction.
|
||||
@ -86,41 +152,61 @@ class App(BaseApplication):
|
||||
req_begin_block: block object which contains block header
|
||||
and block hash.
|
||||
"""
|
||||
logger.benchmark('BEGIN BLOCK, height:%s, num_txs:%s',
|
||||
req_begin_block.header.height,
|
||||
req_begin_block.header.num_txs)
|
||||
self.abort_if_abci_chain_is_not_synced()
|
||||
|
||||
chain_shift = 0 if self.chain is None else self.chain['height']
|
||||
logger.debug('BEGIN BLOCK, height:%s, num_txs:%s',
|
||||
req_begin_block.header.height + chain_shift,
|
||||
req_begin_block.header.num_txs)
|
||||
|
||||
self.block_txn_ids = []
|
||||
self.block_transactions = []
|
||||
return ResponseBeginBlock()
|
||||
return self.abci.ResponseBeginBlock()
|
||||
|
||||
def deliver_tx(self, raw_transaction):
|
||||
"""Validate the transaction before mutating the state.
|
||||
|
||||
Args:
|
||||
raw_tx: a raw string (in bytes) transaction."""
|
||||
raw_tx: a raw string (in bytes) transaction.
|
||||
"""
|
||||
|
||||
self.abort_if_abci_chain_is_not_synced()
|
||||
|
||||
logger.debug('deliver_tx: %s', raw_transaction)
|
||||
transaction = self.bigchaindb.is_valid_transaction(
|
||||
decode_transaction(raw_transaction), self.block_transactions)
|
||||
|
||||
if not transaction:
|
||||
logger.debug('deliver_tx: INVALID')
|
||||
return ResponseDeliverTx(code=CodeTypeError)
|
||||
return self.abci.ResponseDeliverTx(code=CodeTypeError)
|
||||
else:
|
||||
logger.debug('storing tx')
|
||||
self.block_txn_ids.append(transaction.id)
|
||||
self.block_transactions.append(transaction)
|
||||
return ResponseDeliverTx(code=CodeTypeOk)
|
||||
return self.abci.ResponseDeliverTx(code=CodeTypeOk)
|
||||
|
||||
def end_block(self, request_end_block):
|
||||
"""Calculate block hash using transaction ids and previous block
|
||||
hash to be stored in the next block.
|
||||
|
||||
Args:
|
||||
height (int): new height of the chain."""
|
||||
height (int): new height of the chain.
|
||||
"""
|
||||
|
||||
height = request_end_block.height
|
||||
self.abort_if_abci_chain_is_not_synced()
|
||||
|
||||
chain_shift = 0 if self.chain is None else self.chain['height']
|
||||
|
||||
height = request_end_block.height + chain_shift
|
||||
self.new_height = height
|
||||
|
||||
# store pre-commit state to recover in case there is a crash during
|
||||
# `end_block` or `commit`
|
||||
logger.debug(f'Updating pre-commit state: {self.new_height}')
|
||||
pre_commit_state = dict(height=self.new_height,
|
||||
transactions=self.block_txn_ids)
|
||||
self.bigchaindb.store_pre_commit_state(pre_commit_state)
|
||||
|
||||
block_txn_hash = calculate_hash(self.block_txn_ids)
|
||||
block = self.bigchaindb.get_latest_block()
|
||||
|
||||
@ -129,50 +215,57 @@ class App(BaseApplication):
|
||||
else:
|
||||
self.block_txn_hash = block['app_hash']
|
||||
|
||||
validator_updates = self.bigchaindb.get_validator_update()
|
||||
validator_updates = [encode_validator(v) for v in validator_updates]
|
||||
validator_update = Election.process_block(self.bigchaindb,
|
||||
self.new_height,
|
||||
self.block_transactions)
|
||||
|
||||
# set sync status to true
|
||||
self.bigchaindb.delete_validator_update()
|
||||
|
||||
# Store pre-commit state to recover in case there is a crash
|
||||
# during `commit`
|
||||
pre_commit_state = PreCommitState(commit_id=PRE_COMMIT_ID,
|
||||
height=self.new_height,
|
||||
transactions=self.block_txn_ids)
|
||||
logger.debug('Updating PreCommitState: %s', self.new_height)
|
||||
self.bigchaindb.store_pre_commit_state(pre_commit_state._asdict())
|
||||
return ResponseEndBlock(validator_updates=validator_updates)
|
||||
return self.abci.ResponseEndBlock(validator_updates=validator_update)
|
||||
|
||||
def commit(self):
|
||||
"""Store the new height and along with block hash."""
|
||||
|
||||
self.abort_if_abci_chain_is_not_synced()
|
||||
|
||||
data = self.block_txn_hash.encode('utf-8')
|
||||
|
||||
# register a new block only when new transactions are received
|
||||
if self.block_txn_ids:
|
||||
self.bigchaindb.store_bulk_transactions(self.block_transactions)
|
||||
block = Block(app_hash=self.block_txn_hash,
|
||||
height=self.new_height,
|
||||
transactions=self.block_txn_ids)
|
||||
# NOTE: storing the block should be the last operation during commit
|
||||
# this effects crash recovery. Refer BEP#8 for details
|
||||
self.bigchaindb.store_block(block._asdict())
|
||||
|
||||
block = Block(app_hash=self.block_txn_hash,
|
||||
height=self.new_height,
|
||||
transactions=self.block_txn_ids)
|
||||
# NOTE: storing the block should be the last operation during commit
|
||||
# this effects crash recovery. Refer BEP#8 for details
|
||||
self.bigchaindb.store_block(block._asdict())
|
||||
|
||||
logger.debug('Commit-ing new block with hash: apphash=%s ,'
|
||||
'height=%s, txn ids=%s', data, self.new_height,
|
||||
self.block_txn_ids)
|
||||
logger.benchmark('COMMIT_BLOCK, height:%s', self.new_height)
|
||||
return ResponseCommit(data=data)
|
||||
|
||||
if self.events_queue:
|
||||
event = Event(EventTypes.BLOCK_VALID, {
|
||||
'height': self.new_height,
|
||||
'transactions': self.block_transactions
|
||||
})
|
||||
self.events_queue.put(event)
|
||||
|
||||
return self.abci.ResponseCommit(data=data)
|
||||
|
||||
|
||||
def encode_validator(v):
|
||||
ed25519_public_key = v['pub_key']['data']
|
||||
# NOTE: tendermint expects public to be encoded in go-amino format
|
||||
def rollback(b):
|
||||
pre_commit = b.get_pre_commit_state()
|
||||
|
||||
pub_key = PubKey(type='ed25519',
|
||||
data=bytes.fromhex(ed25519_public_key))
|
||||
if pre_commit is None:
|
||||
# the pre_commit record is first stored in the first `end_block`
|
||||
return
|
||||
|
||||
return Validator(pub_key=pub_key,
|
||||
address=b'',
|
||||
power=v['power'])
|
||||
latest_block = b.get_latest_block()
|
||||
if latest_block is None:
|
||||
logger.error('Found precommit state but no blocks!')
|
||||
sys.exit(1)
|
||||
|
||||
# NOTE: the pre-commit state is always at most 1 block ahead of the commited state
|
||||
if latest_block['height'] < pre_commit['height']:
|
||||
Election.rollback(b, pre_commit['height'], pre_commit['transactions'])
|
||||
b.delete_transactions(pre_commit['transactions'])
|
||||
|
0
bigchaindb/elections/__init__.py
Normal file
0
bigchaindb/elections/__init__.py
Normal file
355
bigchaindb/elections/election.py
Normal file
355
bigchaindb/elections/election.py
Normal file
@ -0,0 +1,355 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
from collections import OrderedDict
|
||||
|
||||
import base58
|
||||
from uuid import uuid4
|
||||
|
||||
from bigchaindb import backend
|
||||
from bigchaindb.elections.vote import Vote
|
||||
from bigchaindb.common.exceptions import (InvalidSignature,
|
||||
MultipleInputsError,
|
||||
InvalidProposer,
|
||||
UnequalValidatorSet,
|
||||
DuplicateTransaction)
|
||||
from bigchaindb.tendermint_utils import key_from_base64, public_key_to_base64
|
||||
from bigchaindb.common.crypto import (public_key_from_ed25519_key)
|
||||
from bigchaindb.common.transaction import Transaction
|
||||
from bigchaindb.common.schema import (_validate_schema,
|
||||
TX_SCHEMA_COMMON,
|
||||
TX_SCHEMA_CREATE)
|
||||
|
||||
|
||||
class Election(Transaction):
|
||||
"""Represents election transactions.
|
||||
|
||||
To implement a custom election, create a class deriving from this one
|
||||
with OPERATION set to the election operation, ALLOWED_OPERATIONS
|
||||
set to (OPERATION,), CREATE set to OPERATION.
|
||||
"""
|
||||
|
||||
OPERATION = None
|
||||
# Custom validation schema
|
||||
TX_SCHEMA_CUSTOM = None
|
||||
# Election Statuses:
|
||||
ONGOING = 'ongoing'
|
||||
CONCLUDED = 'concluded'
|
||||
INCONCLUSIVE = 'inconclusive'
|
||||
# Vote ratio to approve an election
|
||||
ELECTION_THRESHOLD = 2 / 3
|
||||
|
||||
@classmethod
|
||||
def get_validator_change(cls, bigchain):
|
||||
"""Return the validator set from the most recent approved block
|
||||
|
||||
:return: {
|
||||
'height': <block_height>,
|
||||
'validators': <validator_set>
|
||||
}
|
||||
"""
|
||||
latest_block = bigchain.get_latest_block()
|
||||
if latest_block is None:
|
||||
return None
|
||||
return bigchain.get_validator_change(latest_block['height'])
|
||||
|
||||
@classmethod
|
||||
def get_validators(cls, bigchain, height=None):
|
||||
"""Return a dictionary of validators with key as `public_key` and
|
||||
value as the `voting_power`
|
||||
"""
|
||||
validators = {}
|
||||
for validator in bigchain.get_validators(height):
|
||||
# NOTE: we assume that Tendermint encodes public key in base64
|
||||
public_key = public_key_from_ed25519_key(key_from_base64(validator['public_key']['value']))
|
||||
validators[public_key] = validator['voting_power']
|
||||
|
||||
return validators
|
||||
|
||||
@classmethod
|
||||
def recipients(cls, bigchain):
|
||||
"""Convert validator dictionary to a recipient list for `Transaction`"""
|
||||
|
||||
recipients = []
|
||||
for public_key, voting_power in cls.get_validators(bigchain).items():
|
||||
recipients.append(([public_key], voting_power))
|
||||
|
||||
return recipients
|
||||
|
||||
@classmethod
|
||||
def is_same_topology(cls, current_topology, election_topology):
|
||||
voters = {}
|
||||
for voter in election_topology:
|
||||
if len(voter.public_keys) > 1:
|
||||
return False
|
||||
|
||||
[public_key] = voter.public_keys
|
||||
voting_power = voter.amount
|
||||
voters[public_key] = voting_power
|
||||
|
||||
# Check whether the voters and their votes is same to that of the
|
||||
# validators and their voting power in the network
|
||||
return current_topology == voters
|
||||
|
||||
def validate(self, bigchain, current_transactions=[]):
|
||||
"""Validate election transaction
|
||||
|
||||
NOTE:
|
||||
* A valid election is initiated by an existing validator.
|
||||
|
||||
* A valid election is one where voters are validators and votes are
|
||||
allocated according to the voting power of each validator node.
|
||||
|
||||
Args:
|
||||
:param bigchain: (BigchainDB) an instantiated bigchaindb.lib.BigchainDB object.
|
||||
:param current_transactions: (list) A list of transactions to be validated along with the election
|
||||
|
||||
Returns:
|
||||
Election: a Election object or an object of the derived Election subclass.
|
||||
|
||||
Raises:
|
||||
ValidationError: If the election is invalid
|
||||
"""
|
||||
input_conditions = []
|
||||
|
||||
duplicates = any(txn for txn in current_transactions if txn.id == self.id)
|
||||
if bigchain.is_committed(self.id) or duplicates:
|
||||
raise DuplicateTransaction('transaction `{}` already exists'
|
||||
.format(self.id))
|
||||
|
||||
if not self.inputs_valid(input_conditions):
|
||||
raise InvalidSignature('Transaction signature is invalid.')
|
||||
|
||||
current_validators = self.get_validators(bigchain)
|
||||
|
||||
# NOTE: Proposer should be a single node
|
||||
if len(self.inputs) != 1 or len(self.inputs[0].owners_before) != 1:
|
||||
raise MultipleInputsError('`tx_signers` must be a list instance of length one')
|
||||
|
||||
# NOTE: Check if the proposer is a validator.
|
||||
[election_initiator_node_pub_key] = self.inputs[0].owners_before
|
||||
if election_initiator_node_pub_key not in current_validators.keys():
|
||||
raise InvalidProposer('Public key is not a part of the validator set')
|
||||
|
||||
# NOTE: Check if all validators have been assigned votes equal to their voting power
|
||||
if not self.is_same_topology(current_validators, self.outputs):
|
||||
raise UnequalValidatorSet('Validator set much be exactly same to the outputs of election')
|
||||
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def generate(cls, initiator, voters, election_data, metadata=None):
|
||||
# Break symmetry in case we need to call an election with the same properties twice
|
||||
uuid = uuid4()
|
||||
election_data['seed'] = str(uuid)
|
||||
|
||||
(inputs, outputs) = cls.validate_create(initiator, voters, election_data, metadata)
|
||||
election = cls(cls.OPERATION, {'data': election_data}, inputs, outputs, metadata)
|
||||
cls.validate_schema(election.to_dict())
|
||||
return election
|
||||
|
||||
@classmethod
|
||||
def validate_schema(cls, tx):
|
||||
"""Validate the election transaction. Since `ELECTION` extends `CREATE` transaction, all the validations for
|
||||
`CREATE` transaction should be inherited
|
||||
"""
|
||||
_validate_schema(TX_SCHEMA_COMMON, tx)
|
||||
_validate_schema(TX_SCHEMA_CREATE, tx)
|
||||
if cls.TX_SCHEMA_CUSTOM:
|
||||
_validate_schema(cls.TX_SCHEMA_CUSTOM, tx)
|
||||
|
||||
@classmethod
|
||||
def create(cls, tx_signers, recipients, metadata=None, asset=None):
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def transfer(cls, tx_signers, recipients, metadata=None, asset=None):
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def to_public_key(cls, election_id):
|
||||
return base58.b58encode(bytes.fromhex(election_id)).decode()
|
||||
|
||||
@classmethod
|
||||
def count_votes(cls, election_pk, transactions, getter=getattr):
|
||||
votes = 0
|
||||
for txn in transactions:
|
||||
if getter(txn, 'operation') == Vote.OPERATION:
|
||||
for output in getter(txn, 'outputs'):
|
||||
# NOTE: We enforce that a valid vote to election id will have only
|
||||
# election_pk in the output public keys, including any other public key
|
||||
# along with election_pk will lead to vote being not considered valid.
|
||||
if len(getter(output, 'public_keys')) == 1 and [election_pk] == getter(output, 'public_keys'):
|
||||
votes = votes + int(getter(output, 'amount'))
|
||||
return votes
|
||||
|
||||
def get_commited_votes(self, bigchain, election_pk=None):
|
||||
if election_pk is None:
|
||||
election_pk = self.to_public_key(self.id)
|
||||
txns = list(backend.query.get_asset_tokens_for_public_key(bigchain.connection,
|
||||
self.id,
|
||||
election_pk))
|
||||
return self.count_votes(election_pk, txns, dict.get)
|
||||
|
||||
def has_concluded(self, bigchain, current_votes=[]):
|
||||
"""Check if the election can be concluded or not.
|
||||
|
||||
* Elections can only be concluded if the validator set has not changed
|
||||
since the election was initiated.
|
||||
* Elections can be concluded only if the current votes form a supermajority.
|
||||
|
||||
Custom elections may override this function and introduce additional checks.
|
||||
"""
|
||||
if self.has_validator_set_changed(bigchain):
|
||||
return False
|
||||
|
||||
election_pk = self.to_public_key(self.id)
|
||||
votes_committed = self.get_commited_votes(bigchain, election_pk)
|
||||
votes_current = self.count_votes(election_pk, current_votes)
|
||||
|
||||
total_votes = sum(output.amount for output in self.outputs)
|
||||
if (votes_committed < (2/3) * total_votes) and \
|
||||
(votes_committed + votes_current >= (2/3)*total_votes):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_status(self, bigchain):
|
||||
election = self.get_election(self.id, bigchain)
|
||||
if election and election['is_concluded']:
|
||||
return self.CONCLUDED
|
||||
|
||||
return self.INCONCLUSIVE if self.has_validator_set_changed(bigchain) else self.ONGOING
|
||||
|
||||
def has_validator_set_changed(self, bigchain):
|
||||
latest_change = self.get_validator_change(bigchain)
|
||||
if latest_change is None:
|
||||
return False
|
||||
|
||||
latest_change_height = latest_change['height']
|
||||
|
||||
election = self.get_election(self.id, bigchain)
|
||||
|
||||
return latest_change_height > election['height']
|
||||
|
||||
def get_election(self, election_id, bigchain):
|
||||
return bigchain.get_election(election_id)
|
||||
|
||||
def store(self, bigchain, height, is_concluded):
|
||||
bigchain.store_election(self.id, height, is_concluded)
|
||||
|
||||
def show_election(self, bigchain):
|
||||
data = self.asset['data']
|
||||
if 'public_key' in data.keys():
|
||||
data['public_key'] = public_key_to_base64(data['public_key']['value'])
|
||||
response = ''
|
||||
for k, v in data.items():
|
||||
if k != 'seed':
|
||||
response += f'{k}={v}\n'
|
||||
response += f'status={self.get_status(bigchain)}'
|
||||
|
||||
return response
|
||||
|
||||
@classmethod
|
||||
def _get_initiated_elections(cls, height, txns):
|
||||
elections = []
|
||||
for tx in txns:
|
||||
if not isinstance(tx, Election):
|
||||
continue
|
||||
|
||||
elections.append({'election_id': tx.id, 'height': height,
|
||||
'is_concluded': False})
|
||||
return elections
|
||||
|
||||
@classmethod
|
||||
def _get_votes(cls, txns):
|
||||
elections = OrderedDict()
|
||||
for tx in txns:
|
||||
if not isinstance(tx, Vote):
|
||||
continue
|
||||
|
||||
election_id = tx.asset['id']
|
||||
if election_id not in elections:
|
||||
elections[election_id] = []
|
||||
elections[election_id].append(tx)
|
||||
return elections
|
||||
|
||||
@classmethod
|
||||
def process_block(cls, bigchain, new_height, txns):
|
||||
"""Looks for election and vote transactions inside the block, records
|
||||
and processes elections.
|
||||
|
||||
Every election is recorded in the database.
|
||||
|
||||
Every vote has a chance to conclude the corresponding election. When
|
||||
an election is concluded, the corresponding database record is
|
||||
marked as such.
|
||||
|
||||
Elections and votes are processed in the order in which they
|
||||
appear in the block. Elections are concluded in the order of
|
||||
appearance of their first votes in the block.
|
||||
|
||||
For every election concluded in the block, calls its `on_approval`
|
||||
method. The returned value of the last `on_approval`, if any,
|
||||
is a validator set update to be applied in one of the following blocks.
|
||||
|
||||
`on_approval` methods are implemented by elections of particular type.
|
||||
The method may contain side effects but should be idempotent. To account
|
||||
for other concluded elections, if it requires so, the method should
|
||||
rely on the database state.
|
||||
"""
|
||||
# elections initiated in this block
|
||||
initiated_elections = cls._get_initiated_elections(new_height, txns)
|
||||
|
||||
if initiated_elections:
|
||||
bigchain.store_elections(initiated_elections)
|
||||
|
||||
# elections voted for in this block and their votes
|
||||
elections = cls._get_votes(txns)
|
||||
|
||||
validator_update = None
|
||||
for election_id, votes in elections.items():
|
||||
election = bigchain.get_transaction(election_id)
|
||||
if election is None:
|
||||
continue
|
||||
|
||||
if not election.has_concluded(bigchain, votes):
|
||||
continue
|
||||
|
||||
validator_update = election.on_approval(bigchain, new_height)
|
||||
election.store(bigchain, new_height, is_concluded=True)
|
||||
|
||||
return [validator_update] if validator_update else []
|
||||
|
||||
@classmethod
|
||||
def rollback(cls, bigchain, new_height, txn_ids):
|
||||
"""Looks for election and vote transactions inside the block and
|
||||
cleans up the database artifacts possibly created in `process_blocks`.
|
||||
|
||||
Part of the `end_block`/`commit` crash recovery.
|
||||
"""
|
||||
|
||||
# delete election records for elections initiated at this height and
|
||||
# elections concluded at this height
|
||||
bigchain.delete_elections(new_height)
|
||||
|
||||
txns = [bigchain.get_transaction(tx_id) for tx_id in txn_ids]
|
||||
|
||||
elections = cls._get_votes(txns)
|
||||
for election_id in elections:
|
||||
election = bigchain.get_transaction(election_id)
|
||||
election.on_rollback(bigchain, new_height)
|
||||
|
||||
def on_approval(self, bigchain, new_height):
|
||||
"""Override to update the database state according to the
|
||||
election rules. Consider the current database state to account for
|
||||
other concluded elections, if required.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def on_rollback(self, bigchain, new_height):
|
||||
"""Override to clean up the database artifacts possibly created
|
||||
in `on_approval`. Part of the `end_block`/`commit` crash recovery.
|
||||
"""
|
||||
raise NotImplementedError
|
64
bigchaindb/elections/vote.py
Normal file
64
bigchaindb/elections/vote.py
Normal file
@ -0,0 +1,64 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
from bigchaindb.common.transaction import Transaction
|
||||
from bigchaindb.common.schema import (_validate_schema,
|
||||
TX_SCHEMA_COMMON,
|
||||
TX_SCHEMA_TRANSFER,
|
||||
TX_SCHEMA_VOTE)
|
||||
|
||||
|
||||
class Vote(Transaction):
|
||||
|
||||
OPERATION = 'VOTE'
|
||||
# NOTE: This class inherits TRANSFER txn type. The `TRANSFER` property is
|
||||
# overriden to re-use methods from parent class
|
||||
TRANSFER = OPERATION
|
||||
ALLOWED_OPERATIONS = (OPERATION,)
|
||||
# Custom validation schema
|
||||
TX_SCHEMA_CUSTOM = TX_SCHEMA_VOTE
|
||||
|
||||
def validate(self, bigchain, current_transactions=[]):
|
||||
"""Validate election vote transaction
|
||||
NOTE: There are no additional validity conditions on casting votes i.e.
|
||||
a vote is just a valid TRANFER transaction
|
||||
|
||||
For more details refer BEP-21: https://github.com/bigchaindb/BEPs/tree/master/21
|
||||
|
||||
Args:
|
||||
bigchain (BigchainDB): an instantiated bigchaindb.lib.BigchainDB object.
|
||||
|
||||
Returns:
|
||||
Vote: a Vote object
|
||||
|
||||
Raises:
|
||||
ValidationError: If the election vote is invalid
|
||||
"""
|
||||
self.validate_transfer_inputs(bigchain, current_transactions)
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def generate(cls, inputs, recipients, election_id, metadata=None):
|
||||
(inputs, outputs) = cls.validate_transfer(inputs, recipients, election_id, metadata)
|
||||
election_vote = cls(cls.OPERATION, {'id': election_id}, inputs, outputs, metadata)
|
||||
cls.validate_schema(election_vote.to_dict())
|
||||
return election_vote
|
||||
|
||||
@classmethod
|
||||
def validate_schema(cls, tx):
|
||||
"""Validate the validator election vote transaction. Since `VOTE` extends `TRANSFER`
|
||||
transaction, all the validations for `CREATE` transaction should be inherited
|
||||
"""
|
||||
_validate_schema(TX_SCHEMA_COMMON, tx)
|
||||
_validate_schema(TX_SCHEMA_TRANSFER, tx)
|
||||
_validate_schema(cls.TX_SCHEMA_CUSTOM, tx)
|
||||
|
||||
@classmethod
|
||||
def create(cls, tx_signers, recipients, metadata=None, asset=None):
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def transfer(cls, tx_signers, recipients, metadata=None, asset=None):
|
||||
raise NotImplementedError
|
@ -1,85 +0,0 @@
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
|
||||
import aiohttp
|
||||
|
||||
from bigchaindb import config
|
||||
from bigchaindb.common.utils import gen_timestamp
|
||||
from bigchaindb.events import EventTypes, Event
|
||||
from bigchaindb.tendermint_utils import decode_transaction_base64
|
||||
|
||||
|
||||
HOST = config['tendermint']['host']
|
||||
PORT = config['tendermint']['port']
|
||||
URL = 'ws://{}:{}/websocket'.format(HOST, PORT)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@asyncio.coroutine
|
||||
def connect_and_recv(event_queue):
|
||||
session = aiohttp.ClientSession()
|
||||
ws = yield from session.ws_connect(URL)
|
||||
|
||||
logger.info('Connected to tendermint ws server')
|
||||
|
||||
stream_id = 'bigchaindb_stream_{}'.format(gen_timestamp())
|
||||
yield from subscribe_events(ws, stream_id)
|
||||
|
||||
while True:
|
||||
msg = yield from ws.receive()
|
||||
process_event(event_queue, msg.data, stream_id)
|
||||
|
||||
if msg.type in (aiohttp.WSMsgType.CLOSED,
|
||||
aiohttp.WSMsgType.ERROR):
|
||||
session.close()
|
||||
raise aiohttp.ClientConnectionError()
|
||||
|
||||
|
||||
def process_event(event_queue, event, stream_id):
|
||||
event_stream_id = stream_id + '#event'
|
||||
event = json.loads(event)
|
||||
|
||||
if (event['id'] == event_stream_id and event['result']['query'] == 'tm.event=\'NewBlock\''):
|
||||
block = event['result']['data']['value']['block']
|
||||
block_id = block['header']['height']
|
||||
block_txs = block['data']['txs']
|
||||
|
||||
# Only push non empty blocks
|
||||
if block_txs:
|
||||
block_txs = [decode_transaction_base64(txn) for txn in block_txs]
|
||||
new_block = {'height': block_id, 'transactions': block_txs}
|
||||
event = Event(EventTypes.BLOCK_VALID, new_block)
|
||||
event_queue.put(event)
|
||||
|
||||
|
||||
@asyncio.coroutine
|
||||
def subscribe_events(ws, stream_id):
|
||||
payload = {
|
||||
'method': 'subscribe',
|
||||
'jsonrpc': '2.0',
|
||||
'params': ['tm.event=\'NewBlock\''],
|
||||
'id': stream_id
|
||||
}
|
||||
yield from ws.send_str(json.dumps(payload))
|
||||
|
||||
|
||||
@asyncio.coroutine
|
||||
def try_connect_and_recv(event_queue):
|
||||
try:
|
||||
yield from connect_and_recv(event_queue)
|
||||
|
||||
except Exception as e:
|
||||
logger.warning('WebSocket connection failed with exception %s', e)
|
||||
time.sleep(3)
|
||||
yield from try_connect_and_recv(event_queue)
|
||||
|
||||
|
||||
def start(event_queue):
|
||||
loop = asyncio.get_event_loop()
|
||||
try:
|
||||
loop.run_until_complete(try_connect_and_recv(event_queue))
|
||||
except (KeyboardInterrupt, SystemExit):
|
||||
logger.info('Shutting down Tendermint event stream connection')
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
from queue import Empty
|
||||
from collections import defaultdict
|
||||
from multiprocessing import Queue
|
||||
@ -67,7 +72,7 @@ class Exchange:
|
||||
"""
|
||||
|
||||
try:
|
||||
self.started_queue.get_nowait()
|
||||
self.started_queue.get(timeout=1)
|
||||
raise RuntimeError('Cannot create a new subscriber queue while Exchange is running.')
|
||||
except Empty:
|
||||
pass
|
||||
|
@ -1,10 +1,12 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
|
||||
class BigchainDBError(Exception):
|
||||
"""Base class for BigchainDB exceptions."""
|
||||
|
||||
|
||||
class CriticalDoubleSpend(BigchainDBError):
|
||||
"""Data integrity error that requires attention"""
|
||||
|
||||
|
||||
class CriticalDoubleInclusion(BigchainDBError):
|
||||
"""Data integrity error that requires attention"""
|
||||
|
@ -1,19 +1,21 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
from bigchaindb.utils import condition_details_has_owner
|
||||
from bigchaindb.backend import query
|
||||
from bigchaindb.common.transaction import TransactionLink
|
||||
|
||||
|
||||
class FastQuery():
|
||||
"""
|
||||
Database queries that join on block results from a single node.
|
||||
"""
|
||||
"""Database queries that join on block results from a single node."""
|
||||
|
||||
def __init__(self, connection):
|
||||
self.connection = connection
|
||||
|
||||
def get_outputs_by_public_key(self, public_key):
|
||||
"""
|
||||
Get outputs for a public key
|
||||
"""
|
||||
"""Get outputs for a public key"""
|
||||
txs = list(query.get_owned_ids(self.connection, public_key))
|
||||
return [TransactionLink(tx['id'], index)
|
||||
for tx in txs
|
||||
@ -22,8 +24,7 @@ class FastQuery():
|
||||
public_key)]
|
||||
|
||||
def filter_spent_outputs(self, outputs):
|
||||
"""
|
||||
Remove outputs that have been spent
|
||||
"""Remove outputs that have been spent
|
||||
|
||||
Args:
|
||||
outputs: list of TransactionLink
|
||||
@ -36,8 +37,7 @@ class FastQuery():
|
||||
return [ff for ff in outputs if ff not in spends]
|
||||
|
||||
def filter_unspent_outputs(self, outputs):
|
||||
"""
|
||||
Remove outputs that have not been spent
|
||||
"""Remove outputs that have not been spent
|
||||
|
||||
Args:
|
||||
outputs: list of TransactionLink
|
||||
|
@ -1,16 +1,22 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""Module containing main contact points with Tendermint and
|
||||
MongoDB.
|
||||
|
||||
"""
|
||||
import logging
|
||||
from collections import namedtuple
|
||||
from copy import deepcopy
|
||||
from uuid import uuid4
|
||||
|
||||
import rapidjson
|
||||
|
||||
try:
|
||||
from hashlib import sha3_256
|
||||
except ImportError:
|
||||
# NOTE: neeeded for Python < 3.6
|
||||
# NOTE: needed for Python < 3.6
|
||||
from sha3 import sha3_256
|
||||
|
||||
import requests
|
||||
@ -21,9 +27,13 @@ from bigchaindb.models import Transaction
|
||||
from bigchaindb.common.exceptions import (SchemaValidationError,
|
||||
ValidationError,
|
||||
DoubleSpend)
|
||||
from bigchaindb.common.transaction_mode_types import (BROADCAST_TX_COMMIT,
|
||||
BROADCAST_TX_ASYNC,
|
||||
BROADCAST_TX_SYNC)
|
||||
from bigchaindb.tendermint_utils import encode_transaction, merkleroot
|
||||
from bigchaindb import exceptions as core_exceptions
|
||||
from bigchaindb.consensus import BaseConsensusRules
|
||||
from bigchaindb.validation import BaseValidationRules
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -34,18 +44,6 @@ class BigchainDB(object):
|
||||
Create, read, sign, write transactions to the database
|
||||
"""
|
||||
|
||||
BLOCK_INVALID = 'invalid'
|
||||
"""return if a block is invalid"""
|
||||
|
||||
BLOCK_VALID = TX_VALID = 'valid'
|
||||
"""return if a block is valid, or tx is in valid block"""
|
||||
|
||||
BLOCK_UNDECIDED = TX_UNDECIDED = 'undecided'
|
||||
"""return if block is undecided, or tx is in undecided block"""
|
||||
|
||||
TX_IN_BACKLOG = 'backlog'
|
||||
"""return if transaction is in backlog"""
|
||||
|
||||
def __init__(self, connection=None):
|
||||
"""Initialize the Bigchain instance
|
||||
|
||||
@ -63,19 +61,20 @@ class BigchainDB(object):
|
||||
A connection to the database.
|
||||
"""
|
||||
config_utils.autoconfigure()
|
||||
self.mode_list = ('broadcast_tx_async',
|
||||
'broadcast_tx_sync',
|
||||
'broadcast_tx_commit')
|
||||
self.mode_commit = BROADCAST_TX_COMMIT
|
||||
self.mode_list = (BROADCAST_TX_ASYNC,
|
||||
BROADCAST_TX_SYNC,
|
||||
self.mode_commit)
|
||||
self.tendermint_host = bigchaindb.config['tendermint']['host']
|
||||
self.tendermint_port = bigchaindb.config['tendermint']['port']
|
||||
self.endpoint = 'http://{}:{}/'.format(self.tendermint_host, self.tendermint_port)
|
||||
|
||||
consensusPlugin = bigchaindb.config.get('consensus_plugin')
|
||||
validationPlugin = bigchaindb.config.get('validation_plugin')
|
||||
|
||||
if consensusPlugin:
|
||||
self.consensus = config_utils.load_consensus_plugin(consensusPlugin)
|
||||
if validationPlugin:
|
||||
self.validation = config_utils.load_validation_plugin(validationPlugin)
|
||||
else:
|
||||
self.consensus = BaseConsensusRules
|
||||
self.validation = BaseValidationRules
|
||||
|
||||
self.connection = connection if connection else backend.connect(**bigchaindb.config['database'])
|
||||
|
||||
@ -85,10 +84,11 @@ class BigchainDB(object):
|
||||
raise ValidationError('Mode must be one of the following {}.'
|
||||
.format(', '.join(self.mode_list)))
|
||||
|
||||
tx_dict = transaction.tx_dict if transaction.tx_dict else transaction.to_dict()
|
||||
payload = {
|
||||
'method': mode,
|
||||
'jsonrpc': '2.0',
|
||||
'params': [encode_transaction(transaction.to_dict())],
|
||||
'params': [encode_transaction(tx_dict)],
|
||||
'id': str(uuid4())
|
||||
}
|
||||
# TODO: handle connection errors!
|
||||
@ -102,57 +102,38 @@ class BigchainDB(object):
|
||||
|
||||
def _process_post_response(self, response, mode):
|
||||
logger.debug(response)
|
||||
if response.get('error') is not None:
|
||||
return (500, 'Internal error')
|
||||
|
||||
error = response.get('error')
|
||||
if error:
|
||||
status_code = 500
|
||||
message = error.get('message', 'Internal Error')
|
||||
data = error.get('data', '')
|
||||
|
||||
if 'Tx already exists in cache' in data:
|
||||
status_code = 400
|
||||
|
||||
return (status_code, message + ' - ' + data)
|
||||
|
||||
result = response['result']
|
||||
if mode == self.mode_commit:
|
||||
check_tx_code = result.get('check_tx', {}).get('code', 0)
|
||||
deliver_tx_code = result.get('deliver_tx', {}).get('code', 0)
|
||||
error_code = check_tx_code or deliver_tx_code
|
||||
else:
|
||||
error_code = result.get('code', 0)
|
||||
|
||||
if error_code:
|
||||
return (500, 'Transaction validation failed')
|
||||
|
||||
return (202, '')
|
||||
# result = response['result']
|
||||
# if mode == self.mode_list[2]:
|
||||
# return self._process_commit_mode_response(result)
|
||||
# else:
|
||||
# status_code = result['code']
|
||||
# return self._process_status_code(status_code,
|
||||
# 'Error while processing transaction')
|
||||
|
||||
# def _process_commit_mode_response(self, result):
|
||||
# check_tx_status_code = result['check_tx']['code']
|
||||
# if check_tx_status_code == 0:
|
||||
# deliver_tx_status_code = result['deliver_tx']['code']
|
||||
# return self._process_status_code(deliver_tx_status_code,
|
||||
# 'Error while commiting the transaction')
|
||||
# else:
|
||||
# return (500, 'Error while validating the transaction')
|
||||
|
||||
def process_status_code(self, status_code, failure_msg):
|
||||
return (202, '') if status_code == 0 else (500, failure_msg)
|
||||
|
||||
def store_transaction(self, transaction):
|
||||
"""Store a valid transaction to the transactions collection."""
|
||||
|
||||
# self.update_utxoset(transaction)
|
||||
transaction = deepcopy(transaction.to_dict())
|
||||
if transaction['operation'] == 'CREATE':
|
||||
asset = transaction.pop('asset')
|
||||
asset['id'] = transaction['id']
|
||||
if asset['data']:
|
||||
backend.query.store_asset(self.connection, asset)
|
||||
|
||||
metadata = transaction.pop('metadata')
|
||||
transaction_metadata = {'id': transaction['id'],
|
||||
'metadata': metadata}
|
||||
|
||||
backend.query.store_metadatas(self.connection, [transaction_metadata])
|
||||
|
||||
return backend.query.store_transaction(self.connection, transaction)
|
||||
|
||||
def store_bulk_transactions(self, transactions):
|
||||
txns = []
|
||||
assets = []
|
||||
txn_metadatas = []
|
||||
for transaction_obj in transactions:
|
||||
# self.update_utxoset(transaction)
|
||||
transaction = transaction_obj.to_dict()
|
||||
if transaction['operation'] == transaction_obj.CREATE:
|
||||
for t in transactions:
|
||||
transaction = t.tx_dict if t.tx_dict else rapidjson.loads(rapidjson.dumps(t.to_dict()))
|
||||
if transaction['operation'] == t.CREATE:
|
||||
asset = transaction.pop('asset')
|
||||
asset['id'] = transaction['id']
|
||||
assets.append(asset)
|
||||
@ -167,6 +148,9 @@ class BigchainDB(object):
|
||||
backend.query.store_assets(self.connection, assets)
|
||||
return backend.query.store_transactions(self.connection, txns)
|
||||
|
||||
def delete_transactions(self, txs):
|
||||
return backend.query.delete_transactions(self.connection, txs)
|
||||
|
||||
def update_utxoset(self, transaction):
|
||||
"""Update the UTXO set given ``transaction``. That is, remove
|
||||
the outputs that the given ``transaction`` spends, and add the
|
||||
@ -251,7 +235,11 @@ class BigchainDB(object):
|
||||
return backend.query.delete_unspent_outputs(
|
||||
self.connection, *unspent_outputs)
|
||||
|
||||
def get_transaction(self, transaction_id, include_status=False):
|
||||
def is_committed(self, transaction_id):
|
||||
transaction = backend.query.get_transaction(self.connection, transaction_id)
|
||||
return bool(transaction)
|
||||
|
||||
def get_transaction(self, transaction_id):
|
||||
transaction = backend.query.get_transaction(self.connection, transaction_id)
|
||||
|
||||
if transaction:
|
||||
@ -269,20 +257,18 @@ class BigchainDB(object):
|
||||
|
||||
transaction = Transaction.from_dict(transaction)
|
||||
|
||||
if include_status:
|
||||
return transaction, self.TX_VALID if transaction else None
|
||||
else:
|
||||
return transaction
|
||||
return transaction
|
||||
|
||||
def get_transactions_filtered(self, asset_id, operation=None):
|
||||
def get_transactions(self, txn_ids):
|
||||
return backend.query.get_transactions(self.connection, txn_ids)
|
||||
|
||||
def get_transactions_filtered(self, asset_id, operation=None, last_tx=None):
|
||||
"""Get a list of transactions filtered on some criteria
|
||||
"""
|
||||
txids = backend.query.get_txids_filtered(self.connection, asset_id,
|
||||
operation)
|
||||
operation, last_tx)
|
||||
for txid in txids:
|
||||
tx, status = self.get_transaction(txid, True)
|
||||
if status == self.TX_VALID:
|
||||
yield tx
|
||||
yield self.get_transaction(txid)
|
||||
|
||||
def get_outputs_filtered(self, owner, spent=None):
|
||||
"""Get a list of output links filtered on some criteria
|
||||
@ -317,7 +303,8 @@ class BigchainDB(object):
|
||||
current_spent_transactions = []
|
||||
for ctxn in current_transactions:
|
||||
for ctxn_input in ctxn.inputs:
|
||||
if ctxn_input.fulfills.txid == txid and\
|
||||
if ctxn_input.fulfills and\
|
||||
ctxn_input.fulfills.txid == txid and\
|
||||
ctxn_input.fulfills.output == output:
|
||||
current_spent_transactions.append(ctxn)
|
||||
|
||||
@ -421,16 +408,8 @@ class BigchainDB(object):
|
||||
Returns:
|
||||
iter: An iterator of assets that match the text search.
|
||||
"""
|
||||
objects = backend.query.text_search(self.connection, search, limit=limit,
|
||||
table=table)
|
||||
|
||||
# TODO: This is not efficient. There may be a more efficient way to
|
||||
# query by storing block ids with the assets and using fastquery.
|
||||
# See https://github.com/bigchaindb/bigchaindb/issues/1496
|
||||
for obj in objects:
|
||||
tx, status = self.get_transaction(obj['id'], True)
|
||||
if status == self.TX_VALID:
|
||||
yield obj
|
||||
return backend.query.text_search(self.connection, search, limit=limit,
|
||||
table=table)
|
||||
|
||||
def get_assets(self, asset_ids):
|
||||
"""Return a list of assets that match the asset_ids
|
||||
@ -460,31 +439,76 @@ class BigchainDB(object):
|
||||
def fastquery(self):
|
||||
return fastquery.FastQuery(self.connection)
|
||||
|
||||
def get_validators(self):
|
||||
try:
|
||||
resp = requests.get('{}validators'.format(self.endpoint))
|
||||
validators = resp.json()['result']['validators']
|
||||
for v in validators:
|
||||
v.pop('accum')
|
||||
v.pop('address')
|
||||
def get_validator_change(self, height=None):
|
||||
return backend.query.get_validator_set(self.connection, height)
|
||||
|
||||
return validators
|
||||
def get_validators(self, height=None):
|
||||
result = self.get_validator_change(height)
|
||||
return [] if result is None else result['validators']
|
||||
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error('Error while connecting to Tendermint HTTP API')
|
||||
raise e
|
||||
def get_election(self, election_id):
|
||||
return backend.query.get_election(self.connection, election_id)
|
||||
|
||||
def get_validator_update(self):
|
||||
update = backend.query.get_validator_update(self.connection)
|
||||
return [update['validator']] if update else []
|
||||
|
||||
def delete_validator_update(self):
|
||||
return backend.query.delete_validator_update(self.connection)
|
||||
def get_pre_commit_state(self):
|
||||
return backend.query.get_pre_commit_state(self.connection)
|
||||
|
||||
def store_pre_commit_state(self, state):
|
||||
return backend.query.store_pre_commit_state(self.connection, state)
|
||||
|
||||
def store_validator_set(self, height, validators):
|
||||
"""Store validator set at a given `height`.
|
||||
NOTE: If the validator set already exists at that `height` then an
|
||||
exception will be raised.
|
||||
"""
|
||||
return backend.query.store_validator_set(self.connection, {'height': height,
|
||||
'validators': validators})
|
||||
|
||||
def delete_validator_set(self, height):
|
||||
return backend.query.delete_validator_set(self.connection, height)
|
||||
|
||||
def store_abci_chain(self, height, chain_id, is_synced=True):
|
||||
return backend.query.store_abci_chain(self.connection, height,
|
||||
chain_id, is_synced)
|
||||
|
||||
def delete_abci_chain(self, height):
|
||||
return backend.query.delete_abci_chain(self.connection, height)
|
||||
|
||||
def get_latest_abci_chain(self):
|
||||
return backend.query.get_latest_abci_chain(self.connection)
|
||||
|
||||
def migrate_abci_chain(self):
|
||||
"""Generate and record a new ABCI chain ID. New blocks are not
|
||||
accepted until we receive an InitChain ABCI request with
|
||||
the matching chain ID and validator set.
|
||||
|
||||
Chain ID is generated based on the current chain and height.
|
||||
`chain-X` => `chain-X-migrated-at-height-5`.
|
||||
`chain-X-migrated-at-height-5` => `chain-X-migrated-at-height-21`.
|
||||
|
||||
If there is no known chain (we are at genesis), the function returns.
|
||||
"""
|
||||
latest_chain = self.get_latest_abci_chain()
|
||||
if latest_chain is None:
|
||||
return
|
||||
|
||||
block = self.get_latest_block()
|
||||
|
||||
suffix = '-migrated-at-height-'
|
||||
chain_id = latest_chain['chain_id']
|
||||
block_height_str = str(block['height'])
|
||||
new_chain_id = chain_id.split(suffix)[0] + suffix + block_height_str
|
||||
|
||||
self.store_abci_chain(block['height'] + 1, new_chain_id, False)
|
||||
|
||||
def store_election(self, election_id, height, is_concluded):
|
||||
return backend.query.store_election(self.connection, election_id,
|
||||
height, is_concluded)
|
||||
|
||||
def store_elections(self, elections):
|
||||
return backend.query.store_elections(self.connection, elections)
|
||||
|
||||
def delete_elections(self, height):
|
||||
return backend.query.delete_elections(self.connection, height)
|
||||
|
||||
|
||||
Block = namedtuple('Block', ('app_hash', 'height', 'transactions'))
|
||||
|
||||
PreCommitState = namedtuple('PreCommitState', ('commit_id', 'height', 'transactions'))
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
import bigchaindb
|
||||
import logging
|
||||
|
||||
@ -7,8 +12,6 @@ import os
|
||||
|
||||
|
||||
DEFAULT_LOG_DIR = os.getcwd()
|
||||
BENCHMARK_LOG_LEVEL = 15
|
||||
|
||||
|
||||
DEFAULT_LOGGING_CONFIG = {
|
||||
'version': 1,
|
||||
@ -25,11 +28,6 @@ DEFAULT_LOGGING_CONFIG = {
|
||||
'format': ('[%(asctime)s] [%(levelname)s] (%(name)s) '
|
||||
'%(message)s (%(processName)-10s - pid: %(process)d)'),
|
||||
'datefmt': '%Y-%m-%d %H:%M:%S',
|
||||
},
|
||||
'benchmark': {
|
||||
'class': 'logging.Formatter',
|
||||
'format': ('%(asctime)s, %(levelname)s, %(message)s'),
|
||||
'datefmt': '%Y-%m-%d %H:%M:%S',
|
||||
}
|
||||
},
|
||||
'handlers': {
|
||||
@ -55,31 +53,16 @@ DEFAULT_LOGGING_CONFIG = {
|
||||
'backupCount': 5,
|
||||
'formatter': 'file',
|
||||
'level': logging.ERROR,
|
||||
},
|
||||
'benchmark': {
|
||||
'class': 'logging.handlers.RotatingFileHandler',
|
||||
'filename': os.path.join(DEFAULT_LOG_DIR, 'bigchaindb-benchmark.log'),
|
||||
'mode': 'w',
|
||||
'maxBytes': 209715200,
|
||||
'backupCount': 5,
|
||||
'formatter': 'benchmark',
|
||||
'level': BENCHMARK_LOG_LEVEL,
|
||||
}
|
||||
},
|
||||
'loggers': {},
|
||||
'root': {
|
||||
'level': logging.DEBUG,
|
||||
'handlers': ['console', 'file', 'errors', 'benchmark'],
|
||||
'handlers': ['console', 'file', 'errors'],
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def benchmark(self, message, *args, **kws):
|
||||
# Yes, logger takes its '*args' as 'args'.
|
||||
if self.isEnabledFor(BENCHMARK_LOG_LEVEL):
|
||||
self._log(BENCHMARK_LOG_LEVEL, message, args, **kws)
|
||||
|
||||
|
||||
def _normalize_log_level(level):
|
||||
try:
|
||||
return level.upper()
|
||||
@ -100,11 +83,6 @@ def setup_logging():
|
||||
|
||||
"""
|
||||
|
||||
# Add a new logging level for logging benchmark
|
||||
logging.addLevelName(BENCHMARK_LOG_LEVEL, 'BENCHMARK')
|
||||
logging.BENCHMARK = BENCHMARK_LOG_LEVEL
|
||||
logging.Logger.benchmark = benchmark
|
||||
|
||||
logging_configs = DEFAULT_LOGGING_CONFIG
|
||||
new_logging_configs = bigchaindb.config['log']
|
||||
|
||||
@ -123,7 +101,6 @@ def setup_logging():
|
||||
if 'level_logfile' in new_logging_configs:
|
||||
level = _normalize_log_level(new_logging_configs['level_logfile'])
|
||||
logging_configs['handlers']['file']['level'] = level
|
||||
logging_configs['handlers']['benchmark']['level'] = level
|
||||
|
||||
if 'fmt_console' in new_logging_configs:
|
||||
fmt = new_logging_configs['fmt_console']
|
||||
|
0
bigchaindb/migrations/__init__.py
Normal file
0
bigchaindb/migrations/__init__.py
Normal file
48
bigchaindb/migrations/chain_migration_election.py
Normal file
48
bigchaindb/migrations/chain_migration_election.py
Normal file
@ -0,0 +1,48 @@
|
||||
import json
|
||||
|
||||
from bigchaindb.common.schema import TX_SCHEMA_CHAIN_MIGRATION_ELECTION
|
||||
from bigchaindb.elections.election import Election
|
||||
|
||||
|
||||
class ChainMigrationElection(Election):
|
||||
|
||||
OPERATION = 'CHAIN_MIGRATION_ELECTION'
|
||||
CREATE = OPERATION
|
||||
ALLOWED_OPERATIONS = (OPERATION,)
|
||||
TX_SCHEMA_CUSTOM = TX_SCHEMA_CHAIN_MIGRATION_ELECTION
|
||||
|
||||
def has_concluded(self, bigchaindb, *args, **kwargs):
|
||||
chain = bigchaindb.get_latest_abci_chain()
|
||||
if chain is not None and not chain['is_synced']:
|
||||
# do not conclude the migration election if
|
||||
# there is another migration in progress
|
||||
return False
|
||||
|
||||
return super().has_concluded(bigchaindb, *args, **kwargs)
|
||||
|
||||
def on_approval(self, bigchain, *args, **kwargs):
|
||||
bigchain.migrate_abci_chain()
|
||||
|
||||
def show_election(self, bigchain):
|
||||
output = super().show_election(bigchain)
|
||||
chain = bigchain.get_latest_abci_chain()
|
||||
if chain is None or chain['is_synced']:
|
||||
return output
|
||||
|
||||
output += f'\nchain_id={chain["chain_id"]}'
|
||||
block = bigchain.get_latest_block()
|
||||
output += f'\napp_hash={block["app_hash"]}'
|
||||
validators = [
|
||||
{
|
||||
'pub_key': {
|
||||
'type': 'tendermint/PubKeyEd25519',
|
||||
'value': k,
|
||||
},
|
||||
'power': v,
|
||||
} for k, v in self.get_validators(bigchain).items()
|
||||
]
|
||||
output += f'\nvalidators={json.dumps(validators, indent=4)}'
|
||||
return output
|
||||
|
||||
def on_rollback(self, bigchain, new_height):
|
||||
bigchain.delete_abci_chain(new_height)
|
@ -1,27 +1,29 @@
|
||||
from bigchaindb.common.exceptions import (InvalidSignature, DoubleSpend,
|
||||
InputDoesNotExist,
|
||||
TransactionNotInValidBlock,
|
||||
AssetIdMismatch, AmountError,
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
from bigchaindb.backend.schema import validate_language_key
|
||||
from bigchaindb.common.exceptions import (InvalidSignature,
|
||||
DuplicateTransaction)
|
||||
from bigchaindb.common.schema import validate_transaction_schema
|
||||
from bigchaindb.common.transaction import Transaction
|
||||
from bigchaindb.common.utils import (validate_txn_obj, validate_key)
|
||||
from bigchaindb.common.schema import validate_transaction_schema
|
||||
from bigchaindb.backend.schema import validate_language_key
|
||||
|
||||
|
||||
class Transaction(Transaction):
|
||||
ASSET = 'asset'
|
||||
METADATA = 'metadata'
|
||||
DATA = 'data'
|
||||
|
||||
def validate(self, bigchain, current_transactions=[]):
|
||||
"""Validate transaction spend
|
||||
|
||||
Args:
|
||||
bigchain (BigchainDB): an instantiated bigchaindb.BigchainDB object.
|
||||
|
||||
Returns:
|
||||
The transaction (Transaction) if the transaction is valid else it
|
||||
raises an exception describing the reason why the transaction is
|
||||
invalid.
|
||||
|
||||
Raises:
|
||||
ValidationError: If the transaction is invalid
|
||||
"""
|
||||
@ -29,67 +31,15 @@ class Transaction(Transaction):
|
||||
|
||||
if self.operation == Transaction.CREATE:
|
||||
duplicates = any(txn for txn in current_transactions if txn.id == self.id)
|
||||
if bigchain.get_transaction(self.to_dict()['id']) or duplicates:
|
||||
if bigchain.is_committed(self.id) or duplicates:
|
||||
raise DuplicateTransaction('transaction `{}` already exists'
|
||||
.format(self.id))
|
||||
|
||||
if not self.inputs_valid(input_conditions):
|
||||
raise InvalidSignature('Transaction signature is invalid.')
|
||||
|
||||
elif self.operation == Transaction.TRANSFER:
|
||||
# store the inputs so that we can check if the asset ids match
|
||||
input_txs = []
|
||||
for input_ in self.inputs:
|
||||
input_txid = input_.fulfills.txid
|
||||
input_tx, status = bigchain.\
|
||||
get_transaction(input_txid, include_status=True)
|
||||
|
||||
if input_tx is None:
|
||||
for ctxn in current_transactions:
|
||||
# assume that the status as valid for previously validated
|
||||
# transactions in current round
|
||||
if ctxn.id == input_txid:
|
||||
input_tx = ctxn
|
||||
status = bigchain.TX_VALID
|
||||
|
||||
if input_tx is None:
|
||||
raise InputDoesNotExist("input `{}` doesn't exist"
|
||||
.format(input_txid))
|
||||
|
||||
if status != bigchain.TX_VALID:
|
||||
raise TransactionNotInValidBlock(
|
||||
'input `{}` does not exist in a valid block'.format(
|
||||
input_txid))
|
||||
|
||||
spent = bigchain.get_spent(input_txid, input_.fulfills.output,
|
||||
current_transactions)
|
||||
if spent:
|
||||
raise DoubleSpend('input `{}` was already spent'
|
||||
.format(input_txid))
|
||||
|
||||
output = input_tx.outputs[input_.fulfills.output]
|
||||
input_conditions.append(output)
|
||||
input_txs.append(input_tx)
|
||||
|
||||
# Validate that all inputs are distinct
|
||||
links = [i.fulfills.to_uri() for i in self.inputs]
|
||||
if len(links) != len(set(links)):
|
||||
raise DoubleSpend('tx "{}" spends the same output more than once'.format(self.id))
|
||||
|
||||
# validate asset id
|
||||
asset_id = Transaction.get_asset_id(input_txs)
|
||||
if asset_id != self.asset['id']:
|
||||
raise AssetIdMismatch(('The asset id of the input does not'
|
||||
' match the asset id of the'
|
||||
' transaction'))
|
||||
|
||||
input_amount = sum([input_condition.amount for input_condition in input_conditions])
|
||||
output_amount = sum([output_condition.amount for output_condition in self.outputs])
|
||||
|
||||
if output_amount != input_amount:
|
||||
raise AmountError(('The amount used in the inputs `{}`'
|
||||
' needs to be same as the amount used'
|
||||
' in the outputs `{}`')
|
||||
.format(input_amount, output_amount))
|
||||
|
||||
if not self.inputs_valid(input_conditions):
|
||||
raise InvalidSignature('Transaction signature is invalid.')
|
||||
self.validate_transfer_inputs(bigchain, current_transactions)
|
||||
|
||||
return self
|
||||
|
||||
@ -99,11 +49,11 @@ class Transaction(Transaction):
|
||||
|
||||
@classmethod
|
||||
def validate_schema(cls, tx_body):
|
||||
cls.validate_id(tx_body)
|
||||
validate_transaction_schema(tx_body)
|
||||
validate_txn_obj('asset', tx_body['asset'], 'data', validate_key)
|
||||
validate_txn_obj('metadata', tx_body, 'metadata', validate_key)
|
||||
validate_language_key(tx_body['asset'], 'data')
|
||||
validate_txn_obj(cls.ASSET, tx_body[cls.ASSET], cls.DATA, validate_key)
|
||||
validate_txn_obj(cls.METADATA, tx_body, cls.METADATA, validate_key)
|
||||
validate_language_key(tx_body[cls.ASSET], cls.DATA)
|
||||
validate_language_key(tx_body, cls.METADATA)
|
||||
|
||||
|
||||
class FastTransaction:
|
||||
|
124
bigchaindb/parallel_validation.py
Normal file
124
bigchaindb/parallel_validation.py
Normal file
@ -0,0 +1,124 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
import multiprocessing as mp
|
||||
from collections import defaultdict
|
||||
|
||||
from bigchaindb import App, BigchainDB
|
||||
from bigchaindb.tendermint_utils import decode_transaction
|
||||
from abci import CodeTypeOk
|
||||
|
||||
|
||||
class ParallelValidationApp(App):
|
||||
def __init__(self, bigchaindb=None, events_queue=None, abci=None):
|
||||
super().__init__(bigchaindb, events_queue, abci=abci)
|
||||
self.parallel_validator = ParallelValidator()
|
||||
self.parallel_validator.start()
|
||||
|
||||
def check_tx(self, raw_transaction):
|
||||
return self.abci.ResponseCheckTx(code=CodeTypeOk)
|
||||
|
||||
def deliver_tx(self, raw_transaction):
|
||||
self.parallel_validator.validate(raw_transaction)
|
||||
return self.abci.ResponseDeliverTx(code=CodeTypeOk)
|
||||
|
||||
def end_block(self, request_end_block):
|
||||
result = self.parallel_validator.result(timeout=30)
|
||||
for transaction in result:
|
||||
if transaction:
|
||||
self.block_txn_ids.append(transaction.id)
|
||||
self.block_transactions.append(transaction)
|
||||
|
||||
return super().end_block(request_end_block)
|
||||
|
||||
|
||||
RESET = 'reset'
|
||||
EXIT = 'exit'
|
||||
|
||||
|
||||
class ParallelValidator:
|
||||
def __init__(self, number_of_workers=mp.cpu_count()):
|
||||
self.number_of_workers = number_of_workers
|
||||
self.transaction_index = 0
|
||||
self.routing_queues = [mp.Queue() for _ in range(self.number_of_workers)]
|
||||
self.workers = []
|
||||
self.results_queue = mp.Queue()
|
||||
|
||||
def start(self):
|
||||
for routing_queue in self.routing_queues:
|
||||
worker = ValidationWorker(routing_queue, self.results_queue)
|
||||
process = mp.Process(target=worker.run)
|
||||
process.start()
|
||||
self.workers.append(process)
|
||||
|
||||
def stop(self):
|
||||
for routing_queue in self.routing_queues:
|
||||
routing_queue.put(EXIT)
|
||||
|
||||
def validate(self, raw_transaction):
|
||||
dict_transaction = decode_transaction(raw_transaction)
|
||||
index = int(dict_transaction['id'], 16) % self.number_of_workers
|
||||
self.routing_queues[index].put((self.transaction_index, dict_transaction))
|
||||
self.transaction_index += 1
|
||||
|
||||
def result(self, timeout=None):
|
||||
result_buffer = [None] * self.transaction_index
|
||||
for _ in range(self.transaction_index):
|
||||
index, transaction = self.results_queue.get(timeout=timeout)
|
||||
result_buffer[index] = transaction
|
||||
self.transaction_index = 0
|
||||
for routing_queue in self.routing_queues:
|
||||
routing_queue.put(RESET)
|
||||
return result_buffer
|
||||
|
||||
|
||||
class ValidationWorker:
|
||||
"""Run validation logic in a loop. This Worker is suitable for a Process
|
||||
life: no thrills, just a queue to get some values, and a queue to return results.
|
||||
|
||||
Note that a worker is expected to validate multiple transactions in
|
||||
multiple rounds, and it needs to keep in memory all transactions already
|
||||
validated, until a new round starts. To trigger a new round of validation,
|
||||
a ValidationWorker expects a `RESET` message. To exit the infinite loop the
|
||||
worker is in, it expects an `EXIT` message.
|
||||
"""
|
||||
|
||||
def __init__(self, in_queue, results_queue):
|
||||
self.in_queue = in_queue
|
||||
self.results_queue = results_queue
|
||||
self.bigchaindb = BigchainDB()
|
||||
self.reset()
|
||||
|
||||
def reset(self):
|
||||
# We need a place to store already validated transactions,
|
||||
# in case of dependant transactions in the same block.
|
||||
# `validated_transactions` maps an `asset_id` with the list
|
||||
# of all other transactions sharing the same asset.
|
||||
self.validated_transactions = defaultdict(list)
|
||||
|
||||
def validate(self, dict_transaction):
|
||||
try:
|
||||
asset_id = dict_transaction['asset']['id']
|
||||
except KeyError:
|
||||
asset_id = dict_transaction['id']
|
||||
|
||||
transaction = self.bigchaindb.is_valid_transaction(
|
||||
dict_transaction,
|
||||
self.validated_transactions[asset_id])
|
||||
|
||||
if transaction:
|
||||
self.validated_transactions[asset_id].append(transaction)
|
||||
return transaction
|
||||
|
||||
def run(self):
|
||||
while True:
|
||||
message = self.in_queue.get()
|
||||
if message == RESET:
|
||||
self.reset()
|
||||
elif message == EXIT:
|
||||
return
|
||||
else:
|
||||
index, transaction = message
|
||||
self.results_queue.put((index, self.validate(transaction)))
|
@ -1,11 +1,18 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
import logging
|
||||
import setproctitle
|
||||
|
||||
from abci import TmVersion, ABCI
|
||||
|
||||
import bigchaindb
|
||||
from bigchaindb.lib import BigchainDB
|
||||
from bigchaindb.core import App
|
||||
from bigchaindb.parallel_validation import ParallelValidationApp
|
||||
from bigchaindb.web import server, websocket_server
|
||||
from bigchaindb import event_stream
|
||||
from bigchaindb.events import Exchange, EventTypes
|
||||
from bigchaindb.utils import Process
|
||||
|
||||
@ -15,10 +22,8 @@ logger = logging.getLogger(__name__)
|
||||
BANNER = """
|
||||
****************************************************************************
|
||||
* *
|
||||
* ┏┓ ╻┏━╸┏━╸╻ ╻┏━┓╻┏┓╻╺┳┓┏┓ ┏━┓ ┏━┓ ╺┳┓┏━╸╻ ╻ *
|
||||
* ┣┻┓┃┃╺┓┃ ┣━┫┣━┫┃┃┗┫ ┃┃┣┻┓ ┏━┛ ┃┃┃ ┃┃┣╸ ┃┏┛ *
|
||||
* ┗━┛╹┗━┛┗━╸╹ ╹╹ ╹╹╹ ╹╺┻┛┗━┛ ┗━╸╹┗━┛╹╺┻┛┗━╸┗┛ *
|
||||
* codename "fluffy cat" *
|
||||
* BigchainDB 2.2.2 *
|
||||
* codename "jumping sloth" *
|
||||
* Initialization complete. BigchainDB Server is ready and waiting. *
|
||||
* *
|
||||
* You can send HTTP requests via the HTTP API documented in the *
|
||||
@ -31,7 +36,7 @@ BANNER = """
|
||||
"""
|
||||
|
||||
|
||||
def start():
|
||||
def start(args):
|
||||
# Exchange object for event stream api
|
||||
logger.info('Starting BigchainDB')
|
||||
exchange = Exchange()
|
||||
@ -43,7 +48,6 @@ def start():
|
||||
p_webapi = Process(name='bigchaindb_webapi', target=app_server.run, daemon=True)
|
||||
p_webapi.start()
|
||||
|
||||
# start message
|
||||
logger.info(BANNER.format(bigchaindb.config['server']['bind']))
|
||||
|
||||
# start websocket server
|
||||
@ -53,25 +57,32 @@ def start():
|
||||
args=(exchange.get_subscriber_queue(EventTypes.BLOCK_VALID),))
|
||||
p_websocket_server.start()
|
||||
|
||||
# connect to tendermint event stream
|
||||
p_websocket_client = Process(name='bigchaindb_ws_to_tendermint',
|
||||
target=event_stream.start,
|
||||
daemon=True,
|
||||
args=(exchange.get_publisher_queue(),))
|
||||
p_websocket_client.start()
|
||||
|
||||
p_exchange = Process(name='bigchaindb_exchange', target=exchange.run, daemon=True)
|
||||
p_exchange.start()
|
||||
|
||||
# We need to import this after spawning the web server
|
||||
# because import ABCIServer will monkeypatch all sockets
|
||||
# for gevent.
|
||||
from abci import ABCIServer
|
||||
from abci.server import ABCIServer
|
||||
|
||||
setproctitle.setproctitle('bigchaindb')
|
||||
|
||||
# Start the ABCIServer
|
||||
app = ABCIServer(app=App())
|
||||
abci = ABCI(TmVersion(bigchaindb.config['tendermint']['version']))
|
||||
if args.experimental_parallel_validation:
|
||||
app = ABCIServer(
|
||||
app=ParallelValidationApp(
|
||||
abci=abci.types,
|
||||
events_queue=exchange.get_publisher_queue(),
|
||||
)
|
||||
)
|
||||
else:
|
||||
app = ABCIServer(
|
||||
app=App(
|
||||
abci=abci.types,
|
||||
events_queue=exchange.get_publisher_queue(),
|
||||
)
|
||||
)
|
||||
app.run()
|
||||
|
||||
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import json
|
||||
|
@ -1,2 +1,7 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
|
||||
from bigchaindb.upsert_validator.validator_election import ValidatorElection # noqa
|
||||
|
@ -1,143 +1,68 @@
|
||||
from bigchaindb.common.exceptions import (InvalidSignature,
|
||||
MultipleInputsError,
|
||||
InvalidProposer,
|
||||
UnequalValidatorSet,
|
||||
InvalidPowerChange,
|
||||
DuplicateTransaction)
|
||||
from bigchaindb.tendermint_utils import key_from_base64
|
||||
from bigchaindb.common.crypto import (public_key_from_ed25519_key)
|
||||
from bigchaindb.common.transaction import Transaction
|
||||
from bigchaindb.common.schema import (_validate_schema,
|
||||
TX_SCHEMA_VALIDATOR_ELECTION,
|
||||
TX_SCHEMA_COMMON,
|
||||
TX_SCHEMA_CREATE)
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
from bigchaindb.common.exceptions import InvalidPowerChange
|
||||
from bigchaindb.elections.election import Election
|
||||
from bigchaindb.common.schema import TX_SCHEMA_VALIDATOR_ELECTION
|
||||
from .validator_utils import (new_validator_set, encode_validator, validate_asset_public_key)
|
||||
|
||||
|
||||
class ValidatorElection(Transaction):
|
||||
class ValidatorElection(Election):
|
||||
|
||||
VALIDATOR_ELECTION = 'VALIDATOR_ELECTION'
|
||||
OPERATION = 'VALIDATOR_ELECTION'
|
||||
# NOTE: this transaction class extends create so the operation inheritence is achieved
|
||||
# by renaming CREATE to VALIDATOR_ELECTION
|
||||
CREATE = VALIDATOR_ELECTION
|
||||
ALLOWED_OPERATIONS = (VALIDATOR_ELECTION,)
|
||||
|
||||
def __init__(self, operation, asset, inputs, outputs,
|
||||
metadata=None, version=None, hash_id=None):
|
||||
# operation `CREATE` is being passed as argument as `VALIDATOR_ELECTION` is an extension
|
||||
# of `CREATE` and any validation on `CREATE` in the parent class should apply to it
|
||||
super().__init__(operation, asset, inputs, outputs, metadata, version, hash_id)
|
||||
|
||||
@classmethod
|
||||
def current_validators(cls, bigchain):
|
||||
"""Return a dictionary of validators with key as `public_key` and
|
||||
value as the `voting_power`
|
||||
"""
|
||||
|
||||
validators = {}
|
||||
for validator in bigchain.get_validators():
|
||||
# NOTE: we assume that Tendermint encodes public key in base64
|
||||
public_key = public_key_from_ed25519_key(key_from_base64(validator['pub_key']['value']))
|
||||
validators[public_key] = validator['voting_power']
|
||||
|
||||
return validators
|
||||
|
||||
@classmethod
|
||||
def recipients(cls, bigchain):
|
||||
"""Convert validator dictionary to a recipient list for `Transaction`"""
|
||||
|
||||
recipients = []
|
||||
for public_key, voting_power in cls.current_validators(bigchain).items():
|
||||
recipients.append(([public_key], voting_power))
|
||||
|
||||
return recipients
|
||||
|
||||
@classmethod
|
||||
def is_same_topology(cls, current_topology, election_topology):
|
||||
voters = {}
|
||||
for voter in election_topology:
|
||||
if len(voter.public_keys) > 1:
|
||||
return False
|
||||
|
||||
[public_key] = voter.public_keys
|
||||
voting_power = voter.amount
|
||||
voters[public_key] = voting_power
|
||||
|
||||
# Check whether the voters and their votes is same to that of the
|
||||
# validators and their voting power in the network
|
||||
return (current_topology == voters)
|
||||
CREATE = OPERATION
|
||||
ALLOWED_OPERATIONS = (OPERATION,)
|
||||
TX_SCHEMA_CUSTOM = TX_SCHEMA_VALIDATOR_ELECTION
|
||||
|
||||
def validate(self, bigchain, current_transactions=[]):
|
||||
"""Validate election transaction
|
||||
For more details refer BEP-21: https://github.com/bigchaindb/BEPs/tree/master/21
|
||||
|
||||
NOTE:
|
||||
* A valid election is initiated by an existing validator.
|
||||
|
||||
* A valid election is one where voters are validators and votes are
|
||||
alloacted according to the voting power of each validator node.
|
||||
|
||||
Args:
|
||||
bigchain (BigchainDB): an instantiated bigchaindb.lib.BigchainDB object.
|
||||
|
||||
Returns:
|
||||
`True` if the election is valid
|
||||
|
||||
Raises:
|
||||
ValidationError: If the election is invalid
|
||||
"""For more details refer BEP-21: https://github.com/bigchaindb/BEPs/tree/master/21
|
||||
"""
|
||||
input_conditions = []
|
||||
|
||||
duplicates = any(txn for txn in current_transactions if txn.id == self.id)
|
||||
if bigchain.get_transaction(self.id) or duplicates:
|
||||
raise DuplicateTransaction('transaction `{}` already exists'
|
||||
.format(self.id))
|
||||
current_validators = self.get_validators(bigchain)
|
||||
|
||||
if not self.inputs_valid(input_conditions):
|
||||
raise InvalidSignature('Transaction signature is invalid.')
|
||||
|
||||
current_validators = self.current_validators(bigchain)
|
||||
|
||||
# NOTE: Proposer should be a single node
|
||||
if len(self.inputs) != 1 or len(self.inputs[0].owners_before) != 1:
|
||||
raise MultipleInputsError('`tx_signers` must be a list instance of length one')
|
||||
super(ValidatorElection, self).validate(bigchain, current_transactions=current_transactions)
|
||||
|
||||
# NOTE: change more than 1/3 of the current power is not allowed
|
||||
if self.asset['data']['power'] >= (1/3)*sum(current_validators.values()):
|
||||
raise InvalidPowerChange('`power` change must be less than 1/3 of total power')
|
||||
|
||||
# NOTE: Check if the proposer is a validator.
|
||||
[election_initiator_node_pub_key] = self.inputs[0].owners_before
|
||||
if election_initiator_node_pub_key not in current_validators.keys():
|
||||
raise InvalidProposer('Public key is not a part of the validator set')
|
||||
|
||||
# NOTE: Check if all validators have been assigned votes equal to their voting power
|
||||
if not self.is_same_topology(current_validators, self.outputs):
|
||||
raise UnequalValidatorSet('Validator set much be exactly same to the outputs of election')
|
||||
|
||||
return True
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def generate(cls, initiator, voters, election_data, metadata=None):
|
||||
(inputs, outputs) = cls.validate_create(initiator, voters, election_data, metadata)
|
||||
election = cls(cls.VALIDATOR_ELECTION, {'data': election_data}, inputs, outputs, metadata)
|
||||
cls.validate_schema(election.to_dict(), skip_id=True)
|
||||
return election
|
||||
def validate_schema(cls, tx):
|
||||
super(ValidatorElection, cls).validate_schema(tx)
|
||||
validate_asset_public_key(tx['asset']['data']['public_key'])
|
||||
|
||||
@classmethod
def validate_schema(cls, tx, skip_id=False):
    """Validate the validator election transaction. Since `VALIDATOR_ELECTION` extends `CREATE`
    transaction, all the validations for `CREATE` transaction should be inherited
    """
    if not skip_id:
        cls.validate_id(tx)
    # An election must satisfy the common, CREATE and election-specific schemas.
    for schema in (TX_SCHEMA_COMMON, TX_SCHEMA_CREATE, TX_SCHEMA_VALIDATOR_ELECTION):
        _validate_schema(schema, tx)
|
||||
def has_concluded(self, bigchain, *args, **kwargs):
    """Return whether this election has concluded.

    Does not conclude (returns False) when a validator-set change is
    already scheduled for the upcoming block; otherwise defers to the
    superclass' conclusion logic.
    """
    latest_block = bigchain.get_latest_block()
    if latest_block is not None:
        latest_block_height = latest_block['height']
        latest_validator_change = bigchain.get_validator_change()['height']

        # TODO change to `latest_block_height + 3` when upgrading to Tendermint 0.24.0.
        if latest_validator_change == latest_block_height + 2:
            # do not conclude the election if there is a change assigned already
            return False

    return super().has_concluded(bigchain, *args, **kwargs)

@classmethod
def create(cls, tx_signers, recipients, metadata=None, asset=None):
    """Disabled: plain CREATE is not supported on election transactions."""
    raise NotImplementedError

@classmethod
def transfer(cls, tx_signers, recipients, metadata=None, asset=None):
    """Disabled: plain TRANSFER is not supported on election transactions."""
    raise NotImplementedError
|
||||
|
||||
def on_approval(self, bigchain, new_height):
    """Apply the approved election by storing the updated validator set.

    Merges this election's proposed update into the current validator
    set, drops validators whose voting power is 0, stores the result for
    the next block, and returns the protobuf-encoded update for
    Tendermint.
    """
    update = self.asset['data']
    merged_set = new_validator_set(bigchain.get_validators(new_height),
                                   [update])

    # A validator with zero voting power is removed from the set.
    active_validators = [v for v in merged_set if v['voting_power'] > 0]

    # TODO change to `new_height + 2` when upgrading to Tendermint 0.24.0.
    bigchain.store_validator_set(new_height + 1, active_validators)
    return encode_validator(update)
|
||||
|
||||
def on_rollback(self, bigchaindb, new_height):
    """Undo the validator-set change scheduled for the block after
    ``new_height`` (the counterpart of ``on_approval``'s store).
    """
    # TODO change to `new_height + 2` when upgrading to Tendermint 0.24.0.
    bigchaindb.delete_validator_set(new_height + 1)
|
||||
|
85
bigchaindb/upsert_validator/validator_utils.py
Normal file
85
bigchaindb/upsert_validator/validator_utils.py
Normal file
@ -0,0 +1,85 @@
|
||||
import base64
|
||||
import binascii
|
||||
import codecs
|
||||
|
||||
import bigchaindb
|
||||
from abci import types_v0_22_8, types_v0_31_5, TmVersion
|
||||
from bigchaindb.common.exceptions import InvalidPublicKey, BigchainDBError
|
||||
|
||||
|
||||
def encode_validator(v):
    """Encode a validator dict as the protobuf message Tendermint expects.

    ``v`` carries a hex-encoded ``public_key.value`` and a ``power``.
    The message classes differ between supported Tendermint versions, so
    the configured version selects the right (update, pubkey) pair.

    Raises:
        BigchainDBError: if the configured Tendermint version is unknown.
    """
    ed25519_public_key = v['public_key']['value']
    # NOTE: tendermint expects public to be encoded in go-amino format
    try:
        tm_version = TmVersion(bigchaindb.config['tendermint']['version'])
    except ValueError:
        raise BigchainDBError('Invalid tendermint version, '
                              'check BigchainDB configuration file')

    validator_update_cls, pubkey_cls = {
        TmVersion.v0_22_8: (types_v0_22_8.Validator, types_v0_22_8.PubKey),
        TmVersion.v0_31_5: (types_v0_31_5.ValidatorUpdate, types_v0_31_5.PubKey),
    }[tm_version]

    amino_key = pubkey_cls(type='ed25519', data=bytes.fromhex(ed25519_public_key))
    return validator_update_cls(pub_key=amino_key, power=v['power'])
|
||||
|
||||
|
||||
def decode_validator(v):
    """Convert a Tendermint validator message into a plain dict.

    The raw public key bytes are re-encoded as base64 text (without the
    trailing newline the codec appends).
    """
    b64_key = codecs.encode(v.pub_key.data, 'base64').decode().rstrip('\n')
    return {
        'public_key': {'type': 'ed25519-base64', 'value': b64_key},
        'voting_power': v.power,
    }
|
||||
|
||||
|
||||
def new_validator_set(validators, updates):
    """Merge ``updates`` into ``validators`` and return the new set.

    Entries are keyed by the base64 public key value; an update with the
    same key replaces the existing validator, otherwise it is appended.
    """
    merged = {v['public_key']['value']: v for v in validators}

    for update in updates:
        decode = get_public_key_decoder(update['public_key'])
        key_b64 = base64.b64encode(decode(update['public_key']['value'])).decode('utf-8')
        merged[key_b64] = {'public_key': {'type': 'ed25519-base64',
                                          'value': key_b64},
                           'voting_power': update['power']}

    return list(merged.values())
|
||||
|
||||
|
||||
def encode_pk_to_base16(validator):
    """Re-encode the validator's public key value as base16 in place.

    Mutates ``validator['public_key']['value']`` and returns the same
    ``validator`` dict.
    """
    pk = validator['public_key']
    raw_key = get_public_key_decoder(pk)(pk['value'])
    pk['value'] = base64.b16encode(raw_key).decode('utf-8')
    return validator
|
||||
|
||||
|
||||
def validate_asset_public_key(pk):
    """Validate the public key carried in an election asset.

    Raises:
        InvalidPublicKey: if the value cannot be decoded with the
            declared encoding or does not decode to 32 bytes.
    """
    decoder = get_public_key_decoder(pk)
    try:
        pk_decoded = decoder(pk['value'].encode('utf-8'))
    except binascii.Error:
        raise InvalidPublicKey('Invalid `type` specified for public key `value`')

    # An Ed25519 public key is exactly 32 bytes long.
    if len(pk_decoded) != 32:
        raise InvalidPublicKey('Public key should be of size 32 bytes')
|
||||
|
||||
|
||||
def get_public_key_decoder(pk):
    """Return the base-N decoder matching the key's declared ``type``.

    Args:
        pk (dict): public key dict whose ``type`` is one of
            ``ed25519-base16``, ``ed25519-base32`` or ``ed25519-base64``.

    Returns:
        The matching ``base64`` module decode function.

    Raises:
        InvalidPublicKey: for any other ``type``.
    """
    # Explicit mapping replaces the old if/elif chain, whose initial
    # `decoder = base64.b64decode` default was dead code (the else branch
    # raised before it could be used).
    decoders = {
        'ed25519-base16': base64.b16decode,
        'ed25519-base32': base64.b32decode,
        'ed25519-base64': base64.b64decode,
    }
    decoder = decoders.get(pk['type'])
    if decoder is None:
        raise InvalidPublicKey('Invalid `type` specified for public key `value`')
    return decoder
|
@ -1,9 +1,19 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
import contextlib
|
||||
import threading
|
||||
import queue
|
||||
import multiprocessing as mp
|
||||
import json
|
||||
|
||||
import setproctitle
|
||||
from packaging import version
|
||||
from bigchaindb.version import __tm_supported_versions__
|
||||
from bigchaindb.tendermint_utils import key_from_base64
|
||||
from bigchaindb.common.crypto import key_pair_from_ed25519_key
|
||||
|
||||
|
||||
class ProcessGroup(object):
|
||||
@ -31,7 +41,8 @@ class ProcessGroup(object):
|
||||
class Process(mp.Process):
|
||||
"""Wrapper around multiprocessing.Process that uses
|
||||
setproctitle to set the name of the process when running
|
||||
the target task."""
|
||||
the target task.
|
||||
"""
|
||||
|
||||
def run(self):
|
||||
setproctitle.setproctitle(self.name)
|
||||
@ -167,3 +178,32 @@ class Lazy:
|
||||
|
||||
self.stack = []
|
||||
return last
|
||||
|
||||
|
||||
def load_node_key(path):
    """Load Tendermint's key pair from its ``priv_validator`` JSON file.

    The base64 private key stored at ``path`` is converted to hex and
    turned into a key pair.
    """
    with open(path) as json_data:
        node_key_b64 = json.load(json_data)['priv_key']['value']
        return key_pair_from_ed25519_key(key_from_base64(node_key_b64))
|
||||
|
||||
|
||||
def tendermint_version_is_compatible(running_tm_ver):
    """
    Check Tendermint compatability with BigchainDB server

    :param running_tm_ver: Version number of the connected Tendermint instance
    :type running_tm_ver: str
    :return: True/False depending on the compatability with BigchainDB server
    :rtype: bool
    """

    # Splitting because version can look like this e.g. 0.22.8-40d6dc2e
    parts = running_tm_ver.split('-')
    if not parts:
        return False
    running = version.parse(parts[0])
    return any(version.parse(supported) == running
               for supported in __tm_supported_versions__)
|
||||
|
25
bigchaindb/validation.py
Normal file
25
bigchaindb/validation.py
Normal file
@ -0,0 +1,25 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
|
||||
class BaseValidationRules():
    """Base validation rules for BigchainDB.

    A validation plugin must expose a class inheriting from this one via
    an entry_point.

    All methods listed below must be implemented.
    """

    @staticmethod
    def validate_transaction(bigchaindb, transaction):
        """Delegate to the transaction's own ``validate`` — see
        :meth:`bigchaindb.models.Transaction.validate` for documentation.
        """
        return transaction.validate(bigchaindb)

    @staticmethod
    def validate_block(bigchaindb, block):
        """Delegate to the block's own ``validate`` — see
        :meth:`bigchaindb.models.Block.validate` for documentation.
        """
        return block.validate(bigchaindb)
|
@ -1,2 +1,10 @@
|
||||
__version__ = '2.0.0b5'
|
||||
__short_version__ = '2.0b5'
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
__version__ = '2.2.2'
|
||||
__short_version__ = '2.2'
|
||||
|
||||
# Supported Tendermint versions
|
||||
__tm_supported_versions__ = ["0.31.5", "0.22.8"]
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""API routes definition"""
|
||||
from flask_restful import Api
|
||||
from bigchaindb.web.views import (
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""This module contains basic functions to instantiate the BigchainDB API.
|
||||
|
||||
The application is implemented in Flask and runs using Gunicorn.
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""This module provides the blueprint for some basic API endpoints.
|
||||
|
||||
For more information please refer to the documentation: http://bigchaindb.com/http-api
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""Common classes and methods for API handlers
|
||||
"""
|
||||
import logging
|
||||
@ -32,7 +37,10 @@ def base_ws_uri():
|
||||
customized (typically when running behind NAT, firewall, etc.)
|
||||
"""
|
||||
|
||||
scheme = config['wsserver']['advertised_scheme']
|
||||
host = config['wsserver']['advertised_host']
|
||||
port = config['wsserver']['advertised_port']
|
||||
config_wsserver = config['wsserver']
|
||||
|
||||
scheme = config_wsserver['advertised_scheme']
|
||||
host = config_wsserver['advertised_host']
|
||||
port = config_wsserver['advertised_port']
|
||||
|
||||
return '{}://{}:{}'.format(scheme, host, port)
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""This module provides the blueprint for the blocks API endpoints.
|
||||
|
||||
For more information please refer to the documentation: http://bigchaindb.com/http-api
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""API Index endpoint"""
|
||||
|
||||
import flask
|
||||
@ -43,6 +48,7 @@ def get_api_v1_info(api_prefix):
|
||||
return {
|
||||
'docs': ''.join(docs_url),
|
||||
'transactions': '{}transactions/'.format(api_prefix),
|
||||
'blocks': '{}blocks/'.format(api_prefix),
|
||||
'assets': '{}assets/'.format(api_prefix),
|
||||
'outputs': '{}outputs/'.format(api_prefix),
|
||||
'streams': websocket_root,
|
||||
|
@ -1,3 +1,8 @@
|
||||
# Copyright © 2020 Interplanetary Database Association e.V.,
|
||||
# BigchainDB and IPDB software contributors.
|
||||
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
|
||||
# Code is Apache-2.0 and docs are CC-BY-4.0
|
||||
|
||||
"""This module provides the blueprint for some basic API endpoints.
|
||||
|
||||
For more information please refer to the documentation: http://bigchaindb.com/http-api
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user