diff --git a/.ci/travis-after-success.sh b/.ci/travis-after-success.sh
deleted file mode 100755
index af77412..0000000
--- a/.ci/travis-after-success.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
-
-set -e -x
-
-if [[ -z ${TOXENV} ]] && [[ ${PLANETMINT_CI_ABCI} != 'enable' ]] && [[ ${PLANETMINT_ACCEPTANCE_TEST} != 'enable' ]]; then
- codecov -v -f htmlcov/coverage.xml
-fi
diff --git a/.ci/travis-before-install.sh b/.ci/travis-before-install.sh
deleted file mode 100755
index 4c53a86..0000000
--- a/.ci/travis-before-install.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
-if [[ -n ${TOXENV} ]]; then
- sudo apt-get update
- sudo apt-get install zsh
-fi
-
-if [[ -z ${TOXENV} ]]; then
- sudo apt-get update
- sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
-
- sudo rm /usr/local/bin/docker-compose
- curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
- chmod +x docker-compose
- sudo mv docker-compose /usr/local/bin
-fi
diff --git a/.ci/travis-before-script.sh b/.ci/travis-before-script.sh
deleted file mode 100755
index bb55c38..0000000
--- a/.ci/travis-before-script.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
-
-set -e -x
-
-if [[ -z ${TOXENV} ]]; then
-
- if [[ ${PLANETMINT_CI_ABCI} == 'enable' ]]; then
- docker-compose up -d planetmint
- else
- docker-compose up -d bdb
- fi
-
-fi
diff --git a/.ci/travis-install.sh b/.ci/travis-install.sh
deleted file mode 100755
index 083f9bb..0000000
--- a/.ci/travis-install.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
-
-set -e -x
-
-pip install --upgrade pip
-
-if [[ -n ${TOXENV} ]]; then
- pip install --upgrade tox
-elif [[ ${PLANETMINT_CI_ABCI} == 'enable' ]]; then
- docker-compose build --no-cache --build-arg abci_status=enable planetmint
-else
- docker-compose build --no-cache planetmint
- pip install --upgrade codecov
-fi
diff --git a/.ci/travis_script.sh b/.ci/travis_script.sh
deleted file mode 100755
index 68398d6..0000000
--- a/.ci/travis_script.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
-
-set -e -x
-
-if [[ -n ${TOXENV} ]]; then
- tox -e ${TOXENV}
-elif [[ ${PLANETMINT_CI_ABCI} == 'enable' ]]; then
- docker-compose exec planetmint pytest -v -m abci
-elif [[ ${PLANETMINT_ACCEPTANCE_TEST} == 'enable' ]]; then
- ./scripts/run-acceptance-test.sh
-elif [[ ${PLANETMINT_INTEGRATION_TEST} == 'enable' ]]; then
- docker-compose down # TODO: remove after ci optimization
- ./scripts/run-integration-test.sh
-else
- docker-compose exec planetmint pytest -v --cov=planetmint --cov-report xml:htmlcov/coverage.xml
-fi
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
deleted file mode 100644
index 9c4dc71..0000000
--- a/.github/CONTRIBUTING.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-# How to Contribute to the Planetmint Project
-
-There are many ways you can contribute to the Planetmint project, some very easy and others more involved.
-
-All of that is documented elsewhere: go to the "[Contributing to Planetmint" docs on ReadTheDocs](https://docs.planetmint.com/projects/contributing/en/latest/index.html).
-
-Note: GitHub automatically links to this file (`.github/CONTRIBUTING.md`) when a contributor creates a new issue or pull request, so you shouldn't delete it. Just use it to point people to full and proper help elsewhere.
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index 17f8529..0000000
--- a/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Do you want to:
-
-- make a bug report? Then read below about what should go in a bug report.
-- make a feature request or proposal? Then read [the page about how to make a feature request or proposal](https://docs.planetmint.com/projects/contributing/en/latest/ways-can-contribute/make-a-feature-request-or-proposal.html).
-- ask a question about Planetmint? Then [go to Gitter](https://gitter.im/planetmint/planetmint) (our chat room) and ask it there.
-- share your neat idea or realization? Then [go to Gitter](https://gitter.im/planetmint/planetmint) (our chat room) and share it there.
-
-# What Should Go in a Bug Report
-
-- What computer are you on (hardware)?
-- What operating system are you using, including version. e.g. Ubuntu 14.04? Fedora 23?
-- What version of Planetmint software were you using? Is that the latest version?
-- What, exactly, did you do to get to the point where you got stuck? Describe all the steps so we can get there too. Show screenshots or copy-and-paste text to GitHub.
-- Show what actually happened.
-- Say what you tried to do to resolve the problem.
-- Provide details to convince us that it matters to you. Is it for a school project, a job, a contract with a deadline, a child who needs it for Christmas?
-
-We will do our best but please understand that we don't have time to help everyone, especially people who don't care to help us help them. "It doesn't work." is not going to get any reaction from us. We need _details_.
-
-Tip: Use Github code block formatting to make code render pretty in GitHub. To do that, put three backticks followed by a string to set the type of code (e.g. `Python`), then the code, and then end with three backticks. There's more information about [inserting code blocks](https://help.github.com/articles/creating-and-highlighting-code-blocks/) in the GitHub help pages.
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 5e04c9f..0000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Logs or terminal output**
-If applicable, add add textual content to help explain your problem.
-
-**Desktop (please complete the following information):**
- - Distribution: [e.g. Ubuntu 18.04]
- - Bigchaindb version:
- - Tendermint version:
- - Mongodb version:
-- Python full version: [e.g. Python 3.9.3]
-
-**Additional context**
-Add any other context about the problem here.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 09c4f60..0000000
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,18 +0,0 @@
-Make sure the title of this pull request has the form:
-
-**Problem: A short statement of the problem.**
-
-## Solution
-
-A short statement about how this PR solves the **Problem**.
-
-## Issues Resolved
-
-What issues does this PR resolve, if any? Please include lines like the following (i.e. "Resolves #NNNN), so that when this PR gets merged, GitHub will automatically close those issues.
-
-Resolves #NNNN
-Resolves #MMMM
-
-## BEPs Implemented
-
-What [BEPs](https://github.com/planetmint/beps) does this pull request implement, if any?
diff --git a/.github/workflows/acceptance-test.yml b/.github/workflows/acceptance-test.yml
new file mode 100644
index 0000000..51df9e6
--- /dev/null
+++ b/.github/workflows/acceptance-test.yml
@@ -0,0 +1,22 @@
+# Copyright © 2020 Interplanetary Database Association e.V.,
+# Planetmint and IPDB software contributors.
+# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+# Code is Apache-2.0 and docs are CC-BY-4.0
+
+name: Acceptance tests
+on: [push, pull_request]
+
+jobs:
+  test:
+    if: ${{ false }}  # disabled: circular dependency on the python driver (see CHANGELOG 1.2.0)
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v3
+
+      - name: Start container
+        run: docker-compose up -d planetmint
+
+      - name: Run test
+        run: docker-compose -f docker-compose.yml run --rm python-acceptance pytest /src
\ No newline at end of file
diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml
new file mode 100644
index 0000000..78a1622
--- /dev/null
+++ b/.github/workflows/audit.yml
@@ -0,0 +1,36 @@
+# Copyright © 2020 Interplanetary Database Association e.V.,
+# Planetmint and IPDB software contributors.
+# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+# Code is Apache-2.0 and docs are CC-BY-4.0
+
+name: Audit
+on:
+ schedule:
+ - cron: '0 2 * * *'
+
+jobs:
+ audit:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ - name: Setup python
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.9
+
+ - name: Install pip-audit
+ run: pip install --upgrade pip pip-audit
+
+ - name: Install dependencies
+ run: pip install .
+
+ - name: Create requirements.txt
+ run: pip freeze > requirements.txt
+
+ - name: Audit dependencies
+ run: pip-audit
+
+
\ No newline at end of file
diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml
new file mode 100644
index 0000000..4cda540
--- /dev/null
+++ b/.github/workflows/documentation.yml
@@ -0,0 +1,35 @@
+# Copyright © 2020 Interplanetary Database Association e.V.,
+# Planetmint and IPDB software contributors.
+# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+# Code is Apache-2.0 and docs are CC-BY-4.0
+
+name: Documentation
+on: [push, pull_request]
+
+jobs:
+ documentation:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out repository code
+ uses: actions/checkout@v3
+
+ - name: Setup python
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.9
+
+ - name: Install tox
+ run: python -m pip install --upgrade tox tox-gh-actions
+
+ - name: Install dependencies
+ run: pip install .'[dev]'
+
+ - name: Run tox
+ run: tox -e docsroot
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml
new file mode 100644
index 0000000..0798bd9
--- /dev/null
+++ b/.github/workflows/integration-test.yml
@@ -0,0 +1,19 @@
+# Copyright © 2020 Interplanetary Database Association e.V.,
+# Planetmint and IPDB software contributors.
+# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+# Code is Apache-2.0 and docs are CC-BY-4.0
+
+name: Integration tests
+on: [push, pull_request]
+
+jobs:
+  test:
+    if: ${{ false }}  # disabled: circular dependency on the python driver (see CHANGELOG 1.2.0)
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v3
+
+      - name: Start test run
+        run: docker-compose -f docker-compose.integration.yml up test
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 0000000..43eaa30
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,17 @@
+# Copyright © 2020 Interplanetary Database Association e.V.,
+# Planetmint and IPDB software contributors.
+# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+# Code is Apache-2.0 and docs are CC-BY-4.0
+
+name: Lint
+on: [push, pull_request]
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: psf/black@stable
+ with:
+ options: "--check -l 119"
+ src: "."
diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml
new file mode 100644
index 0000000..52adb0d
--- /dev/null
+++ b/.github/workflows/unit-test.yml
@@ -0,0 +1,109 @@
+# Copyright © 2020 Interplanetary Database Association e.V.,
+# Planetmint and IPDB software contributors.
+# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+# Code is Apache-2.0 and docs are CC-BY-4.0
+
+name: Unit tests
+on: [push, pull_request]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ include:
+ - abci_enabled: "ABCI enabled"
+ abci: "enabled"
+ - abci_disabled: "ABCI disabled"
+ abci: "disabled"
+
+ steps:
+ - name: Check out repository code
+ uses: actions/checkout@v3
+
+      - name: Build container
+        run: |
+          if [[ "${{ matrix.abci }}" == "enabled" ]]; then
+            docker-compose -f docker-compose.yml build --no-cache --build-arg abci_status=enable planetmint
+          fi
+          if [[ "${{ matrix.abci }}" == "disabled" ]]; then
+            docker-compose -f docker-compose.yml build --no-cache planetmint
+          fi
+
+ - name: Save image
+ run: docker save -o planetmint.tar planetmint_planetmint
+
+ - name: Upload image
+ uses: actions/upload-artifact@v3
+ with:
+ name: planetmint-abci-${{matrix.abci}}
+ path: planetmint.tar
+ retention-days: 5
+
+
+ test-with-abci:
+ runs-on: ubuntu-latest
+ needs: build
+ strategy:
+ matrix:
+ include:
+ - db: "MongoDB with ABCI"
+ host: "mongodb"
+ port: 27017
+ abci: "enabled"
+ - db: "Tarantool with ABCI"
+ host: "tarantool"
+ port: 3303
+ abci: "enabled"
+
+ steps:
+ - name: Check out repository code
+ uses: actions/checkout@v3
+
+ - name: Download planetmint
+ uses: actions/download-artifact@v3
+ with:
+ name: planetmint-abci-enabled
+
+ - name: Load planetmint
+ run: docker load -i planetmint.tar
+
+ - name: Start containers
+ run: docker-compose -f docker-compose.yml up -d planetmint
+
+ - name: Run tests
+ run: docker exec planetmint_planetmint_1 pytest -v -m abci
+
+ test-without-abci:
+ runs-on: ubuntu-latest
+ needs: build
+ strategy:
+ matrix:
+ include:
+ - db: "MongoDB without ABCI"
+ host: "mongodb"
+ port: 27017
+ - db: "Tarantool without ABCI"
+ host: "tarantool"
+ port: 3303
+
+ steps:
+ - name: Check out repository code
+ uses: actions/checkout@v3
+
+ - name: Download planetmint
+ uses: actions/download-artifact@v3
+ with:
+ name: planetmint-abci-disabled
+
+ - name: Load planetmint
+ run: docker load -i planetmint.tar
+
+ - name: Start containers
+ run: docker-compose -f docker-compose.yml up -d bdb
+
+ - name: Run tests
+ run: docker exec planetmint_planetmint_1 pytest -v --cov=planetmint --cov-report xml:htmlcov/coverage.xml
+
+ - name: Upload Coverage to Codecov
+ uses: codecov/codecov-action@v3
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 4de0ef9..16cd16f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,7 @@
# Byte-compiled / optimized / DLL files
__pycache__/
+planetmint_environment/
+.idea/
*.py[cod]
*$py.class
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 542a916..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright © 2020, 2021 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
-sudo: required
-
-dist: focal
-
-services:
- - docker
-
-language: python
-cache: pip
-
-python:
- - 3.9
-
-env:
- global:
- - DOCKER_COMPOSE_VERSION=1.29.2
- matrix:
- - TOXENV=flake8
- - TOXENV=docsroot
-
-matrix:
- fast_finish: true
- include:
- - python: 3.9
- env:
- - PLANETMINT_DATABASE_BACKEND=localmongodb
- - PLANETMINT_DATABASE_SSL=
- - python: 3.9
- env:
- - PLANETMINT_DATABASE_BACKEND=localmongodb
- - PLANETMINT_DATABASE_SSL=
- - PLANETMINT_CI_ABCI=enable
- - python: 3.9
- env:
- - PLANETMINT_ACCEPTANCE_TEST=enable
- - python: 3.9
- env:
- - PLANETMINT_INTEGRATION_TEST=enable
-
-
-before_install: sudo .ci/travis-before-install.sh
-
-install: .ci/travis-install.sh
-
-before_script: .ci/travis-before-script.sh
-
-script: .ci/travis_script.sh
-
-after_success: .ci/travis-after-success.sh
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 47709a5..18ff60d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,59 @@ For reference, the possible headings are:
* **Known Issues**
* **Notes**
+## [Unreleased]
+* **Changed** replaced transaction module with planetmint-transactions package
+* **Changed** moved transaction network validation to Planetmint class
+* **Changed** adjusted test cases
+
+## [1.2.1] - 2022-09-20
+* **Changed** Create model now validates for CID strings for asset["data"] and metadata
+* **Changed** adjusted test cases
+
+## [1.2.0] - 2022-09-05
+* **Changed** disabled acceptance and integration tests, they have a circular dep. to the python driver
+* **Changed** Metadata and asset["data"] types to string containing an IPLD hash
+* **Fixed** Transaction generation bug that automatically assigned 'assets' to asset["data"]
+* **Changed** adjusted test cases
+
+
+## [1.1.0] - 2022-09-05
+* **Changed** adjusted to zenroom calling convention of PRP #13 (breaking change)
+* **Changed** zenroom test cases to comply to the new calling convention
+* **Fixed** zenroom signing bug (call of wrong function)
+* **Changed** using cryptoconditions 0.10.0
+* **Deprecated** usage of ripemd160 as an address generation algorithm, it isn't available from python 3.9.14 on, skipping these tests from now on.
+* **Changed** script/output tag to be of type array or object for schema v3.0 and v2.0
+* **Changed** added 'script' handling to the common/transactions.py class
+* **Fixed** data input handling to the transaction fulfillment methods
+
+
+
+## [1.0.1] - 2022-07-07
+updated documentation
+
+## [1.0.0] - 2022-07-05
+### Feature Update
+Tarantool integration
+
+## [0.9.8] - 2022-06-27
+
+### Feature Update
+Changed license to AGPLv3
+
+
+## [0.9.7] - 2022-06-17
+
+### Feature Update
+Deep Zenroom integration
+
+## [0.9.6] - 2022-06-08
+
+### Maintenance
+
+* removed Korean documentation
+* removed Korean and Chinese README
+
## [2.2.2] - 2020-08-12
### Security
@@ -1157,6 +1210,6 @@ The first public release of Planetmint, including:
- Initial documentation (in `planetmint/docs`).
- Initial `README.md`, `ROADMAP.md`, `CODE_OF_CONDUCT.md`, and `CONTRIBUTING.md`.
- Packaging for PyPI, including `setup.py` and `setup.cfg`.
-- Initial `Dockerfile` and `docker-compose.yml` (for deployment using Docker and Docker Compose).
+- Initial `Dockerfile` and `docker-compose.yml` (for deployment using Docker and Docker Compose).
- Initial `.gitignore` (list of things for git to ignore).
- Initial `.travis.yml` (used by Travis CI).
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 5667f9d..729131e 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -42,7 +42,7 @@ This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior directed at yourself or another community member may be
-reported by contacting a project maintainer at [contact@planetmint.com](mailto:contact@planetmint.com). All
+reported by contacting a project maintainer at [mail@planetmint.io](mailto:mail@planetmint.io). All
complaints will be reviewed and investigated and will result in a response that
is appropriate to the circumstances. Maintainers are
obligated to maintain confidentiality with regard to the reporter of an
diff --git a/Dockerfile b/Dockerfile
index e7daeea..3c2de9b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -7,6 +7,7 @@ RUN apt-get -qq update \
&& apt-get -y upgrade \
&& apt-get install -y jq vim zsh build-essential cmake\
&& pip install . \
+ && pip install pynacl==1.4.0 base58==2.1.1 pyasn1==0.4.8 zenroom==2.1.0.dev1655293214 cryptography==3.4.7\
&& apt-get autoremove \
&& apt-get clean
diff --git a/Dockerfile-all-in-one b/Dockerfile-all-in-one
index 8dd5aec..44d6fb8 100644
--- a/Dockerfile-all-in-one
+++ b/Dockerfile-all-in-one
@@ -16,12 +16,11 @@ RUN apt-get update \
&& pip install -e . \
&& apt-get autoremove
-# Install mongodb and monit
+# Install tarantool and monit
RUN apt-get install -y dirmngr gnupg apt-transport-https software-properties-common ca-certificates curl
-RUN wget -qO - https://www.mongodb.org/static/pgp/server-5.0.asc | apt-key add -
-RUN echo "deb http://repo.mongodb.org/apt/debian buster/mongodb-org/5.0 main" | tee /etc/apt/sources.list.d/mongodb-org-5.0.list
RUN apt-get update
-RUN apt-get install -y mongodb-org monit
+RUN curl -L https://tarantool.io/wrATeGF/release/2/installer.sh | bash
+RUN apt-get install -y tarantool monit
# Install Tendermint
RUN wget https://github.com/tendermint/tendermint/releases/download/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.tar.gz \
@@ -31,13 +30,10 @@ RUN wget https://github.com/tendermint/tendermint/releases/download/v${TM_VERSIO
ENV TMHOME=/tendermint
-# Set permissions required for mongodb
-RUN mkdir -p /data/db /data/configdb \
- && chown -R mongodb:mongodb /data/db /data/configdb
-
# Planetmint enviroment variables
-ENV PLANETMINT_DATABASE_PORT 27017
-ENV PLANETMINT_DATABASE_BACKEND localmongodb
+ENV PLANETMINT_DATABASE_PORT 3303
+ENV PLANETMINT_DATABASE_BACKEND tarantool_db
+ENV PLANETMINT_DATABASE_HOST localhost
ENV PLANETMINT_SERVER_BIND 0.0.0.0:9984
ENV PLANETMINT_WSSERVER_HOST 0.0.0.0
ENV PLANETMINT_WSSERVER_SCHEME ws
@@ -50,4 +46,7 @@ VOLUME /data/db /data/configdb /tendermint
EXPOSE 27017 28017 9984 9985 26656 26657 26658
+RUN pip install pynacl==1.4.0 base58==2.1.1 pyasn1==0.4.8 zenroom==2.1.0.dev1655293214 cryptography==3.4.7
+
+
WORKDIR $HOME
\ No newline at end of file
diff --git a/Dockerfile-dev b/Dockerfile-dev
index bfeada4..4148179 100644
--- a/Dockerfile-dev
+++ b/Dockerfile-dev
@@ -1,13 +1,14 @@
ARG python_version=3.9
-FROM python:${python_version}
+FROM python:${python_version}-slim
LABEL maintainer "contact@ipdb.global"
RUN apt-get update \
- && apt-get install -y git zsh-common vim build-essential cmake\
+ && apt-get install -y git zsh curl\
+ && apt-get install -y tarantool-common\
+ && apt-get install -y vim build-essential cmake\
&& pip install -U pip \
&& apt-get autoremove \
&& apt-get clean
-
ARG backend
ARG abci_status
@@ -15,7 +16,7 @@ ARG abci_status
# to force stdin, stdout and stderr to be totally unbuffered and to capture logs/outputs
ENV PYTHONUNBUFFERED 0
-ENV PLANETMINT_DATABASE_PORT 27017
+ENV PLANETMINT_DATABASE_PORT 3303
ENV PLANETMINT_DATABASE_BACKEND $backend
ENV PLANETMINT_SERVER_BIND 0.0.0.0:9984
ENV PLANETMINT_WSSERVER_HOST 0.0.0.0
@@ -32,4 +33,6 @@ RUN mkdir -p /usr/src/app
COPY . /usr/src/app/
WORKDIR /usr/src/app
RUN pip install -e .[dev]
+RUN pip install flask-cors
+RUN pip install pynacl==1.4.0 base58==2.1.1 pyasn1==0.4.8 zenroom==2.1.0.dev1655293214 cryptography==3.4.7
RUN planetmint -y configure
diff --git a/LICENSE b/LICENSE
index 261eeb9..0ad25db 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,201 +1,661 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
- 1. Definitions.
+ Preamble
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
+ The precise terms and conditions for copying, distribution and
+modification follow.
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
+ TERMS AND CONDITIONS
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
+ 0. Definitions.
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
+ "This License" refers to version 3 of the GNU Affero General Public License.
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
+ 1. Source Code.
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
- END OF TERMS AND CONDITIONS
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
- APPENDIX: How to apply the Apache License to your work.
+ The Corresponding Source for a work in source code form is that
+same work.
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
+ 2. Basic Permissions.
- Copyright [yyyy] [name of copyright owner]
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
- http://www.apache.org/licenses/LICENSE-2.0
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published
+ by the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/Makefile b/Makefile
index 9ef104e..bf09bbb 100644
--- a/Makefile
+++ b/Makefile
@@ -47,6 +47,7 @@ HELP := python -c "$$PRINT_HELP_PYSCRIPT"
ECHO := /usr/bin/env echo
IS_DOCKER_COMPOSE_INSTALLED := $(shell command -v docker-compose 2> /dev/null)
+IS_BLACK_INSTALLED := $(shell command -v black 2> /dev/null)
################
# Main targets #
@@ -56,10 +57,10 @@ help: ## Show this help
@$(HELP) < $(MAKEFILE_LIST)
run: check-deps ## Run Planetmint from source (stop it with ctrl+c)
- # although planetmint has tendermint and mongodb in depends_on,
+ # although planetmint has tendermint and tarantool in depends_on,
# launch them first otherwise tendermint will get stuck upon sending yet another log
# due to some docker-compose issue; does not happen when containers are run as daemons
- @$(DC) up --no-deps mongodb tendermint planetmint
+ @$(DC) up --no-deps tarantool tendermint planetmint
start: check-deps ## Run Planetmint from source and daemonize it (stop with `make stop`)
@$(DC) up -d planetmint
@@ -70,8 +71,11 @@ stop: check-deps ## Stop Planetmint
logs: check-deps ## Attach to the logs
@$(DC) logs -f planetmint
-lint: check-deps ## Lint the project
- @$(DC) up lint
+lint: check-py-deps ## Lint the project
+ black --check -l 119 .
+
+format: check-py-deps ## Format the project
+ black -l 119 .
test: check-deps test-unit test-acceptance ## Run unit and acceptance tests
@@ -132,3 +136,12 @@ ifndef IS_DOCKER_COMPOSE_INSTALLED
@$(ECHO)
@$(DC) # docker-compose is not installed, so we call it to generate an error and exit
endif
+
+check-py-deps:
+ifndef IS_BLACK_INSTALLED
+ @$(ECHO) "Error: black is not installed"
+ @$(ECHO)
+ @$(ECHO) "You need to activate your virtual environment and install the test dependencies"
+ black # black is not installed, so we call it to generate an error and exit
+endif
+
diff --git a/PYTHON_STYLE_GUIDE.md b/PYTHON_STYLE_GUIDE.md
index 65ffaf3..dff51f6 100644
--- a/PYTHON_STYLE_GUIDE.md
+++ b/PYTHON_STYLE_GUIDE.md
@@ -82,11 +82,11 @@ x = 'name: {}; score: {}'.format(name, n)
we use the `format()` version. The [official Python documentation says](https://docs.python.org/2/library/stdtypes.html#str.format), "This method of string formatting is the new standard in Python 3, and should be preferred to the % formatting described in String Formatting Operations in new code."
-## Running the Flake8 Style Checker
+## Running the Black Style Checker
-We use [Flake8](http://flake8.pycqa.org/en/latest/index.html) to check our Python code style. Once you have it installed, you can run it using:
+We use [Black](https://black.readthedocs.io/en/stable/) to check our Python code style. Once you have it installed, you can run it using:
```text
-flake8 --max-line-length 119 planetmint/
+black --check -l 119 .
```
diff --git a/README.md b/README.md
index 0ec040f..2e3f3d6 100644
--- a/README.md
+++ b/README.md
@@ -18,13 +18,11 @@ so show the latest GitHub release instead.
# Planetmint Server
-Planetmint is the blockchain database. This repository is for _BigchainDB Server_.
+Planetmint is the blockchain database. This repository is for _Planetmint Server_.
## The Basics
-* [Try the Quickstart](https://docs.planetmint.com/projects/server/en/latest/quickstart.html)
-* [Read the Planetmint 2.0 whitepaper](https://www.planetmint.com/whitepaper/)
-* [Check out the _Hitchiker's Guide to BigchainDB_](https://www.planetmint.com/developers/guide/)
+* [Try the Quickstart](https://docs.planetmint.io/en/latest/introduction/index.html#quickstart)
## Run and Test Planetmint Server from the `master` Branch
@@ -55,15 +53,11 @@ To view all commands available, run `make`.
## Links for Everyone
-* [Planetmint.com](https://www.planetmint.com/) - the main Planetmint website, including newsletter signup
-* [Roadmap](https://github.com/planetmint/org/blob/master/ROADMAP.md)
-* [Blog](https://medium.com/the-planetmint-blog)
-* [Twitter](https://twitter.com/Planetmint)
+* [Planetmint.io](https://www.planetmint.io/) - the main Planetmint website, including newsletter signup
## Links for Developers
-* [All Planetmint Documentation](https://docs.planetmint.com/en/latest/)
-* [Planetmint Server Documentation](https://docs.planetmint.com/projects/server/en/latest/index.html)
+* [All Planetmint Documentation](https://docs.planetmint.io/en/latest/)
* [CONTRIBUTING.md](.github/CONTRIBUTING.md) - how to contribute
* [Community guidelines](CODE_OF_CONDUCT.md)
* [Open issues](https://github.com/planetmint/planetmint/issues)
@@ -73,5 +67,3 @@ To view all commands available, run `make`.
## Legal
* [Licenses](LICENSES.md) - open source & open content
-* [Imprint](https://www.planetmint.com/imprint/)
-* [Contact Us](https://www.planetmint.com/contact/)
diff --git a/README_cn.md b/README_cn.md
deleted file mode 100644
index 8c1cb8c..0000000
--- a/README_cn.md
+++ /dev/null
@@ -1,77 +0,0 @@
-
-
-
-
-[](https://codecov.io/github/planetmint/planetmint?branch=master)
-[](https://github.com/planetmint/planetmint/releases)
-[](https://pypi.org/project/Planetmint/)
-[](https://travis-ci.com/planetmint/planetmint)
-[](https://docs.planetmint.com/projects/server/en/latest/)
-[](https://gitter.im/planetmint/planetmint?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
-# Planetmint 服务器
-
-Planetmint 是区块链数据库. 这是 _BigchainDB 服务器_ 的仓库.
-
-## 基础知识
-
-* [尝试快速开始](https://docs.planetmint.com/projects/server/en/latest/quickstart.html)
-* [阅读 Planetmint 2.0 白皮书](https://www.planetmint.com/whitepaper/)
-* [查阅漫游指南](https://www.planetmint.com/developers/guide/)
-
-## 运行和测试 `master` 分支的 Planetmint 服务器
-
-运行和测试最新版本的 Planetmint 服务器非常简单. 确认你有安装最新版本的 [Docker Compose](https://docs.docker.com/compose/install/). 当你准备好了, 打开一个终端并运行:
-
-```text
-git clone https://github.com/planetmint/planetmint.git
-cd planetmint
-make run
-```
-
-Planetmint 应该可以通过 `http://localhost:9984/` 访问.
-
-这里也有一些其他的命令你可以运行:
-
-* `make start`: 通过源码和守护进程的方式运行 Planetmint (通过 `make stop` 停止).
-* `make stop`: 停止运行 Planetmint.
-* `make logs`: 附在日志上.
-* `make test`: 运行所有单元和验收测试.
-* `make test-unit-watch`: 运行所有测试并等待. 每次更改代码时都会再次运行测试.
-* `make cov`: 检查代码覆盖率并在浏览器中打开结果.
-* `make doc`: 生成 HTML 文档并在浏览器中打开它.
-* `make clean`: 删除所有构建, 测试, 覆盖和 Python 生成物.
-* `make reset`: 停止并移除所有容器. 警告: 您将丢失存储在 Planetmint 中的所有数据.
-
-查看所有可用命令, 请运行 `make`.
-
-## 一般人员链接
-
-* [Planetmint.com](https://www.planetmint.com/) - Planetmint 主网站, 包括新闻订阅
-* [路线图](https://github.com/planetmint/org/blob/master/ROADMAP.md)
-* [博客](https://medium.com/the-planetmint-blog)
-* [推特](https://twitter.com/Planetmint)
-
-## 开发人员链接
-
-* [所有的 Planetmint 文档](https://docs.planetmint.com/en/latest/)
-* [Planetmint 服务器 文档](https://docs.planetmint.com/projects/server/en/latest/index.html)
-* [CONTRIBUTING.md](.github/CONTRIBUTING.md) - how to contribute
-* [社区指南](CODE_OF_CONDUCT.md)
-* [公开问题](https://github.com/planetmint/planetmint/issues)
-* [公开的 pull request](https://github.com/planetmint/planetmint/pulls)
-* [Gitter 聊天室](https://gitter.im/planetmint/planetmint)
-
-## 法律声明
-
-* [许可](LICENSES.md) - 开源代码 & 开源内容
-* [印记](https://www.planetmint.com/imprint/)
-* [联系我们](https://www.planetmint.com/contact/)
diff --git a/README_kor.md b/README_kor.md
deleted file mode 100644
index 2982e51..0000000
--- a/README_kor.md
+++ /dev/null
@@ -1,65 +0,0 @@
-[](https://codecov.io/github/planetmint/planetmint?branch=master)
-[](https://github.com/planetmint/planetmint/releases)
-[](https://pypi.org/project/Planetmint/)
-[](https://travis-ci.org/planetmint/planetmint)
-[](https://docs.planetmint.com/projects/server/en/latest/)
-[](https://gitter.im/planetmint/planetmint?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
-# Planetmint 서버
-
-BigchaingDB는 블록체인 데이터베이스입니다. 이 저장소는 _BigchaingDB 서버_를 위한 저장소입니다.
-
-### 기본 사항
-
-* [빠른 시작 사용해보기](https://docs.planetmint.com/projects/server/en/latest/quickstart.html)
-* [Planetmint 2.0 백서 읽기](https://www.planetmint.com/whitepaper/)
-* [BigchainDB에 대한 _Hitchiker's Guide_를 확인십시오.](https://www.planetmint.com/developers/guide/)
-
-### `master` Branch에서 Planetmint 서버 실행 및 테스트
-
-BigchaingDB 서버의 최신 버전을 실행하고 테스트하는 것은 어렵지 않습니다. [Docker Compose](https://docs.docker.com/compose/install/)의 최신 버전이 설치되어 있는지 확인하십시오. 준비가 되었다면, 터미널에서 다음을 실행하십시오.
-
-```text
-git clone https://github.com/planetmint/planetmint.git
-cd planetmint
-make run
-```
-
-이제 BigchainDB는 `http://localhost:9984/`에 연결되어야 합니다.
-
-또한, 실행시키기 위한 다른 명령어들도 있습니다.
-
-* `make start` : 소스로부터 BigchainDB를 실행하고 데몬화합니다. \(이는 `make stop` 을 하면 중지합니다.\)
-* `make stop` : BigchainDB를 중지합니다.
-* `make logs` : 로그에 첨부합니다.
-* `make text` : 모든 유닛과 허가 테스트를 실행합니다.
-* `make test-unit-watch` : 모든 테스트를 수행하고 기다립니다. 코드를 변경할 때마다 테스트는 다시 실행될 것입니다.
-* `make cov` : 코드 커버리지를 확인하고 브라우저에서 결과를 엽니다.
-* `make doc` : HTML 문서를 만들고, 브라우저에서 엽니다.
-* `make clean` : 모든 빌드와 테스트, 커버리지 및 파이썬 아티팩트를 제거합니다.
-* `make reset` : 모든 컨테이너들을 중지하고 제거합니다. 경고 : BigchainDB에 저장된 모든 데이터를 잃을 수 있습니다.
-
-사용 가능한 모든 명령어를 보기 위해서는 `make` 를 실행하십시오.
-
-### 모두를 위한 링크들
-
-* [Planetmint.com ](https://www.planetmint.com/)- 뉴스 레터 가입을 포함하는 Planetmint 주요 웹 사이트
-* [로드맵](https://github.com/planetmint/org/blob/master/ROADMAP.md)
-* [블로그](https://medium.com/the-planetmint-blog)
-* [트위터](https://twitter.com/Planetmint)
-
-### 개발자들을 위한 링크들
-
-* [모든 Planetmint 문서](https://docs.planetmint.com/en/latest/)
-* [Planetmint 서버 문서](https://docs.planetmint.com/projects/server/en/latest/index.html)
-* [CONTRIBUTING.md](https://github.com/planetmint/planetmint/blob/master/.github/CONTRIBUTING.md) - 기여를 하는 방법
-* [커뮤니티 가이드라인](https://github.com/planetmint/planetmint/blob/master/CODE_OF_CONDUCT.md)
-* [이슈 작성](https://github.com/planetmint/planetmint/issues)
-* [pull request 하기](https://github.com/planetmint/planetmint/pulls)
-* [Gitter 채팅방](https://gitter.im/planetmint/planetmint)
-
-### 합법
-
-* [라이선스](https://github.com/planetmint/planetmint/blob/master/LICENSES.md) - 오픈 소스 & 오픈 콘텐츠
-* [발행](https://www.planetmint.com/imprint/)
-* [연락처](https://www.planetmint.com/contact/)
diff --git a/acceptance/python/Dockerfile b/acceptance/python/Dockerfile
index ace8dce..8ddb3f4 100644
--- a/acceptance/python/Dockerfile
+++ b/acceptance/python/Dockerfile
@@ -1,21 +1,20 @@
FROM python:3.9
RUN apt-get update \
- && pip install -U pip \
- && apt-get autoremove \
- && apt-get clean
-RUN apt-get install -y vim zsh build-essential cmake
+ && pip install -U pip \
+ && apt-get autoremove \
+ && apt-get clean
+RUN apt-get install -y vim zsh build-essential cmake git
RUN mkdir -p /src
RUN /usr/local/bin/python -m pip install --upgrade pip
RUN pip install --upgrade meson ninja
-RUN pip install zenroom==2.0.0.dev1644927841
RUN pip install --upgrade \
pycco \
websocket-client~=0.47.0 \
pytest~=3.0 \
- # planetmint-cryptoconditions>=0.9.4\
- # planetmint-driver>=0.9.0 \
- git+https://github.com/planetmint/cryptoconditions.git@asset-migration \
- git+https://github.com/planetmint/planetmint-driver-python.git@asset-migration \
+ planetmint-cryptoconditions>=0.10.0\
+ planetmint-driver>=0.9.2 \
blns
+RUN pip install base58>=2.1.1 pynacl==1.4.0 zenroom==2.1.0.dev1655293214 pyasn1==0.4.8 cryptography==3.4.7
+RUN pip install planetmint-ipld>=0.0.3
diff --git a/acceptance/python/src/conftest.py b/acceptance/python/src/conftest.py
index 8583969..747e527 100644
--- a/acceptance/python/src/conftest.py
+++ b/acceptance/python/src/conftest.py
@@ -5,87 +5,82 @@
import pytest
-GENERATE_KEYPAIR = \
- """Rule input encoding base58
- Rule output encoding base58
- Scenario 'ecdh': Create the keypair
- Given that I am known as 'Pippo'
- When I create the ecdh key
- When I create the testnet key
- Then print data"""
+CONDITION_SCRIPT = """Scenario 'ecdh': create the signature of an object
+ Given I have the 'keyring'
+ Given that I have a 'string dictionary' named 'houses'
+ When I create the signature of 'houses'
+ Then print the 'signature'"""
-# secret key to public key
-SK_TO_PK = \
- """Rule input encoding base58
- Rule output encoding base58
- Scenario 'ecdh': Create the keypair
- Given that I am known as '{}'
- Given I have the 'keys'
- When I create the ecdh public key
- When I create the testnet address
- Then print my 'ecdh public key'
- Then print my 'testnet address'"""
-
-FULFILL_SCRIPT = \
- """Rule input encoding base58
- Rule output encoding base58
- Scenario 'ecdh': Bob verifies the signature from Alice
+FULFILL_SCRIPT = """Scenario 'ecdh': Bob verifies the signature from Alice
Given I have a 'ecdh public key' from 'Alice'
- Given that I have a 'string dictionary' named 'houses' inside 'asset'
- Given I have a 'signature' named 'data.signature' inside 'result'
- When I verify the 'houses' has a signature in 'data.signature' by 'Alice'
+ Given that I have a 'string dictionary' named 'houses'
+ Given I have a 'signature' named 'signature'
+ When I verify the 'houses' has a signature in 'signature' by 'Alice'
Then print the string 'ok'"""
-HOUSE_ASSETS = [
- {
- "data": {
- "houses": [
- {
- "name": "Harry",
- "team": "Gryffindor",
- },
- {
- "name": "Draco",
- "team": "Slytherin",
- }
- ],
- }
- }
-]
+SK_TO_PK = """Scenario 'ecdh': Create the keypair
+ Given that I am known as '{}'
+ Given I have the 'keyring'
+ When I create the ecdh public key
+ When I create the bitcoin address
+ Then print my 'ecdh public key'
+ Then print my 'bitcoin address'"""
-ZENROOM_DATA = {
- 'also': 'more data'
+GENERATE_KEYPAIR = """Scenario 'ecdh': Create the keypair
+ Given that I am known as 'Pippo'
+ When I create the ecdh key
+ When I create the bitcoin key
+ Then print data"""
+
+INITIAL_STATE = {"also": "more data"}
+SCRIPT_INPUT = {
+ "houses": [
+ {
+ "name": "Harry",
+ "team": "Gryffindor",
+ },
+ {
+ "name": "Draco",
+ "team": "Slytherin",
+ },
+ ],
}
-CONDITION_SCRIPT = """Rule input encoding base58
- Rule output encoding base58
- Scenario 'ecdh': create the signature of an object
- Given I have the 'keys'
- Given that I have a 'string dictionary' named 'houses' inside 'asset'
- When I create the signature of 'houses'
- When I rename the 'signature' to 'data.signature'
- Then print the 'data.signature'"""
+metadata = {"units": 300, "type": "KG"}
+
+ZENROOM_DATA = {"that": "is my data"}
+
@pytest.fixture
def gen_key_zencode():
return GENERATE_KEYPAIR
+
@pytest.fixture
def secret_key_to_private_key_zencode():
return SK_TO_PK
+
@pytest.fixture
def fulfill_script_zencode():
return FULFILL_SCRIPT
+
@pytest.fixture
def condition_script_zencode():
return CONDITION_SCRIPT
+
@pytest.fixture
def zenroom_house_assets():
- return HOUSE_ASSETS
+ return SCRIPT_INPUT
+
+
+@pytest.fixture
+def zenroom_script_input():
+ return SCRIPT_INPUT
+
@pytest.fixture
def zenroom_data():
- return ZENROOM_DATA
\ No newline at end of file
+ return ZENROOM_DATA
diff --git a/acceptance/python/src/test_analyse_tx.py b/acceptance/python/src/test_analyse_tx.py
new file mode 100644
index 0000000..ea5d8fc
--- /dev/null
+++ b/acceptance/python/src/test_analyse_tx.py
@@ -0,0 +1,174 @@
+# Copyright © 2020 Interplanetary Database Association e.V.,
+# Planetmint and IPDB software contributors.
+# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+# Code is Apache-2.0 and docs are CC-BY-4.0
+
+# # Basic Acceptance Test
+# Here we check that the primitives of the system behave as expected.
+# As you will see, this script tests basic stuff like:
+#
+# - create a transaction
+# - check if the transaction is stored
+# - check for the outputs of a given public key
+# - transfer the transaction to another key
+#
+# We run a series of checks for each steps, that is retrieving the transaction from
+# the remote system, and also checking the `outputs` of a given public key.
+
+# ## Imports
+# We need some utils from the `os` package, we will interact with
+# env variables.
+import os
+
+# For this test case we import and use the Python Driver.
+from planetmint_driver import Planetmint
+from planetmint_driver.crypto import generate_keypair
+from ipld import multihash, marshal
+
+
+def test_get_tests():
+ # ## Set up a connection to Planetmint
+    # To use Planetmint we need a connection. Here we create one. By default we
+ # connect to localhost, but you can override this value using the env variable
+ # called `PLANETMINT_ENDPOINT`, a valid value must include the schema:
+ # `https://example.com:9984`
+ bdb = Planetmint(os.environ.get("PLANETMINT_ENDPOINT"))
+
+ # ## Create keypairs
+ # This test requires the interaction between two actors with their own keypair.
+ # The two keypairs will be called—drum roll—Alice and Bob.
+ alice, bob = generate_keypair(), generate_keypair()
+
+ # ## Alice registers her bike in Planetmint
+ # Alice has a nice bike, and here she creates the "digital twin"
+ # of her bike.
+ bike = {"data": multihash(marshal({"bicycle": {"serial_number": 420420}}))}
+
+ # She prepares a `CREATE` transaction...
+ prepared_creation_tx = bdb.transactions.prepare(operation="CREATE", signers=alice.public_key, asset=bike)
+
+ # ... and she fulfills it with her private key.
+ fulfilled_creation_tx = bdb.transactions.fulfill(prepared_creation_tx, private_keys=alice.private_key)
+
+ # We will use the `id` of this transaction several time, so we store it in
+ # a variable with a short and easy name
+ bike_id = fulfilled_creation_tx["id"]
+
+ # Now she is ready to send it to the Planetmint Network.
+ sent_transfer_tx = bdb.transactions.send_commit(fulfilled_creation_tx)
+
+ # And just to be 100% sure, she also checks if she can retrieve
+ # it from the Planetmint node.
+ assert bdb.transactions.retrieve(bike_id), "Cannot find transaction {}".format(bike_id)
+
+ # Alice is now the proud owner of one unspent asset.
+ assert len(bdb.outputs.get(alice.public_key, spent=False)) == 1
+ assert bdb.outputs.get(alice.public_key)[0]["transaction_id"] == bike_id
+
+ # ## Alice transfers her bike to Bob
+ # After registering her bike, Alice is ready to transfer it to Bob.
+ # She needs to create a new `TRANSFER` transaction.
+
+ # A `TRANSFER` transaction contains a pointer to the original asset. The original asset
+ # is identified by the `id` of the `CREATE` transaction that defined it.
+ transfer_asset = {"id": bike_id}
+
+ # Alice wants to spend the one and only output available, the one with index `0`.
+ output_index = 0
+ output = fulfilled_creation_tx["outputs"][output_index]
+
+ # Here, she defines the `input` of the `TRANSFER` transaction. The `input` contains
+ # several keys:
+ #
+ # - `fulfillment`, taken from the previous `CREATE` transaction.
+ # - `fulfills`, that specifies which condition she is fulfilling.
+ # - `owners_before`.
+ transfer_input = {
+ "fulfillment": output["condition"]["details"],
+ "fulfills": {"output_index": output_index, "transaction_id": fulfilled_creation_tx["id"]},
+ "owners_before": output["public_keys"],
+ }
+
+ # Now that all the elements are set, she creates the actual transaction...
+ prepared_transfer_tx = bdb.transactions.prepare(
+ operation="TRANSFER", asset=transfer_asset, inputs=transfer_input, recipients=bob.public_key
+ )
+
+ # ... and signs it with her private key.
+ fulfilled_transfer_tx = bdb.transactions.fulfill(prepared_transfer_tx, private_keys=alice.private_key)
+
+ # She finally sends the transaction to a Planetmint node.
+ sent_transfer_tx = bdb.transactions.send_commit(fulfilled_transfer_tx)
+
+ # And just to be 100% sure, she also checks if she can retrieve
+ # it from the Planetmint node.
+ assert bdb.transactions.retrieve(fulfilled_transfer_tx["id"]) == sent_transfer_tx
+
+ # Now Alice has zero unspent transactions.
+ assert len(bdb.outputs.get(alice.public_key, spent=False)) == 0
+
+    # While Bob has one.
+ assert len(bdb.outputs.get(bob.public_key, spent=False)) == 1
+
+ # Bob double checks what he got was the actual bike.
+ bob_tx_id = bdb.outputs.get(bob.public_key, spent=False)[0]["transaction_id"]
+ assert bdb.transactions.retrieve(bob_tx_id) == sent_transfer_tx
+
+ transfer_asset = {"id": bike_id}
+
+ # Alice wants to spend the one and only output available, the one with index `0`.
+ output_index = 0
+ output = fulfilled_transfer_tx["outputs"][output_index]
+
+ # Here, she defines the `input` of the `TRANSFER` transaction. The `input` contains
+ # several keys:
+ #
+ # - `fulfillment`, taken from the previous `CREATE` transaction.
+ # - `fulfills`, that specifies which condition she is fulfilling.
+ # - `owners_before`.
+ transfer_input = {
+ "fulfillment": output["condition"]["details"],
+ "fulfills": {"output_index": output_index, "transaction_id": fulfilled_transfer_tx["id"]},
+ "owners_before": output["public_keys"],
+ }
+
+ # Now that all the elements are set, she creates the actual transaction...
+ prepared_transfer_tx = bdb.transactions.prepare(
+ operation="TRANSFER", asset=transfer_asset, inputs=transfer_input, recipients=bob.public_key
+ )
+
+ # ... and signs it with her private key.
+ fulfilled_transfer_tx = bdb.transactions.fulfill(prepared_transfer_tx, private_keys=bob.private_key)
+
+ # She finally sends the transaction to a Planetmint node.
+ sent_transfer_tx = bdb.transactions.send_commit(fulfilled_transfer_tx)
+
+ assert bdb.transactions.retrieve(fulfilled_transfer_tx["id"]) == sent_transfer_tx
+
+ # from urllib3 import request
+ import urllib3
+ import json
+
+ http = urllib3.PoolManager()
+
+ # verify that 3 transactions contain the asset_id
+ asset_id = bike_id
+ url = "http://planetmint:9984/api/v1/transactions?asset_id=" + asset_id
+ r = http.request("GET", url)
+ tmp_json = http.request("GET", url)
+ tmp_json = json.loads(tmp_json.data.decode("utf-8"))
+ assert len(tmp_json) == 3
+
+ # verify that one transaction is the create TX
+ url = "http://planetmint:9984/api/v1/transactions?asset_id=" + asset_id + "&operation=CREATE"
+ r = http.request("GET", url)
+ tmp_json = http.request("GET", url)
+ tmp_json = json.loads(tmp_json.data.decode("utf-8"))
+ assert len(tmp_json) == 1
+
+    # verify that 2 transactions are of type transfer
+ url = "http://planetmint:9984/api/v1/transactions?asset_id=" + asset_id + "&operation=transfer"
+ r = http.request("GET", url)
+ tmp_json = http.request("GET", url)
+ tmp_json = json.loads(tmp_json.data.decode("utf-8"))
+ assert len(tmp_json) == 2
diff --git a/acceptance/python/src/test_basic.py b/acceptance/python/src/test_basic.py
index ddc8cba..10357af 100644
--- a/acceptance/python/src/test_basic.py
+++ b/acceptance/python/src/test_basic.py
@@ -14,9 +14,6 @@
#
# We run a series of checks for each steps, that is retrieving the transaction from
# the remote system, and also checking the `outputs` of a given public key.
-#
-# This acceptance test is a rip-off of our
-# [tutorial](https://docs.planetmint.com/projects/py-driver/en/latest/usage.html).
# ## Imports
# We need some utils from the `os` package, we will interact with
@@ -26,6 +23,7 @@ import os
# For this test case we import and use the Python Driver.
from planetmint_driver import Planetmint
from planetmint_driver.crypto import generate_keypair
+from ipld import multihash, marshal
def test_basic():
@@ -34,7 +32,7 @@ def test_basic():
# connect to localhost, but you can override this value using the env variable
# called `PLANETMINT_ENDPOINT`, a valid value must include the schema:
# `https://example.com:9984`
- bdb = Planetmint(os.environ.get('PLANETMINT_ENDPOINT'))
+ bdb = Planetmint(os.environ.get("PLANETMINT_ENDPOINT"))
# ## Create keypairs
# This test requires the interaction between two actors with their own keypair.
@@ -44,33 +42,28 @@ def test_basic():
# ## Alice registers her bike in Planetmint
# Alice has a nice bike, and here she creates the "digital twin"
# of her bike.
- bike = [{'data': {'bicycle': {'serial_number': 420420}}}]
+ bike = [{"data": multihash(marshal({"bicycle": {"serial_number": 420420}}))}]
# She prepares a `CREATE` transaction...
- prepared_creation_tx = bdb.transactions.prepare(
- operation='CREATE',
- signers=alice.public_key,
- assets=bike)
+ prepared_creation_tx = bdb.transactions.prepare(operation="CREATE", signers=alice.public_key, assets=bike)
# ... and she fulfills it with her private key.
- fulfilled_creation_tx = bdb.transactions.fulfill(
- prepared_creation_tx,
- private_keys=alice.private_key)
+ fulfilled_creation_tx = bdb.transactions.fulfill(prepared_creation_tx, private_keys=alice.private_key)
# We will use the `id` of this transaction several time, so we store it in
# a variable with a short and easy name
- bike_id = fulfilled_creation_tx['id']
+ bike_id = fulfilled_creation_tx["id"]
# Now she is ready to send it to the Planetmint Network.
sent_transfer_tx = bdb.transactions.send_commit(fulfilled_creation_tx)
# And just to be 100% sure, she also checks if she can retrieve
# it from the Planetmint node.
- assert bdb.transactions.retrieve(bike_id), 'Cannot find transaction {}'.format(bike_id)
+ assert bdb.transactions.retrieve(bike_id), "Cannot find transaction {}".format(bike_id)
# Alice is now the proud owner of one unspent asset.
assert len(bdb.outputs.get(alice.public_key, spent=False)) == 1
- assert bdb.outputs.get(alice.public_key)[0]['transaction_id'] == bike_id
+ assert bdb.outputs.get(alice.public_key)[0]["transaction_id"] == bike_id
# ## Alice transfers her bike to Bob
# After registering her bike, Alice is ready to transfer it to Bob.
@@ -78,11 +71,11 @@ def test_basic():
# A `TRANSFER` transaction contains a pointer to the original asset. The original asset
# is identified by the `id` of the `CREATE` transaction that defined it.
- transfer_assets = [{'id': bike_id}]
+ transfer_assets = [{"id": bike_id}]
# Alice wants to spend the one and only output available, the one with index `0`.
output_index = 0
- output = fulfilled_creation_tx['outputs'][output_index]
+ output = fulfilled_creation_tx["outputs"][output_index]
# Here, she defines the `input` of the `TRANSFER` transaction. The `input` contains
# several keys:
@@ -90,29 +83,26 @@ def test_basic():
# - `fulfillment`, taken from the previous `CREATE` transaction.
# - `fulfills`, that specifies which condition she is fulfilling.
# - `owners_before`.
- transfer_input = {'fulfillment': output['condition']['details'],
- 'fulfills': {'output_index': output_index,
- 'transaction_id': fulfilled_creation_tx['id']},
- 'owners_before': output['public_keys']}
+ transfer_input = {
+ "fulfillment": output["condition"]["details"],
+ "fulfills": {"output_index": output_index, "transaction_id": fulfilled_creation_tx["id"]},
+ "owners_before": output["public_keys"],
+ }
# Now that all the elements are set, she creates the actual transaction...
prepared_transfer_tx = bdb.transactions.prepare(
- operation='TRANSFER',
- assets=transfer_assets,
- inputs=transfer_input,
- recipients=bob.public_key)
+ operation="TRANSFER", assets=transfer_assets, inputs=transfer_input, recipients=bob.public_key
+ )
# ... and signs it with her private key.
- fulfilled_transfer_tx = bdb.transactions.fulfill(
- prepared_transfer_tx,
- private_keys=alice.private_key)
+ fulfilled_transfer_tx = bdb.transactions.fulfill(prepared_transfer_tx, private_keys=alice.private_key)
# She finally sends the transaction to a Planetmint node.
sent_transfer_tx = bdb.transactions.send_commit(fulfilled_transfer_tx)
# And just to be 100% sure, she also checks if she can retrieve
# it from the Planetmint node.
- assert bdb.transactions.retrieve(fulfilled_transfer_tx['id']) == sent_transfer_tx
+ assert bdb.transactions.retrieve(fulfilled_transfer_tx["id"]) == sent_transfer_tx
# Now Alice has zero unspent transactions.
assert len(bdb.outputs.get(alice.public_key, spent=False)) == 0
@@ -121,5 +111,5 @@ def test_basic():
assert len(bdb.outputs.get(bob.public_key, spent=False)) == 1
# Bob double checks what he got was the actual bike.
- bob_tx_id = bdb.outputs.get(bob.public_key, spent=False)[0]['transaction_id']
+ bob_tx_id = bdb.outputs.get(bob.public_key, spent=False)[0]["transaction_id"]
assert bdb.transactions.retrieve(bob_tx_id) == sent_transfer_tx
diff --git a/acceptance/python/src/test_divisible_asset.py b/acceptance/python/src/test_divisible_asset.py
index 8799d88..2d034aa 100644
--- a/acceptance/python/src/test_divisible_asset.py
+++ b/acceptance/python/src/test_divisible_asset.py
@@ -15,9 +15,6 @@
# We run a series of checks for each step, that is retrieving
# the transaction from the remote system, and also checking the `amount`
# of a given transaction.
-#
-# This integration test is a rip-off of our
-# [tutorial](https://docs.planetmint.com/projects/py-driver/en/latest/usage.html).
# ## Imports
# We need some utils from the `os` package, we will interact with
@@ -31,13 +28,14 @@ from planetmint_driver.exceptions import BadRequest
# For this test case we import and use the Python Driver.
from planetmint_driver import Planetmint
from planetmint_driver.crypto import generate_keypair
+from ipld import multihash, marshal
def test_divisible_assets():
# ## Set up a connection to Planetmint
# Check [test_basic.py](./test_basic.html) to get some more details
# about the endpoint.
- bdb = Planetmint(os.environ.get('PLANETMINT_ENDPOINT'))
+ bdb = Planetmint(os.environ.get("PLANETMINT_ENDPOINT"))
# Oh look, it is Alice again and she brought her friend Bob along.
alice, bob = generate_keypair(), generate_keypair()
@@ -51,42 +49,36 @@ def test_divisible_assets():
# the bike for one hour.
bike_token = [{
- 'data': {
- 'token_for': {
- 'bike': {
- 'serial_number': 420420
+ "data": multihash(
+ marshal(
+ {
+ "token_for": {"bike": {"serial_number": 420420}},
+ "description": "Time share token. Each token equals one hour of riding.",
}
- },
- 'description': 'Time share token. Each token equals one hour of riding.',
- },
+ )
+ ),
}]
# She prepares a `CREATE` transaction and issues 10 tokens.
# Here, Alice defines in a tuple that she wants to assign
# these 10 tokens to Bob.
prepared_token_tx = bdb.transactions.prepare(
- operation='CREATE',
- signers=alice.public_key,
- recipients=[([bob.public_key], 10)],
- assets=bike_token)
+ operation="CREATE", signers=alice.public_key, recipients=[([bob.public_key], 10)], assets=bike_token
+ )
# She fulfills and sends the transaction.
- fulfilled_token_tx = bdb.transactions.fulfill(
- prepared_token_tx,
- private_keys=alice.private_key)
+ fulfilled_token_tx = bdb.transactions.fulfill(prepared_token_tx, private_keys=alice.private_key)
bdb.transactions.send_commit(fulfilled_token_tx)
# We store the `id` of the transaction to use it later on.
- bike_token_id = fulfilled_token_tx['id']
+ bike_token_id = fulfilled_token_tx["id"]
# Let's check if the transaction was successful.
- assert bdb.transactions.retrieve(bike_token_id), \
- 'Cannot find transaction {}'.format(bike_token_id)
+ assert bdb.transactions.retrieve(bike_token_id), "Cannot find transaction {}".format(bike_token_id)
# Bob owns 10 tokens now.
- assert bdb.transactions.retrieve(bike_token_id)['outputs'][0][
- 'amount'] == '10'
+ assert bdb.transactions.retrieve(bike_token_id)["outputs"][0]["amount"] == "10"
# ## Bob wants to use the bike
# Now that Bob got the tokens and the sun is shining, he wants to get out
@@ -94,49 +86,45 @@ def test_divisible_assets():
# To use the bike he has to send the tokens back to Alice.
# To learn about the details of transferring a transaction check out
# [test_basic.py](./test_basic.html)
- transfer_assets = [{'id': bike_token_id}]
+ transfer_assets = [{"id": bike_token_id}]
output_index = 0
- output = fulfilled_token_tx['outputs'][output_index]
- transfer_input = {'fulfillment': output['condition']['details'],
- 'fulfills': {'output_index': output_index,
- 'transaction_id': fulfilled_token_tx[
- 'id']},
- 'owners_before': output['public_keys']}
+ output = fulfilled_token_tx["outputs"][output_index]
+ transfer_input = {
+ "fulfillment": output["condition"]["details"],
+ "fulfills": {"output_index": output_index, "transaction_id": fulfilled_token_tx["id"]},
+ "owners_before": output["public_keys"],
+ }
# To use the tokens Bob has to reassign 7 tokens to himself and the
# amount he wants to use to Alice.
prepared_transfer_tx = bdb.transactions.prepare(
- operation='TRANSFER',
- assets=transfer_assets,
+ operation="TRANSFER",
+ asset=transfer_assets,
inputs=transfer_input,
- recipients=[([alice.public_key], 3), ([bob.public_key], 7)])
+ recipients=[([alice.public_key], 3), ([bob.public_key], 7)],
+ )
# He signs and sends the transaction.
- fulfilled_transfer_tx = bdb.transactions.fulfill(
- prepared_transfer_tx,
- private_keys=bob.private_key)
+ fulfilled_transfer_tx = bdb.transactions.fulfill(prepared_transfer_tx, private_keys=bob.private_key)
sent_transfer_tx = bdb.transactions.send_commit(fulfilled_transfer_tx)
# First, Bob checks if the transaction was successful.
- assert bdb.transactions.retrieve(
- fulfilled_transfer_tx['id']) == sent_transfer_tx
+ assert bdb.transactions.retrieve(fulfilled_transfer_tx["id"]) == sent_transfer_tx
# There are two outputs in the transaction now.
# The first output shows that Alice got back 3 tokens...
- assert bdb.transactions.retrieve(
- fulfilled_transfer_tx['id'])['outputs'][0]['amount'] == '3'
+ assert bdb.transactions.retrieve(fulfilled_transfer_tx["id"])["outputs"][0]["amount"] == "3"
# ... while Bob still has 7 left.
- assert bdb.transactions.retrieve(
- fulfilled_transfer_tx['id'])['outputs'][1]['amount'] == '7'
+ assert bdb.transactions.retrieve(fulfilled_transfer_tx["id"])["outputs"][1]["amount"] == "7"
# ## Bob wants to ride the bike again
# It's been a week and Bob wants to right the bike again.
# Now he wants to ride for 8 hours, that's a lot Bob!
# He prepares the transaction again.
- transfer_assets = [{'id': bike_token_id}]
+ transfer_asset = [{"id": bike_token_id}]
# This time we need an `output_index` of 1, since we have two outputs
# in the `fulfilled_transfer_tx` we created before. The first output with
# index 0 is for Alice and the second output is for Bob.
@@ -144,24 +132,21 @@ def test_divisible_assets():
# correct output with the correct amount of tokens.
output_index = 1
- output = fulfilled_transfer_tx['outputs'][output_index]
+ output = fulfilled_transfer_tx["outputs"][output_index]
- transfer_input = {'fulfillment': output['condition']['details'],
- 'fulfills': {'output_index': output_index,
- 'transaction_id': fulfilled_transfer_tx['id']},
- 'owners_before': output['public_keys']}
+ transfer_input = {
+ "fulfillment": output["condition"]["details"],
+ "fulfills": {"output_index": output_index, "transaction_id": fulfilled_transfer_tx["id"]},
+ "owners_before": output["public_keys"],
+ }
# This time Bob only provides Alice in the `recipients` because he wants
# to spend all his tokens
prepared_transfer_tx = bdb.transactions.prepare(
- operation='TRANSFER',
- assets=transfer_assets,
- inputs=transfer_input,
- recipients=[([alice.public_key], 8)])
+ operation="TRANSFER", assets=transfer_assets, inputs=transfer_input, recipients=[([alice.public_key], 8)]
+ )
- fulfilled_transfer_tx = bdb.transactions.fulfill(
- prepared_transfer_tx,
- private_keys=bob.private_key)
+ fulfilled_transfer_tx = bdb.transactions.fulfill(prepared_transfer_tx, private_keys=bob.private_key)
# Oh Bob, what have you done?! You tried to spend more tokens than you had.
# Remember Bob, last time you spent 3 tokens already,
@@ -172,10 +157,12 @@ def test_divisible_assets():
# Now Bob gets an error saying that the amount he wanted to spent is
# higher than the amount of tokens he has left.
assert error.value.args[0] == 400
- message = 'Invalid transaction (AmountError): The amount used in the ' \
- 'inputs `7` needs to be same as the amount used in the ' \
- 'outputs `8`'
- assert error.value.args[2]['message'] == message
+ message = (
+ "Invalid transaction (AmountError): The amount used in the "
+ "inputs `7` needs to be same as the amount used in the "
+ "outputs `8`"
+ )
+ assert error.value.args[2]["message"] == message
# We have to stop this test now, I am sorry, but Bob is pretty upset
# about his mistake. See you next time :)
diff --git a/acceptance/python/src/test_double_spend.py b/acceptance/python/src/test_double_spend.py
index bbc266c..aa744e3 100644
--- a/acceptance/python/src/test_double_spend.py
+++ b/acceptance/python/src/test_double_spend.py
@@ -14,35 +14,36 @@ import queue
import planetmint_driver.exceptions
from planetmint_driver import Planetmint
from planetmint_driver.crypto import generate_keypair
+from ipld import multihash, marshal
def test_double_create():
- bdb = Planetmint(os.environ.get('PLANETMINT_ENDPOINT'))
+ bdb = Planetmint(os.environ.get("PLANETMINT_ENDPOINT"))
alice = generate_keypair()
results = queue.Queue()
tx = bdb.transactions.fulfill(
- bdb.transactions.prepare(
- operation='CREATE',
- signers=alice.public_key,
- assets=[{'data': {'uuid': str(uuid4())}}]),
- private_keys=alice.private_key)
+ bdb.transactions.prepare(
+ operation="CREATE", signers=alice.public_key, assets=[{"data": multihash(marshal({"uuid": str(uuid4())}))}]
+ ),
+ private_keys=alice.private_key,
+ )
def send_and_queue(tx):
try:
bdb.transactions.send_commit(tx)
- results.put('OK')
+ results.put("OK")
except planetmint_driver.exceptions.TransportError as e:
- results.put('FAIL')
+ results.put("FAIL")
- t1 = Thread(target=send_and_queue, args=(tx, ))
- t2 = Thread(target=send_and_queue, args=(tx, ))
+ t1 = Thread(target=send_and_queue, args=(tx,))
+ t2 = Thread(target=send_and_queue, args=(tx,))
t1.start()
t2.start()
results = [results.get(timeout=2), results.get(timeout=2)]
- assert results.count('OK') == 1
- assert results.count('FAIL') == 1
+ assert results.count("OK") == 1
+ assert results.count("FAIL") == 1
diff --git a/acceptance/python/src/test_multiple_owners.py b/acceptance/python/src/test_multiple_owners.py
index 12793d5..afb9ed3 100644
--- a/acceptance/python/src/test_multiple_owners.py
+++ b/acceptance/python/src/test_multiple_owners.py
@@ -15,9 +15,7 @@
# We run a series of checks for each step, that is retrieving
# the transaction from the remote system, and also checking the public keys
# of a given transaction.
-#
-# This integration test is a rip-off of our
-# [tutorial](https://docs.planetmint.com/projects/py-driver/en/latest/usage.html).
+
# ## Imports
# We need some utils from the `os` package, we will interact with
@@ -27,13 +25,14 @@ import os
# For this test case we import and use the Python Driver.
from planetmint_driver import Planetmint
from planetmint_driver.crypto import generate_keypair
+from ipld import multihash, marshal
def test_multiple_owners():
# ## Set up a connection to Planetmint
# Check [test_basic.py](./test_basic.html) to get some more details
# about the endpoint.
- bdb = Planetmint(os.environ.get('PLANETMINT_ENDPOINT'))
+ bdb = Planetmint(os.environ.get("PLANETMINT_ENDPOINT"))
# Hey Alice and Bob, nice to see you again!
alice, bob = generate_keypair(), generate_keypair()
@@ -43,40 +42,28 @@ def test_multiple_owners():
# high rents anymore. Bob suggests to get a dish washer for the
# kitchen. Alice agrees and here they go, creating the asset for their
# dish washer.
- dw_asset = {
- 'data': {
- 'dish washer': {
- 'serial_number': 1337
- }
- }
- }
+ dw_asset = {"data": multihash(marshal({"dish washer": {"serial_number": 1337}}))}
# They prepare a `CREATE` transaction. To have multiple owners, both
# Bob and Alice need to be the recipients.
prepared_dw_tx = bdb.transactions.prepare(
- operation='CREATE',
- signers=alice.public_key,
- recipients=(alice.public_key, bob.public_key),
- assets=[dw_asset])
+ operation="CREATE", signers=alice.public_key, recipients=(alice.public_key, bob.public_key), assets=[dw_asset]
+ )
# Now they both sign the transaction by providing their private keys.
# And send it afterwards.
- fulfilled_dw_tx = bdb.transactions.fulfill(
- prepared_dw_tx,
- private_keys=[alice.private_key, bob.private_key])
+ fulfilled_dw_tx = bdb.transactions.fulfill(prepared_dw_tx, private_keys=[alice.private_key, bob.private_key])
bdb.transactions.send_commit(fulfilled_dw_tx)
# We store the `id` of the transaction to use it later on.
- dw_id = fulfilled_dw_tx['id']
+ dw_id = fulfilled_dw_tx["id"]
# Let's check if the transaction was successful.
- assert bdb.transactions.retrieve(dw_id), \
- 'Cannot find transaction {}'.format(dw_id)
+ assert bdb.transactions.retrieve(dw_id), "Cannot find transaction {}".format(dw_id)
# The transaction should have two public keys in the outputs.
- assert len(
- bdb.transactions.retrieve(dw_id)['outputs'][0]['public_keys']) == 2
+ assert len(bdb.transactions.retrieve(dw_id)["outputs"][0]["public_keys"]) == 2
# ## Alice and Bob transfer a transaction to Carol.
# Alice and Bob save a lot of money living together. They often go out
@@ -88,39 +75,33 @@ def test_multiple_owners():
# Alice and Bob prepare the transaction to transfer the dish washer to
# Carol.
- transfer_assets = [{'id': dw_id}]
+ transfer_assets = [{"id": dw_id}]
output_index = 0
- output = fulfilled_dw_tx['outputs'][output_index]
- transfer_input = {'fulfillment': output['condition']['details'],
- 'fulfills': {'output_index': output_index,
- 'transaction_id': fulfilled_dw_tx[
- 'id']},
- 'owners_before': output['public_keys']}
+ output = fulfilled_dw_tx["outputs"][output_index]
+ transfer_input = {
+ "fulfillment": output["condition"]["details"],
+ "fulfills": {"output_index": output_index, "transaction_id": fulfilled_dw_tx["id"]},
+ "owners_before": output["public_keys"],
+ }
# Now they create the transaction...
prepared_transfer_tx = bdb.transactions.prepare(
- operation='TRANSFER',
- assets=transfer_assets,
- inputs=transfer_input,
- recipients=carol.public_key)
+ operation="TRANSFER", assets=transfer_assets, inputs=transfer_input, recipients=carol.public_key
+ )
# ... and sign it with their private keys, then send it.
fulfilled_transfer_tx = bdb.transactions.fulfill(
- prepared_transfer_tx,
- private_keys=[alice.private_key, bob.private_key])
+ prepared_transfer_tx, private_keys=[alice.private_key, bob.private_key]
+ )
sent_transfer_tx = bdb.transactions.send_commit(fulfilled_transfer_tx)
# They check if the transaction was successful.
- assert bdb.transactions.retrieve(
- fulfilled_transfer_tx['id']) == sent_transfer_tx
+ assert bdb.transactions.retrieve(fulfilled_transfer_tx["id"]) == sent_transfer_tx
# The owners before should include both Alice and Bob.
- assert len(
- bdb.transactions.retrieve(fulfilled_transfer_tx['id'])['inputs'][0][
- 'owners_before']) == 2
+ assert len(bdb.transactions.retrieve(fulfilled_transfer_tx["id"])["inputs"][0]["owners_before"]) == 2
# While the new owner is Carol.
- assert bdb.transactions.retrieve(fulfilled_transfer_tx['id'])[
- 'outputs'][0]['public_keys'][0] == carol.public_key
+ assert bdb.transactions.retrieve(fulfilled_transfer_tx["id"])["outputs"][0]["public_keys"][0] == carol.public_key
diff --git a/acceptance/python/src/test_naughty_strings.py b/acceptance/python/src/test_naughty_strings.py
index be7438c..a74f3fd 100644
--- a/acceptance/python/src/test_naughty_strings.py
+++ b/acceptance/python/src/test_naughty_strings.py
@@ -16,6 +16,8 @@ import os
# Since the naughty strings get encoded and decoded in odd ways,
# we'll use a regex to sweep those details under the rug.
import re
+from tkinter import N
+from unittest import skip
# We'll use a nice library of naughty strings...
from blns import blns
@@ -27,31 +29,61 @@ import pytest
from planetmint_driver import Planetmint
from planetmint_driver.crypto import generate_keypair
from planetmint_driver.exceptions import BadRequest
+from ipld import multihash, marshal
naughty_strings = blns.all()
+skipped_naughty_strings = [
+ "1.00",
+ "$1.00",
+ "-1.00",
+ "-$1.00",
+ "0.00",
+ "0..0",
+ ".",
+ "0.0.0",
+ "-.",
+ ",./;'[]\\-=",
+ "ثم نفس سقطت وبالتحديد،, جزيرتي باستخدام أن دنو. إذ هنا؟ الستار وتنصيب كان. أهّل ايطاليا، بريطانيا-فرنسا قد أخذ. سليمان، إتفاقية بين ما, يذكر الحدود أي بعد, معاملة بولندا، الإطلاق عل إيو.",
+ "test\x00",
+ "Ṱ̺̺̕o͞ ̷i̲̬͇̪͙n̝̗͕v̟̜̘̦͟o̶̙̰̠kè͚̮̺̪̹̱̤ ̖t̝͕̳̣̻̪͞h̼͓̲̦̳̘̲e͇̣̰̦̬͎ ̢̼̻̱̘h͚͎͙̜̣̲ͅi̦̲̣̰̤v̻͍e̺̭̳̪̰-m̢iͅn̖̺̞̲̯̰d̵̼̟͙̩̼̘̳ ̞̥̱̳̭r̛̗̘e͙p͠r̼̞̻̭̗e̺̠̣͟s̘͇̳͍̝͉e͉̥̯̞̲͚̬͜ǹ̬͎͎̟̖͇̤t͍̬̤͓̼̭͘ͅi̪̱n͠g̴͉ ͏͉ͅc̬̟h͡a̫̻̯͘o̫̟̖͍̙̝͉s̗̦̲.̨̹͈̣",
+ "̡͓̞ͅI̗̘̦͝n͇͇͙v̮̫ok̲̫̙͈i̖͙̭̹̠̞n̡̻̮̣̺g̲͈͙̭͙̬͎ ̰t͔̦h̞̲e̢̤ ͍̬̲͖f̴̘͕̣è͖ẹ̥̩l͖͔͚i͓͚̦͠n͖͍̗͓̳̮g͍ ̨o͚̪͡f̘̣̬ ̖̘͖̟͙̮c҉͔̫͖͓͇͖ͅh̵̤̣͚͔á̗̼͕ͅo̼̣̥s̱͈̺̖̦̻͢.̛̖̞̠̫̰",
+ "̗̺͖̹̯͓Ṯ̤͍̥͇͈h̲́e͏͓̼̗̙̼̣͔ ͇̜̱̠͓͍ͅN͕͠e̗̱z̘̝̜̺͙p̤̺̹͍̯͚e̠̻̠͜r̨̤͍̺̖͔̖̖d̠̟̭̬̝͟i̦͖̩͓͔̤a̠̗̬͉̙n͚͜ ̻̞̰͚ͅh̵͉i̳̞v̢͇ḙ͎͟-҉̭̩̼͔m̤̭̫i͕͇̝̦n̗͙ḍ̟ ̯̲͕͞ǫ̟̯̰̲͙̻̝f ̪̰̰̗̖̭̘͘c̦͍̲̞͍̩̙ḥ͚a̮͎̟̙͜ơ̩̹͎s̤.̝̝ ҉Z̡̖̜͖̰̣͉̜a͖̰͙̬͡l̲̫̳͍̩g̡̟̼̱͚̞̬ͅo̗͜.̟",
+ "̦H̬̤̗̤͝e͜ ̜̥̝̻͍̟́w̕h̖̯͓o̝͙̖͎̱̮ ҉̺̙̞̟͈W̷̼̭a̺̪͍į͈͕̭͙̯̜t̶̼̮s̘͙͖̕ ̠̫̠B̻͍͙͉̳ͅe̵h̵̬͇̫͙i̹͓̳̳̮͎̫̕n͟d̴̪̜̖ ̰͉̩͇͙̲͞ͅT͖̼͓̪͢h͏͓̮̻e̬̝̟ͅ ̤̹̝W͙̞̝͔͇͝ͅa͏͓͔̹̼̣l̴͔̰̤̟͔ḽ̫.͕",
+ '">',
+ "'>",
+ ">",
+ "",
+ "< / script >< script >alert(document.title)< / script >",
+ " onfocus=alert(document.title) autofocus ",
+ '" onfocus=alert(document.title) autofocus ',
+ "' onfocus=alert(document.title) autofocus ",
+ "<script>alert(document.title)</script>",
+ "/dev/null; touch /tmp/blns.fail ; echo",
+ "../../../../../../../../../../../etc/passwd%00",
+ "../../../../../../../../../../../etc/hosts",
+ "() { 0; }; touch /tmp/blns.shellshock1.fail;",
+ "() { _; } >_[$($())] { touch /tmp/blns.shellshock2.fail; }",
+]
+naughty_strings = [naughty for naughty in naughty_strings if naughty not in skipped_naughty_strings]
# This is our base test case, but we'll reuse it to send naughty strings as both keys and values.
def send_naughty_tx(assets, metadata):
# ## Set up a connection to Planetmint
# Check [test_basic.py](./test_basic.html) to get some more details
# about the endpoint.
- bdb = Planetmint(os.environ.get('PLANETMINT_ENDPOINT'))
+ bdb = Planetmint(os.environ.get("PLANETMINT_ENDPOINT"))
# Here's Alice.
alice = generate_keypair()
# Alice is in a naughty mood today, so she creates a tx with some naughty strings
prepared_transaction = bdb.transactions.prepare(
- operation='CREATE',
- signers=alice.public_key,
- assets=assets,
- metadata=metadata)
+ operation="CREATE", signers=alice.public_key, assets=assets, metadata=metadata
+ )
# She fulfills the transaction
- fulfilled_transaction = bdb.transactions.fulfill(
- prepared_transaction,
- private_keys=alice.private_key)
+ fulfilled_transaction = bdb.transactions.fulfill(prepared_transaction, private_keys=alice.private_key)
# The fulfilled tx gets sent to the BDB network
try:
@@ -60,23 +92,24 @@ def send_naughty_tx(assets, metadata):
sent_transaction = e
# If her key contained a '.', began with a '$', or contained a NUL character
- regex = '.*\..*|\$.*|.*\x00.*'
+ regex = ".*\..*|\$.*|.*\x00.*"
key = next(iter(metadata))
if re.match(regex, key):
# Then she expects a nicely formatted error code
status_code = sent_transaction.status_code
error = sent_transaction.error
regex = (
- r'\{\s*\n*'
+ r"\{\s*\n*"
r'\s*"message":\s*"Invalid transaction \(ValidationError\):\s*'
- r'Invalid key name.*The key name cannot contain characters.*\n*'
+ r"Invalid key name.*The key name cannot contain characters.*\n*"
r'\s*"status":\s*400\n*'
- r'\s*\}\n*')
+ r"\s*\}\n*"
+ )
assert status_code == 400
assert re.fullmatch(regex, error), sent_transaction
# Otherwise, she expects to see her transaction in the database
- elif 'id' in sent_transaction.keys():
- tx_id = sent_transaction['id']
+ elif "id" in sent_transaction.keys():
+ tx_id = sent_transaction["id"]
assert bdb.transactions.retrieve(tx_id)
# If neither condition was true, then something weird happened...
else:
@@ -86,8 +119,8 @@ def send_naughty_tx(assets, metadata):
@pytest.mark.parametrize("naughty_string", naughty_strings, ids=naughty_strings)
def test_naughty_keys(naughty_string):
- assets = [{'data': {naughty_string: 'nice_value'}}]
- metadata = {naughty_string: 'nice_value'}
+ assets = [{"data": multihash(marshal({naughty_string: "nice_value"}))}]
+ metadata = multihash(marshal({naughty_string: "nice_value"}))
send_naughty_tx(assets, metadata)
@@ -95,7 +128,7 @@ def test_naughty_keys(naughty_string):
@pytest.mark.parametrize("naughty_string", naughty_strings, ids=naughty_strings)
def test_naughty_values(naughty_string):
- assets = [{'data': {'nice_key': naughty_string}}]
- metadata = {'nice_key': naughty_string}
+ assets = [{"data": multihash(marshal({"nice_key": naughty_string}))}]
+ metadata = multihash(marshal({"nice_key": naughty_string}))
send_naughty_tx(assets, metadata)
diff --git a/acceptance/python/src/test_stream.py b/acceptance/python/src/test_stream.py
index b9f2161..f5e4908 100644
--- a/acceptance/python/src/test_stream.py
+++ b/acceptance/python/src/test_stream.py
@@ -21,6 +21,7 @@ import queue
import json
from threading import Thread, Event
from uuid import uuid4
+from ipld import multihash, marshal
# For this script, we need to set up a websocket connection, that's the reason
# we import the
@@ -35,10 +36,10 @@ def test_stream():
# ## Set up the test
# We use the env variable `BICHAINDB_ENDPOINT` to know where to connect.
# Check [test_basic.py](./test_basic.html) for more information.
- BDB_ENDPOINT = os.environ.get('PLANETMINT_ENDPOINT')
+ BDB_ENDPOINT = os.environ.get("PLANETMINT_ENDPOINT")
# *That's pretty bad, but let's do like this for now.*
- WS_ENDPOINT = 'ws://{}:9985/api/v1/streams/valid_transactions'.format(BDB_ENDPOINT.rsplit(':')[0])
+ WS_ENDPOINT = "ws://{}:9985/api/v1/streams/valid_transactions".format(BDB_ENDPOINT.rsplit(":")[0])
bdb = Planetmint(BDB_ENDPOINT)
@@ -90,11 +91,13 @@ def test_stream():
# random `uuid`.
for _ in range(10):
tx = bdb.transactions.fulfill(
- bdb.transactions.prepare(
- operation='CREATE',
- signers=alice.public_key,
- assets=[{'data': {'uuid': str(uuid4())}}]),
- private_keys=alice.private_key)
+ bdb.transactions.prepare(
+ operation="CREATE",
+ signers=alice.public_key,
+ assets=[{"data": multihash(marshal({"uuid": str(uuid4())}))}],
+ ),
+ private_keys=alice.private_key,
+ )
# We don't want to wait for each transaction to be in a block. By using
# `async` mode, we make sure that the driver returns as soon as the
# transaction is pushed to the Planetmint API. Remember: we expect all
@@ -104,7 +107,7 @@ def test_stream():
bdb.transactions.send_async(tx)
# The `id` of every sent transaction is then stored in a list.
- sent.append(tx['id'])
+ sent.append(tx["id"])
# ## Check the valid transactions coming from Planetmint
# Now we are ready to check if Planetmint did its job. A simple way to
@@ -118,9 +121,9 @@ def test_stream():
# the timeout, then game over ¯\\\_(ツ)\_/¯
try:
event = received.get(timeout=5)
- txid = json.loads(event)['transaction_id']
+ txid = json.loads(event)["transaction_id"]
except queue.Empty:
- assert False, 'Did not receive all expected transactions'
+ assert False, "Did not receive all expected transactions"
# Last thing is to try to remove the `txid` from the set of sent
# transactions. If this test is running in parallel with others, we
diff --git a/acceptance/python/src/test_zenroom.py b/acceptance/python/src/test_zenroom.py
index 3520036..c560514 100644
--- a/acceptance/python/src/test_zenroom.py
+++ b/acceptance/python/src/test_zenroom.py
@@ -1,83 +1,133 @@
-# GOAL:
-# In this script I tried to implement the ECDSA signature using zenroom
-
-# However, the scripts are customizable and so with the same procedure
-# we can implement more complex smart contracts
-
-# PUBLIC IDENTITY
-# The public identity of the users in this script (Bob and Alice)
-# is the pair (ECDH public key, Testnet address)
-
+import os
import json
+import base58
+from hashlib import sha3_256
+from cryptoconditions.types.ed25519 import Ed25519Sha256
+from cryptoconditions.types.zenroom import ZenroomSha256
+from zenroom import zencode_exec
+from planetmint_driver import Planetmint
+from planetmint_driver.crypto import generate_keypair
+from ipld import multihash, marshal
-import hashlib
-from cryptoconditions import ZenroomSha256
-from json.decoder import JSONDecodeError
-def test_zenroom(gen_key_zencode, secret_key_to_private_key_zencode, fulfill_script_zencode,
-condition_script_zencode, zenroom_data, zenroom_house_assets):
- alice = json.loads(ZenroomSha256.run_zenroom(gen_key_zencode).output)['keys']
- bob = json.loads(ZenroomSha256.run_zenroom(gen_key_zencode).output)['keys']
+def test_zenroom_signing(
+ gen_key_zencode,
+ secret_key_to_private_key_zencode,
+ fulfill_script_zencode,
+ zenroom_data,
+ zenroom_house_assets,
+ zenroom_script_input,
+ condition_script_zencode,
+):
- zen_public_keys = json.loads(ZenroomSha256.run_zenroom(secret_key_to_private_key_zencode.format('Alice'),
- keys={'keys': alice}).output)
- zen_public_keys.update(json.loads(ZenroomSha256.run_zenroom(secret_key_to_private_key_zencode.format('Bob'),
- keys={'keys': bob}).output))
+ biolabs = generate_keypair()
+ version = "2.0"
- # CRYPTO-CONDITIONS: instantiate an Ed25519 crypto-condition for buyer
- zenSha = ZenroomSha256(script=fulfill_script_zencode, keys=zen_public_keys, data=zenroom_data)
+ alice = json.loads(zencode_exec(gen_key_zencode).output)["keyring"]
+ bob = json.loads(zencode_exec(gen_key_zencode).output)["keyring"]
+
+ zen_public_keys = json.loads(
+ zencode_exec(secret_key_to_private_key_zencode.format("Alice"), keys=json.dumps({"keyring": alice})).output
+ )
+ zen_public_keys.update(
+ json.loads(
+ zencode_exec(secret_key_to_private_key_zencode.format("Bob"), keys=json.dumps({"keyring": bob})).output
+ )
+ )
+
+ zenroomscpt = ZenroomSha256(script=fulfill_script_zencode, data=zenroom_data, keys=zen_public_keys)
+ print(f"zenroom is: {zenroomscpt.script}")
# CRYPTO-CONDITIONS: generate the condition uri
- condition_uri = zenSha.condition.serialize_uri()
+ condition_uri_zen = zenroomscpt.condition.serialize_uri()
+ print(f"\nzenroom condition URI: {condition_uri_zen}")
# CRYPTO-CONDITIONS: construct an unsigned fulfillment dictionary
- unsigned_fulfillment_dict = {
- 'type': zenSha.TYPE_NAME,
- 'script': fulfill_script_zencode,
- 'keys': zen_public_keys,
+ unsigned_fulfillment_dict_zen = {
+ "type": zenroomscpt.TYPE_NAME,
+ "public_key": base58.b58encode(biolabs.public_key).decode(),
}
-
output = {
- 'amount': '1000',
- 'condition': {
- 'details': unsigned_fulfillment_dict,
- 'uri': condition_uri,
+ "amount": "10",
+ "condition": {
+ "details": unsigned_fulfillment_dict_zen,
+ "uri": condition_uri_zen,
},
- 'data': zenroom_data,
- 'script': fulfill_script_zencode,
- 'conf': '',
- 'public_keys': (zen_public_keys['Alice']['ecdh_public_key'], ),
+ "public_keys": [
+ biolabs.public_key,
+ ],
}
-
-
input_ = {
- 'fulfillment': None,
- 'fulfills': None,
- 'owners_before': (zen_public_keys['Alice']['ecdh_public_key'], ),
+ "fulfillment": None,
+ "fulfills": None,
+ "owners_before": [
+ biolabs.public_key,
+ ],
+ }
+ metadata = {"result": {"output": ["ok"]}}
+
+ script_ = {
+ "code": {"type": "zenroom", "raw": "test_string", "parameters": [{"obj": "1"}, {"obj": "2"}]}, # obsolete
+ "state": "dd8bbd234f9869cab4cc0b84aa660e9b5ef0664559b8375804ee8dce75b10576", #
+ "input": zenroom_script_input,
+ "output": ["ok"],
+ "policies": {},
}
token_creation_tx = {
- 'operation': 'CREATE',
- 'assets': zenroom_house_assets,
- 'metadata': None,
- 'outputs': (output,),
- 'inputs': (input_,),
- 'version': '2.0',
- 'id': None,
+ "operation": "CREATE",
+ "assets": [{"data": multihash(marshal({"test": "my asset"}))}],
+ "metadata": multihash(marshal(metadata)),
+ "script": script_,
+ "outputs": [
+ output,
+ ],
+ "inputs": [
+ input_,
+ ],
+ "version": version,
+ "id": None,
}
# JSON: serialize the transaction-without-id to a json formatted string
- message = json.dumps(
+ tx = json.dumps(
token_creation_tx,
sort_keys=True,
- separators=(',', ':'),
+ separators=(",", ":"),
ensure_ascii=False,
)
+ script_ = json.dumps(script_)
+ # major workflow:
+ # we store the fulfill script in the transaction/message (zenroom-sha)
+ # the condition script is used to fulfill the transaction and create the signature
+ #
+ # the server should pick the fulfill script and recreate the zenroom-sha and verify the signature
- try:
- assert(not zenSha.validate(message=message))
- except: # noqa
- pass
+ signed_input = zenroomscpt.sign(script_, condition_script_zencode, alice)
- message = zenSha.sign(message, condition_script_zencode, alice)
- assert(zenSha.validate(message=message))
+ input_signed = json.loads(signed_input)
+ input_signed["input"]["signature"] = input_signed["output"]["signature"]
+ del input_signed["output"]["signature"]
+ del input_signed["output"]["logs"]
+ input_signed["output"] = ["ok"] # define expected output that is to be compared
+ input_msg = json.dumps(input_signed)
+
+ assert zenroomscpt.validate(message=input_msg)
+
+ tx = json.loads(tx)
+ fulfillment_uri_zen = zenroomscpt.serialize_uri()
+
+ tx["inputs"][0]["fulfillment"] = fulfillment_uri_zen
+ tx["script"] = input_signed
+ tx["id"] = None
+ json_str_tx = json.dumps(tx, sort_keys=True, skipkeys=False, separators=(",", ":"))
+ # SHA3: hash the serialized id-less transaction to generate the id
+ shared_creation_txid = sha3_256(json_str_tx.encode()).hexdigest()
+ tx["id"] = shared_creation_txid
+ # tx = json.dumps(tx)
+ # `https://example.com:9984`
+ print(f"TX \n{tx}")
+ plntmnt = Planetmint(os.environ.get("PLANETMINT_ENDPOINT"))
+ sent_transfer_tx = plntmnt.transactions.send_commit(tx)
+
+ print(f"\n\nstatus and result : + {sent_transfer_tx}")
diff --git a/docker-compose.yml b/docker-compose.yml
index c825a8f..e7f7124 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -14,10 +14,22 @@ services:
- "27017:27017"
command: mongod
restart: always
+ tarantool:
+ image: tarantool/tarantool:2.8.3
+ ports:
+ - "5200:5200"
+ - "3301:3301"
+ - "3303:3303"
+ - "8081:8081"
+ volumes:
+ - ./planetmint/backend/tarantool/basic.lua:/opt/tarantool/basic.lua
+ command: tarantool /opt/tarantool/basic.lua
+ restart: always
planetmint:
depends_on:
- - mongodb
+ #- mongodb
- tendermint
+ - tarantool
build:
context: .
dockerfile: Dockerfile-dev
@@ -31,9 +43,9 @@ services:
- ./pytest.ini:/usr/src/app/pytest.ini
- ./tox.ini:/usr/src/app/tox.ini
environment:
- PLANETMINT_DATABASE_BACKEND: localmongodb
- PLANETMINT_DATABASE_HOST: mongodb
- PLANETMINT_DATABASE_PORT: 27017
+ PLANETMINT_DATABASE_BACKEND: tarantool_db
+ PLANETMINT_DATABASE_HOST: tarantool
+ PLANETMINT_DATABASE_PORT: 3303
PLANETMINT_SERVER_BIND: 0.0.0.0:9984
PLANETMINT_WSSERVER_HOST: 0.0.0.0
PLANETMINT_WSSERVER_ADVERTISED_HOST: planetmint
@@ -43,13 +55,15 @@ services:
- "9984:9984"
- "9985:9985"
- "26658"
+ - "2222:2222"
healthcheck:
test: ["CMD", "bash", "-c", "curl http://planetmint:9984 && curl http://tendermint:26657/abci_query"]
interval: 3s
timeout: 5s
- retries: 3
- command: '.ci/entrypoint.sh'
+ retries: 5
+ command: 'scripts/entrypoint.sh'
restart: always
+
tendermint:
image: tendermint/tendermint:v0.34.15
# volumes:
@@ -60,6 +74,7 @@ services:
- "26657:26657"
command: sh -c "tendermint init && tendermint node --consensus.create_empty_blocks=false --rpc.laddr=tcp://0.0.0.0:26657 --proxy_app=tcp://planetmint:26658"
restart: always
+
bdb:
image: busybox
depends_on:
@@ -93,7 +108,7 @@ services:
context: .
dockerfile: Dockerfile-dev
args:
- backend: localmongodb
+ backend: tarantool
volumes:
- .:/usr/src/app/
command: make -C docs/root html
@@ -104,16 +119,6 @@ services:
volumes:
- ./docs/root/build/html:/usr/share/nginx/html
- # Lints project according to PEP8
- lint:
- image: alpine/flake8
- command: --max-line-length 119 /planetmint /acceptance /integration /tests
- volumes:
- - ./planetmint:/planetmint
- - ./acceptance:/acceptance
- - ./integration:/integration
- - ./tests:/tests
-
# Remove all build, test, coverage and Python artifacts
clean:
image: alpine
diff --git a/docs/root/.vscode/settings.json b/docs/root/.vscode/settings.json
new file mode 100644
index 0000000..65e1ec0
--- /dev/null
+++ b/docs/root/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+ "makefile.extensionOutputFolder": "./.vscode"
+}
\ No newline at end of file
diff --git a/docs/root/generate_http_server_api_documentation.py b/docs/root/generate_http_server_api_documentation.py
index ccb035c..400fcf7 100644
--- a/docs/root/generate_http_server_api_documentation.py
+++ b/docs/root/generate_http_server_api_documentation.py
@@ -9,39 +9,48 @@ import json
import os
import os.path
-from planetmint.transactions.common.input import Input
-from planetmint.transactions.common.transaction_link import TransactionLink
+from transactions.common.input import Input
+from transactions.common.transaction_link import TransactionLink
from planetmint import lib
-from planetmint.transactions.types.assets.create import Create
-from planetmint.transactions.types.assets.transfer import Transfer
+from transactions.types.assets.create import Create
+from transactions.types.assets.transfer import Transfer
from planetmint.web import server
+from ipld import multihash, marshal
TPLS = {}
-TPLS['index-response'] = """\
+TPLS[
+ "index-response"
+] = """\
HTTP/1.1 200 OK
Content-Type: application/json
%(index)s
"""
-TPLS['api-index-response'] = """\
+TPLS[
+ "api-index-response"
+] = """\
HTTP/1.1 200 OK
Content-Type: application/json
%(api_index)s
"""
-TPLS['get-tx-id-request'] = """\
+TPLS[
+ "get-tx-id-request"
+] = """\
GET /api/v1/transactions/%(txid)s HTTP/1.1
Host: example.com
"""
-TPLS['get-tx-id-response'] = """\
+TPLS[
+ "get-tx-id-response"
+] = """\
HTTP/1.1 200 OK
Content-Type: application/json
@@ -49,14 +58,18 @@ Content-Type: application/json
"""
-TPLS['get-tx-by-asset-request'] = """\
+TPLS[
+ "get-tx-by-asset-request"
+] = """\
GET /api/v1/transactions?operation=TRANSFER&asset_id=%(txid)s HTTP/1.1
Host: example.com
"""
-TPLS['get-tx-by-asset-response'] = """\
+TPLS[
+ "get-tx-by-asset-response"
+] = """\
HTTP/1.1 200 OK
Content-Type: application/json
@@ -64,7 +77,9 @@ Content-Type: application/json
%(tx_transfer_last)s]
"""
-TPLS['post-tx-request'] = """\
+TPLS[
+ "post-tx-request"
+] = """\
POST /api/v1/transactions?mode=async HTTP/1.1
Host: example.com
Content-Type: application/json
@@ -73,7 +88,9 @@ Content-Type: application/json
"""
-TPLS['post-tx-response'] = """\
+TPLS[
+ "post-tx-response"
+] = """\
HTTP/1.1 202 Accepted
Content-Type: application/json
@@ -81,14 +98,18 @@ Content-Type: application/json
"""
-TPLS['get-block-request'] = """\
+TPLS[
+ "get-block-request"
+] = """\
GET /api/v1/blocks/%(blockid)s HTTP/1.1
Host: example.com
"""
-TPLS['get-block-response'] = """\
+TPLS[
+ "get-block-response"
+] = """\
HTTP/1.1 200 OK
Content-Type: application/json
@@ -96,14 +117,18 @@ Content-Type: application/json
"""
-TPLS['get-block-txid-request'] = """\
+TPLS[
+ "get-block-txid-request"
+] = """\
GET /api/v1/blocks?transaction_id=%(txid)s HTTP/1.1
Host: example.com
"""
-TPLS['get-block-txid-response'] = """\
+TPLS[
+ "get-block-txid-response"
+] = """\
HTTP/1.1 200 OK
Content-Type: application/json
@@ -112,7 +137,7 @@ Content-Type: application/json
def main():
- """ Main function """
+ """Main function"""
ctx = {}
@@ -121,90 +146,95 @@ def main():
client = server.create_app().test_client()
- host = 'example.com:9984'
+ host = "example.com:9984"
# HTTP Index
- res = client.get('/', environ_overrides={'HTTP_HOST': host})
+ res = client.get("/", environ_overrides={"HTTP_HOST": host})
res_data = json.loads(res.data.decode())
- ctx['index'] = pretty_json(res_data)
+ ctx["index"] = pretty_json(res_data)
# API index
- res = client.get('/api/v1/', environ_overrides={'HTTP_HOST': host})
- ctx['api_index'] = pretty_json(json.loads(res.data.decode()))
+ res = client.get("/api/v1/", environ_overrides={"HTTP_HOST": host})
+ ctx["api_index"] = pretty_json(json.loads(res.data.decode()))
# tx create
- privkey = 'CfdqtD7sS7FgkMoGPXw55MVGGFwQLAoHYTcBhZDtF99Z'
- pubkey = '4K9sWUMFwTgaDGPfdynrbxWqWS6sWmKbZoTjxLtVUibD'
- assets = [{'msg': 'Hello Planetmint!'}]
- tx = Create.generate([pubkey], [([pubkey], 1)], assets=assets, metadata={'sequence': 0})
+ from ipld import marshal, multihash
+
+ privkey = "CfdqtD7sS7FgkMoGPXw55MVGGFwQLAoHYTcBhZDtF99Z"
+ pubkey = "4K9sWUMFwTgaDGPfdynrbxWqWS6sWmKbZoTjxLtVUibD"
+ assets = [{"data": multihash(marshal({"msg": "Hello Planetmint!"}))}]
+ tx = Create.generate([pubkey], [([pubkey], 1)], assets=assets, metadata=multihash(marshal({"sequence": 0})))
tx = tx.sign([privkey])
- ctx['tx'] = pretty_json(tx.to_dict())
- ctx['public_keys'] = tx.outputs[0].public_keys[0]
- ctx['txid'] = tx.id
+ ctx["tx"] = pretty_json(tx.to_dict())
+ ctx["public_keys"] = tx.outputs[0].public_keys[0]
+ ctx["txid"] = tx.id
# tx transfer
- privkey_transfer = '3AeWpPdhEZzWLYfkfYHBfMFC2r1f8HEaGS9NtbbKssya'
- pubkey_transfer = '3yfQPHeWAa1MxTX9Zf9176QqcpcnWcanVZZbaHb8B3h9'
+ privkey_transfer = "3AeWpPdhEZzWLYfkfYHBfMFC2r1f8HEaGS9NtbbKssya"
+ pubkey_transfer = "3yfQPHeWAa1MxTX9Zf9176QqcpcnWcanVZZbaHb8B3h9"
cid = 0
- input_ = Input(fulfillment=tx.outputs[cid].fulfillment,
- fulfills=TransactionLink(txid=tx.id, output=cid),
- owners_before=tx.outputs[cid].public_keys)
- tx_transfer = Transfer.generate([input_], [([pubkey_transfer], 1)], asset_ids=[tx.id], metadata={'sequence': 1})
+ input_ = Input(
+ fulfillment=tx.outputs[cid].fulfillment,
+ fulfills=TransactionLink(txid=tx.id, output=cid),
+ owners_before=tx.outputs[cid].public_keys,
+ )
+ tx_transfer = Transfer.generate(
+ [input_], [([pubkey_transfer], 1)], asset_ids=[tx.id], metadata=multihash(marshal({"sequence": 1}))
+ )
tx_transfer = tx_transfer.sign([privkey])
- ctx['tx_transfer'] = pretty_json(tx_transfer.to_dict())
- ctx['public_keys_transfer'] = tx_transfer.outputs[0].public_keys[0]
- ctx['tx_transfer_id'] = tx_transfer.id
+ ctx["tx_transfer"] = pretty_json(tx_transfer.to_dict())
+ ctx["public_keys_transfer"] = tx_transfer.outputs[0].public_keys[0]
+ ctx["tx_transfer_id"] = tx_transfer.id
# privkey_transfer_last = 'sG3jWDtdTXUidBJK53ucSTrosktG616U3tQHBk81eQe'
- pubkey_transfer_last = '3Af3fhhjU6d9WecEM9Uw5hfom9kNEwE7YuDWdqAUssqm'
+ pubkey_transfer_last = "3Af3fhhjU6d9WecEM9Uw5hfom9kNEwE7YuDWdqAUssqm"
cid = 0
- input_ = Input(fulfillment=tx_transfer.outputs[cid].fulfillment,
- fulfills=TransactionLink(txid=tx_transfer.id, output=cid),
- owners_before=tx_transfer.outputs[cid].public_keys)
- tx_transfer_last = Transfer.generate([input_], [([pubkey_transfer_last], 1)],
- asset_ids=[tx.id], metadata={'sequence': 2})
+ input_ = Input(
+ fulfillment=tx_transfer.outputs[cid].fulfillment,
+ fulfills=TransactionLink(txid=tx_transfer.id, output=cid),
+ owners_before=tx_transfer.outputs[cid].public_keys,
+ )
+ tx_transfer_last = Transfer.generate(
+ [input_], [([pubkey_transfer_last], 1)], asset_ids=[tx.id], metadata=multihash(marshal({"sequence": 2}))
+ )
tx_transfer_last = tx_transfer_last.sign([privkey_transfer])
- ctx['tx_transfer_last'] = pretty_json(tx_transfer_last.to_dict())
- ctx['tx_transfer_last_id'] = tx_transfer_last.id
- ctx['public_keys_transfer_last'] = tx_transfer_last.outputs[0].public_keys[0]
+ ctx["tx_transfer_last"] = pretty_json(tx_transfer_last.to_dict())
+ ctx["tx_transfer_last_id"] = tx_transfer_last.id
+ ctx["public_keys_transfer_last"] = tx_transfer_last.outputs[0].public_keys[0]
# block
node_private = "5G2kE1zJAgTajkVSbPAQWo4c2izvtwqaNHYsaNpbbvxX"
node_public = "DngBurxfeNVKZWCEcDnLj1eMPAS7focUZTE5FndFGuHT"
signature = "53wxrEQDYk1dXzmvNSytbCfmNVnPqPkDQaTnAe8Jf43s6ssejPxezkCvUnGTnduNUmaLjhaan1iRLi3peu6s5DzA"
- app_hash = 'f6e0c49c6d94d6924351f25bb334cf2a99af4206339bf784e741d1a5ab599056'
+ app_hash = "f6e0c49c6d94d6924351f25bb334cf2a99af4206339bf784e741d1a5ab599056"
block = lib.Block(height=1, transactions=[tx.to_dict()], app_hash=app_hash)
block_dict = block._asdict()
- block_dict.pop('app_hash')
- ctx['block'] = pretty_json(block_dict)
- ctx['blockid'] = block.height
+ block_dict.pop("app_hash")
+ ctx["block"] = pretty_json(block_dict)
+ ctx["blockid"] = block.height
# block status
- block_list = [
- block.height
- ]
- ctx['block_list'] = pretty_json(block_list)
+ block_list = [block.height]
+ ctx["block_list"] = pretty_json(block_list)
-
- base_path = os.path.join(os.path.dirname(__file__),
- 'source/installation/api/http-samples')
+ base_path = os.path.join(os.path.dirname(__file__), "source/connecting/http-samples")
if not os.path.exists(base_path):
os.makedirs(base_path)
for name, tpl in TPLS.items():
- path = os.path.join(base_path, name + '.http')
+ path = os.path.join(base_path, name + ".http")
code = tpl % ctx
- with open(path, 'w') as handle:
+ with open(path, "w") as handle:
handle.write(code)
def setup(*_):
- """ Fool sphinx into think it's an extension muahaha """
+ """Fool sphinx into thinking it's an extension muahaha"""
main()
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/docs/root/requirements.txt b/docs/root/requirements.txt
index 8d8ed0f..c048f4f 100644
--- a/docs/root/requirements.txt
+++ b/docs/root/requirements.txt
@@ -36,3 +36,11 @@ sphinxcontrib-serializinghtml==1.1.5
urllib3==1.26.9
wget==3.2
zipp==3.8.0
+nest-asyncio==1.5.5
+sphinx-press-theme==0.8.0
+sphinx-documatt-theme
+base58>=2.1.1
+pynacl==1.4.0
+zenroom==2.1.0.dev1655293214
+pyasn1==0.4.8
+cryptography==3.4.7
diff --git a/docs/root/source/installation/_static/Node-components.png b/docs/root/source/_static/Node-components.png
similarity index 100%
rename from docs/root/source/installation/_static/Node-components.png
rename to docs/root/source/_static/Node-components.png
diff --git a/docs/root/source/_static/PLANETMINT_COLOR_POS.png b/docs/root/source/_static/PLANETMINT_COLOR_POS.png
new file mode 100644
index 0000000..1ce824b
Binary files /dev/null and b/docs/root/source/_static/PLANETMINT_COLOR_POS.png differ
diff --git a/docs/root/source/installation/_static/mongodb_cloud_manager_1.png b/docs/root/source/_static/mongodb_cloud_manager_1.png
similarity index 100%
rename from docs/root/source/installation/_static/mongodb_cloud_manager_1.png
rename to docs/root/source/_static/mongodb_cloud_manager_1.png
diff --git a/docs/root/source/_static/planet-mint-logo.png b/docs/root/source/_static/planet-mint-logo.png
new file mode 100644
index 0000000..c923c03
Binary files /dev/null and b/docs/root/source/_static/planet-mint-logo.png differ
diff --git a/docs/root/source/_static/planet-mint-logo.svg b/docs/root/source/_static/planet-mint-logo.svg
new file mode 100644
index 0000000..b8aa071
--- /dev/null
+++ b/docs/root/source/_static/planet-mint-logo.svg
@@ -0,0 +1,13 @@
+
+
+
diff --git a/docs/root/source/_static/planetmint-logo.png b/docs/root/source/_static/planetmint-logo.png
new file mode 100644
index 0000000..c923c03
Binary files /dev/null and b/docs/root/source/_static/planetmint-logo.png differ
diff --git a/docs/root/source/_static/planetmint-logo.svg b/docs/root/source/_static/planetmint-logo.svg
new file mode 100644
index 0000000..b8aa071
--- /dev/null
+++ b/docs/root/source/_static/planetmint-logo.svg
@@ -0,0 +1,13 @@
+
+
+
diff --git a/docs/root/source/_static/planetmint350x150.png b/docs/root/source/_static/planetmint350x150.png
new file mode 100644
index 0000000..3d10d7a
Binary files /dev/null and b/docs/root/source/_static/planetmint350x150.png differ
diff --git a/docs/root/source/_static/planetmint360x150white.png b/docs/root/source/_static/planetmint360x150white.png
new file mode 100644
index 0000000..70c1f59
Binary files /dev/null and b/docs/root/source/_static/planetmint360x150white.png differ
diff --git a/docs/root/source/installation/appendices/cryptography.rst b/docs/root/source/appendices/cryptography.rst
similarity index 100%
rename from docs/root/source/installation/appendices/cryptography.rst
rename to docs/root/source/appendices/cryptography.rst
diff --git a/docs/root/source/installation/appendices/firewall-notes.md b/docs/root/source/appendices/firewall-notes.md
similarity index 96%
rename from docs/root/source/installation/appendices/firewall-notes.md
rename to docs/root/source/appendices/firewall-notes.md
index 4b8ec39..2b3a397 100644
--- a/docs/root/source/installation/appendices/firewall-notes.md
+++ b/docs/root/source/appendices/firewall-notes.md
@@ -49,7 +49,7 @@ Port 443 is the default HTTPS port (TCP). Package managers might also get some p
Port 9984 is the default port for the Planetmint client-server HTTP API (TCP), which is served by Gunicorn HTTP Server. It's _possible_ allow port 9984 to accept inbound traffic from anyone, but we recommend against doing that. Instead, set up a reverse proxy server (e.g. using Nginx) and only allow traffic from there. Information about how to do that can be found [in the Gunicorn documentation](http://docs.gunicorn.org/en/stable/deploy.html). (They call it a proxy.)
-If Gunicorn and the reverse proxy are running on the same server, then you'll have to tell Gunicorn to listen on some port other than 9984 (so that the reverse proxy can listen on port 9984). You can do that by setting `server.bind` to 'localhost:PORT' in the [Planetmint Configuration Settings](../../installation/node-setup/configuration), where PORT is whatever port you chose (e.g. 9983).
+If Gunicorn and the reverse proxy are running on the same server, then you'll have to tell Gunicorn to listen on some port other than 9984 (so that the reverse proxy can listen on port 9984). You can do that by setting `server.bind` to 'localhost:PORT' in the [Planetmint Configuration Settings](../node-setup/configuration), where PORT is whatever port you chose (e.g. 9983).
You may want to have Gunicorn and the reverse proxy running on different servers, so that both can listen on port 9984. That would also help isolate the effects of a denial-of-service attack.
diff --git a/docs/root/source/installation/appendices/generate-key-pair-for-ssh.md b/docs/root/source/appendices/generate-key-pair-for-ssh.md
similarity index 100%
rename from docs/root/source/installation/appendices/generate-key-pair-for-ssh.md
rename to docs/root/source/appendices/generate-key-pair-for-ssh.md
diff --git a/docs/root/source/installation/appendices/index.rst b/docs/root/source/appendices/index.rst
similarity index 100%
rename from docs/root/source/installation/appendices/index.rst
rename to docs/root/source/appendices/index.rst
diff --git a/docs/root/source/installation/appendices/licenses.md b/docs/root/source/appendices/licenses.md
similarity index 100%
rename from docs/root/source/installation/appendices/licenses.md
rename to docs/root/source/appendices/licenses.md
diff --git a/docs/root/source/installation/appendices/log-rotation.md b/docs/root/source/appendices/log-rotation.md
similarity index 83%
rename from docs/root/source/installation/appendices/log-rotation.md
rename to docs/root/source/appendices/log-rotation.md
index 53bea64..e89aa27 100644
--- a/docs/root/source/installation/appendices/log-rotation.md
+++ b/docs/root/source/appendices/log-rotation.md
@@ -9,7 +9,7 @@ Code is Apache-2.0 and docs are CC-BY-4.0
Each Planetmint node runs:
-- MongoDB
+- Tarantool
- Planetmint Server
- Tendermint
@@ -17,11 +17,6 @@ When running a Planetmint node for long periods
of time, we need to consider doing log rotation, i.e. we do not want the logs taking
up large amounts of storage and making the node unresponsive or getting it into a bad state.
-## MongoDB Logging and Log Rotation
-
-See the MongoDB docs about
-[logging](https://docs.mongodb.com/v3.6/administration/monitoring/#monitoring-standard-loggging)
-and [log rotation](https://docs.mongodb.com/v3.6/tutorial/rotate-log-files/).
## Planetmint Server Logging and Log Rotation
@@ -32,7 +27,7 @@ Planetmint Server writes its logs to two files: normal logs and error logs. The
Log rotation is baked into Planetmint Server using Python's `logging` module. The logs for Planetmint Server are rotated when any of the above mentioned files exceeds 209715200 bytes (i.e. approximately 209 MB).
-For more information, see the docs about [the Planetmint Server configuration settings related to logging](../../installation/node-setup/configuration#log).
+For more information, see the docs about [the Planetmint Server configuration settings related to logging](../node-setup/configuration#log).
## Tendermint Logging and Log Rotation
diff --git a/docs/root/source/installation/appendices/ntp-notes.md b/docs/root/source/appendices/ntp-notes.md
similarity index 100%
rename from docs/root/source/installation/appendices/ntp-notes.md
rename to docs/root/source/appendices/ntp-notes.md
diff --git a/docs/root/source/basic-usage.md b/docs/root/source/basic-usage.md
index 7f8f393..16d9490 100644
--- a/docs/root/source/basic-usage.md
+++ b/docs/root/source/basic-usage.md
@@ -17,7 +17,7 @@ two kinds: CREATE transactions and TRANSFER transactions.
You can view the transaction specifications in Github, which describe transaction components and the conditions they have to fulfill in order to be valid.
-[Planetmint Transactions Specs](https://github.com/planetmint/BEPs/tree/master/13/)
+[Planetmint Transactions Specs](https://github.com/bigchaindb/BEPs/tree/master/13/)
### CREATE Transactions
@@ -44,7 +44,7 @@ Planetmint supports a variety of conditions.
For details, see
the section titled **Transaction Components: Conditions**
in the relevant
-[Planetmint Transactions Spec](https://github.com/planetmint/BEPs/tree/master/13/).
+[Planetmint Transactions Spec](https://github.com/bigchaindb/BEPs/tree/master/13/).

@@ -58,7 +58,7 @@ Loosely speaking, that list might be interpreted as the list of "owners."
A more accurate word might be fulfillers, signers, controllers,
or transfer-enablers.
See the section titled **A Note about Owners**
-in the relevant [Planetmint Transactions Spec](https://github.com/planetmint/BEPs/tree/master/13/).
+in the relevant [Planetmint Transactions Spec](https://github.com/bigchaindb/BEPs/tree/master/13/).
A CREATE transaction must be signed by all the owners.
(If you're looking for that signature,
@@ -119,13 +119,43 @@ of the outgoing paperclips (100).
### Transaction Validity
When a node is asked to check if a transaction is valid, it checks several
-things. We documented those things in a post on *The Planetmint Blog*:
-["What is a Valid Transaction in Planetmint?"](https://blog.planetmint.io/what-is-a-valid-transaction-in-planetmint-9a1a075a9598)
+things. This was documented in a BigchainDB post (BigchainDB is the previous version of Planetmint) at *The BigchainDB Blog*:
+["What is a Valid Transaction in BigchainDB?"](https://blog.bigchaindb.com/what-is-a-valid-transaction-in-planetmint-9a1a075a9598)
(Note: That post was about Planetmint Server v1.0.0.)
+## A Note on IPLD marshalling and CIDs
+
+Planetmint utilizes IPLD (interplanetary linked data) marshalling and CIDs (content identifiers) to store and verify data.
+Before submitting a transaction to the network the data is marshalled using [py-ipld](https://github.com/planetmint/py-ipld) and instead of the raw data a CID is stored on chain.
+
+The CID is a self-describing data structure. It contains information about the encoding, cryptographic algorithm, length and the actual hash value. For example the CID `bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi` tells us the following:
+
+```
+Encoding: base32
+Codec: dag-pb (MerkleDAG protobuf)
+Hashing-Algorithm: sha2-256
+Digest (Hex): C3C4733EC8AFFD06CF9E9FF50FFC6BCD2EC85A6170004BB709669C31DE94391A
+```
+
+With this information we can validate that information about an asset we've received is actually valid.
+
+
### Example Transactions
There are example Planetmint transactions in
-[the HTTP API documentation](./installation/api/http-client-server-api)
+[the HTTP API documentation](./connecting/http-client-server-api)
and
-[the Python Driver documentation](./drivers/index).
+[the Python Driver documentation](./connecting/drivers).
+
+## Contracts & Conditions
+
+Planetmint has been developed with simple logical gateways in mind. The logic was introduced by [cryptoconditions](https://docs.planetmint.io/projects/cryptoconditions). The cryptoconditions documentation contains all details about how conditions are defined and how they can be verified and fulfilled.
+
+The integration of such into the transaction schema of Planetmint is shown below.
+
+## Zenroom Smart Contracts and Policies
+
+[Zenroom](https://zenroom.org/) was integrated into [cryptoconditions](https://docs.planetmint.io/projects/cryptoconditions) to allow for human-readable conditions and fulfillments.
+At the moment these contracts can only be stateless, which implies that the conditions and fulfillments need to be transacted in the same transaction. However, [PRP-10](https://github.com/planetmint/PRPs/tree/main/10) aims to make stateful contracts possible, which enables asynchronous and party-independent processing of contracts.
+
+As for network-wide or asset-based policies, [PRP-11](https://github.com/planetmint/PRPs/tree/main/11) specifies how these can be implemented and how they can be used to verify a transaction state before it is committed to the network.
diff --git a/docs/root/source/conf.py b/docs/root/source/conf.py
index 5c082ea..8dc1e0e 100644
--- a/docs/root/source/conf.py
+++ b/docs/root/source/conf.py
@@ -30,14 +30,14 @@ from os import rename, remove
# get version
_version = {}
-with open('../../../planetmint/version.py') as fp:
+with open("../../../planetmint/version.py") as fp:
exec(fp.read(), _version)
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
-sys.path.insert(0,parentdir)
-#sys.path.insert(0, "/home/myname/pythonfiles")
+sys.path.insert(0, parentdir)
+# sys.path.insert(0, "/home/myname/pythonfiles")
# -- General configuration ------------------------------------------------
@@ -48,83 +48,95 @@ sys.path.insert(0,parentdir)
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-project = 'Planetmint'
+project = "Planetmint"
import sphinx_rtd_theme
extensions = [
- 'myst_parser',
- 'sphinx.ext.autosectionlabel',
- 'sphinx.ext.autodoc',
- 'sphinx.ext.intersphinx',
- 'sphinx.ext.coverage',
- 'sphinx.ext.viewcode',
- 'sphinx.ext.todo',
- 'sphinx.ext.napoleon',
- 'sphinxcontrib.httpdomain',
- 'aafigure.sphinxext',
+ "myst_parser",
+ "sphinx.ext.autosectionlabel",
+ "sphinx.ext.autodoc",
+ "sphinx.ext.intersphinx",
+ "sphinx.ext.coverage",
+ "sphinx.ext.viewcode",
+ "sphinx.ext.todo",
+ "sphinx.ext.napoleon",
+ "sphinxcontrib.httpdomain",
+ "aafigure.sphinxext",
+ #'sphinx_toolbox.collapse',
# Below are actually build steps made to look like sphinx extensions.
# It was the easiest way to get it running with ReadTheDocs.
- 'generate_http_server_api_documentation',
+ "generate_http_server_api_documentation",
]
try:
- remove('contributing/cross-project-policies/code-of-conduct.md')
- remove('contributing/cross-project-policies/release-process.md')
- remove('contributing/cross-project-policies/python-style-guide.md')
+ remove("contributing/cross-project-policies/code-of-conduct.md")
+ remove("contributing/cross-project-policies/release-process.md")
+ remove("contributing/cross-project-policies/python-style-guide.md")
except:
- print('done')
+ print("done")
+
def get_old_new(url, old, new):
filename = wget.download(url)
rename(old, new)
-get_old_new('https://raw.githubusercontent.com/planetmint/planetmint/master/CODE_OF_CONDUCT.md',
- 'CODE_OF_CONDUCT.md', 'contributing/cross-project-policies/code-of-conduct.md')
-get_old_new('https://raw.githubusercontent.com/planetmint/planetmint/master/RELEASE_PROCESS.md',
- 'RELEASE_PROCESS.md', 'contributing/cross-project-policies/release-process.md')
+get_old_new(
+ "https://raw.githubusercontent.com/planetmint/planetmint/master/CODE_OF_CONDUCT.md",
+ "CODE_OF_CONDUCT.md",
+ "contributing/cross-project-policies/code-of-conduct.md",
+)
-get_old_new('https://raw.githubusercontent.com/planetmint/planetmint/master/PYTHON_STYLE_GUIDE.md',
- 'PYTHON_STYLE_GUIDE.md', 'contributing/cross-project-policies/python-style-guide.md')
+get_old_new(
+ "https://raw.githubusercontent.com/planetmint/planetmint/master/RELEASE_PROCESS.md",
+ "RELEASE_PROCESS.md",
+ "contributing/cross-project-policies/release-process.md",
+)
-suppress_warnings = ['misc.highlighting_failure']
+get_old_new(
+ "https://raw.githubusercontent.com/planetmint/planetmint/master/PYTHON_STYLE_GUIDE.md",
+ "PYTHON_STYLE_GUIDE.md",
+ "contributing/cross-project-policies/python-style-guide.md",
+)
+
+suppress_warnings = ["misc.highlighting_failure"]
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
# autodoc settings
-autodoc_member_order = 'bysource'
+autodoc_member_order = "bysource"
autodoc_default_options = {
- 'members': None,
+ "members": None,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
-source_suffix = ['.rst', '.md']
+source_suffix = [".rst", ".md"]
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
autosectionlabel_prefix_document = True
# General information about the project.
now = datetime.datetime.now()
-copyright = str(now.year) + ', Planetmint Contributors'
-author = 'Planetmint Contributors'
+copyright = str(now.year) + ", Planetmint Contributors"
+author = "Planetmint Contributors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
-version = _version['__short_version__']
+version = _version["__short_version__"]
# The full version, including alpha/beta/rc tags.
-release = _version['__version__']
+release = _version["__version__"]
# The full version, including alpha/beta/rc tags.
# The language for content autogenerated by Sphinx. Refer to documentation
@@ -132,7 +144,7 @@ release = _version['__version__']
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
-language = 'en'
+language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
@@ -168,7 +180,7 @@ exclude_patterns = []
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
@@ -185,7 +197,8 @@ todo_include_todos = False
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
-html_theme = 'sphinx_rtd_theme'
+html_theme = "press"
+# html_theme = 'sphinx_documatt_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -194,7 +207,7 @@ html_theme = 'sphinx_rtd_theme'
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
-html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+# html_theme_path = [press.get_html_theme_path()]
# The name for this set of Sphinx documents.
# " v documentation" by default.
@@ -208,7 +221,7 @@ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
-# html_logo = None
+html_logo = "_static/planetmint-logo.png"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
@@ -219,7 +232,7 @@ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
@@ -299,34 +312,36 @@ html_static_path = ['_static']
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
-htmlhelp_basename = 'Planetmintdoc'
+htmlhelp_basename = "Planetmintdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- #
- 'papersize': 'letterpaper',
-
- # The font size ('10pt', '11pt' or '12pt').
- #
- # 'pointsize': '10pt',
-
- # Additional stuff for the LaTeX preamble.
- #
- # 'preamble': '',
-
- # Latex figure (float) alignment
- #
- # 'figure_align': 'htbp',
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ "papersize": "letterpaper",
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+ # Latex figure (float) alignment
+ #
+ # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, 'Planetmint.tex', 'Planetmint Documentation',
- 'Planetmint Contributors', 'manual'),
+ (
+ master_doc,
+ "Planetmint.tex",
+ "Planetmint Documentation",
+ "Planetmint Contributors",
+ "manual",
+ ),
]
# The name of an image file (relative to this directory) to place at the top of
@@ -366,10 +381,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
-man_pages = [
- (master_doc, 'planetmint', 'Planetmint Documentation',
- [author], 1)
-]
+man_pages = [(master_doc, "planetmint", "Planetmint Documentation", [author], 1)]
# If true, show URL addresses after external links.
#
@@ -382,9 +394,15 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- (master_doc, 'Planetmint', 'Planetmint Documentation',
- author, 'Planetmint', 'One line description of project.',
- 'Miscellaneous'),
+ (
+ master_doc,
+ "Planetmint",
+ "Planetmint Documentation",
+ author,
+ "Planetmint",
+ "One line description of project.",
+ "Miscellaneous",
+ ),
]
# Documents to append as an appendix to all manuals.
diff --git a/docs/root/source/installation/_static/Conditions_Circuit_Diagram.png b/docs/root/source/connecting/_static/Conditions_Circuit_Diagram.png
similarity index 100%
rename from docs/root/source/installation/_static/Conditions_Circuit_Diagram.png
rename to docs/root/source/connecting/_static/Conditions_Circuit_Diagram.png
diff --git a/docs/root/source/connecting/_static/Node-components.png b/docs/root/source/connecting/_static/Node-components.png
new file mode 100644
index 0000000..4bc8e9a
Binary files /dev/null and b/docs/root/source/connecting/_static/Node-components.png differ
diff --git a/docs/root/source/installation/_static/arch.jpg b/docs/root/source/connecting/_static/arch.jpg
similarity index 100%
rename from docs/root/source/installation/_static/arch.jpg
rename to docs/root/source/connecting/_static/arch.jpg
diff --git a/docs/root/source/installation/_static/cc_escrow_execute_abort.png b/docs/root/source/connecting/_static/cc_escrow_execute_abort.png
similarity index 100%
rename from docs/root/source/installation/_static/cc_escrow_execute_abort.png
rename to docs/root/source/connecting/_static/cc_escrow_execute_abort.png
diff --git a/docs/root/source/installation/_static/models_diagrams.odg b/docs/root/source/connecting/_static/models_diagrams.odg
similarity index 100%
rename from docs/root/source/installation/_static/models_diagrams.odg
rename to docs/root/source/connecting/_static/models_diagrams.odg
diff --git a/docs/root/source/connecting/_static/mongodb_cloud_manager_1.png b/docs/root/source/connecting/_static/mongodb_cloud_manager_1.png
new file mode 100644
index 0000000..16073d6
Binary files /dev/null and b/docs/root/source/connecting/_static/mongodb_cloud_manager_1.png differ
diff --git a/docs/root/source/installation/_static/monitoring_system_diagram.png b/docs/root/source/connecting/_static/monitoring_system_diagram.png
similarity index 100%
rename from docs/root/source/installation/_static/monitoring_system_diagram.png
rename to docs/root/source/connecting/_static/monitoring_system_diagram.png
diff --git a/docs/root/source/installation/_static/stories_3_assets.png b/docs/root/source/connecting/_static/stories_3_assets.png
similarity index 100%
rename from docs/root/source/installation/_static/stories_3_assets.png
rename to docs/root/source/connecting/_static/stories_3_assets.png
diff --git a/docs/root/source/installation/_static/tx_escrow_execute_abort.png b/docs/root/source/connecting/_static/tx_escrow_execute_abort.png
similarity index 100%
rename from docs/root/source/installation/_static/tx_escrow_execute_abort.png
rename to docs/root/source/connecting/_static/tx_escrow_execute_abort.png
diff --git a/docs/root/source/installation/_static/tx_multi_condition_multi_fulfillment_v1.png b/docs/root/source/connecting/_static/tx_multi_condition_multi_fulfillment_v1.png
similarity index 100%
rename from docs/root/source/installation/_static/tx_multi_condition_multi_fulfillment_v1.png
rename to docs/root/source/connecting/_static/tx_multi_condition_multi_fulfillment_v1.png
diff --git a/docs/root/source/installation/_static/tx_schematics.odg b/docs/root/source/connecting/_static/tx_schematics.odg
similarity index 100%
rename from docs/root/source/installation/_static/tx_schematics.odg
rename to docs/root/source/connecting/_static/tx_schematics.odg
diff --git a/docs/root/source/installation/_static/tx_single_condition_single_fulfillment_v1.png b/docs/root/source/connecting/_static/tx_single_condition_single_fulfillment_v1.png
similarity index 100%
rename from docs/root/source/installation/_static/tx_single_condition_single_fulfillment_v1.png
rename to docs/root/source/connecting/_static/tx_single_condition_single_fulfillment_v1.png
diff --git a/docs/root/source/installation/commands-and-backend/backend.rst b/docs/root/source/connecting/commands-and-backend/backend.rst
similarity index 78%
rename from docs/root/source/installation/commands-and-backend/backend.rst
rename to docs/root/source/connecting/commands-and-backend/backend.rst
index 543520d..8256a0c 100644
--- a/docs/root/source/installation/commands-and-backend/backend.rst
+++ b/docs/root/source/connecting/commands-and-backend/backend.rst
@@ -8,46 +8,53 @@
Database Backend Interfaces
###########################
+
+
.. automodule:: planetmint.backend
:special-members: __init__
Generic Interfaces
-==================
+------------------
+
:mod:`planetmint.backend.connection`
-------------------------------------
-
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: planetmint.backend.connection
:special-members: __init__
:mod:`planetmint.backend.query`
--------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: planetmint.backend.query
:mod:`planetmint.backend.schema`
---------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: planetmint.backend.schema
:mod:`planetmint.backend.utils`
--------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: planetmint.backend.utils
MongoDB Backend
-===============
+---------------
+
.. automodule:: planetmint.backend.localmongodb
:special-members: __init__
:mod:`planetmint.backend.localmongodb.connection`
--------------------------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
.. automodule:: planetmint.backend.localmongodb.connection
:mod:`planetmint.backend.localmongodb.query`
---------------------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
.. automodule:: planetmint.backend.localmongodb.query
:mod:`planetmint.backend.localmongodb.schema`
----------------------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
.. automodule:: planetmint.backend.localmongodb.schema
diff --git a/docs/root/source/installation/commands-and-backend/commands.rst b/docs/root/source/connecting/commands-and-backend/commands.rst
similarity index 100%
rename from docs/root/source/installation/commands-and-backend/commands.rst
rename to docs/root/source/connecting/commands-and-backend/commands.rst
diff --git a/docs/root/source/installation/commands-and-backend/index.rst b/docs/root/source/connecting/commands-and-backend/index.rst
similarity index 82%
rename from docs/root/source/installation/commands-and-backend/index.rst
rename to docs/root/source/connecting/commands-and-backend/index.rst
index 723ec25..2d43231 100644
--- a/docs/root/source/installation/commands-and-backend/index.rst
+++ b/docs/root/source/connecting/commands-and-backend/index.rst
@@ -5,7 +5,8 @@
Code is Apache-2.0 and docs are CC-BY-4.0
Commands And Backend
-====================
+********************
+
This section contains auto-generated documentation of various functions, classes and methods
in the Planetmint Server code, based on Python docstrings in the code itself.
@@ -16,11 +17,9 @@ in the Planetmint Server code, based on Python docstrings in the code itself.
if you want to know *for sure* what the code does,
then you have to read the code itself.
-.. toctree::
- :maxdepth: 1
+.. include:: ./commands.rst
+.. include:: the-planetmint-class.rst
+.. include:: backend.rst
- commands
- the-planetmint-class
- backend
\ No newline at end of file
diff --git a/docs/root/source/installation/commands-and-backend/the-planetmint-class.rst b/docs/root/source/connecting/commands-and-backend/the-planetmint-class.rst
similarity index 100%
rename from docs/root/source/installation/commands-and-backend/the-planetmint-class.rst
rename to docs/root/source/connecting/commands-and-backend/the-planetmint-class.rst
diff --git a/docs/root/source/drivers/index.rst b/docs/root/source/connecting/drivers.rst
similarity index 92%
rename from docs/root/source/drivers/index.rst
rename to docs/root/source/connecting/drivers.rst
index c2c56c9..eea6486 100644
--- a/docs/root/source/drivers/index.rst
+++ b/docs/root/source/connecting/drivers.rst
@@ -4,13 +4,14 @@
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
-Drivers
-=======
+Drivers & Bindings
+******************
+
Connectors to Planetmint are referred to as drivers within the community. A driver is used to create valid transactions, to generate key pairs, to sign transactions and to post the transaction to the Planetmint API.
-These drivers were originally created by the original BigchainDB team:
-Planetmint
+These drivers or bindings were originally created by the original BigchainDB team:
+
* `Python Driver `_
* `JavaScript / Node.js Driver `_
* `Java Driver `_
diff --git a/docs/root/source/installation/api/http-client-server-api.rst b/docs/root/source/connecting/http-client-server-api.rst
similarity index 97%
rename from docs/root/source/installation/api/http-client-server-api.rst
rename to docs/root/source/connecting/http-client-server-api.rst
index 528d240..2458910 100644
--- a/docs/root/source/installation/api/http-client-server-api.rst
+++ b/docs/root/source/connecting/http-client-server-api.rst
@@ -4,10 +4,9 @@
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
-.. _the-http-client-server-api:
+HTTP Client-Server API
+**************************
-The HTTP Client-Server API
-==========================
This page assumes you already know an API Root URL
for a Planetmint node or reverse proxy.
@@ -18,10 +17,10 @@ If you set up a Planetmint node or reverse proxy yourself,
and you're not sure what the API Root URL is,
then see the last section of this page for help.
-.. _planetmint-root-url:
Planetmint Root URL
--------------------
+===================
+
If you send an HTTP GET request to the Planetmint Root URL
e.g. ``http://localhost:9984``
@@ -34,10 +33,10 @@ with something like the following in the body:
:language: http
-.. _api-root-endpoint:
-
+.. _Api root endpoint:
API Root Endpoint
------------------
+=================
+
If you send an HTTP GET request to the API Root Endpoint
e.g. ``http://localhost:9984/api/v1/``
@@ -50,7 +49,8 @@ that allows you to discover the Planetmint API endpoints:
Transactions Endpoint
----------------------
+=====================
+
.. note::
@@ -147,11 +147,11 @@ Transactions Endpoint
If it's invalid, the node will return an HTTP 400 (error).
Otherwise, the node will send the transaction to Tendermint (in the same node) using the
`Tendermint broadcast API
- `_.
+ `_.
The meaning of the ``mode`` query parameter is inherited from the mode parameter in
`Tendermint's broadcast API
- `_.
+ `_.
``mode=async`` means the HTTP response will come back immediately,
before Tendermint asks Planetmint Server to check the validity of the transaction (a second time).
``mode=sync`` means the HTTP response will come back
@@ -210,7 +210,8 @@ Transactions Endpoint
Transaction Outputs
--------------------
+===================
+
The ``/api/v1/outputs`` endpoint returns transactions outputs filtered by a
given public key, and optionally filtered to only include either spent or
@@ -332,7 +333,8 @@ unspent outputs.
Assets
-------
+======
+
.. note::
@@ -456,7 +458,8 @@ Assets
Transaction Metadata
---------------------
+====================
+
.. note::
@@ -580,7 +583,8 @@ Transaction Metadata
Validators
---------------------
+==========
+
.. http:get:: /api/v1/validators
@@ -624,7 +628,8 @@ Validators
Blocks
-------
+======
+
.. http:get:: /api/v1/blocks/{block_height}
@@ -701,7 +706,8 @@ Blocks
.. _determining-the-api-root-url:
Determining the API Root URL
-----------------------------
+============================
+
When you start Planetmint Server using ``planetmint start``,
an HTTP API is exposed at some address. The default is:
@@ -713,7 +719,7 @@ so you can access it from the same machine,
but it won't be directly accessible from the outside world.
(The outside world could connect via a SOCKS proxy or whatnot.)
-The documentation about Planetmint Server :doc:`Configuration Settings <../../installation/node-setup/configuration>`
+The documentation about Planetmint Server :doc:`Configuration Settings <../../node-setup/configuration>`
has a section about how to set ``server.bind`` so as to make
the HTTP API publicly accessible.
diff --git a/docs/root/source/installation/api/http-samples/api-index-response.http b/docs/root/source/connecting/http-samples/api-index-response.http
similarity index 63%
rename from docs/root/source/installation/api/http-samples/api-index-response.http
rename to docs/root/source/connecting/http-samples/api-index-response.http
index fe767cd..0b0deb8 100644
--- a/docs/root/source/installation/api/http-samples/api-index-response.http
+++ b/docs/root/source/connecting/http-samples/api-index-response.http
@@ -4,9 +4,10 @@ Content-Type: application/json
{
"assets": "/assets/",
"blocks": "/blocks/",
- "docs": "https://docs.planetmint.com/projects/server/en/v0.9.2/http-client-server-api.html",
+ "docs": "https://docs.planetmint.io/projects/server/en/v1.0.1/http-client-server-api.html",
"metadata": "/metadata/",
"outputs": "/outputs/",
+ "streamedblocks": "ws://localhost:9985/api/v1/streams/valid_blocks",
"streams": "ws://localhost:9985/api/v1/streams/valid_transactions",
"transactions": "/transactions/",
"validators": "/validators"
diff --git a/docs/root/source/installation/api/http-samples/get-block-request.http b/docs/root/source/connecting/http-samples/get-block-request.http
similarity index 100%
rename from docs/root/source/installation/api/http-samples/get-block-request.http
rename to docs/root/source/connecting/http-samples/get-block-request.http
diff --git a/docs/root/source/installation/api/http-samples/get-block-response.http b/docs/root/source/connecting/http-samples/get-block-response.http
similarity index 100%
rename from docs/root/source/installation/api/http-samples/get-block-response.http
rename to docs/root/source/connecting/http-samples/get-block-response.http
diff --git a/docs/root/source/installation/api/http-samples/get-block-txid-request.http b/docs/root/source/connecting/http-samples/get-block-txid-request.http
similarity index 100%
rename from docs/root/source/installation/api/http-samples/get-block-txid-request.http
rename to docs/root/source/connecting/http-samples/get-block-txid-request.http
diff --git a/docs/root/source/installation/api/http-samples/get-block-txid-response.http b/docs/root/source/connecting/http-samples/get-block-txid-response.http
similarity index 100%
rename from docs/root/source/installation/api/http-samples/get-block-txid-response.http
rename to docs/root/source/connecting/http-samples/get-block-txid-response.http
diff --git a/docs/root/source/installation/api/http-samples/get-tx-by-asset-request.http b/docs/root/source/connecting/http-samples/get-tx-by-asset-request.http
similarity index 100%
rename from docs/root/source/installation/api/http-samples/get-tx-by-asset-request.http
rename to docs/root/source/connecting/http-samples/get-tx-by-asset-request.http
diff --git a/docs/root/source/installation/api/http-samples/get-tx-by-asset-response.http b/docs/root/source/connecting/http-samples/get-tx-by-asset-response.http
similarity index 100%
rename from docs/root/source/installation/api/http-samples/get-tx-by-asset-response.http
rename to docs/root/source/connecting/http-samples/get-tx-by-asset-response.http
diff --git a/docs/root/source/installation/api/http-samples/get-tx-id-request.http b/docs/root/source/connecting/http-samples/get-tx-id-request.http
similarity index 100%
rename from docs/root/source/installation/api/http-samples/get-tx-id-request.http
rename to docs/root/source/connecting/http-samples/get-tx-id-request.http
diff --git a/docs/root/source/installation/api/http-samples/get-tx-id-response.http b/docs/root/source/connecting/http-samples/get-tx-id-response.http
similarity index 100%
rename from docs/root/source/installation/api/http-samples/get-tx-id-response.http
rename to docs/root/source/connecting/http-samples/get-tx-id-response.http
diff --git a/docs/root/source/installation/api/http-samples/index-response.http b/docs/root/source/connecting/http-samples/index-response.http
similarity index 61%
rename from docs/root/source/installation/api/http-samples/index-response.http
rename to docs/root/source/connecting/http-samples/index-response.http
index 789da5e..e1cca43 100644
--- a/docs/root/source/installation/api/http-samples/index-response.http
+++ b/docs/root/source/connecting/http-samples/index-response.http
@@ -6,15 +6,16 @@ Content-Type: application/json
"v1": {
"assets": "/api/v1/assets/",
"blocks": "/api/v1/blocks/",
- "docs": "https://docs.planetmint.com/projects/server/en/v0.9.2/http-client-server-api.html",
+ "docs": "https://docs.planetmint.io/projects/server/en/v1.0.1/http-client-server-api.html",
"metadata": "/api/v1/metadata/",
"outputs": "/api/v1/outputs/",
+ "streamedblocks": "ws://localhost:9985/api/v1/streams/valid_blocks",
"streams": "ws://localhost:9985/api/v1/streams/valid_transactions",
"transactions": "/api/v1/transactions/",
"validators": "/api/v1/validators"
}
},
- "docs": "https://docs.planetmint.com/projects/server/en/v0.9.2/",
+ "docs": "https://docs.planetmint.io/projects/server/en/v1.0.1/",
"software": "Planetmint",
- "version": "0.9.2"
+ "version": "1.0.1"
}
diff --git a/docs/root/source/installation/api/http-samples/post-tx-request.http b/docs/root/source/connecting/http-samples/post-tx-request.http
similarity index 100%
rename from docs/root/source/installation/api/http-samples/post-tx-request.http
rename to docs/root/source/connecting/http-samples/post-tx-request.http
diff --git a/docs/root/source/installation/api/http-samples/post-tx-response.http b/docs/root/source/connecting/http-samples/post-tx-response.http
similarity index 100%
rename from docs/root/source/installation/api/http-samples/post-tx-response.http
rename to docs/root/source/connecting/http-samples/post-tx-response.http
diff --git a/docs/root/source/connecting/index.rst b/docs/root/source/connecting/index.rst
new file mode 100644
index 0000000..cfc338e
--- /dev/null
+++ b/docs/root/source/connecting/index.rst
@@ -0,0 +1,23 @@
+
+.. Copyright © 2020 Interplanetary Database Association e.V.,
+ Planetmint and IPDB software contributors.
+ SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+ Code is Apache-2.0 and docs are CC-BY-4.0
+
+Connecting to Planetmint
+########################
+
+Planetmint enables you to connect to it in various ways:
+
+* Bindings or drivers for various languages exist
+* RESTful APIs and direct database queries
+
+Details are listed below.
+
+.. include:: drivers.rst
+.. include:: http-client-server-api.rst
+.. include:: websocket-event-stream-api.rst
+.. include:: query.rst
+.. .. include:: api/index.rst
+.. .. include:: commands-and-backend/index.rst
+
diff --git a/docs/root/source/query.rst b/docs/root/source/connecting/query.rst
similarity index 98%
rename from docs/root/source/query.rst
rename to docs/root/source/connecting/query.rst
index 821eeae..513bc18 100644
--- a/docs/root/source/query.rst
+++ b/docs/root/source/connecting/query.rst
@@ -4,14 +4,15 @@
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
-Queries in Planetmint
-=====================
+Database Queries
+******************
A node operator can use the full power of MongoDB's query engine to search and query all stored data, including all transactions, assets and metadata.
The node operator can decide for themselves how much of that query power they expose to external users.
-Blog Post with Example Queries
-------------------------------
+Querying MongoDB
+==============================
+
We wrote a blog post in The Planetmint Blog to show
how to use some MongoDB tools to query a Planetmint node's MongoDB database.
@@ -22,6 +23,7 @@ about custom cars and their ownership histories.
How to Connect to MongoDB
-------------------------
+
Before you can query a MongoDB database, you must connect to it, and to do that, you need to know its hostname and port.
If you're running a Planetmint node on your local machine (e.g. for dev and test), then the hostname should be ``localhost`` and the port should be ``27017``, unless you did something to change those values. If you're running a Planetmint node on a remote machine and you can SSH to that machine, then the same is true.
@@ -31,6 +33,7 @@ If you're running a Planetmint node on a remote machine and you configured its M
How to Query
------------
+
A Planetmint node operator has full access to their local MongoDB instance, so they can use any of MongoDB's APIs for running queries, including:
- `the Mongo Shell `_,
diff --git a/docs/root/source/installation/api/websocket-event-stream-api.rst b/docs/root/source/connecting/websocket-event-stream-api.rst
similarity index 93%
rename from docs/root/source/installation/api/websocket-event-stream-api.rst
rename to docs/root/source/connecting/websocket-event-stream-api.rst
index 96eab15..c754b0c 100644
--- a/docs/root/source/installation/api/websocket-event-stream-api.rst
+++ b/docs/root/source/connecting/websocket-event-stream-api.rst
@@ -6,8 +6,9 @@
.. _the-websocket-event-stream-api:
-The WebSocket Event Stream API
-==============================
+WebSocket Event Stream API
+******************************
+
.. important::
The WebSocket Event Stream runs on a different port than the Web API. The
@@ -21,18 +22,20 @@ to notify you as events occur, such as new `valid transactions <#valid-transacti
Demoing the API
----------------
+===============
+
You may be interested in demoing the Event Stream API with the `WebSocket echo test `_
to familiarize yourself before attempting an integration.
Determining Support for the Event Stream API
---------------------------------------------
+============================================
+
It's a good idea to make sure that the node you're connecting with
has advertised support for the Event Stream API. To do so, send a HTTP GET
-request to the node's :ref:`api-root-endpoint`
+request to the node's `API root endpoint`_
(e.g. ``http://localhost:9984/api/v1/``) and check that the
response contains a ``streams`` property:
@@ -46,7 +49,8 @@ response contains a ``streams`` property:
Connection Keep-Alive
----------------------
+=====================
+
The Event Stream API supports Ping/Pong frames as descibed in
`RFC 6455 `_.
@@ -58,7 +62,8 @@ The Event Stream API supports Ping/Pong frames as descibed in
same.
Streams
--------
+=======
+
Each stream is meant as a unidirectional communication channel, where the
Planetmint node is the only party sending messages. Any messages sent to the
@@ -85,7 +90,8 @@ All messages sent in a stream are in the JSON format.
API, consider creating a new `BEP `_.
Valid Transactions
-~~~~~~~~~~~~~~~~~~
+==================
+
``/valid_transactions``
diff --git a/docs/root/source/contributing/cross-project-policies/code-of-conduct.md b/docs/root/source/contributing/cross-project-policies/code-of-conduct.md
index 5667f9d..da6e9bf 100644
--- a/docs/root/source/contributing/cross-project-policies/code-of-conduct.md
+++ b/docs/root/source/contributing/cross-project-policies/code-of-conduct.md
@@ -42,7 +42,7 @@ This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior directed at yourself or another community member may be
-reported by contacting a project maintainer at [contact@planetmint.com](mailto:contact@planetmint.com). All
+reported by contacting a project maintainer at [mail@planetmint.io](mailto:mail@planetmint.io). All
complaints will be reviewed and investigated and will result in a response that
is appropriate to the circumstances. Maintainers are
obligated to maintain confidentiality with regard to the reporter of an
diff --git a/docs/root/source/contributing/cross-project-policies/python-style-guide.md b/docs/root/source/contributing/cross-project-policies/python-style-guide.md
index 65ffaf3..dff51f6 100644
--- a/docs/root/source/contributing/cross-project-policies/python-style-guide.md
+++ b/docs/root/source/contributing/cross-project-policies/python-style-guide.md
@@ -82,11 +82,11 @@ x = 'name: {}; score: {}'.format(name, n)
we use the `format()` version. The [official Python documentation says](https://docs.python.org/2/library/stdtypes.html#str.format), "This method of string formatting is the new standard in Python 3, and should be preferred to the % formatting described in String Formatting Operations in new code."
-## Running the Flake8 Style Checker
+## Running the Black Style Checker
-We use [Flake8](http://flake8.pycqa.org/en/latest/index.html) to check our Python code style. Once you have it installed, you can run it using:
+We use [Black](https://black.readthedocs.io/en/stable/) to check our Python code style. Once you have it installed, you can run it using:
```text
-flake8 --max-line-length 119 planetmint/
+black --check -l 119 .
```
diff --git a/docs/root/source/contributing/dev-setup-coding-and-contribution-process/run-dev-network-stack.md b/docs/root/source/contributing/dev-setup-coding-and-contribution-process/run-dev-network-stack.md
index e5e9c37..d059560 100644
--- a/docs/root/source/contributing/dev-setup-coding-and-contribution-process/run-dev-network-stack.md
+++ b/docs/root/source/contributing/dev-setup-coding-and-contribution-process/run-dev-network-stack.md
@@ -99,8 +99,7 @@ $ bash stack.sh -h
ENV[TM_VERSION]
(Optional) Tendermint version to use for the setup. (default: 0.22.8)
- ENV[MONGO_VERSION]
- (Optional) MongoDB version to use with the setup. (default: 3.6)
+
ENV[AZURE_CLIENT_ID]
Only required when STACK_TYPE="cloud" and STACK_TYPE_PROVIDER="azure". Steps to generate:
@@ -181,8 +180,6 @@ $ export STACK_BRANCH=master
#Optional, since 0.22.8 is the default tendermint version.
$ export TM_VERSION=0.22.8
-#Optional, since 3.6 is the default MongoDB version.
-$ export MONGO_VERSION=3.6
$ bash stack.sh
```
@@ -232,8 +229,7 @@ $ export STACK_BRANCH=master
#Optional, since 0.22.8 is the default tendermint version
$ export TM_VERSION=0.22.8
-#Optional, since 3.6 is the default MongoDB version.
-$ export MONGO_VERSION=3.6
+
$ bash stack.sh
```
diff --git a/docs/root/source/contributing/dev-setup-coding-and-contribution-process/run-node-as-processes.md b/docs/root/source/contributing/dev-setup-coding-and-contribution-process/run-node-as-processes.md
index 58074ac..ba60915 100644
--- a/docs/root/source/contributing/dev-setup-coding-and-contribution-process/run-node-as-processes.md
+++ b/docs/root/source/contributing/dev-setup-coding-and-contribution-process/run-node-as-processes.md
@@ -11,16 +11,16 @@ The following doc describes how to run a local node for developing Planetmint Te
There are two crucial dependencies required to start a local node:
-- MongoDB
+- Tarantool
- Tendermint
and of course you also need to install Planetmint Sever from the local code you just developed.
-## Install and Run MongoDB
+## Install and Run Tarantool
-MongoDB can be easily installed, just refer to their [installation documentation](https://docs.mongodb.com/manual/installation/) for your distro.
-We know MongoDB 3.4 and 3.6 work with Planetmint.
-After the installation of MongoDB is complete, run MongoDB using `sudo mongod`
+Tarantool can be easily installed, just refer to their [installation documentation](https://www.tarantool.io/en/download/os-installation/ubuntu/) for your distro.
+We know Tarantool 2.8 works with Planetmint.
+After the installation of Tarantool is complete, run Tarantool using `tarantool` and create a listener with `box.cfg{listen=3301}` in the Tarantool CLI.
## Install and Run Tendermint
@@ -125,7 +125,7 @@ To execute tests when developing a feature or fixing a bug one could use the fol
$ pytest -v
```
-NOTE: MongoDB and Tendermint should be running as discussed above.
+NOTE: Tarantool and Tendermint should be running as discussed above.
One could mark a specific test and execute the same by appending `-m my_mark` to the above command.
diff --git a/docs/root/source/contributing/dev-setup-coding-and-contribution-process/run-node-with-docker-compose.md b/docs/root/source/contributing/dev-setup-coding-and-contribution-process/run-node-with-docker-compose.md
index 5ee7643..de733bb 100644
--- a/docs/root/source/contributing/dev-setup-coding-and-contribution-process/run-node-with-docker-compose.md
+++ b/docs/root/source/contributing/dev-setup-coding-and-contribution-process/run-node-with-docker-compose.md
@@ -39,7 +39,7 @@ $ docker-compose up -d bdb
The above command will launch all 3 main required services/processes:
-* ``mongodb``
+* ``tarantool``
* ``tendermint``
* ``planetmint``
@@ -55,7 +55,7 @@ To follow the logs of the ``planetmint`` service:
$ docker-compose logs -f planetmint
```
-To follow the logs of the ``mongodb`` service:
+
```bash
$ docker-compose logs -f mdb
diff --git a/docs/root/source/contributing/dev-setup-coding-and-contribution-process/write-code.rst b/docs/root/source/contributing/dev-setup-coding-and-contribution-process/write-code.rst
index 4425e37..4c4df18 100644
--- a/docs/root/source/contributing/dev-setup-coding-and-contribution-process/write-code.rst
+++ b/docs/root/source/contributing/dev-setup-coding-and-contribution-process/write-code.rst
@@ -12,7 +12,7 @@ Know What You Want to Write Code to Do
Do you want to write code to resolve an open issue (bug)? Which one?
-Do you want to implement a Planetmint Enhancement Proposal (PEP)? Which one?
+Do you want to implement a Planetmint Refinement Proposal (PRP)? Which one?
You should know why you want to write code before you go any farther.
@@ -21,7 +21,7 @@ Refresh Yourself about the C4 Process
-------------------------------------
C4 is the Collective Code Construction Contract. It's quite short:
-`re-reading it will only take a few minutes `_.
+`re-reading it will only take a few minutes `_.
Set Up Your Local Machine. Here's How.
@@ -69,7 +69,7 @@ Set Up Your Local Machine. Here's How.
Before You Start Writing Code
-----------------------------
-Read `BEP-24 `_
+Read `BEP-24 `_
so you know what to do to ensure that your changes (i.e. your future pull request) can be merged.
It's easy and will save you some hassle later on.
diff --git a/docs/root/source/contributing/index.rst b/docs/root/source/contributing/index.rst
index a109838..36820f7 100644
--- a/docs/root/source/contributing/index.rst
+++ b/docs/root/source/contributing/index.rst
@@ -16,7 +16,7 @@ It includes several sub-projects.
- `Planetmint Java Driver `_
- `cryptoconditions `_ (a Python package by us)
- `py-abci `_ (a Python package we use)
-- `Planetmint Enhancement Proposals (PEPs) `_
+- `Planetmint Refinement Proposals (PRPs) `_
Contents
--------
diff --git a/docs/root/source/contributing/ways-to-contribute/report-a-bug.md b/docs/root/source/contributing/ways-to-contribute/report-a-bug.md
index fadc345..a7f94df 100644
--- a/docs/root/source/contributing/ways-to-contribute/report-a-bug.md
+++ b/docs/root/source/contributing/ways-to-contribute/report-a-bug.md
@@ -27,17 +27,17 @@ People ask questions about Planetmint in the following places:
Feel free to hang out and answer some questions. People will be thankful.
-# Write a Planetmint Enhancement Proposal (PEP)
+# Write a Planetmint Refinement Proposal (PRP)
-If you have an idea for a new feature or enhancement, and you want some feedback before you write a full Planetmint Enhancement Proposal (PEP), then feel free to:
+If you have an idea for a new feature or enhancement, and you want some feedback before you write a full Planetmint Refinement Proposal (PRP), then feel free to:
- ask in the [planetmint/community Gitter chat room](https://gitter.im/planetmint/planetmint) or
- - [open a new issue in the planetmint/PEPs repo](https://github.com/planetmint/PEPs/issues/new) and give it the label **PEP idea**.
+ - [open a new issue in the planetmint/PRPs repo](https://github.com/planetmint/PRPs/issues/new) and give it the label **PRP idea**.
-If you want to discuss an existing PEP, then [open a new issue in the planetmint/PEPs repo](https://github.com/planetmint/BEPs/issues/new) and give it the label **discuss existing PEP**.
+If you want to discuss an existing PRP, then [open a new issue in the planetmint/PRPs repo](https://github.com/planetmint/PRPs/issues/new) and give it the label **discuss existing PRP**.
-## Steps to Write a New PEP
+## Steps to Write a New PRP
-1. Look at the structure of existing PEPs in the [planetmint/PEPs repo](https://github.com/planetmint/PEPs). Note the section headings. [PEP-2](https://github.com/planetmint/PEPs/tree/master/2) (our variant of the consensus-oriented specification system [COSS]) says more about the expected structure and process.
-1. Write a first draft of your PEP. It doesn't have to be long or perfect.
-1. Push your BEP draft to the [planetmint/PEPs repo](https://github.com/planetmint/PEPs) and make a pull request. [PEP-1](https://github.com/planetmint/PEPs/tree/master/1) (our variant of C4) outlines the process we use to handle all pull requests. In particular, we try to merge all pull requests quickly.
-1. Your PEP can be revised by pushing more pull requests.
\ No newline at end of file
+1. Look at the structure of existing PRPs in the [planetmint/PRPs repo](https://github.com/planetmint/PRPs). Note the section headings. [PRP-2](https://github.com/planetmint/PRPs/tree/master/2) (our variant of the consensus-oriented specification system [COSS]) says more about the expected structure and process.
+1. Write a first draft of your PRP. It doesn't have to be long or perfect.
+1. Push your PRP draft to the [planetmint/PRPs repo](https://github.com/planetmint/PRPs) and make a pull request. [PRP-1](https://github.com/planetmint/PRPs/tree/master/1) (our variant of C4) outlines the process we use to handle all pull requests. In particular, we try to merge all pull requests quickly.
+1. Your PRP can be revised by pushing more pull requests.
\ No newline at end of file
diff --git a/planetmint/migrations/__init__.py b/docs/root/source/cryptoconditions.md
similarity index 100%
rename from planetmint/migrations/__init__.py
rename to docs/root/source/cryptoconditions.md
diff --git a/docs/root/source/index.rst b/docs/root/source/index.rst
index 5a013c8..218a43a 100644
--- a/docs/root/source/index.rst
+++ b/docs/root/source/index.rst
@@ -4,30 +4,33 @@
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
-Planetmint Documentation
-========================
+Planetmint
+==========
-Meet Planetmint. The blockchain database.
+Meet Planetmint. The metadata blockchain.
It has some database characteristics and some blockchain `properties `_,
including decentralization, immutability and native support for assets.
At a high level, one can communicate with a Planetmint network (set of nodes) using the Planetmint HTTP API, or a wrapper for that API, such as the Planetmint Python Driver. Each Planetmint node runs Planetmint Server and various other software. The `terminology page `_ explains some of those terms in more detail.
-More About Planetmint
----------------------
+.. toctree::
+ :maxdepth: 3
+
+ Introduction
+ Using Planetmint
+ Node Setup
+ Networks & Federations
+ Connecting to Planetmint
+ tools/index
+ contributing/index
+ terminology
+ troubleshooting
+
.. toctree::
:maxdepth: 1
+ :caption: Cryptoconditions & Smart Contracts
- Planetmint Docs Home
- about-planetmint
- terminology
- properties
- basic-usage
- installation/index
- drivers/index
- query
- contributing/index
- korean/index
-
+ Crypto Conditions & Smart Contracts
+ cryptoconditions
diff --git a/docs/root/source/installation/api/index.rst b/docs/root/source/installation/api/index.rst
deleted file mode 100644
index 7693fab..0000000
--- a/docs/root/source/installation/api/index.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-
-.. Copyright © 2020 Interplanetary Database Association e.V.,
- Planetmint and IPDB software contributors.
- SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
- Code is Apache-2.0 and docs are CC-BY-4.0
-
-
-
-API
-===
-
-.. toctree::
- :maxdepth: 1
-
- http-client-server-api
- websocket-event-stream-api
diff --git a/docs/root/source/installation/index.rst b/docs/root/source/installation/index.rst
deleted file mode 100644
index 2efc18a..0000000
--- a/docs/root/source/installation/index.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-
-.. Copyright © 2020 Interplanetary Database Association e.V.,
- Planetmint and IPDB software contributors.
- SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
- Code is Apache-2.0 and docs are CC-BY-4.0
-
-Installation
-============
-
-You can install a single node to test out Planetmint, connect it to a network or setup a network of nodes.
-
-.. toctree::
- :maxdepth: 1
-
- quickstart
- node-setup/index
- network-setup/index
- api/index
- commands-and-backend/index
- appendices/index
diff --git a/docs/root/source/installation/network-setup/planetmint-node-ansible.md b/docs/root/source/installation/network-setup/planetmint-node-ansible.md
index f9fc9a5..ce7ab6c 100644
--- a/docs/root/source/installation/network-setup/planetmint-node-ansible.md
+++ b/docs/root/source/installation/network-setup/planetmint-node-ansible.md
@@ -4,4 +4,4 @@ You can find one of the installation methods with Ansible on GitHub at:
[Ansible script](https://github.com/planetmint/planetmint-node-ansible)
-It allows to install Planetmint, MongoDB, Tendermint, and python, and then connect nodes into a network. Current tested machine is Ubuntu 18.04.
\ No newline at end of file
+It allows to install Planetmint, Tarantool, Tendermint, and python, and then connect nodes into a network. Current tested machine is Ubuntu 18.04.
\ No newline at end of file
diff --git a/docs/root/source/installation/node-setup/index.rst b/docs/root/source/installation/node-setup/index.rst
deleted file mode 100644
index e7efc00..0000000
--- a/docs/root/source/installation/node-setup/index.rst
+++ /dev/null
@@ -1,25 +0,0 @@
-
-.. Copyright © 2020 Interplanetary Database Association e.V.,
- Planetmint and IPDB software contributors.
- SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
- Code is Apache-2.0 and docs are CC-BY-4.0
-
-Node setup
-==========
-
-You can use the all-in-one docker solution, or install Tendermint, MongoDB, and Planetmint step by step. For more advanced users and for development, the second option is recommended.
-
-.. toctree::
- :maxdepth: 1
-
- deploy-a-machine
- aws-setup
- all-in-one-planetmint
- planetmint-node-ansible
- set-up-node-software
- set-up-nginx
- configuration
- planetmint-cli
- troubleshooting
- production-node/index
- release-notes
diff --git a/docs/root/source/installation/node-setup/production-node/index.rst b/docs/root/source/installation/node-setup/production-node/index.rst
deleted file mode 100644
index 2b1300e..0000000
--- a/docs/root/source/installation/node-setup/production-node/index.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-
-.. Copyright © 2020 Interplanetary Database Association e.V.,
- Planetmint and IPDB software contributors.
- SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
- Code is Apache-2.0 and docs are CC-BY-4.0
-
-Production Nodes
-================
-
-.. toctree::
- :maxdepth: 1
-
- node-requirements
- node-assumptions
- node-components
- node-security-and-privacy
- reverse-proxy-notes
diff --git a/docs/root/source/installation/node-setup/release-notes.md b/docs/root/source/installation/node-setup/release-notes.md
deleted file mode 100644
index 4dfbe1c..0000000
--- a/docs/root/source/installation/node-setup/release-notes.md
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-# Release Notes
-
-You can find a list of all Planetmint Server releases and release notes on GitHub at:
-
-[https://github.com/planetmint/planetmint/releases](https://github.com/planetmint/planetmint/releases)
-
-The [CHANGELOG.md file](https://github.com/planetmint/planetmint/blob/master/CHANGELOG.md) contains much the same information, but it also has notes about what to expect in the _next_ release.
-
-We also have [a roadmap document in ROADMAP.md](https://github.com/planetmint/org/blob/master/ROADMAP.md).
diff --git a/docs/root/source/installation/quickstart.md b/docs/root/source/installation/quickstart.md
deleted file mode 100644
index fa58301..0000000
--- a/docs/root/source/installation/quickstart.md
+++ /dev/null
@@ -1,91 +0,0 @@
-
-
-
-# Introduction
-
-This is the documentation for Planetmint Server, or in other words, node -
-the Planetmint software that is on servers (but not on clients).
-
-## Setup Instructions for Various Cases
-
-- Quickstart link below
-- [Set up a local Planetmint node for development, experimenting and testing](node-setup/index)
-- [Set up and run a Planetmint network](network-setup/index)
-
-## Develop an App Test
-
-To develop an app that talks to a Planetmint network, you'll want a test network to test it against. You have a few options:
-
-1. The IPDB Test Network (or "Testnet") is a free-to-use, publicly-available test network that you can test against. It is available at [IPDB testnet](https://test.ipdb.io/).
-1. You could also run a Planetmint node on you local machine. One way is to use this node setup guide with a one-node "network" by using the all-in-one docker solution, or manual installation and configuration of the components. Another way is to use one of the deployment methods listed in the [network setup guide](network-setup/index) or in the [the docs about contributing to Planetmint](../contributing/index).
-
-
-## (WIP) Quickstart
-
-
-
-## Try Planetmint
-
-Create a transaction and post it to the test network:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/root/source/about-planetmint.rst b/docs/root/source/introduction/about-planetmint.rst
similarity index 91%
rename from docs/root/source/about-planetmint.rst
rename to docs/root/source/introduction/about-planetmint.rst
index d693992..6cebc74 100644
--- a/docs/root/source/about-planetmint.rst
+++ b/docs/root/source/introduction/about-planetmint.rst
@@ -4,24 +4,24 @@
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
-About Planetmint
-----------------
+What is Planetmint
+==================
Basic Facts
-===========
+-----------
-#. One can store arbitrary data (including encrypted data) in a Planetmint network, within limits: there’s a maximum transaction size. Every transaction has a ``metadata`` section which can store almost any Unicode string (up to some maximum length). Similarly, every CREATE transaction has an ``asset.data`` section which can store almost any Unicode string.
-#. The data stored in certain Planetmint transaction fields must not be encrypted, e.g. public keys and amounts. Planetmint doesn’t offer private transactions akin to Zcoin.
-#. Once data has been stored in a Planetmint network, it’s best to assume it can’t be change or deleted.
-#. Every node in a Planetmint network has a full copy of all the stored data.
-#. Every node in a Planetmint network can read all the stored data.
-#. Everyone with full access to a Planetmint node (e.g. the sysadmin of a node) can read all the data stored on that node.
-#. Everyone given access to a node via the Planetmint HTTP API can find and read all the data stored by Planetmint. The list of people with access might be quite short.
-#. If the connection between an external user and a Planetmint node isn’t encrypted (using HTTPS, for example), then a wiretapper can read all HTTP requests and responses in transit.
-#. If someone gets access to plaintext (regardless of where they got it), then they can (in principle) share it with the whole world. One can make it difficult for them to do that, e.g. if it is a lot of data and they only get access inside a secure room where they are searched as they leave the room.
+1. One can store arbitrary data (including encrypted data) in a Planetmint network, within limits: there’s a maximum transaction size. Every transaction has a ``metadata`` section which can store almost any Unicode string (up to some maximum length). Similarly, every CREATE transaction has an ``asset.data`` section which can store almost any Unicode string.
+2. The data stored in certain Planetmint transaction fields must not be encrypted, e.g. public keys and amounts. Planetmint doesn’t offer private transactions akin to Zcoin.
+3. Once data has been stored in a Planetmint network, it’s best to assume it can’t be changed or deleted.
+4. Every node in a Planetmint network has a full copy of all the stored data.
+5. Every node in a Planetmint network can read all the stored data.
+6. Everyone with full access to a Planetmint node (e.g. the sysadmin of a node) can read all the data stored on that node.
+7. Everyone given access to a node via the Planetmint HTTP API can find and read all the data stored by Planetmint. The list of people with access might be quite short.
+8. If the connection between an external user and a Planetmint node isn’t encrypted (using HTTPS, for example), then a wiretapper can read all HTTP requests and responses in transit.
+9. If someone gets access to plaintext (regardless of where they got it), then they can (in principle) share it with the whole world. One can make it difficult for them to do that, e.g. if it is a lot of data and they only get access inside a secure room where they are searched as they leave the room.
Planetmint for Asset Registrations & Transfers
-==============================================
+----------------------------------------------
Planetmint can store data of any kind, but it's designed to be particularly good for storing asset registrations and transfers:
@@ -37,7 +37,8 @@ Planetmint can store data of any kind, but it's designed to be particularly good
We used the word "owners" somewhat loosely above. A more accurate word might be fulfillers, signers, controllers, or transfer-enablers. See the section titled **A Note about Owners** in the relevant `Planetmint Transactions Spec `_.
-# Production-Ready?
+Production-Ready?
+-----------------
Depending on your use case, Planetmint may or may not be production-ready. You should ask your service provider.
If you want to go live (into production) with Planetmint, please consult with your service provider.
@@ -45,7 +46,7 @@ If you want to go live (into production) with Planetmint, please consult with yo
Note: Planetmint has an open source license with a "no warranty" section that is typical of open source licenses. This is standard in the software industry. For example, the Linux kernel is used in production by billions of machines even though its license includes a "no warranty" section. Warranties are usually provided above the level of the software license, by service providers.
Storing Private Data Off-Chain
-==============================
+------------------------------
A system could store data off-chain, e.g. in a third-party database, document store, or content management system (CMS) and it could use Planetmint to:
@@ -67,7 +68,7 @@ There are other ways to accomplish the same thing. The above is just one example
You might have noticed that the above example didn’t treat the “read permission” as an asset owned (controlled) by a user because if the permission asset is given to (transferred to or created by) the user then it cannot be controlled any further (by DocPile) until the user transfers it back to DocPile. Moreover, the user could transfer the asset to someone else, which might be problematic.
Storing Private Data On-Chain, Encrypted
-========================================
+-----------------------------------------
There are many ways to store private data on-chain, encrypted. Every use case has its own objectives and constraints, and the best solution depends on the use case. `The IPDB consulting team `_ can help you design the best solution for your use case.
diff --git a/docs/root/source/introduction/index.rst b/docs/root/source/introduction/index.rst
new file mode 100644
index 0000000..e3254d9
--- /dev/null
+++ b/docs/root/source/introduction/index.rst
@@ -0,0 +1,10 @@
+Introduction
+############
+
+.. include:: quickstart.md
+ :parser: myst_parser.sphinx_
+.. include:: about-planetmint.rst
+ :parser: myst_parser.sphinx_
+.. include:: properties.md
+ :parser: myst_parser.sphinx_
+
diff --git a/docs/root/source/properties.md b/docs/root/source/introduction/properties.md
similarity index 96%
rename from docs/root/source/properties.md
rename to docs/root/source/introduction/properties.md
index 861fe0a..0735ed0 100644
--- a/docs/root/source/properties.md
+++ b/docs/root/source/introduction/properties.md
@@ -5,7 +5,7 @@ SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
-# Properties of Planetmint
+# Properties of Planetmint
## Decentralization
@@ -19,13 +19,13 @@ A consortium can increase its decentralization (and its resilience) by increasin
There’s no node that has a long-term special position in the Planetmint network. All nodes run the same software and perform the same duties.
-If someone has (or gets) admin access to a node, they can mess with that node (e.g. change or delete data stored on that node), but those changes should remain isolated to that node. The Planetmint network can only be compromised if more than one third of the nodes get compromised. See the [Tendermint documentation](https://tendermint.io/docs/introduction/introduction.html) for more details.
+If someone has (or gets) admin access to a node, they can mess with that node (e.g. change or delete data stored on that node), but those changes should remain isolated to that node. The Planetmint network can only be compromised if more than one third of the nodes get compromised. See the [Tendermint documentation](https://tendermint.com/docs/introduction/introduction.html) for more details.
It’s worth noting that not even the admin or superuser of a node can transfer assets. The only way to create a valid transfer transaction is to fulfill the current crypto-conditions on the asset, and the admin/superuser can’t do that because the admin user doesn’t have the necessary information (e.g. private keys).
## Byzantine Fault Tolerance
-[Tendermint](https://tendermint.io/) is used for consensus and transaction replication,
+[Tendermint](https://www.tendermint.com/) is used for consensus and transaction replication,
and Tendermint is [Byzantine Fault Tolerant (BFT)](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance).
## Node Diversity
diff --git a/docs/root/source/introduction/quickstart.md b/docs/root/source/introduction/quickstart.md
new file mode 100644
index 0000000..5549856
--- /dev/null
+++ b/docs/root/source/introduction/quickstart.md
@@ -0,0 +1,88 @@
+
+
+
+
+# Quickstart
+Planetmint is a metadata blockchain. This introduction gives an overview about how to attest data to Planetmint. First, simple transaction creation and sending is shown. Thereafter, an introduction about how to set up a single node or a cluster is given.
+
+
+
+## The IPDB Testnet - sending transactions
+The IPDB foundation hosts a testnet server that is reset every night at 4am UTC.
+
+The following sequence shows a simple asset notarization / attestation on that testnet:
+Create a file named notarize.py
+
+```
+from planetmint_driver import Planetmint
+from planetmint_driver.crypto import generate_keypair
+
+plntmnt = Planetmint('https://test.ipdb.io')
+alice = generate_keypair()
+tx = plntmnt.transactions.prepare(
+ operation='CREATE',
+ signers=alice.public_key,
+ asset={'data': {'message': 'Blockchain all the things!'}})
+signed_tx = plntmnt.transactions.fulfill(
+ tx,
+ private_keys=alice.private_key)
+plntmnt.transactions.send_commit(signed_tx)
+```
+
+install dependencies and execute it
+
+```
+$ pip install planetmint-driver
+$ python notarize.py
+```
+# Install Planetmint
+## Local Node
+Planetmint is a Tendermint application with an attached database.
+A basic installation installs the database, Tendermint and thereafter Planetmint.
+
+Planetmint currently supports Tarantool and MongoDB database. The installation is as follows:
+```
+# Tarantool
+$ curl -L https://tarantool.io/release/2/installer.sh | bash
+$ sudo apt-get -y install tarantool
+```
+*Caveat:* Tarantool versions before [2.4.2](https://www.tarantool.io/en/doc/latest/release/2.4.2/) automatically enable and start a demonstration instance that listens on port `3301` by default. Refer to the [Tarantool documentation](https://www.tarantool.io/en/doc/latest/getting_started/getting_started_db/#creating-db-locally) for more information.
+
+```
+# MongoDB
+$ sudo apt install mongodb
+```
+Tendermint can be installed and started as follows
+```
+$ wget https://github.com/tendermint/tendermint/releases/download/v0.34.15/tendermint_0.34.15_linux_amd64.tar.gz
+$ tar zxf tendermint_0.34.15_linux_amd64.tar.gz
+$ ./tendermint init
+$ ./tendermint node --proxy_app=tcp://localhost:26658
+```
+Planetmint installs and starts as described below
+```
+$ pip install planetmint
+$ planetmint configure
+$ planetmint start
+```
+
+## Cluster of nodes
+Setting up a cluster of nodes comes down to set up a cluster of tendermint nodes as documented at [Tendermint](https://docs.tendermint.com/v0.35/introduction/quick-start.html#cluster-of-nodes). In addition to that, the database and Planetmint need to be installed on the servers as described above.
+
+## Setup Instructions for Various Cases
+
+- Quickstart link below
+- [Set up a local Planetmint node for development, experimenting and testing](../node-setup/index)
+- [Set up and run a Planetmint network](../network-setup/index)
+
+## Develop an App Test
+
+To develop an app that talks to a Planetmint network, you'll want a test network to test it against. You have a few options:
+
+1. The IPDB Test Network (or "Testnet") is a free-to-use, publicly-available test network that you can test against. It is available at [IPDB testnet](https://test.ipdb.io/).
+1. You could also run a Planetmint node on your local machine. One way is to use this node setup guide with a one-node "network" by using the all-in-one docker solution, or manual installation and configuration of the components. Another way is to use one of the deployment methods listed in the [network setup guide](../network-setup/index) or in the [the docs about contributing to Planetmint](../contributing/index).
diff --git a/docs/root/source/korean/_static/CREATE_and_TRANSFER_example.png b/docs/root/source/korean/_static/CREATE_and_TRANSFER_example.png
deleted file mode 100644
index f9ef1ee..0000000
Binary files a/docs/root/source/korean/_static/CREATE_and_TRANSFER_example.png and /dev/null differ
diff --git a/docs/root/source/korean/_static/CREATE_example.png b/docs/root/source/korean/_static/CREATE_example.png
deleted file mode 100644
index 88a933c..0000000
Binary files a/docs/root/source/korean/_static/CREATE_example.png and /dev/null differ
diff --git a/docs/root/source/korean/_static/schemaDB.png b/docs/root/source/korean/_static/schemaDB.png
deleted file mode 100644
index 4ed7ad7..0000000
Binary files a/docs/root/source/korean/_static/schemaDB.png and /dev/null differ
diff --git a/docs/root/source/korean/assets_ko.md b/docs/root/source/korean/assets_ko.md
deleted file mode 100644
index 7928624..0000000
--- a/docs/root/source/korean/assets_ko.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-
-
-Planetmint가 자산 등록 및 전송에 적합한 방법
-==========================================================
-
-Planetmint는 모든 종류의 데이터를 저장할 수 있지만 자산 등록 및 전송을 저장하는 데 특히 유용합니다.:
-
-* Planetmint 네트워크에 전송되어 체크되고 저장되는 (있는 경우) 트랜잭션은 기본적으로 CREATE 트랜잭션과 TRANSFER 트랜잭션의 두 가지가 있습니다.
-* CREATE 트랜잭션은 임의의 메타 데이터와 함께 모든 종류의 자산 (나눌 수 없거나 분할 할 수없는)을 등록하는 데 사용할 수 있습니다.
-* 저작물에는 0 명, 1 명 또는 여러 명의 소유자가있을 수 있습니다.
-* 자산 소유자는 자산을 신규 소유자에게 양도하려는 사람이 만족해야하는 조건을 지정할 수 있습니다. 예를 들어 5 명의 현재 소유자 중 최소 3 명이 TRANSFER 트랜잭션에 암호를 사용해야합니다.
-<<<<<<< HEAD
-* BigchainDB는 TRANSFER 트랜잭션의 유효성을 검사하는 과정에서 조건이 충족되었는지 확인합니다. (또한 누구나 만족하는지 확인할 수 있습니다.)
-* BigchainDB는 자산의 이중 지출을 방지합니다.
-=======
-* Planetmint는 TRANSFER 트랜잭션의 유효성을 검사하는 과정에서 조건이 충족되었는지 확인합니다. (또한 누구나 만족하는지 확인할 수 있습니다.)
-* Planetmint는 자산의 이중 지출을 방지합니다.
->>>>>>> 3bfc3298f8210b135084e823eedd47f213538088
-* 유효성이 검증 된 트랜잭션은 [변경불가능](https://github.com/planetmint/planetmint/blob/master/docs/root/source/korean/immutable-ko.md) 입니다.
-
- Note
-
- 우리는 "소유자"라는 단어를 다소 느슨하게 사용했습니다. **보다 정확한 단어**는 이행자, 서명자, 조정자 또는 이전 가능 요소 일 수 있습니다. 관련 [Planetmint Transaction Spec](https://github.com/planetmint/BEPs/tree/master/tx-specs/)의 Owners에 대한 참고 사항 절을 참조하십시오.
diff --git a/docs/root/source/korean/bft-ko.md b/docs/root/source/korean/bft-ko.md
deleted file mode 100644
index c065752..0000000
--- a/docs/root/source/korean/bft-ko.md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-# Planetmint와 Byzantine Fault Tolerance
-
-<<<<<<< HEAD
-[Planetmint Server](https://docs.planetmint.com/projects/server/en/latest/index.html)
-는 블록체인 합의와 트랜잭션 복제에 [Tendermint](https://tendermint.com/)를 사용합니다.
-=======
-[Planetmint Server](https://docs.planetmint.io/projects/server/en/latest/index.html)
-는 블록체인 합의와 트랜잭션 복제에 [Tendermint](https://tendermint.io/)를 사용합니다.
->>>>>>> 3bfc3298f8210b135084e823eedd47f213538088
-
-그리고 Tendermint 는 [Byzantine Fault Tolerant (BFT)](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance).
diff --git a/docs/root/source/korean/decentralized_kor.md b/docs/root/source/korean/decentralized_kor.md
deleted file mode 100644
index 173e9c3..0000000
--- a/docs/root/source/korean/decentralized_kor.md
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-# Planetmint 분산 방식
-
-분산이란 모든 것을 소유하거나 통제하는 사람이 없고, 단 하나의 실패 지점이 없다는 것을 의미합니다.
-
-이상적으로, Planetmint 네트워크에서 각각의 노드는 다른 개인 또는 조직에 의해 소유되고 관리됩니다. 네트워크가 한 조직 내에 상주하고 있더라도, 각 노드를 다른 사용자나 부서로 제어하는 것이 좋습니다.
-
-우리는 "Planetmint 컨소시엄" (또는 단지 "컨소시엄")은 Planetmint 네트워크의 노드를 구동하는 사람들 혹은 조직을 의미합니다. 컨소시엄은 회원제나 정책과 같은 결정을 내리기 위한 어떠한 형태의 거버넌스 요구합니다. 거버넌스 프로세스의 정확한 세부사항은 각 컨소시엄에 의해 결정되지만, 상당히 분산될 수 있습니다.
-
-컨소시엄은 관할의 다양성과 지리적 다양성 및 기타 종류의 다양성을 증가시킴으로써 분산화(및 탄력성)를 증가시킬 수 있습니다. 이 아이디어는 [노드 다양성 부문](diversity-ko)에서 확장됩니다.
-
-Planetmint 네트워크에는 오래된 특정한 위치를 가지는 노드가 없습니다. 모든 노드들은 동일한 소프트웨어를 실행하고 동일한 작업을 수행합니다.
-
-만약 노드에 대한 관리자 접근 권한이 있는 경우, 해당 노드를 제거할 수 있지만(예: 해당 노드에 저장된 데이터 변경 또는 삭제), 이러한 변경 사항은 해당 노드에 분리된 상태로 유지되어야 합니다. Planetmint 네트워크는 노드의 3분의 1 이상이 손상된 경우에만 손상될 수 있습니다. 자세한 내용은 [Tendermint 문서](https://tendermint.io/docs/introduction/introduction.html)을 참조하십시오.
-
-노드의 관리자나 슈퍼 유저도 자산을 전송할 수 없다는 점에 유의하십시오. 유효한 전송 트랜잭션을 생성하는 유일한 방법은 자산에 대한 현재 암호화 조건을 충족하는 것이며 관리자/슈퍼사용자는 필요한 정보를 가지고 있지 않기 때문에 이 작업을 수행할 수 없습니다(예: 개인 키).
-
-노드의 관리자나 슈퍼 사용자도 자산을 전송할 수는 없다는 점을 유의하십시오. 타당한 전송 트랜잭션을 만드는 유일한 방법은 자산에 대한 현재 암호화 조건을 충족시키는 것이며, 관리자 또는 슈퍼 사용자는 필요한 정보를 가지고 있지 않기 때문에 이 작업을 수행할 수 없습니다. (예: 개인 키)
\ No newline at end of file
diff --git a/docs/root/source/korean/diversity-ko.md b/docs/root/source/korean/diversity-ko.md
deleted file mode 100644
index 3b5b499..0000000
--- a/docs/root/source/korean/diversity-ko.md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-# 노드 다양성의 종류
-
-
-한 명의 유저나 이벤트가 노드의 "상당수" 부분을 제어하거나 손상시키는 것을 어렵게 만드는 절차가 수행되어야 합니다.(Planetmint 서버는 Tendermint를 사용하기 때문에 여기서 "상당수"는 노드의 1/3을 말합니다.) 아래에 쓰여진 여러 가지의 다양성을 고려해야 합니다. 모든 종류에 있어서 높은 다양성을 갖는 것은 꽤 어려운 일입니다.
-
-1. **관할권 다양성.** 노드는 여러 합법적 관할권 내의 참여자들이 제어합니다. 이는 어떤 일을 수행하기에 이 수단들을 사용하기 어렵게 할 수 있습니다.
-1. **지리적 다양성.** 서버는 지리적으로 여러 곳에 물리적으로 위치합니다. 이는 자연 재해(홍수 또는 지진 등)가 문제를 야기할 만큼 손상되기 어렵게 합니다.
-1. **호스팅 다양성.** 서버는 여러 호스팅 공급자(ex. Amazon Web Services, Microsoft Azure, Digital Oceure, Rackspace)가 호스팅해야 합니다. 이는 하나의 호스팅 공급자가 '상당 수'의 노드에 영향을 미치기가 어려워집니다.
-1. **일반적인 의미의 다양성.** 일반적으로 멤버십 다양성은 컨소시엄에 많은 이점을 줍니다. 예를 들어, 그것은 문제 해결에 필요한 다양한 아이디어 소스를 컨소시엄에 제공합니다.
-
-참고: 모든 노드가 동일한 코드(ex. Planetmint의 동일한 구현)를 실행하고 있는 경우, 해당 코드의 버그를 사용하여 모든 노드를 손상시킬 수 있습니다. 이상적으로는 Planetmint 서버(예: Python, Go 등)에서 운영되고 있는 다양한 구현이 있어, 컨소시엄은 다양한 서버 구현을 할 수 있을 것입니다. 운영 체제에 대해서도 이는 유사하게 적용됩니다.
diff --git a/docs/root/source/korean/immutable-ko.md b/docs/root/source/korean/immutable-ko.md
deleted file mode 100644
index 5bcaac4..0000000
--- a/docs/root/source/korean/immutable-ko.md
+++ /dev/null
@@ -1,27 +0,0 @@
-
-
-# 어떻게 Planetmint는 불변성을 갖는가
-
-*Imunable*이라는 단어는 "시간 경과에 따른 불변성"을 의미합니다. 예를 들어, π의 10진수 값은 변경할 수 없습니다(3.14159...).
-
-블록체인 커뮤니티는 종종 블록체인을 "불변하다"고 설명합니다. 우리가 그 단어를 문자 그대로 해석한다면, 그것은 블록체인 데이터가 변경할 수 없거나 영구적이라는 것을 의미하는데, 이것은 말이 안됩니다. 데이터는 *변경 될 수 있습니다.* 예를 들어, 전염병이 인류를 멸종 시킬 수도 있는 것처럼 데이터는 수분에 의한 손상, 온도에 의한 손상, 엔트로피의 일반적인 증가로 인해 시간이 지남에 따라 손상될 수 있습니다.
-
-블록체인 데이터가 일반적인 경우보다 변경(혹은 삭제)하기가 더 어려운 것은 사실입니다. 블록체인 데이터는 단순히 (의도적인) "변형 방지"에 그치지 않고 하드 드라이브의 데이터 손상과 같은 비의도적으로 발생할 수 있는 무작위 변경에도 대응합니다. 따라서 블록체인에서 "불변한다"라는 단어를 우리는 어떤 모든 의도와 목적이 *실제적으로* 불변한 것으로 해석합니다. (언어학자들은 "불변한다"라는 단어가 블록체인 커뮤니티에서 쓰이는 *기술적 용어*라고 말할 것입니다.)
-
-블록체인 데이터는 여러 가지 방법을 통해 불변성을 가질 수 있습니다:
-
-1. **데이터 변경 또는 삭제를 위한 API 없음.** Blockchain 소프트웨어는 일반적으로 블록체인에 저장된 데이터를 변경하거나 삭제하기 위한 API를 제공하지 않습니다. Planetmint 역시 관련한 API를 제공하지 않습니다 . 이것은 변경이나 삭제가 *다른 방식*으로 일어나는 것을 막지 못합니다. 이것은 단지 하나의 방어선일 뿐입니다.
-1. **복제.** 모든 데이터는 여러 곳에 복제(복사)됩니다. 복제 팩터가 높을수록, 모든 복제본을 변경하거나 삭제하기가 더 어려워집니다.
-1. **내부 감시 장치.** 모든 노드가 모든 변경 사항을 모니터링하고 허용되지 않은 변경 사항이 발생하면 적절한 조치를 취할 수 있습니다.
-1. **외부 감시 장치.** 컨소시엄은 부정행위를 찾아 데이터를 모니터링하고 감사할 수 있는 검증된 제 3자를 선택할 수 있습니다. 공개적으로 읽을 수 있는 데이터를 가진 컨소시엄의 경우, 대중은 감사자 역할을 할 수 있습니다.
-1. **경제적 인센티브.** 일부 블록체인 시스템은 기존의 저장된 데이터를 변경하는 데 많은 비용이 들게 합니다. 그 예로 작업 증명 및 지분 증명 시스템이 있습니다. Planetmint의 경우에는 이런 인센티브를 사용하지 않습니다.
-1. 변화에 대한 손쉬운 실행 취소를 위해 오류 수정 코드와 같은 고급 기술을 사용해 데이터를 저장할 수 있습니다
-1. **암호화폐의 표식**은 종종 메시지(예: 트랜잭션)가 도중에 손상되었는지 확인하고 메시지에 서명한 사용자를 확인하는 방법으로 사용됩니다. Planetmint에서는 각 트랜잭션에 한 명 이상의 당사자가 서명해야 합니다
-1. **전체 또는 부분적 백업**은 때때로 자기 테이프 저장소, 기타 블록체인, 인쇄물 등에 기록될 수 있습니다.
-1. **강력한 보안** 노드 소유자는 강력한 보안 정책을 채택하고 적용할 수 있습니다.
-1. **노드 다양성.** 다양성으로 인해서 한 가지 요소(예: 자연 재해 또는 운영 체제 버그)가 상당 수의 노드를 손상시킬 수 없도록 합니다. [노드 다양성의 종류에 대한 섹션](https://github.com/planetmint/planetmint/blob/master/docs/root/source/korean/diversity-ko.md)을 참조하세요.
diff --git a/docs/root/source/korean/index.rst b/docs/root/source/korean/index.rst
deleted file mode 100644
index 101254c..0000000
--- a/docs/root/source/korean/index.rst
+++ /dev/null
@@ -1,98 +0,0 @@
-
-.. Copyright © 2020 Interplanetary Database Association e.V.,
- Planetmint and IPDB software contributors.
- SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
- Code is Apache-2.0 and docs are CC-BY-4.0
-
-Planetmint 문서
-========================
-
-블록체인 데이터베이스인 Planetmint를 만나보세요.
-
-`분산형 `_, `불변성 `_ 및 `자산에 대한 네이티브 지원 `_ 을 포함한 일부 데이터베이스 특성들과 블록체인 특성을 가지고 있습니다.
-
-높은 수준에서, 사용자는 Planetmint HTTP API를 사용하는 Planetmint 네트워크(노드 집합) 또는 Planetmint 파이썬 드라이버와 같은 API용 래퍼로 통신할 수 있습니다. 각 Planetmint 노드는 Planetmint Server 및 다양한 다른 소프트웨어를 실행합니다. 더 자세한 사항은 용어 페이지에서 이러한 용어 중 일부를 설명합니다.
-
-.. raw:: html
-
-
-
-
-
-
-
-
-
-
-
-Planetmint에 대한 추가 정보
--------------------------------------------------------
-
-.. toctree::
- :maxdepth: 1
-
- Planetmint Docs Home
- production-ready_kor
- terminology_kor
- decentralized_kor
- diversity-ko
- immutable-ko
- bft-ko
- query-ko
- assets_ko
- smart-contracts_ko
- transaction-concepts_ko
- store-files_ko
- permissions-ko
- private-data-ko
- Data Models
diff --git a/docs/root/source/korean/permissions-ko.md b/docs/root/source/korean/permissions-ko.md
deleted file mode 100644
index 52f4e18..0000000
--- a/docs/root/source/korean/permissions-ko.md
+++ /dev/null
@@ -1,59 +0,0 @@
-
-
-# Planetmint 사용 권한
-
-Planetmint를 사용하면 다른 사용자가 할 수 있는 것을 어느 정도 제어할 수 있습니다.
-이 능력은 \*nix환경에서의 "권한", SQL에서의 "특권", 보안 환경에서의 "액세스 제어"와 유사합니다.
-
-## 출력 지출/이전 권한
-
-Planetmint에서, 모든 출력에는 연관된 조건(crypto-condition)이 있습니다.
-
-사용되지 않은 출력을 쓰거나 전송하려면, 사용자(또는 사용자 그룹)이 조건을 충족시켜야 합니다.
-특정 사용자만이 출력을 보낼 권한이 있다는 뜻입니다. 가장 단순한 조건은, "공용 키에 해당하는 개인 키를 가진 사람만이 출력을 보낼 수 있습니다." 훨씬 더 정교한 조건들도 가능합니다, 예를 들어 “이 출력을 사용하려면,…"
-
-- "…회계 그룹의 모든 사람이 서명 할 수 있습니다."
-- "…네 명 중 세 명이 서명해야 합니다."
-- "…Bob이 반드시 서명해야 하거나 Tom과 Sylvia 둘 모두가 서명해야 합니다."
-
-자세한 내용은, [Planetmint Transactions Spec](https://github.com/planetmint/BEPs/tree/master/tx-specs/)관련 **트랜잭션 구성요소:조건** 섹션을 참조하세요.
-
-출력이 한번 소비되면 다시 사용할 수 없습니다: *아무도* 그렇게 할 권한이 없습니다. 즉, Planetmint는 누구나 출력을 "이중 소비" 하도록 허용 하지 않습니다.
-
-## 쓰기 권한
-
-누군가 TRANSFER 트랜잭션을 만들면, `metadata` 필드에 임의의 JSON 객체를 넣을 수 있다. (적정 범위 내에서; 실제 Planetmint 네트워크는 트랜잭션의 크기에 제한을 둔다.) 즉, TRANSFER 트랜잭션에서 원하는 모든 것을 쓸 수 있다.
-
-Planetmint에서 "쓰기 권한"이 없다는 의미인가요? 아닙니다!!
-
-TRANSFER 트랜잭션은 입력이 이전 출력을 충족시키는 경우에만 유효(허용)합니다. 이 출력들에 대한 조건은 누가 유효한 TRANSFER 트랜잭션을 할 수 있는지 조절 할 것입니다. 즉, 출력에 대한 조건은 특정 사용자에게 관련 자산 내역에 무엇인가 쓸 수 있는 "쓰기 권한"을 부여하는 것과 같습니다.
-
-예를 들어, 당신은 Planetmint를 사용하여 오직 당신만이 쓰기권한이 있는 공용 저널을 작성 할 수 있습니다. 방법은 다음과 같습니다: 먼저 하나의 출력으로 `asset.data` 을 통해 `{"title": "The Journal of John Doe"}` 와 같이 되도록 CREATE 트랜잭션을 생성합니다. 이 출력에는 금액 1과 사용자(개인 키를 가진)만이 출력을 보낼 수 있는 조건이 있습니다. 저널에 무엇인가를 추가하고 싶을 때마다, `metadata` 같은 필드에 최신 항목을 넣은 TRANSFER 트랜잭션을 새로 만들어야 합니다.
-
-```json
-{"timestamp": "1508319582",
- "entry": "I visited Marmot Lake with Jane."}
-```
-
-TRANSFER 트랜잭션에는 하나의 출력이 있습니다. 이 출력에는 금액1과 사용자(개인키를 가진)만이 출력을 보낼 수 있는 조건이 있습니다. 기타 등등. 당신만이 자산 내역(당신의 저널)에 덧붙일 수 있습니다.
-
-이와 같은 기술은 공학 노트북,공급망 기록,정부 회의록 등에도 사용 될 수 있습니다.
-
-또한 더 정교한 것들도 할 수 있습니다. 예를 들어, 누군가가 TRANSFER 트랜잭션을 작성할 때마다, *다른 누군가*에게 사용 권한을 부여하여 일종의 작성자-전달 혹은 연쇄 편지를 설정한다.
-
-Note
-
-누구나 CREATE 트랜잭션의 `asset.data` 필드에 있는 JSON(조건하에)을 쓸 수 있습니다. 허가가 필요하지 않습니다.
-
-## 읽기 권한
-
-다음 페이지를 참고하세요, [:doc:Planetmint, Privacy and Private Data](https://github.com/planetmint/planetmint/blob/master/docs/root/source/korean/private-data-ko.md).
-
-## 역할 기반 액세스 제어(RBAC)
-
-2017년 9월에, 우리는 [Planetmint RBAC 하부 시스템을 정의 할 수 있는 방법에 대한 블로그 게시물](https://blog.bigchaindb.com/role-based-access-control-for-planetmint-assets-b7cada491997)을 게재 했습니다. 글을 쓴 시점(2018년 1월)에는 플러그인을 사용해야 해서, 표준 Planetmint다음에서 사용가능한 [Planetmint Testnet](https://testnet.planetmint.io/) 를 사용 할 수 없었습니다. 이는 미래에 바뀔 수 있습니다. 만약 관심이 있다면, [Planetmint로 연락하십시요.](https://www.planetmint.io/contact/)
diff --git a/docs/root/source/korean/private-data-ko.md b/docs/root/source/korean/private-data-ko.md
deleted file mode 100644
index 1fb6dfd..0000000
--- a/docs/root/source/korean/private-data-ko.md
+++ /dev/null
@@ -1,102 +0,0 @@
-
-
-# Planetmint, 개인정보 및 개인 데이터
-
-## 기본 정보
-
-1. 한도 내에서 Planetmint 네트워크에 임의의 데이터(암호화 된 데이터 포함)를 저장 할 수 있습니다. 모든 트랜잭션에는 거의 모든 유니코드 문자열(최대 길이까지)을 저장 할 수 있는 `metadata` 섹션이 있습니다. 마찬가지로, 모든 CREATE 트랜잭션에는 거의 모든 유니코드 문자열을 저장 할 수 있는 `asset.data` 섹션이 있습니다.
-2. 특정 Planetmint 거래 필드에 저장된 데이터는 암호화 해서는 안됩니다, 예를 들어 공용키 및 자산과 같이. Planetmint는 Zcoin과 비슷한 개인 거래를 제공하지 않습니다.
-3. 데이터가 Planetmint 네트워크에 저장되면 변경 또는 삭제 될 수 없다고 가정하는 것이 좋습니다.
-4. Planetmint 네트워크의 모든 노드에는 저장된 모든 데이터의 전체 복사본이 있습니다.
-5. Planetmint 네트워크의 모든 노드는 저장된 모든 데이터를 읽을 수 있습니다.
-6. Planetmint 노드(예를 들어 노드의 sysadmin)에 대한 전체 액세스 권한을 가진 모든 사용자는 해당 노드에 저장된 모든 데이터를 읽을 수 있습니다.
-7. Planetmint HTTP API를 통해 노드에 접근하는 모든 사용자는 Planetmint에 저장된 모든 데이터를 찾고 읽을 수 있습니다. 액세스 권한이 있는 사람들의 목록은 매우 짧을 수 있습니다.
-8. 외부 사용자와 Planetmint 노드 사이의 연결이(예를 들어 HTTPS를 사용하여) 암호화되지 않으면 도청자는 전송중인 모든 HTTP 요청 및 응답을 읽을 수 있습니다.
-9. 만약 누군가가 평문에 접근 할 수 있다면(어디에서 가져왔는지 관계없이), 원칙적으로 이것을 전 세계와 공유 할 수 있습니다. 그렇게 하는 것을 어렵게 만들 수 있습니다, 예를 들어 데이터가 많고 방을 나갈 때 검색되는 안전한 방 안에만 들어 갈 수 있는 것과 같습니다.
-
-## 오프 체인에서 개인 데이터 저장
-
-시스템은 제3자 데이터베이스, 문서 저장소 또는 CMS(컨텐츠 관리 시스템)와 같은 오프 체인 데이터를 저장할 수 있으며, Planetmint를 사용하여 다음 작업을 수행할 수 있습니다:
-
-- 제3자 시스템에 읽기 권한 또는 기타 권한이 있는 사용자를 추적합니다. 이 작업을 수행하는 방법의 예는 아래에 있습니다.
-- 제3자 시스템에 대한 모든 요청을 영구적으로 기록합니다.
-- 모든 문서의 변경 사항을 감지 할 수 있도록, 다른 곳에 저장된 문서의 해시를 저장합니다.
-- 암호화 된 터널을 설정했다는 것을 증명할 수 있도록 두 개의 오프 체인 파티(예:Diffie-Hellman 키 교환) 간의 모든 핸드셰이크 설정 요청 및 응답을 기록합니다(독자가 해당 터널에 액세스하지 않고). 이 아이디어에 대한 자세한 내용은 [the Planetmint Privacy Protocols 저장소](https://github.com/planetmint/privacy-protocols)에 있습니다.
-
-특정 문서에 대한 읽기 권한을 가진 사람을 기록하는 간단한 방법은 제 3자 시스템(“Docpile“)이 모든 문서+사용자 쌍에 대해 Planetmint 네트워크에 CREATE 트랜잭션을 저장하여 해당 사용자가 그 문서에 대한 읽기 권한을 가지고 있음을 나타낼 수 있습니다. 트랜잭션은 Docpile에 의해 서명 될 수 있습니다(또는 문서 소유자에 의해). 자산 데이터 필드는 1)사용자의 고유 ID 및 2)문서의 고유 ID를 포함합니다. CREATE 트랜잭션의 한 출력은 DocPile(또는 문서 소유자)에 의해서만 전송/소비 될 수 있습니다.
-
-
-읽기 권한을 취소하기 위해, DocPile은 원래 사용자가 더 이상 해당 문서에 대한 읽기 권한을 가지고 있지 않다고 하는 메타 데이터 필드를 사용하여, 원래의 CREATE 트랜잭션에서 하나의 출력을 보내기 위한 TRANSFER 트랜잭션을 생성 할 수 있습니다.
-
-이는 무한정으로 수행될 수 있습니다,즉.사용자가 다시 읽기 권한을 가지고 있음을 나타내기 위해 다른 TRANSFER 트랜잭션을 DocPile에서 작성할 수 있습니다.
-
-DocPile은 사용자+문서 쌍에 대한 CREATE → TRANSFER → TRANSFER → … 체인에서 사용자의 마지막 트랜잭션을 읽음으로써, 사용자가 주어진 문서에 대한 읽기 권한을 가지고 있는지 파악할 수 있습니다.
-
-여기에 같은 일을 하는 다른 방법들이 있다. 위는 단지 하나의 예시이다.
-
-위의 예시에서는 사용자가 소유한(통제 된)자산으로 “읽기 권한“을 취급하지 않았다는 것을 알 수 있습니다, 왜냐하면 사용 권한 자산이 사용자에게 주어 지면(사용자에 의해 양도되거나 사용자에 의해 생성된 경우) 사용자가 다시 Docpile로 전송 할 때까지 어떠한 것도 제어 할 수 없기 때문입니다(Docpile에 의해).
-
-## 체인에서 암호화 된 개인 데이터 저장
-
-체인상에서 개인 데이터를 암호화하여 저장하는 방법에는 여러 가지가 있습니다. 모든 유스 케이스에는 고유한 목표와 제약이 있으며, 최상의 해결책은 유스 케이스에 달려있다.
-[Planetmint 컨설팅 팀](https://www.planetmint.io/services/), 우리의 파트너와 함께, 당신의유스 케이스에 가장 적합한 솔루션을 설계하는 데 도움을 줄 수 있습니다.
-
-아래에서는 다양한 암호화 기본 설정을 사용하여 가능한 시스템을 설정하는 예제를 설명합니다.
-
-참고 사항:
-
-- Ed25519 키 쌍은 [메시지 암호화 및 암호 해독이 아닌](https://crypto.stackexchange.com/questions/27866/why-curve25519-for-encryption-but-ed25519-for-signatures) 암호화 서명 및 확인을 위해 설계되었습니다. 암호화의 경우, X25519와 같은 암호화를 위해 설계된 키 쌍을 사용해야 합니다.
-- 누군가(또는 어떤 그룹)이 체인상의 암호화 된 데이터를 해독하는 방법을 발표하면 암호화 된 데이터에 액세스 할 수 있는 모든 사람이 평문을 가져올 수 있습니다. 데이터는 삭제할 수 없습니다.
-- 암호화 된 데이터는 MongoDB에서 색인을 생성하거나 검색 할 수 없습니다.(암호문을 색인화하고 검색 할 수 있지만 유용하지는 않습니다.) 암호화 된 데이터를 색인화하고 검색하기 위해 준 유사 암호를 사용할 수 있지만, MongoDB는 이를 지원할 계획이 없습니다. 색인화 또는 키워드 검색이 필요한 경우 `asset.data`의 몇가지 필드 또는 `metadata`객체를 일반 텍스트로 남겨두고 민감한 정보를 암호화 된 하위 객체에 저장할 수 있습니다.
-
-### 시스템 예시 1
-
-대칭 키로 데이터를 암호화하고 체인에(`metadata` 또는 `asset.data` 에서) 암호문을 저장하십시오. 키를 제 3자에게 알리려면, 공용 키를 사용하여 대칭 키를 암호화하고 암호화 키를 보냅니다. 개인 키로 대칭 키의 암호를 해독한 다음 대칭 키를 사용하여 on-chain 암호문의 암호를 해독할 수 있습니다.
-
-공용 키/ 개인 키 쌍과 함께 대칭 키를 사용하는 이유는 암호문을 한 번만 저장하면 되기 때문입니다.
-
-### 시스템 예시 2
-
-이 예시에서는 [프록시 재-암호화](https://en.wikipedia.org/wiki/Proxy_re-encryption) 를 사용합니다:
-
-1. MegaCorp는 자체 공용 키를 사용하여 일부 데이터를 암호화 한 후 암호화 된 데이터(암호문1)을 Planetmint 네트워크에 저장합니다.
-
-2. MegaCorp는 다른 사람들이 암호화 된 데이터를 읽을 수 있게 하고 싶지만, 공용 키를 공유하지 않고 모든 새로운 수신자에 대해 스스로를 다시 암호화 할 필요가 없습니다. 대신 프록시 재 암호화 서비스를 제공하기 위해 Moxie라는 “프록시“를 찾습니다.
-3. Zorban은 MegaCorp에 연결하여 데이터 읽기 권한을 요청합니다.
-4. MegaCorp는 Zorban에게 공용 키를 요청합니다.
-5. MegaCorp “재 암호화 키“를 생성하여 프록시 Moxie로 전송합니다.
-6. Moxie (프록시)는 재 암호화 키를 사용하여 암호문 1을 암호화하고 암호문 2를 만듭니다.
-7. Moxie는 Zorban(또는 Zorban에게 전달하는 MegaCorp)에게 암호문 2를 보냅니다.
-8. Zorban은 개인 키를 사용하여 암호문 2를 해독해서 원본 암호화되지 않은 데이터를 가져옵니다.
-
-참고:
-
-- 프록시는 암호문만 볼 수 있습니다. 암호화 되지 않은 데이터는 볼 수 없습니다.
-- Zorban은 암호문 1, 즉 체인 상의 데이터를 해독 할 수 있는 능력이 없습니다.
-- 위의 흐름에는 다양한 변형이 있습니다.
-
-## 시스템 예시 3
-
-이 예시는 [삭제 코딩](https://en.wikipedia.org/wiki/Erasure_code)을 사용합니다:
-
-1. 데이터를 n개의 조각으로 삭제하십시오.
-2. 서로 다른 암호화 키로 n개의 조각을 암호화 하십시오.
-3. n 개의 암호화 된 부분을 체인에 저장합니다 (예: n개의 별도 트랜잭션).
-4. n 개의 암호 해독 키 각각을 다른 당사자와 공유하십시오.
-
-만약 k < n 인 키홀더들이 k개의 조각들을 가져와서 해독한다면, 그것들로 원본 텍스트를 다시 만들 수 있습니다. k 미만이면 충분하지 않습니다.
-
-### 시스템 예시 4
-
-이 설정은 특수 노드가 데이터의 일부를 볼 수 있어야 하지만, 다른 노드는 볼 수 없어야 하는 기업용 블록 체인 시나리오에서 사용할 수 있습니다.
-
-- 특수 노드는 X25519 키 쌍 (또는 유사한 비대칭 *암호화*키 쌍)을 생성합니다 .
-- Planetmint 최종 사용자는 특수 노드의 X25519 공용 키(암호화 키)를 찾습니다.
-- 최종 사용자는 위에서 언급 한 공용 키를 사용하여 asset.data 또는 메타 데이터(또는 모두)를 암호화하고, 유효한 Planetmint 트랜잭션을 생성합니다.
-- 이는 asset.data 또는 메타 데이터의 내용이 유효성 검증에 중요하지 않은 트랜잭션에 대해서만 수행되므로, 모든 노드 운영자가 트랜잭션을 검증 할 수 있습니다.
-- 특수 노드는 암호화 된 데이터를 해독 할 수 있지만, 다른 노드 운영자와 다른 최종 사용자는 할 수 없습니다.
diff --git a/docs/root/source/korean/production-ready_kor.md b/docs/root/source/korean/production-ready_kor.md
deleted file mode 100644
index e79fd01..0000000
--- a/docs/root/source/korean/production-ready_kor.md
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-# 배포 - 준비
-
-경우에 따라, Planetmint는 배포-준비가 될 수도 있고 되지 않을 수도 있습니다. 서비스 공급자에게 문의해야 합니다. 만약 Planetmint를 (배포로) 전환하고자 한다면, 서비스 공급자에게 문의하십시오.
-
-참고 : Planetmint는 "보증 없음" 섹션을 가지는 오픈소스 라이센스이며, 이는 전형적인 오픈소스 라이센스입니다. 이는 소프트웨어 산업의 표준입니다. 예를 들어, 리눅스 커널은 라이센스에 "보증 없음" 섹션을 가지고 있지만, 수십억 대의 시스템에 의해 배포되어 사용됩니다. 보증은 대개 서비스 공급자가 소프트웨어 라이센스 수준 이상으로 제공합니다.
diff --git a/docs/root/source/korean/query-ko.md b/docs/root/source/korean/query-ko.md
deleted file mode 100644
index 3819068..0000000
--- a/docs/root/source/korean/query-ko.md
+++ /dev/null
@@ -1,210 +0,0 @@
-
-
-Planetmint 쿼리
-===================
-
-노드 operator는 MongoDB의 쿼리 엔진의 최대 성능을 사용하여 모든 트랜잭션, 자산 및 메타데이터를 포함하여 저장된 모든 데이터를 검색하고 쿼리할 수 있습니다. 노드 operator는 외부 사용자에게 얼마나 많은 쿼리 파워를 송출할지 스스로 결정할 수 있습니다.
-
-
-예제 쿼리가 포함된 블로그 게시물
-------------------------------
-
-
-Planetmint 블로그에 MongoDB 도구를 사용하여 Planetmint 노드의 MongoDB 데이터베이스를 쿼리하는 방법에 대한 게시물을 올렸습니다. 데이터에 대한 일부 특정 예제 쿼리가 주요 내용입니다. [여기서 확인하세요](https://blog.bigchaindb.com/using-mongodb-to-query-bigchaindb-data-3fc651e0861b)
-
-MongoDB에 연결하기
--------------------------
-
-
-MongoDB 데이터베이스를 쿼리하려면 먼저 데이터베이스에 연결해야 합니다. 그러기 위해선 호스트 이름과 포트를 알아야 합니다.
-
-개발 및 테스트를 위해 지역 컴퓨터에서 Planetmint 노드를 실행 중인 경우 호스트 이름은 "로컬 호스트"여야 하며 이러한 값을 변경하지 않는 한 포트는 "27017"이어야 합니다. 원격 시스템에서 Planetmint 노드를 실행 중이며 해당 시스템에 SSH할 수 있는 경우에도 마찬가지입니다.
-
-원격 시스템에서 Planetmint 노드를 실행하고 MongoDB를 auth를 사용하고 공개적으로 액세스할 수 있도록 구성한 경우(권한이 있는 사용자에게) 호스트 이름과 포트를 확인할 수 있습니다.
-
-쿼리하기
-------------
-
-Planetmint 노드 운영자는 로컬 MongoDB 인스턴스에 대한 전체 액세스 권한을 가지므로 실행하는데 MongoDB의 다음의 API를 사용할 수 있습니다:
-
-- [the Mongo Shell](https://docs.mongodb.com/manual/mongo/)
-- [MongoDB Compass](https://www.mongodb.com/products/compass)
-- one of [the MongoDB drivers](https://docs.mongodb.com/ecosystem/drivers/), such as [PyMongo](https://api.mongodb.com/python/current/), or
-- MongoDB 쿼리에 대한 서드파티툴, RazorSQL, Studio 3T, Mongo Management Studio, NoSQLBooster for MongoDB, or Dr. Mongo.
-
-Note
-
-SQL을 이용해 mongoDB 데이터베이스를 쿼리할 수 있습니다. 예를 들어:
-
- * Studio 3T: "[How to Query MongoDB with SQL](https://studio3t.com/whats-new/how-to-query-mongodb-with-sql/)"
- * NoSQLBooster for MongoDB: "[How to Query MongoDB with SQL SELECT](https://mongobooster.com/blog/query-mongodb-with-sql/)"
-
-예를 들어, 기본 Planetmint 노드를 실행하는 시스템에 있는 경우 Mongo Shell (``mongo``)을 사용하여 연결하고 다음과 같이 볼 수 있습니다.
-
- $ mongo
- MongoDB shell version v3.6.5
- connecting to: mongodb://127.0.0.1:27017
- MongoDB server version: 3.6.4
- ...
- > show dbs
- admin 0.000GB
-<<<<<<< HEAD
- planet 0.000GB
-=======
- planetmint 0.000GB
->>>>>>> 3bfc3298f8210b135084e823eedd47f213538088
- config 0.000GB
- local 0.000GB
- > use planetmint
- switched to db planetmint
- > show collections
- abci_chains
- assets
- blocks
- elections
- metadata
- pre_commit
- transactions
- utxos
- validators
-
-위 예제는 몇 가지 상황을 보여줍니다:
-
-- 호스트 이름이나 포트를 지정하지 않으면 Mongo Shell은 각각 `localhost`와 `27017`으로 가정합니다. (`localhost`는 우분투에 IP주소를 127.0.0.1로 설정했습니다.)
-
-
-* Planetmint는 데이터를 `planetmint`이라는 데이터베이스에 저장합니다.
-* `planetmint` 데이터베이스에는 여러 [collections](https://docs.mongodb.com/manual/core/databases-and-collections/)가 포함되어 있습니다.
-* 어떤 컬렉션에도 투표가 저장되지 않습니다. 이런 데이터는 모두 자체(LevelDB) 데이터베이스에 의해 처리되고 저장됩니다.
-
-컬렉션에 대한 예시 문서
----------------------------------------
-
-``planetmint`` 데이터베이스의 가장 흥미로운 부분은 아래와 같습니다:
-
-- transactions
-- assets
-- metadata
-- blocks
-
-`db.assets.findOne()` 은 MongoDB 쿼리를 사용하여 이러한 컬렉션들을 탐색할 수 있습니다.
-
-### 트랜잭션에 대한 예시 문서
-
-transaction 컬렉션에서 CREATE 트랜잭션에는 추가 `"_id"` 필드(MongoDB에 추가됨)가 포함되며 `"asset"`과 `"metadata"` 필드에는 데이터가 저장되어 있지 않습니다.
-
- {
- "_id":ObjectId("5b17b9fa6ce88300067b6804"),
- "inputs":[…],
- "outputs":[…],
- "operation":"CREATE",
- "version":"2.0",
- "id":"816c4dd7…851af1629"
- }
-
-A TRANSFER transaction from the transactions collection is similar, but it keeps its `"asset"` field.
-
- {
- "_id":ObjectId("5b17b9fa6ce88300067b6807"),
- "inputs":[…],
- "outputs":[…],
- "operation":"TRANSFER",
- "asset":{
- "id":"816c4dd7ae…51af1629"
- },
- "version":"2.0",
- "id":"985ee697d…a3296b9"
- }
-
-### assets에 대한 예시 문서
-
-assets에 대한 기술에는 MongoDB가 추가한 `"_id"` 분야와 CREATE 거래에서 나온 `asset.data` 그리고 `"id"` 세 가지 최상위 분야로 구성되어 있습니다.
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-{
- "_id":ObjectId("5b17b9fe6ce88300067b6823"),
- "data":{
- "type":"cow",
- "name":"Mildred"
- },
- "id":"96002ef8740…45869959d8"
-}
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-### metadata에 대한 예시 문서
-
-
-metadata 컬렉션의 문서는 MongoDB가 추가한 `"_id"`필드와 거래에서 나온 `asset.data`그리고 거래에서 나온 ``"id"`` 세 가지 최상위 분야로 구성되어 있습니다.
-
- {
- "_id":ObjectId("5b17ba006ce88300067b683d"),
- "metadata":{
- "transfer_time":1058568256
- },
- "id":"53cba620e…ae9fdee0"
- }
-
-### blocks에 대한 예시 문서
-
- {
- "_id":ObjectId("5b212c1ceaaa420006f41c57"),
- "app_hash":"2b0b75c2c2…7fb2652ce26c6",
- "height":17,
- "transactions":[
- "5f1f2d6b…ed98c1e"
- ]
- }
-
-## 노드 operator가 외부 유저에게 보낼 수 있는 것
-
-각 노드 operator는 외부 사용자가 자신의 로컬 MongoDB 데이터베이스에서 정보를 얻는 방법을 결정할 수 있습니다. 그들은 다음과 같은 것들을 보낼 수 있습니다:
-
-- 외부 유저의 쿼리를 로컬 MongoDB 데이터베이스에서 처리할 수 있도록, 제한된 권한을 가진 역할이 부여된 MongoDB 사용자 (예: read-only)
-<<<<<<< HEAD
-- 제한된 미리 정의된 쿼리 집합을 허용하는 제한된 HTTP API, [Planetmint 서버에서 제공하는 HTTP API](http://planetmint.com/http-api), 혹은Django, Express, Ruby on Rails, or ASP.NET.를 이용해 구현된 커스텀 HTTP API
-=======
-- 제한된 미리 정의된 쿼리 집합을 허용하는 제한된 HTTP API, [Planetmint 서버에서 제공하는 HTTP API](http://planetmint.io/http-api), 혹은Django, Express, Ruby on Rails, or ASP.NET.를 이용해 구현된 커스텀 HTTP API
->>>>>>> 3bfc3298f8210b135084e823eedd47f213538088
-- 다른 API(예: GraphQL API) 제3자의 사용자 정의 코드 또는 코드를 사용하여 수행할 수 있습니다..
-
-각 노드 operator는 로컬 MongoDB 데이터베이스에 대한 다른 레벨 또는 유형의 액세스를 노출할 수 있습니다.
-예를 들어, 한 노드 operator가 최적화된 [공간 쿼리](https://docs.mongodb.com/manual/reference/operator/query-geospatial/)를 전문으로 제공하기로 정할 수 있습니다.
-
-보안 고려사항
------------------------
-
-Planetmint 버전 1.3.0 이전 버전에서는 하나의 MongoDB 논리 데이터베이스가 있었기 때문에 외부 사용자에게 데이터베이스를 노출하는 것은 매우 위험했으며 권장되지 않습니다. "Drop database"는 공유된 MongoDB 데이터베이스를 삭제합니다.
-
-Planetmint 버전 2.0.0 이상에선 각 노드에 고유한 독립 로컬 MongoDB 데이터베이스가 존재합니다. 노드 간 통신은 아래 그림 1에서와 같이 MongoDB 프로토콜이 아닌 Tendermint 프로토콜을 사용하여 수행됩니다. 노드의 로컬 MongoDB 데이터베이스가 손상되어도 다른 노드는 영향을 받지 않습니다.
-
-
-
-Figure 1: A Four-Node Planetmint 2.0 Network
-
-퍼포먼스 및 요금 고려사항
------------------------------------
-
-쿼리 프로세싱은 상당히 많은 리소스를 소모할 수 있으므로, Planetmint 서버 및 Tendermint Core와 별도의 컴퓨터에서 MongoDB를 실행하는 것이 좋습니다.
-
-노드 operator 는 조회에 사용되는 리소스를 측정하여 조회를 요청한 사람은 누구든지 요금을 청구할 수 있습니다.
-
-일부 쿼리는 너무 오래 걸리거나 리소스를 너무 많이 사용할 수 있습니다. 노드 operator는 사용할 수 있는 리소스에 상한을 두고, 초과된다면 중지(또는 차단)해야 합니다.
-
-MongoDB 쿼리를 더욱 효율적으로 만들기 위해 [인덱스](https://docs.mongodb.com/manual/indexes/)를 만들 수 있습니다. 이러한 인덱스는 노드 operator 또는 일부 외부 사용자가 생성할 수 있습니다(노드 운영자가 허용하는 경우). 인덱스는 비어 있지 않습니다. 새 데이터를 컬렉션에 추가할 때마다 해당 인덱스를 업데이트해야 합니다. 노드 운영자는 이러한 요금을 인덱스를 생성한 사람에게 전달하고자 할 수 있습니다. mongoDB에서는 [단일 컬렉션은 64개 이하의 인덱스를 가질 수 있습니다](https://docs.mongodb.com/manual/reference/limits/#Number-of-Indexes-per-Collection).
-
-Tendermint voting 파워가 0인 노드인 추종자 노드를 생성할 수 있다. 여전히 모든 데이터의 복사본이 있으므로 읽기 전용 노드로 사용할 수 있습니다. Follower 노드는 투표 검증자의 작업 부하에 영향을 미치지 않고 서비스로 전문화된 쿼리를 제공할 수 있습니다(쓰기도 가능). 팔로워의 팔로워들도 있을 수 있습니다.
-
-자바스크립트 쿼리 코드 예시
-------------------------------
-
-[MongoDB node.js 드라이버](https://mongodb.github.io/node-mongodb-native/?jmp=docs)와 같은 MongoDB 드라이버를 사용하여 다음 중 하나를 사용하여 노드의 MongoDB 데이터베이스에 연결할 수 있습니다. 여기 자바스크립트 쿼리 코드에 대한 링크가 있습니다.
-
-- [The Planetmint JavaScript/Node.js driver source code](https://github.com/bigchaindb/js-bigchaindb-driver)
-- [Example code by @manolodewiner](https://github.com/manolodewiner/query-mongodb-bigchaindb/blob/master/queryMongo.js)
-- [More example code by @manolodewiner](https://github.com/bigchaindb/bigchaindb/issues/2315#issuecomment-392724279)
\ No newline at end of file
diff --git a/docs/root/source/korean/smart-contracts_ko.md b/docs/root/source/korean/smart-contracts_ko.md
deleted file mode 100644
index 10d89fc..0000000
--- a/docs/root/source/korean/smart-contracts_ko.md
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-Planetmint 및 스마트계약
-==============================
-
-Planetmint에는 스마트 계약 (즉, 컴퓨터 프로그램)의 소스 코드를 저장할 수 있지만 Planetmint는 임의의 스마트 계약을 실행하지 않습니다.
-
-Planetmint는 대체 가능한 자산과 대체 할 수없는 자산 모두를 전송할 수있는 권한을 가진 사람을 시행하는 데 사용할 수 있습니다. 이중 지출을 막을 것입니다. 즉, ERC-20 (대체 가능한 토큰) 또는 ERC-721 (대체 할 수없는 토큰) 스마트 계약 대신 Planetmint 네트워크를 사용할 수 있습니다.
-
-자산 이전 권한은 쓰기 권한으로 해석 될 수 있으므로 로그, 저널 또는 감사 내역에 기록 할 수있는 사람을 제어하는데 사용할 수 있습니다. [Planetmint의 사용 권한](https://github.com/planetmint/planetmint/blob/master/docs/root/source/korean/permissions-ko.md)에 대한 자세한 내용은 페이지에서 확인하십시오.
-
-Planetmint 네트워크는 oracles 또는 체인 간 통신 프로토콜을 통해 다른 블록 체인 네트워크에 연결할 수 있습니다. 이는 Planetmint를 다른 블록 체인을 사용하여 임의의 스마트 계약을 실행하는 솔루션의 일부로 사용할 수 있음을 의미합니다.
diff --git a/docs/root/source/korean/store-files_ko.md b/docs/root/source/korean/store-files_ko.md
deleted file mode 100644
index 92e8f30..0000000
--- a/docs/root/source/korean/store-files_ko.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-# Planetmint에 파일을 저장하는 방법
-
-Planetmint 네트워크에 파일을 저장할 수는 있지만 그렇게하지 않는 것이 좋습니다. 파일이 아닌 구조화 된 데이터를 저장, 인덱싱 및 쿼리하는 데 가장 적합합니다.
-
-분산 된 파일 저장소를 원하면 Storj, Sia, Swarm 또는 IPFS / Filecoin을 확인하십시오. 파일 URL, 해시 또는 기타 메타 데이터를 Planetmint 네트워크에 저장할 수 있습니다.
-
-Planetmint 네트워크에 파일을 저장해야하는 경우,이를 수행하는 한 가지 방법은 긴 Base64 문자열로 변환 한 다음 해당 문자열을 하나 이상의 Planetmint 트랜잭션 (CREATE 트랜잭션의 `asset.data`)에 저장하는 것입니다 , 또는 어떤 거래의 `메타데이터` 일 수도있다.
diff --git a/docs/root/source/korean/terminology_kor.md b/docs/root/source/korean/terminology_kor.md
deleted file mode 100644
index fcca059..0000000
--- a/docs/root/source/korean/terminology_kor.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-
-# 용어
-
-Planetmint와 관련된 몇 가지 전문화된 용어가 있습니다. 시작하기에 앞서, 최소한 다음과 같은 사항을 알아야합니다.
-
-## Planetmint 노드
-
-**Planetmint 노드**는 [Planetmint 서버](https://docs.planetmint.io/projects/server/en/latest/introduction.html) 및 관련된 소프트웨어를 실행하는 시스템(또는 논리적인 시스템)입니다. 각각의 노드는 한 개인이나 조직에 의해 제어될 수 있습니다.
-
-## Planetmint 네트워크
-
-Planetmint 노드들의 집합은 서로 연결하여 **Planetmint 네트워크**를 형성할 수 있습니다. 해당 네트워크에서 각각의 노드는 동일한 소프트웨어를 실행합니다. Planetmint 네트워크는 모니터링 같은 것들을 하기 위한 추가적인 시스템이 있을 수 있습니다.
-
-## Planetmint 컨소시엄
-
-Planetmint 네트워크에 노드들을 실행하는 사람과 조직은 **Planetmint 컨소시엄**(즉, 다른 조직)에 속합니다. 컨소시엄은 결정을 하기 위해 일종의 거버넌스 구조를 가져야합니다. 만약 Planetmint 네트워크가 단 하나의 회사에 의해서 운영된다면, "컨소시엄"은 단지 그 회사일 뿐입니다.
-
-**Planetmint 네트워크와 컨소시엄의 차이는 무엇일까요?**
-
-Planetmint 네트워크는 단지 연결된 노드들의 집합입니다. 컨소시엄은 하나의 Planetmint 네트워크를 가지는 조직이며, 해당 네트워크에서 각각의 노드는 다른 운영자를 가집니다.
diff --git a/docs/root/source/korean/transaction-concepts_ko.md b/docs/root/source/korean/transaction-concepts_ko.md
deleted file mode 100644
index ac8813a..0000000
--- a/docs/root/source/korean/transaction-concepts_ko.md
+++ /dev/null
@@ -1,65 +0,0 @@
-
-
-# 트랜잭션 개념
-
-*트랜잭션*은 물건 (예 : 자산)을 등록, 발행, 생성 또는 전송하는 데 사용됩니다.
-
-트랜잭션은 Planetmint가 저장하는 가장 기본적인 종류의 레코드입니다. CREATE 트랜잭션과 TRANSFER 트랜잭션의 두 종류가 있습니다.
-
-
-## 트랜잭션 생성
-
-CREATE 트랜잭션은 Planetmint에서 한 가지 (또는 자산)의 이력을 등록, 발행, 생성 또는 다른 방법으로 시작하는 데 사용될 수 있습니다. 예를 들어, 신원이나 창작물을 등록 할 수 있습니다. 이러한 것들을 종종 "자산"이라고 부르지만 literal 자산이 아닐 수도 있습니다.
-
-Planetmint는 Planetmint Server v0.8.0부터 나눌 수있는 자산을 지원합니다. 이는 "공유"의 초기 숫자로 자산을 생성 / 등록 할 수 있음을 의미합니다. 예를 들어, CREATE 트랜잭션은 50 개의 오크 나무로 된 트럭로드를 등록 할 수 있습니다. 분할 가능한 자산의 각 주식은 서로 공유 할 수 있어야합니다. 주식은 대체 가능해야합니다.
-
-CREATE 트랜잭션은 하나 이상의 출력을 가질 수 있습니다. 각 출력에는 관련 금액이 있습니다. 출력에 연결된 공유 수입니다. 예를 들어 자산이 50 개의 오크 나무로 구성되어있는 경우 한 출력에는 한 소유자 세트에 35 개의 오크 나무가 있고 다른 출력에는 다른 소유자 세트에는 15 개의 오크 나무가있을 수 있습니다.
-
-또한 각 출력에는 연관된 조건이 있습니다. 출력을 전송 / 소비하기 위해 충족되어야하는 조건 (TRANSFER 트랜잭션에 의해). Planetmint는 다양한 조건을 지원합니다. 자세한 내용은 관련 [Planetmint 트랜잭션 Spec](https://github.com/planetmint/BEPs/tree/master/tx-specs/)과 관련된 **트랜잭션 구성 요소 : 조건 섹션**을 참조하십시오.
-
-
-
-위의 예제에서는 Planetmint CREATE 트랜잭션 다이어그램을 보여줍니다. Pam은 자산 3 주를 소유 / 통제하고 다른 주식은 없습니다 (다른 산출물이 없으므로).
-
-각 출력에는 해당 출력의 조건과 연관된 모든 공개 키 목록이 있습니다. 다시 말하면, 그 목록은 "소유자"의 목록으로 해석 될 수 있습니다.보다 정확한 단어는 이행자, 서명자, 컨트롤러 또는 이전 가능 요소 일 수 있습니다. 관련 [Planetmint Transactions Spec](https://github.com/planetmint/BEPs/tree/master/tx-specs/) **소유자에 관한 참고 사항** 섹션을 참조하십시오.
-
-CREATE 트랜잭션은 모든 소유자가 서명해야합니다. (만약 당신이 그 서명을 원한다면, 그것은 인코딩되었지만 하나의 입력의 "이행"에있다.)
-
-## 트랜잭션 이전
-
-트랜잭션 이전은 다른 트랜잭션 (CREATE 트랜잭션 또는 다른 TRANSFER 트랜잭션)에서 하나 이상의 출력을 전송 / 소비 할 수 있습니다. 이러한 출력물은 모두 동일한 자산과 연결되어야합니다. TRANSFER 트랜잭션은 한 번에 하나의 자산의 공유 만 전송할 수 있습니다.
-
-트랜잭션 이전의 각 입력은 다른 트랜잭션의 한 출력에 연결됩니다. 각 입력은 전송 / 소비하려는 출력의 조건을 충족해야합니다.
-
-트랜잭션 이전은 위에서 설명한 CREATE 트랜잭션과 마찬가지로 하나 이상의 출력을 가질 수 있습니다. 투입물에 들어오는 총 주식 수는 산출물에서 나가는 총 주식 수와 같아야합니다.
-
-
-
-위 그림은 두 개의 Planetmint 트랜잭션, CREATE 트랜잭션 및 TRANSFER 트랜잭션의 다이어그램을 보여줍니다. CREATE 트랜잭션은 이전 다이어그램과 동일합니다. TRANSFER 트랜잭션은 Pam의 출력을 소비하므로 TRANSFER 트랜잭션의 입력에는 Pam의 유효한 서명 (즉, 유효한 이행)이 포함되어야합니다. TRANSFER 트랜잭션에는 두 개의 출력이 있습니다. Jim은 하나의 공유를 가져오고 Pam은 나머지 두 개의 공유를 가져옵니다.
-
-용어 : "Pam, 3"출력을 "소비 된 트랜잭션 출력"이라고하며 "Jim, 1"및 "Pam, 2"출력을 "사용되지 않은 트랜잭션 출력"(UTXO)이라고합니다.
-
-**예제 1:** 빨간 차가 Joe가 소유하고 관리한다고 가정합니다. 자동차의 현재 전송 조건에서 Joe가 유효한 전송을 서명해야한다고 가정합니다. Joe는 Joe의 서명 (현재 출력 조건을 충족시키기 위해)과 Rae가 유효한 전송을 서명해야한다는 새로운 출력 조건을 포함하는 입력을 포함하는 TRANSFER 트랜잭션을 작성할 수 있습니다.
-
-**예제 2:** 예를 들어 동일한 자산 유형의 이전에 전송되지 않은 4 개의 자산에 대한 출력 조건을 충족하는 TRANSFER 트랜잭션을 생성 할 수 있습니다. 종이 클립. 총 금액은 20, 10, 45 및 25 일 수 있으며, 말하자면 총 100 개의 클립입니다. 또한 TRANSFER 트랜잭션은 새로운 전송 조건을 설정합니다. 예를 들어, Gertrude가 서명하는 경우에만 60 개의 클립 클립이 전송 될 수 있으며 Jack과 Kelly가 서명하는 경우에만 40 개의 클립 클립이 전송 될 수 있습니다. 들어오는 클립 클립의 합계가 나가는 클립 클립의 합계와 같아야합니다 (100).
-
-## 트랜잭션 유효성
-
-언제 트랜잭션이 유효한지 유효성을 검사하는 것에 관해 해당 블로그에 게시되어있습니다. *The Planetmint Blog*:
-["What is a Valid Transaction in Planetmint?"](https://blog.bigchaindb.io/what-is-a-valid-transaction-in-bigchaindb-9a1a075a9598) (Note: That post was about Planetmint Server v1.0.0.)
-
-Each [Planetmint Transactions Spec](https://github.com/planetmint/BEPs/tree/master/tx-specs/) documents the conditions for a transaction (of that version) to be valid.
-
-## 트랜잭션 예시
-
-<<<<<<< HEAD
-아래의 [HTTP API 문서](https://docs.planetmint.com/projects/server/en/latest/http-client-server-api.html)와 [the Python 드라이버 문서](https://docs.planetmint.com/projects/py-driver/en/latest/usage.html)에는 예제 Planetmint 트랜잭션이 있습니다.
-=======
-아래의 [HTTP API 문서](https://docs.planetmint.io/projects/server/en/latest/http-client-server-api.html)와 [the Python 드라이버 문서](https://docs.planetmint.io/projects/py-driver/en/latest/usage.html)에는 예제 Planetmint 트랜잭션이 있습니다.
->>>>>>> 3bfc3298f8210b135084e823eedd47f213538088
-.
diff --git a/docs/root/source/installation/network-setup/index.rst b/docs/root/source/network-setup/index.rst
similarity index 68%
rename from docs/root/source/installation/network-setup/index.rst
rename to docs/root/source/network-setup/index.rst
index e21f5f9..1708f20 100644
--- a/docs/root/source/installation/network-setup/index.rst
+++ b/docs/root/source/network-setup/index.rst
@@ -4,16 +4,15 @@
SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
-Network setup
-=============
+Networks & Federations
+######################
+
There are several ways to setup a network. You can use the Kubernetes deployment template in this section, or use the Ansible solution in the Contributing section. Also, you can setup a single node on your machine and connect to an existing network.
-.. toctree::
- :maxdepth: 1
-
- networks
- network-setup
- k8s-deployment-template/index
- planetmint-node-ansible.md
+.. include:: networks.md
+ :parser: myst_parser.sphinx_
+.. include:: network-setup.md
+ :parser: myst_parser.sphinx_
+.. include:: k8s-deployment-template/index.rst
\ No newline at end of file
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/architecture.rst b/docs/root/source/network-setup/k8s-deployment-template/architecture.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/architecture.rst
rename to docs/root/source/network-setup/k8s-deployment-template/architecture.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/ca-installation.rst b/docs/root/source/network-setup/k8s-deployment-template/ca-installation.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/ca-installation.rst
rename to docs/root/source/network-setup/k8s-deployment-template/ca-installation.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/client-tls-certificate.rst b/docs/root/source/network-setup/k8s-deployment-template/client-tls-certificate.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/client-tls-certificate.rst
rename to docs/root/source/network-setup/k8s-deployment-template/client-tls-certificate.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/cloud-manager.rst b/docs/root/source/network-setup/k8s-deployment-template/cloud-manager.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/cloud-manager.rst
rename to docs/root/source/network-setup/k8s-deployment-template/cloud-manager.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/easy-rsa.rst b/docs/root/source/network-setup/k8s-deployment-template/easy-rsa.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/easy-rsa.rst
rename to docs/root/source/network-setup/k8s-deployment-template/easy-rsa.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/index.rst b/docs/root/source/network-setup/k8s-deployment-template/index.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/index.rst
rename to docs/root/source/network-setup/k8s-deployment-template/index.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/log-analytics.rst b/docs/root/source/network-setup/k8s-deployment-template/log-analytics.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/log-analytics.rst
rename to docs/root/source/network-setup/k8s-deployment-template/log-analytics.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/node-config-map-and-secrets.rst b/docs/root/source/network-setup/k8s-deployment-template/node-config-map-and-secrets.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/node-config-map-and-secrets.rst
rename to docs/root/source/network-setup/k8s-deployment-template/node-config-map-and-secrets.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/node-on-kubernetes.rst b/docs/root/source/network-setup/k8s-deployment-template/node-on-kubernetes.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/node-on-kubernetes.rst
rename to docs/root/source/network-setup/k8s-deployment-template/node-on-kubernetes.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/planetmint-network-on-kubernetes.rst b/docs/root/source/network-setup/k8s-deployment-template/planetmint-network-on-kubernetes.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/planetmint-network-on-kubernetes.rst
rename to docs/root/source/network-setup/k8s-deployment-template/planetmint-network-on-kubernetes.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/revoke-tls-certificate.rst b/docs/root/source/network-setup/k8s-deployment-template/revoke-tls-certificate.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/revoke-tls-certificate.rst
rename to docs/root/source/network-setup/k8s-deployment-template/revoke-tls-certificate.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/server-tls-certificate.rst b/docs/root/source/network-setup/k8s-deployment-template/server-tls-certificate.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/server-tls-certificate.rst
rename to docs/root/source/network-setup/k8s-deployment-template/server-tls-certificate.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/tectonic-azure.rst b/docs/root/source/network-setup/k8s-deployment-template/tectonic-azure.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/tectonic-azure.rst
rename to docs/root/source/network-setup/k8s-deployment-template/tectonic-azure.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/template-kubernetes-azure.rst b/docs/root/source/network-setup/k8s-deployment-template/template-kubernetes-azure.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/template-kubernetes-azure.rst
rename to docs/root/source/network-setup/k8s-deployment-template/template-kubernetes-azure.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/troubleshoot.rst b/docs/root/source/network-setup/k8s-deployment-template/troubleshoot.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/troubleshoot.rst
rename to docs/root/source/network-setup/k8s-deployment-template/troubleshoot.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/upgrade-on-kubernetes.rst b/docs/root/source/network-setup/k8s-deployment-template/upgrade-on-kubernetes.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/upgrade-on-kubernetes.rst
rename to docs/root/source/network-setup/k8s-deployment-template/upgrade-on-kubernetes.rst
diff --git a/docs/root/source/installation/network-setup/k8s-deployment-template/workflow.rst b/docs/root/source/network-setup/k8s-deployment-template/workflow.rst
similarity index 100%
rename from docs/root/source/installation/network-setup/k8s-deployment-template/workflow.rst
rename to docs/root/source/network-setup/k8s-deployment-template/workflow.rst
diff --git a/docs/root/source/installation/network-setup/network-setup.md b/docs/root/source/network-setup/network-setup.md
similarity index 86%
rename from docs/root/source/installation/network-setup/network-setup.md
rename to docs/root/source/network-setup/network-setup.md
index 8c666b4..8ccebe0 100644
--- a/docs/root/source/installation/network-setup/network-setup.md
+++ b/docs/root/source/network-setup/network-setup.md
@@ -155,13 +155,12 @@ recheck = false
Note: The list of `persistent_peers` doesn't have to include all nodes
in the network.
-## Member: Start MongoDB
+## Member: Start Tarantool
-If you installed MongoDB using `sudo apt install mongodb`, then MongoDB should already be running in the background. You can check using `systemctl status mongodb`.
+You can install Tarantool as described [here](https://www.tarantool.io/ru/download/os-installation/ubuntu/).
-If MongoDB isn't running, then you can start it using the command `mongod`, but that will run it in the foreground. If you want to run it in the background (so it will continue running after you logout), you can use `mongod --fork --logpath /var/log/mongodb.log`. (You might have to create the `/var/log` directory if it doesn't already exist.)
+You can start it using the command `tarantool`. To run it in the background (so it will continue running after you logout), you have to create a listener with `box.cfg{listen=3301}`.
-If you installed MongoDB using `sudo apt install mongodb`, then a MongoDB startup script should already be installed (so MongoDB will start automatically when the machine is restarted). Otherwise, you should install a startup script for MongoDB.
## Member: Start Planetmint and Tendermint Using Monit
@@ -199,7 +198,7 @@ If you want to start and manage the Planetmint and Tendermint processes yourself
## How Others Can Access Your Node
-If you followed the above instructions, then your node should be publicly-accessible with Planetmint Root URL `https://hostname` or `http://hostname:9984`. That is, anyone can interact with your node using the [Planetmint HTTP API](../api/http-client-server-api) exposed at that address. The most common way to do that is to use one of the [Planetmint Drivers](../../drivers/index).
+If you followed the above instructions, then your node should be publicly-accessible with Planetmint Root URL `https://hostname` or `http://hostname:9984`. That is, anyone can interact with your node using the [Planetmint HTTP API](../connecting/http-client-server-api) exposed at that address. The most common way to do that is to use one of the [Planetmint Drivers](../connecting/drivers).
[bdb:software]: https://github.com/planetmint/planetmint/
[bdb:pypi]: https://pypi.org/project/Planetmint/#history
diff --git a/docs/root/source/installation/network-setup/networks.md b/docs/root/source/network-setup/networks.md
similarity index 94%
rename from docs/root/source/installation/network-setup/networks.md
rename to docs/root/source/network-setup/networks.md
index fbe3d8a..6007306 100644
--- a/docs/root/source/installation/network-setup/networks.md
+++ b/docs/root/source/network-setup/networks.md
@@ -25,7 +25,7 @@ We now describe how *we* set up the external (public-facing) DNS records for a P
There were several goals:
* Allow external users/clients to connect directly to any Planetmint node in the network (over the internet), if they want.
-* Each Planetmint node operator should get an SSL certificate for their Planetmint node, so that their Planetmint node can serve the [Planetmint HTTP API](../api/http-client-server-api) via HTTPS. (The same certificate might also be used to serve the [WebSocket API](../api/websocket-event-stream-api).)
+* Each Planetmint node operator should get an SSL certificate for their Planetmint node, so that their Planetmint node can serve the [Planetmint HTTP API](../connecting/http-client-server-api) via HTTPS. (The same certificate might also be used to serve the [WebSocket API](../connecting/websocket-event-stream-api).)
* There should be no sharing of SSL certificates among Planetmint node operators.
* Optional: Allow clients to connect to a "random" Planetmint node in the network at one particular domain (or subdomain).
diff --git a/docs/root/source/installation/node-setup/all-in-one-planetmint.md b/docs/root/source/node-setup/all-in-one-planetmint.md
similarity index 82%
rename from docs/root/source/installation/node-setup/all-in-one-planetmint.md
rename to docs/root/source/node-setup/all-in-one-planetmint.md
index 19af49f..946222e 100644
--- a/docs/root/source/installation/node-setup/all-in-one-planetmint.md
+++ b/docs/root/source/node-setup/all-in-one-planetmint.md
@@ -15,7 +15,7 @@ Docker image and a
This image contains all the services required for a Planetmint node i.e.
- Planetmint Server
-- MongoDB
+- Tarantool
- Tendermint
**Note:** **NOT for Production Use:** *This is an single node opinionated image not well suited for a network deployment.*
@@ -40,10 +40,9 @@ $ docker run \
--name planetmint \
--publish 9984:9984 \
--publish 9985:9985 \
- --publish 27017:27017 \
+ --publish 3303:3303 \
--publish 26657:26657 \
- --volume $HOME/planetmint_docker/mongodb/data/db:/data/db \
- --volume $HOME/planetmint_docker/mongodb/data/configdb:/data/configdb \
+ --volume $HOME/planetmint_docker/tarantool:/var/lib/tarantool \
--volume $HOME/planetmint_docker/tendermint:/tendermint \
planetmint/planetmint:all-in-one
```
@@ -55,14 +54,12 @@ Let's analyze that command:
* `publish 9984:9984` map the host port `9984` to the container port `9984`
(the Planetmint API server)
* `9985` Planetmint Websocket server
- * `27017` Default port for MongoDB
* `26657` Tendermint RPC server
-* `--volume "$HOME/planetmint_docker/mongodb:/data"` map the host directory
- `$HOME/planetmint_docker/mongodb` to the container directory `/data`;
- this allows us to have the data persisted on the host machine,
+ * `3303` Configured port for Tarantool
+* `$HOME/planetmint_docker/tarantool:/var/lib/tarantool` this allows us to have the data persisted on the host machine,
you can read more in the [official Docker
documentation](https://docs.docker.com/engine/tutorials/dockervolumes)
- * `$HOME/planetmint_docker/tendermint:/tendermint` to persist Tendermint data.
+* `$HOME/planetmint_docker/tendermint:/tendermint` to persist Tendermint data.
* `planetmint/planetmint:all-in-one` the image to use. All the options after the container name are passed on to the entrypoint inside the container.
## Verify
@@ -71,7 +68,7 @@ Let's analyze that command:
$ docker ps | grep planetmint
```
-Send your first transaction using [Planetmint drivers](../../drivers/index).
+Send your first transaction using [Planetmint drivers](../connecting/drivers).
## Building Your Own Image
diff --git a/docs/root/source/installation/node-setup/aws-setup.md b/docs/root/source/node-setup/aws-setup.md
similarity index 100%
rename from docs/root/source/installation/node-setup/aws-setup.md
rename to docs/root/source/node-setup/aws-setup.md
diff --git a/docs/root/source/installation/node-setup/configuration.md b/docs/root/source/node-setup/configuration.md
similarity index 73%
rename from docs/root/source/installation/node-setup/configuration.md
rename to docs/root/source/node-setup/configuration.md
index 2eefac3..3f2f293 100644
--- a/docs/root/source/installation/node-setup/configuration.md
+++ b/docs/root/source/node-setup/configuration.md
@@ -22,28 +22,24 @@ The value of each setting is determined according to the following rules:
* Otherwise, use the default value
The local config file is `$HOME/.planetmint` by default (a file which might not even exist), but you can tell Planetmint to use a different file by using the `-c` command-line option, e.g. `planetmint -c path/to/config_file.json start`
-or using the `PLANETMINT_CONFIG_PATH` environment variable, e.g. `BIGHAINDB_CONFIG_PATH=.my_planetmint_config planetmint start`.
+or using the `PLANETMINT_CONFIG_PATH` environment variable, e.g. `PLANETMINT_CONFIG_PATH=.my_planetmint_config planetmint start`.
Note that the `-c` command line option will always take precedence if both the `PLANETMINT_CONFIG_PATH` and the `-c` command line option are used.
You can read the current default values in the file [planetmint/\_\_init\_\_.py](https://github.com/planetmint/planetmint/blob/master/planetmint/__init__.py). (The link is to the latest version.)
-Running `planetmint -y configure localmongodb` will generate a local config file in `$HOME/.planetmint` with all the default values.
## database.*
The settings with names of the form `database.*` are for the backend database
-(currently only MongoDB). They are:
+(currently only Tarantool). They are:
-* `database.backend` can only be `localmongodb`, currently.
+* `database.backend` can only be `tarantool`, currently.
* `database.host` is the hostname (FQDN) of the backend database.
* `database.port` is self-explanatory.
-* `database.name` is a user-chosen name for the database inside MongoDB, e.g. `planetmint`.
-* `database.connection_timeout` is the maximum number of milliseconds that Planetmint will wait before giving up on one attempt to connect to the backend database.
-* `database.max_tries` is the maximum number of times that Planetmint will try to establish a connection with the backend database. If 0, then it will try forever.
-* `database.replicaset` is the name of the MongoDB replica set. The default value is `null` because in Planetmint 2.0+, each Planetmint node has its own independent MongoDB database and no replica set is necessary. Replica set must already exist if this option is configured, Planetmint will not create it.
-* `database.ssl` must be `true` or `false`. It tells Planetmint Server whether it should connect to MongoDB using TLS/SSL or not. The default value is `false`.
+* `database.user` is a user-chosen name for the database inside Tarantool, e.g. `planetmint`.
+* `database.pass` is the password of the user for connecting to the Tarantool listener.
-There are three ways for Planetmint Server to authenticate itself with MongoDB (or a specific MongoDB database): no authentication, username/password, and x.509 certificate authentication.
+There are two ways for Planetmint Server to authenticate itself with Tarantool (or a specific Tarantool service): no authentication, username/password.
**No Authentication**
@@ -51,65 +47,25 @@ If you use all the default Planetmint configuration settings, then no authentica
**Username/Password Authentication**
-To use username/password authentication, a MongoDB instance must already be running somewhere (maybe in another machine), it must already have a database for use by Planetmint (usually named `planetmint`, which is the default `database.name`), and that database must already have a "readWrite" user with associated username and password. To create such a user, login to your MongoDB instance as Admin and run the following commands:
-
-```text
-use
-db.createUser({user: "", pwd: "", roles: [{role: "readWrite", db: ""}]})
-```
-
-* `database.login` is the user's username.
-* `database.password` is the user's password, given in plaintext.
-* `database.ca_cert`, `database.certfile`, `database.keyfile`, `database.crlfile`, and `database.keyfile_passphrase` are not used so they can have their default values.
-
-**x.509 Certificate Authentication**
-
-To use x.509 certificate authentication, a MongoDB instance must be running somewhere (maybe in another machine), it must already have a database for use by Planetmint (usually named `planetmint`, which is the default `database.name`), and that database must be set up to use x.509 authentication. See the MongoDB docs about how to do that.
-
-* `database.login` is the user's username.
-* `database.password` isn't used so the default value (`null`) is fine.
-* `database.ca_cert`, `database.certfile`, `database.keyfile` and `database.crlfile` are the paths to the CA, signed certificate, private key and certificate revocation list files respectively.
-* `database.keyfile_passphrase` is the private key decryption passphrase, specified in plaintext.
-
-**Example using environment variables**
-
-```text
-export PLANETMINT_DATABASE_BACKEND=localmongodb
-export PLANETMINT_DATABASE_HOST=localhost
-export PLANETMINT_DATABASE_PORT=27017
-export PLANETMINT_DATABASE_NAME=database8
-export PLANETMINT_DATABASE_CONNECTION_TIMEOUT=5000
-export PLANETMINT_DATABASE_MAX_TRIES=3
-```
+To use username/password authentication, a Tarantool instance must already be running somewhere (maybe on another machine), it must already have spaces for use by Planetmint, and that database must already have a "readWrite" user with an associated username and password.
**Default values**
-If (no environment variables were set and there's no local config file), or you used `planetmint -y configure localmongodb` to create a default local config file for a `localmongodb` backend, then the defaults will be:
-
```js
"database": {
- "backend": "localmongodb",
+ "backend": "tarantool",
"host": "localhost",
- "port": 27017,
- "name": "planetmint",
- "connection_timeout": 5000,
- "max_tries": 3,
- "replicaset": null,
- "login": null,
+ "port": 3301,
+ "username": null,
"password": null
- "ssl": false,
- "ca_cert": null,
- "certfile": null,
- "keyfile": null,
- "crlfile": null,
- "keyfile_passphrase": null,
+
}
```
## server.*
`server.bind`, `server.loglevel` and `server.workers`
-are settings for the [Gunicorn HTTP server](http://gunicorn.org/), which is used to serve the [HTTP client-server API](../api/http-client-server-api).
+are settings for the [Gunicorn HTTP server](http://gunicorn.org/), which is used to serve the [HTTP client-server API](../connecting/http-client-server-api).
`server.bind` is where to bind the Gunicorn HTTP server socket. It's a string. It can be any valid value for [Gunicorn's bind setting](http://docs.gunicorn.org/en/stable/settings.html#bind). For example:
@@ -166,7 +122,7 @@ export PLANETMINT_SERVER_WORKERS=5
These settings are for the
[aiohttp server](https://aiohttp.readthedocs.io/en/stable/index.html),
which is used to serve the
-[WebSocket Event Stream API](../api/websocket-event-stream-api).
+[WebSocket Event Stream API](../connecting/websocket-event-stream-api).
`wsserver.scheme` should be either `"ws"` or `"wss"`
(but setting it to `"wss"` does *not* enable SSL/TLS).
`wsserver.host` is where to bind the aiohttp server socket and
diff --git a/docs/root/source/installation/node-setup/deploy-a-machine.md b/docs/root/source/node-setup/deploy-a-machine.md
similarity index 96%
rename from docs/root/source/installation/node-setup/deploy-a-machine.md
rename to docs/root/source/node-setup/deploy-a-machine.md
index dc84990..2b0c149 100644
--- a/docs/root/source/installation/node-setup/deploy-a-machine.md
+++ b/docs/root/source/node-setup/deploy-a-machine.md
@@ -25,7 +25,7 @@ using private IP addresses, but we don't cover that here.)
## Operating System
-**Use Ubuntu 18.04 or Ubuntu Server 18.04 as the operating system.**
+**Use Ubuntu 18.04 Server or above versions as the operating system.**
Similar instructions will work on other versions of Ubuntu,
and other recent Debian-like Linux distros,
diff --git a/docs/root/source/node-setup/index.rst b/docs/root/source/node-setup/index.rst
new file mode 100644
index 0000000..a25273b
--- /dev/null
+++ b/docs/root/source/node-setup/index.rst
@@ -0,0 +1,31 @@
+
+.. Copyright © 2020 Interplanetary Database Association e.V.,
+ Planetmint and IPDB software contributors.
+ SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+ Code is Apache-2.0 and docs are CC-BY-4.0
+
+Node setup
+==========
+
+You can use the all-in-one docker solution, or install Tendermint, Tarantool, and Planetmint step by step. For more advanced users and for development, the second option is recommended.
+
+
+
+.. include:: deploy-a-machine.md
+ :parser: myst_parser.sphinx_
+.. include:: aws-setup.md
+ :parser: myst_parser.sphinx_
+.. include:: all-in-one-planetmint.md
+ :parser: myst_parser.sphinx_
+.. include:: planetmint-node-ansible.md
+ :parser: myst_parser.sphinx_
+.. include:: set-up-node-software.md
+ :parser: myst_parser.sphinx_
+.. include:: set-up-nginx.md
+ :parser: myst_parser.sphinx_
+.. include:: configuration.md
+ :parser: myst_parser.sphinx_
+.. include:: production-node/index.rst
+ :parser: myst_parser.sphinx_
+
+
diff --git a/docs/root/source/installation/node-setup/planetmint-node-ansible.md b/docs/root/source/node-setup/planetmint-node-ansible.md
similarity index 100%
rename from docs/root/source/installation/node-setup/planetmint-node-ansible.md
rename to docs/root/source/node-setup/planetmint-node-ansible.md
diff --git a/docs/root/source/node-setup/production-node/index.rst b/docs/root/source/node-setup/production-node/index.rst
new file mode 100644
index 0000000..724c9eb
--- /dev/null
+++ b/docs/root/source/node-setup/production-node/index.rst
@@ -0,0 +1,20 @@
+
+.. Copyright © 2020 Interplanetary Database Association e.V.,
+ Planetmint and IPDB software contributors.
+ SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+ Code is Apache-2.0 and docs are CC-BY-4.0
+
+Production Nodes
+================
+
+.. include:: node-requirements.md
+ :parser: myst_parser.sphinx_
+.. include:: node-assumptions.md
+ :parser: myst_parser.sphinx_
+.. include:: node-components.md
+ :parser: myst_parser.sphinx_
+.. include:: node-security-and-privacy.md
+ :parser: myst_parser.sphinx_
+.. include:: reverse-proxy-notes.md
+ :parser: myst_parser.sphinx_
+
diff --git a/docs/root/source/installation/node-setup/production-node/node-assumptions.md b/docs/root/source/node-setup/production-node/node-assumptions.md
similarity index 100%
rename from docs/root/source/installation/node-setup/production-node/node-assumptions.md
rename to docs/root/source/node-setup/production-node/node-assumptions.md
diff --git a/docs/root/source/installation/node-setup/production-node/node-components.md b/docs/root/source/node-setup/production-node/node-components.md
similarity index 76%
rename from docs/root/source/installation/node-setup/production-node/node-components.md
rename to docs/root/source/node-setup/production-node/node-components.md
index 44f2abe..a1759e7 100644
--- a/docs/root/source/installation/node-setup/production-node/node-components.md
+++ b/docs/root/source/node-setup/production-node/node-components.md
@@ -10,17 +10,15 @@ Code is Apache-2.0 and docs are CC-BY-4.0
A production Planetmint node must include:
* Planetmint Server
-* MongoDB Server 3.4+ (mongod)
+* Tarantool
* Tendermint
* Storage for MongoDB and Tendermint
It could also include several other components, including:
* NGINX or similar, to provide authentication, rate limiting, etc.
-* An NTP daemon running on all machines running Planetmint Server or mongod, and possibly other machines
-* Probably _not_ MongoDB Automation Agent. It's for automating the deployment of an entire MongoDB cluster.
-* MongoDB Monitoring Agent
-* MongoDB Backup Agent
+* An NTP daemon running on all machines running Planetmint Server or Tarantool, and possibly other machines
+
* Log aggregation software
* Monitoring software
* Maybe more
diff --git a/docs/root/source/installation/node-setup/production-node/node-requirements.md b/docs/root/source/node-setup/production-node/node-requirements.md
similarity index 88%
rename from docs/root/source/installation/node-setup/production-node/node-requirements.md
rename to docs/root/source/node-setup/production-node/node-requirements.md
index 077a638..453d7c7 100644
--- a/docs/root/source/installation/node-setup/production-node/node-requirements.md
+++ b/docs/root/source/node-setup/production-node/node-requirements.md
@@ -7,7 +7,7 @@ Code is Apache-2.0 and docs are CC-BY-4.0
# Production Node Requirements
-**This page is about the requirements of Planetmint Server.** You can find the requirements of MongoDB, Tendermint and other [production node components](node-components) in the documentation for that software.
+**This page is about the requirements of Planetmint Server.** You can find the requirements of Tarantool, Tendermint and other [production node components](node-components) in the documentation for that software.
## OS Requirements
diff --git a/docs/root/source/installation/node-setup/production-node/node-security-and-privacy.md b/docs/root/source/node-setup/production-node/node-security-and-privacy.md
similarity index 93%
rename from docs/root/source/installation/node-setup/production-node/node-security-and-privacy.md
rename to docs/root/source/node-setup/production-node/node-security-and-privacy.md
index 4841c94..779d1de 100644
--- a/docs/root/source/installation/node-setup/production-node/node-security-and-privacy.md
+++ b/docs/root/source/node-setup/production-node/node-security-and-privacy.md
@@ -14,5 +14,5 @@ Here are some references about how to secure an Ubuntu 18.04 server:
Also, here are some recommendations a node operator can follow to enhance the privacy of the data coming to, stored on, and leaving their node:
-- Ensure that all data stored on a node is encrypted at rest, e.g. using full disk encryption. This can be provided as a service by the operating system, transparently to Planetmint, MongoDB and Tendermint.
+- Ensure that all data stored on a node is encrypted at rest, e.g. using full disk encryption. This can be provided as a service by the operating system, transparently to Planetmint, Tarantool and Tendermint.
- Ensure that all data is encrypted in transit, i.e. enforce using HTTPS for the HTTP API and the Websocket API. This can be done using NGINX or similar, as we do with the IPDB Testnet.
diff --git a/docs/root/source/installation/node-setup/production-node/reverse-proxy-notes.md b/docs/root/source/node-setup/production-node/reverse-proxy-notes.md
similarity index 100%
rename from docs/root/source/installation/node-setup/production-node/reverse-proxy-notes.md
rename to docs/root/source/node-setup/production-node/reverse-proxy-notes.md
diff --git a/docs/root/source/installation/node-setup/set-up-nginx.md b/docs/root/source/node-setup/set-up-nginx.md
similarity index 100%
rename from docs/root/source/installation/node-setup/set-up-nginx.md
rename to docs/root/source/node-setup/set-up-nginx.md
diff --git a/docs/root/source/installation/node-setup/set-up-node-software.md b/docs/root/source/node-setup/set-up-node-software.md
similarity index 78%
rename from docs/root/source/installation/node-setup/set-up-node-software.md
rename to docs/root/source/node-setup/set-up-node-software.md
index afce6d6..5cfa42b 100644
--- a/docs/root/source/installation/node-setup/set-up-node-software.md
+++ b/docs/root/source/node-setup/set-up-node-software.md
@@ -5,11 +5,11 @@ SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
Code is Apache-2.0 and docs are CC-BY-4.0
--->
-# Set Up Planetmint, MongoDB and Tendermint
+# Set Up Planetmint, Tarantool and Tendermint
We now install and configure software that must run
in every Planetmint node: Planetmint Server,
-MongoDB and Tendermint.
+Tarantool and Tendermint.
## Install Planetmint Server
@@ -69,25 +69,21 @@ under `"wsserver"`:
where `bnode.example.com` should be replaced by your node's actual subdomain.
-## Install (and Start) MongoDB
+## Install (and Start) Tarantool
-Install a recent version of MongoDB.
+Install a recent version of Tarantool.
Planetmint Server requires version 3.4 or newer.
```
-sudo apt install mongodb
+curl -L https://tarantool.io/DDJLJzv/release/2.8/installer.sh | bash
+
+sudo apt-get -y install tarantool
```
-If you install MongoDB using the above command (which installs the `mongodb` package),
-it also configures MongoDB, starts MongoDB (in the background),
-and installs a MongoDB startup script
-(so that MongoDB will be started automatically when the machine is restarted).
+## Sharding with Tarantool
-Note: The `mongodb` package is _not_ the official MongoDB package
-from MongoDB the company. If you want to install the official MongoDB package,
-please see
-[the MongoDB documentation](https://docs.mongodb.com/manual/installation/).
-Note that installing the official package _doesn't_ also start MongoDB.
+If the load on a single node becomes too large, Tarantool allows for sharding to scale horizontally.
+For more information on how to setup sharding with Tarantool please refer to the [official Tarantool documentation](https://www.tarantool.io/en/doc/latest/reference/reference_rock/vshard/vshard_index/).
## Install Tendermint
diff --git a/docs/root/source/terminology.md b/docs/root/source/terminology.md
index 6827de7..e9b09af 100644
--- a/docs/root/source/terminology.md
+++ b/docs/root/source/terminology.md
@@ -11,8 +11,6 @@ There is some specialized terminology associated with Planetmint. To get started
## Planetmint Node
-**Planetmint node** is a machine (or logical machine) running [Planetmint Server](https://docs.planetmint.com/projects/server/en/latest/introduction.html) and related software. Each node is controlled by one person or organization.
-
**Planetmint node** is a machine (or logical machine) running [Planetmint Server](https://docs.planetmint.io/projects/server/en/latest/introduction.html) and related software. Each node is controlled by one person or organization.
## Planetmint Network
@@ -27,6 +25,10 @@ The people and organizations that run the nodes in a Planetmint network belong t
A Planetmint network is just a bunch of connected nodes. A consortium is an organization which has a Planetmint network, and where each node in that network has a different operator.
+## Validators
+
+A validator node is a Planetmint node that is a validator as it is defined for Tendermint (see [Tendermint Validator](https://docs.tendermint.com/master/nodes/validators.html)).
+
## Transactions
Are described in detail in `Planetmint Transactions Spec `_ .
@@ -78,7 +80,7 @@ You could do more elaborate things too. As one example, each time someone writes
### Role-Based Access Control (RBAC)
-In September 2017, we published a [blog post about how one can define an RBAC sub-system on top of Planetmint](https://blog.planetmint.com/role-based-access-control-for-planetmint-assets-b7cada491997).
+In September 2017, we published a [blog post about how one can define an RBAC sub-system on top of Planetmint](https://blog.bigchaindb.com/role-based-access-control-for-bigchaindb-assets-b7cada491997).
At the time of writing (January 2018), doing so required the use of a plugin, so it's not possible using standard Planetmint (which is what's available on the [IPDB Testnet](https://test.ipdb.io/>). That may change in the future.
If you're interested, `contact IPDB `_.
diff --git a/docs/root/source/tools/index.rst b/docs/root/source/tools/index.rst
new file mode 100644
index 0000000..ebebdc3
--- /dev/null
+++ b/docs/root/source/tools/index.rst
@@ -0,0 +1,8 @@
+Tools
+=====
+
+You can use the all-in-one docker solution, or install Tendermint, Tarantool, and Planetmint step by step. For more advanced users and for development, the second option is recommended.
+
+
+.. include:: planetmint-cli.md
+ :parser: myst_parser.sphinx_
diff --git a/docs/root/source/installation/node-setup/planetmint-cli.md b/docs/root/source/tools/planetmint-cli.md
similarity index 91%
rename from docs/root/source/installation/node-setup/planetmint-cli.md
rename to docs/root/source/tools/planetmint-cli.md
index 08706ae..a29e855 100644
--- a/docs/root/source/installation/node-setup/planetmint-cli.md
+++ b/docs/root/source/tools/planetmint-cli.md
@@ -22,38 +22,38 @@ Show the version number. `planetmint -v` does the same thing.
## planetmint configure
-Generate a local configuration file (which can be used to set some or all [Planetmint node configuration settings](configuration)). It will ask you for the values of some configuration settings.
+Generate a local configuration file (which can be used to set some or all [Planetmint node configuration settings](../node-setup/configuration)). It will ask you for the values of some configuration settings.
If you press Enter for a value, it will use the default value.
-At this point, only one database backend is supported: `localmongodb`.
+At this point, only one database backend is supported: `tarantool`.
If you use the `-c` command-line option, it will generate the file at the specified path:
```text
-planetmint -c path/to/new_config.json configure localmongodb
+planetmint -c path/to/new_config.json configure tarantool
```
If you don't use the `-c` command-line option, the file will be written to `$HOME/.planetmint` (the default location where Planetmint looks for a config file, if one isn't specified).
If you use the `-y` command-line option, then there won't be any interactive prompts: it will use the default values for all the configuration settings.
```text
-planetmint -y configure localmongodb
+planetmint -y configure tarantool
```
## planetmint show-config
-Show the values of the [Planetmint node configuration settings](configuration).
+Show the values of the [Planetmint node configuration settings](../node-setup/configuration).
## planetmint init
-Create a backend database (local MongoDB), all database tables/collections,
+Create a backend database (local tarantool), all database tables/collections,
various backend database indexes, and the genesis block.
## planetmint drop
-Drop (erase) the backend database (the local MongoDB database used by this node).
+Drop (erase) the backend database (the local tarantool database used by this node).
You will be prompted to make sure.
If you want to force-drop the database (i.e. skipping the yes/no prompt), then use `planetmint -y drop`
@@ -79,7 +79,7 @@ section of Python's documentation.
For a more fine-grained control over the logging configuration you can use the
configuration file as documented under
-[Configuration Settings](configuration).
+[Configuration Settings](../node-setup/configuration).
## planetmint election
@@ -94,7 +94,7 @@ Create a new election which proposes a change to the Planetmint network.
If the command succeeds, it will post an election transaction and output `election_id`.
-The election proposal consists of vote tokens allocated to every current validator proportional to his voting power. Validators spend their votes to approve the election using the [election-approve command](#election-approve).
+The election proposal consists of vote tokens allocated to every current validator proportional to his voting power. Validators spend their votes to approve the election using the [election-approve command](election-approve).
Every election has a type. Currently supported types are `upsert-validator` and `chain-migration`. Their transaction operations are `VALIDATOR_ELECTION` and `CHAIN_MIGRATION` accordingly. See below for how to create an election of a particular type.
@@ -148,11 +148,12 @@ $ planetmint election new migration --private-key /home/user/.tendermint/config/
```
Concluded chain migration elections halt block production at whichever block height they are approved.
-Afterwards, validators are supposed to upgrade Tendermint, set new `chain_id`, `app_hash`, and `validators` (to learn these values, use the [election show](#election-show) command) in `genesis.json`, make and save a MongoDB dump, and restart the system.
+Afterwards, validators are supposed to upgrade Tendermint, set new `chain_id`, `app_hash`, and `validators` (to learn these values, use the [election show](#election-show) command) in `genesis.json`, make and save a tarantool dump, and restart the system.
For more details about how chain migrations work, refer to [Type 3 scenarios in BEP-42](https://github.com/planetmint/BEPs/tree/master/42).
+(election-approve)=
### election approve
Approve an election by voting for it. The command places a `VOTE` transaction, spending all of the validator's vote tokens to the election address.
@@ -173,6 +174,7 @@ $ planetmint election approve 04a067582cf03eba2b53b82e4adb5ece424474cbd4f7183780
Once a proposal has been approved by the sufficient amount of validators (contributing more than `2/3` of the total voting power), the proposed change is applied to the network.
+(election-show)=
### election show
Retrieves the information about elections.
diff --git a/docs/root/source/installation/node-setup/troubleshooting.md b/docs/root/source/troubleshooting.md
similarity index 88%
rename from docs/root/source/installation/node-setup/troubleshooting.md
rename to docs/root/source/troubleshooting.md
index aa679c0..4bda6ba 100644
--- a/docs/root/source/installation/node-setup/troubleshooting.md
+++ b/docs/root/source/troubleshooting.md
@@ -2,14 +2,14 @@
## General Tips
-- Check the Planetmint, Tendermint and MongoDB logs.
+- Check the Planetmint, Tendermint and Tarantool logs.
For help with that, see the page about [Logging and Log Rotation](../appendices/log-rotation).
- Try Googling the error message.
## Tendermint Tips
-* [Configure Tendermint to create no empty blocks](https://tendermint.io/docs/tendermint-core/using-tendermint.html#no-empty-blocks).
-* Store the Tendermint data on a fast drive. You can do that by changing [the location of TMHOME](https://tendermint.io/docs/tendermint-core/using-tendermint.html#directory-root) to be on the fast drive.
+* [Configure Tendermint to create no empty blocks](https://tendermint.com/docs/tendermint-core/using-tendermint.html#no-empty-blocks).
+* Store the Tendermint data on a fast drive. You can do that by changing [the location of TMHOME](https://tendermint.com/docs/tendermint-core/using-tendermint.html#directory-root) to be on the fast drive.
See the [Tendermint tips in the vrde/notes repository](https://github.com/vrde/notes/tree/master/tendermint).
@@ -36,7 +36,7 @@ addr_book_strict = false
If you want to refresh your node back to a fresh empty state, then your best bet is to terminate it and deploy a new machine, but if that's not an option, then you can:
-* drop the `planetmint` database in MongoDB using `planetmint drop` (but that only works if MongoDB is running)
+* drop the `planetmint` database in tarantool using `planetmint drop` (but that only works if tarantool is running)
* reset Tendermint using `tendermint unsafe_reset_all`
* delete the directory `$HOME/.tendermint`
@@ -84,7 +84,7 @@ If you started Planetmint in the foreground, a `Ctrl + C` or `Ctrl + Z` would sh
## Member: Dynamically Add or Remove Validators
-One member can make a proposal to call an election to add a validator, remove a validator, or change the voting power of a validator. They then share the election/proposal ID with all the other members. Once more than 2/3 of the voting power votes yes, the proposed change comes into effect. The commands to create a new election/proposal, to approve an election/proposal, and to get the current status of an election/proposal can be found in the documentation about the [planetmint election](../server-reference/planetmint-cli#planetmint-election) subcommands.
+One member can make a proposal to call an election to add a validator, remove a validator, or change the voting power of a validator. They then share the election/proposal ID with all the other members. Once more than 2/3 of the voting power votes yes, the proposed change comes into effect. The commands to create a new election/proposal, to approve an election/proposal, and to get the current status of an election/proposal can be found in the documentation about the [planetmint election](tools/planetmint-cli#planetmint-election) subcommands.
## Logging
diff --git a/integration/python/Dockerfile b/integration/python/Dockerfile
index 036d92d..c710550 100644
--- a/integration/python/Dockerfile
+++ b/integration/python/Dockerfile
@@ -6,16 +6,16 @@ RUN apt-get update \
&& apt-get clean
RUN apt-get install -y vim
RUN apt-get update
-RUN apt-get install -y build-essential cmake openssh-client openssh-server
+RUN apt-get install -y build-essential cmake openssh-client openssh-server git
RUN apt-get install -y zsh
RUN mkdir -p /src
RUN pip install --upgrade meson ninja
RUN pip install --upgrade \
pytest~=6.2.5 \
- git+https://github.com/planetmint/cryptoconditions.git@asset-migration \
- git+https://github.com/planetmint/planetmint-driver-python.git@asset-migration \
pycco \
websocket-client~=0.47.0 \
+ planetmint-cryptoconditions>=0.10.0 \
+ planetmint-driver>=9.2.0 \
blns
-
+RUN pip install base58 pynacl==1.4.0 zenroom==2.1.0.dev1655293214 pyasn1==0.4.8 cryptography==3.4.7
diff --git a/integration/python/src/conftest.py b/integration/python/src/conftest.py
index 3afba13..747e527 100644
--- a/integration/python/src/conftest.py
+++ b/integration/python/src/conftest.py
@@ -5,64 +5,50 @@
import pytest
-GENERATE_KEYPAIR = \
- """Rule input encoding base58
- Rule output encoding base58
- Scenario 'ecdh': Create the keypair
- Given that I am known as 'Pippo'
- When I create the ecdh key
- When I create the testnet key
- Then print data"""
+CONDITION_SCRIPT = """Scenario 'ecdh': create the signature of an object
+ Given I have the 'keyring'
+ Given that I have a 'string dictionary' named 'houses'
+ When I create the signature of 'houses'
+ Then print the 'signature'"""
-# secret key to public key
-SK_TO_PK = \
- """Rule input encoding base58
- Rule output encoding base58
- Scenario 'ecdh': Create the keypair
- Given that I am known as '{}'
- Given I have the 'keys'
- When I create the ecdh public key
- When I create the testnet address
- Then print my 'ecdh public key'
- Then print my 'testnet address'"""
-
-FULFILL_SCRIPT = \
- """Rule input encoding base58
- Rule output encoding base58
- Scenario 'ecdh': Bob verifies the signature from Alice
+FULFILL_SCRIPT = """Scenario 'ecdh': Bob verifies the signature from Alice
Given I have a 'ecdh public key' from 'Alice'
- Given that I have a 'string dictionary' named 'houses' inside 'asset'
- Given I have a 'signature' named 'data.signature' inside 'result'
- When I verify the 'houses' has a signature in 'data.signature' by 'Alice'
+ Given that I have a 'string dictionary' named 'houses'
+ Given I have a 'signature' named 'signature'
+ When I verify the 'houses' has a signature in 'signature' by 'Alice'
Then print the string 'ok'"""
-HOUSE_ASSETS = [{
- "data": {
- "houses": [
- {
- "name": "Harry",
- "team": "Gryffindor",
- },
- {
- "name": "Draco",
- "team": "Slytherin",
- }
- ],
- }
-}]
+SK_TO_PK = """Scenario 'ecdh': Create the keypair
+ Given that I am known as '{}'
+ Given I have the 'keyring'
+ When I create the ecdh public key
+ When I create the bitcoin address
+ Then print my 'ecdh public key'
+ Then print my 'bitcoin address'"""
-ZENROOM_DATA = {
- 'also': 'more data'
+GENERATE_KEYPAIR = """Scenario 'ecdh': Create the keypair
+ Given that I am known as 'Pippo'
+ When I create the ecdh key
+ When I create the bitcoin key
+ Then print data"""
+
+INITIAL_STATE = {"also": "more data"}
+SCRIPT_INPUT = {
+ "houses": [
+ {
+ "name": "Harry",
+ "team": "Gryffindor",
+ },
+ {
+ "name": "Draco",
+ "team": "Slytherin",
+ },
+ ],
}
-CONDITION_SCRIPT = """Rule input encoding base58
- Rule output encoding base58
- Scenario 'ecdh': create the signature of an object
- Given I have the 'keys'
- Given that I have a 'string dictionary' named 'houses' inside 'asset'
- When I create the signature of 'houses'
- When I rename the 'signature' to 'data.signature'
- Then print the 'data.signature'"""
+metadata = {"units": 300, "type": "KG"}
+
+ZENROOM_DATA = {"that": "is my data"}
@pytest.fixture
@@ -87,7 +73,12 @@ def condition_script_zencode():
@pytest.fixture
def zenroom_house_assets():
- return HOUSE_ASSETS
+ return SCRIPT_INPUT
+
+
+@pytest.fixture
+def zenroom_script_input():
+ return SCRIPT_INPUT
@pytest.fixture
diff --git a/integration/python/src/helper/hosts.py b/integration/python/src/helper/hosts.py
index b14f875..a76e238 100644
--- a/integration/python/src/helper/hosts.py
+++ b/integration/python/src/helper/hosts.py
@@ -32,5 +32,4 @@ class Hosts:
def assert_transaction(self, tx_id) -> None:
txs = self.get_transactions(tx_id)
for tx in txs:
- assert txs[0] == tx, \
- 'Cannot find transaction {}'.format(tx_id)
+ assert txs[0] == tx, "Cannot find transaction {}".format(tx_id)
diff --git a/integration/python/src/test_basic.py b/integration/python/src/test_basic.py
index 49ee745..31c877c 100644
--- a/integration/python/src/test_basic.py
+++ b/integration/python/src/test_basic.py
@@ -14,7 +14,7 @@ import time
def test_basic():
# Setup up connection to Planetmint integration test nodes
- hosts = Hosts('/shared/hostnames')
+ hosts = Hosts("/shared/hostnames")
pm_alpha = hosts.get_connection()
# genarate a keypair
@@ -22,62 +22,63 @@ def test_basic():
# create a digital asset for Alice
game_boy_token = [{
- 'data': {
- 'hash': '0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF',
- 'storageID': '0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF',
+ "data": {
+ "hash": "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
+ "storageID": "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
},
}]
# prepare the transaction with the digital asset and issue 10 tokens to bob
prepared_creation_tx = pm_alpha.transactions.prepare(
- operation='CREATE',
+ operation="CREATE",
metadata={
- 'hash': '0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF',
- 'storageID': '0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF', },
+ "hash": "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
+ "storageID": "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
+ },
signers=alice.public_key,
recipients=[([alice.public_key], 10)],
assets=game_boy_token)
# fulfill and send the transaction
- fulfilled_creation_tx = pm_alpha.transactions.fulfill(
- prepared_creation_tx,
- private_keys=alice.private_key)
+ fulfilled_creation_tx = pm_alpha.transactions.fulfill(prepared_creation_tx, private_keys=alice.private_key)
pm_alpha.transactions.send_commit(fulfilled_creation_tx)
time.sleep(1)
- creation_tx_id = fulfilled_creation_tx['id']
+ creation_tx_id = fulfilled_creation_tx["id"]
# Assert that transaction is stored on all planetmint nodes
hosts.assert_transaction(creation_tx_id)
# Transfer
# create the output and inout for the transaction
- transfer_assets = [{'id': creation_tx_id}]
+ transfer_assets = [{"id": creation_tx_id}]
output_index = 0
- output = fulfilled_creation_tx['outputs'][output_index]
- transfer_input = {'fulfillment': output['condition']['details'],
- 'fulfills': {'output_index': output_index,
- 'transaction_id': transfer_assets[0]['id']},
- 'owners_before': output['public_keys']}
+ output = fulfilled_creation_tx["outputs"][output_index]
+ transfer_input = {
+ "fulfillment": output["condition"]["details"],
+ "fulfills": {"output_index": output_index, "transaction_id": transfer_assets[0]["id"]},
+ "owners_before": output["public_keys"],
+ }
# prepare the transaction and use 3 tokens
prepared_transfer_tx = pm_alpha.transactions.prepare(
- operation='TRANSFER',
- assets=transfer_assets,
+ operation="TRANSFER",
+ asset=transfer_assets,
inputs=transfer_input,
- metadata={'hash': '0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF',
- 'storageID': '0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF', },
- recipients=[([alice.public_key], 10)])
+ metadata={
+ "hash": "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
+ "storageID": "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
+ },
+ recipients=[([alice.public_key], 10)],
+ )
# fulfill and send the transaction
- fulfilled_transfer_tx = pm_alpha.transactions.fulfill(
- prepared_transfer_tx,
- private_keys=alice.private_key)
+ fulfilled_transfer_tx = pm_alpha.transactions.fulfill(prepared_transfer_tx, private_keys=alice.private_key)
sent_transfer_tx = pm_alpha.transactions.send_commit(fulfilled_transfer_tx)
time.sleep(1)
- transfer_tx_id = sent_transfer_tx['id']
+ transfer_tx_id = sent_transfer_tx["id"]
# Assert that transaction is stored on both planetmint nodes
hosts.assert_transaction(transfer_tx_id)
diff --git a/integration/python/src/test_divisible_asset.py b/integration/python/src/test_divisible_asset.py
index 0fa4cc1..0558704 100644
--- a/integration/python/src/test_divisible_asset.py
+++ b/integration/python/src/test_divisible_asset.py
@@ -15,9 +15,6 @@
# We run a series of checks for each step, that is retrieving
# the transaction from the remote system, and also checking the `amount`
# of a given transaction.
-#
-# This integration test is a rip-off of our
-# [tutorial](https://docs.planetmint.com/projects/py-driver/en/latest/usage.html).
# ## Imports
# We need the `pytest` package to catch the `BadRequest` exception properly.
@@ -36,7 +33,7 @@ def test_divisible_assets():
# ## Set up a connection to Planetmint
# Check [test_basic.py](./test_basic.html) to get some more details
# about the endpoint.
- hosts = Hosts('/shared/hostnames')
+ hosts = Hosts("/shared/hostnames")
pm = hosts.get_connection()
# Oh look, it is Alice again and she brought her friend Bob along.
@@ -51,13 +48,9 @@ def test_divisible_assets():
# the bike for one hour.
bike_token = [{
- 'data': {
- 'token_for': {
- 'bike': {
- 'serial_number': 420420
- }
- },
- 'description': 'Time share token. Each token equals one hour of riding.',
+ "data": {
+ "token_for": {"bike": {"serial_number": 420420}},
+ "description": "Time share token. Each token equals one hour of riding.",
},
}]
@@ -65,28 +58,22 @@ def test_divisible_assets():
# Here, Alice defines in a tuple that she wants to assign
# these 10 tokens to Bob.
prepared_token_tx = pm.transactions.prepare(
- operation='CREATE',
- signers=alice.public_key,
- recipients=[([bob.public_key], 10)],
- assets=bike_token)
+ operation="CREATE", signers=alice.public_key, recipients=[([bob.public_key], 10)], assets=bike_token
+ )
# She fulfills and sends the transaction.
- fulfilled_token_tx = pm.transactions.fulfill(
- prepared_token_tx,
- private_keys=alice.private_key)
+ fulfilled_token_tx = pm.transactions.fulfill(prepared_token_tx, private_keys=alice.private_key)
pm.transactions.send_commit(fulfilled_token_tx)
# We store the `id` of the transaction to use it later on.
- bike_token_id = fulfilled_token_tx['id']
+ bike_token_id = fulfilled_token_tx["id"]
# Let's check if the transaction was successful.
- assert pm.transactions.retrieve(bike_token_id), \
- 'Cannot find transaction {}'.format(bike_token_id)
+ assert pm.transactions.retrieve(bike_token_id), "Cannot find transaction {}".format(bike_token_id)
# Bob owns 10 tokens now.
- assert pm.transactions.retrieve(bike_token_id)['outputs'][0][
- 'amount'] == '10'
+ assert pm.transactions.retrieve(bike_token_id)["outputs"][0]["amount"] == "10"
# ## Bob wants to use the bike
# Now that Bob got the tokens and the sun is shining, he wants to get out
@@ -94,51 +81,47 @@ def test_divisible_assets():
# To use the bike he has to send the tokens back to Alice.
# To learn about the details of transferring a transaction check out
# [test_basic.py](./test_basic.html)
- transfer_assets = [{'id': bike_token_id}]
+ transfer_assets = [{"id": bike_token_id}]
output_index = 0
- output = fulfilled_token_tx['outputs'][output_index]
- transfer_input = {'fulfillment': output['condition']['details'],
- 'fulfills': {'output_index': output_index,
- 'transaction_id': fulfilled_token_tx[
- 'id']},
- 'owners_before': output['public_keys']}
+ output = fulfilled_token_tx["outputs"][output_index]
+ transfer_input = {
+ "fulfillment": output["condition"]["details"],
+ "fulfills": {"output_index": output_index, "transaction_id": fulfilled_token_tx["id"]},
+ "owners_before": output["public_keys"],
+ }
# To use the tokens Bob has to reassign 7 tokens to himself and the
# amount he wants to use to Alice.
prepared_transfer_tx = pm.transactions.prepare(
- operation='TRANSFER',
- assets=transfer_assets,
+ operation="TRANSFER",
+ asset=transfer_assets,
inputs=transfer_input,
- recipients=[([alice.public_key], 3), ([bob.public_key], 7)])
+ recipients=[([alice.public_key], 3), ([bob.public_key], 7)],
+ )
# He signs and sends the transaction.
- fulfilled_transfer_tx = pm.transactions.fulfill(
- prepared_transfer_tx,
- private_keys=bob.private_key)
+ fulfilled_transfer_tx = pm.transactions.fulfill(prepared_transfer_tx, private_keys=bob.private_key)
sent_transfer_tx = pm.transactions.send_commit(fulfilled_transfer_tx)
# First, Bob checks if the transaction was successful.
- assert pm.transactions.retrieve(
- fulfilled_transfer_tx['id']) == sent_transfer_tx
+ assert pm.transactions.retrieve(fulfilled_transfer_tx["id"]) == sent_transfer_tx
- hosts.assert_transaction(fulfilled_transfer_tx['id'])
+ hosts.assert_transaction(fulfilled_transfer_tx["id"])
# There are two outputs in the transaction now.
# The first output shows that Alice got back 3 tokens...
- assert pm.transactions.retrieve(
- fulfilled_transfer_tx['id'])['outputs'][0]['amount'] == '3'
+ assert pm.transactions.retrieve(fulfilled_transfer_tx["id"])["outputs"][0]["amount"] == "3"
# ... while Bob still has 7 left.
- assert pm.transactions.retrieve(
- fulfilled_transfer_tx['id'])['outputs'][1]['amount'] == '7'
+ assert pm.transactions.retrieve(fulfilled_transfer_tx["id"])["outputs"][1]["amount"] == "7"
# ## Bob wants to ride the bike again
# It's been a week and Bob wants to right the bike again.
# Now he wants to ride for 8 hours, that's a lot Bob!
# He prepares the transaction again.
- transfer_assets = [{'id': bike_token_id}]
+ transfer_assets = [{"id": bike_token_id}]
# This time we need an `output_index` of 1, since we have two outputs
# in the `fulfilled_transfer_tx` we created before. The first output with
# index 0 is for Alice and the second output is for Bob.
@@ -146,24 +129,21 @@ def test_divisible_assets():
# correct output with the correct amount of tokens.
output_index = 1
- output = fulfilled_transfer_tx['outputs'][output_index]
+ output = fulfilled_transfer_tx["outputs"][output_index]
- transfer_input = {'fulfillment': output['condition']['details'],
- 'fulfills': {'output_index': output_index,
- 'transaction_id': fulfilled_transfer_tx['id']},
- 'owners_before': output['public_keys']}
+ transfer_input = {
+ "fulfillment": output["condition"]["details"],
+ "fulfills": {"output_index": output_index, "transaction_id": fulfilled_transfer_tx["id"]},
+ "owners_before": output["public_keys"],
+ }
# This time Bob only provides Alice in the `recipients` because he wants
# to spend all his tokens
prepared_transfer_tx = pm.transactions.prepare(
- operation='TRANSFER',
- assets=transfer_assets,
- inputs=transfer_input,
- recipients=[([alice.public_key], 8)])
+ operation="TRANSFER", assets=transfer_assets, inputs=transfer_input, recipients=[([alice.public_key], 8)]
+ )
- fulfilled_transfer_tx = pm.transactions.fulfill(
- prepared_transfer_tx,
- private_keys=bob.private_key)
+ fulfilled_transfer_tx = pm.transactions.fulfill(prepared_transfer_tx, private_keys=bob.private_key)
# Oh Bob, what have you done?! You tried to spend more tokens than you had.
# Remember Bob, last time you spent 3 tokens already,
@@ -174,10 +154,12 @@ def test_divisible_assets():
# Now Bob gets an error saying that the amount he wanted to spent is
# higher than the amount of tokens he has left.
assert error.value.args[0] == 400
- message = 'Invalid transaction (AmountError): The amount used in the ' \
- 'inputs `7` needs to be same as the amount used in the ' \
- 'outputs `8`'
- assert error.value.args[2]['message'] == message
+ message = (
+ "Invalid transaction (AmountError): The amount used in the "
+ "inputs `7` needs to be same as the amount used in the "
+ "outputs `8`"
+ )
+ assert error.value.args[2]["message"] == message
# We have to stop this test now, I am sorry, but Bob is pretty upset
# about his mistake. See you next time :)
diff --git a/integration/python/src/test_double_spend.py b/integration/python/src/test_double_spend.py
index 0d1e988..4de502e 100644
--- a/integration/python/src/test_double_spend.py
+++ b/integration/python/src/test_double_spend.py
@@ -16,33 +16,31 @@ from .helper.hosts import Hosts
def test_double_create():
- hosts = Hosts('/shared/hostnames')
+ hosts = Hosts("/shared/hostnames")
pm = hosts.get_connection()
alice = generate_keypair()
results = queue.Queue()
tx = pm.transactions.fulfill(
- pm.transactions.prepare(
- operation='CREATE',
- signers=alice.public_key,
- assets=[{'data': {'uuid': str(uuid4())}}]),
- private_keys=alice.private_key)
+ pm.transactions.prepare(operation="CREATE", signers=alice.public_key, assets=[{"data": {"uuid": str(uuid4())}}]),
+ private_keys=alice.private_key,
+ )
def send_and_queue(tx):
try:
pm.transactions.send_commit(tx)
- results.put('OK')
+ results.put("OK")
except planetmint_driver.exceptions.TransportError:
- results.put('FAIL')
+ results.put("FAIL")
- t1 = Thread(target=send_and_queue, args=(tx, ))
- t2 = Thread(target=send_and_queue, args=(tx, ))
+ t1 = Thread(target=send_and_queue, args=(tx,))
+ t2 = Thread(target=send_and_queue, args=(tx,))
t1.start()
t2.start()
results = [results.get(timeout=2), results.get(timeout=2)]
- assert results.count('OK') == 1
- assert results.count('FAIL') == 1
+ assert results.count("OK") == 1
+ assert results.count("FAIL") == 1
diff --git a/integration/python/src/test_multiple_owners.py b/integration/python/src/test_multiple_owners.py
index c5e1f9f..d8e7f72 100644
--- a/integration/python/src/test_multiple_owners.py
+++ b/integration/python/src/test_multiple_owners.py
@@ -15,8 +15,6 @@
# We run a series of checks for each step, that is retrieving
# the transaction from the remote system, and also checking the public keys
# of a given transaction.
-#
-# This integration test is a rip-off of our mutliple signature acceptance tests.
# # Imports
import time
@@ -30,7 +28,7 @@ from .helper.hosts import Hosts
def test_multiple_owners():
# Setup up connection to Planetmint integration test nodes
- hosts = Hosts('/shared/hostnames')
+ hosts = Hosts("/shared/hostnames")
pm_alpha = hosts.get_connection()
# Generate Keypairs for Alice and Bob!
@@ -41,32 +39,22 @@ def test_multiple_owners():
# high rents anymore. Bob suggests to get a dish washer for the
# kitchen. Alice agrees and here they go, creating the asset for their
# dish washer.
- dw_asset = [{
- 'data': {
- 'dish washer': {
- 'serial_number': 1337
- }
- }
- }]
+ dw_asset = [{"data": {"dish washer": {"serial_number": 1337}}}]
# They prepare a `CREATE` transaction. To have multiple owners, both
# Bob and Alice need to be the recipients.
prepared_dw_tx = pm_alpha.transactions.prepare(
- operation='CREATE',
- signers=alice.public_key,
- recipients=(alice.public_key, bob.public_key),
- assets=dw_asset)
+ operation="CREATE", signers=alice.public_key, recipients=(alice.public_key, bob.public_key), assets=dw_asset
+ )
# Now they both sign the transaction by providing their private keys.
# And send it afterwards.
- fulfilled_dw_tx = pm_alpha.transactions.fulfill(
- prepared_dw_tx,
- private_keys=[alice.private_key, bob.private_key])
+ fulfilled_dw_tx = pm_alpha.transactions.fulfill(prepared_dw_tx, private_keys=[alice.private_key, bob.private_key])
pm_alpha.transactions.send_commit(fulfilled_dw_tx)
# We store the `id` of the transaction to use it later on.
- dw_id = fulfilled_dw_tx['id']
+ dw_id = fulfilled_dw_tx["id"]
time.sleep(1)
@@ -74,12 +62,10 @@ def test_multiple_owners():
hosts.assert_transaction(dw_id)
# Let's check if the transaction was successful.
- assert pm_alpha.transactions.retrieve(dw_id), \
- 'Cannot find transaction {}'.format(dw_id)
+ assert pm_alpha.transactions.retrieve(dw_id), "Cannot find transaction {}".format(dw_id)
# The transaction should have two public keys in the outputs.
- assert len(
- pm_alpha.transactions.retrieve(dw_id)['outputs'][0]['public_keys']) == 2
+ assert len(pm_alpha.transactions.retrieve(dw_id)["outputs"][0]["public_keys"]) == 2
# ## Alice and Bob transfer a transaction to Carol.
# Alice and Bob save a lot of money living together. They often go out
@@ -91,43 +77,39 @@ def test_multiple_owners():
# Alice and Bob prepare the transaction to transfer the dish washer to
# Carol.
- transfer_assets = [{'id': dw_id}]
+ transfer_assets = [{"id": dw_id}]
output_index = 0
- output = fulfilled_dw_tx['outputs'][output_index]
- transfer_input = {'fulfillment': output['condition']['details'],
- 'fulfills': {'output_index': output_index,
- 'transaction_id': fulfilled_dw_tx[
- 'id']},
- 'owners_before': output['public_keys']}
+ output = fulfilled_dw_tx["outputs"][output_index]
+ transfer_input = {
+ "fulfillment": output["condition"]["details"],
+ "fulfills": {"output_index": output_index, "transaction_id": fulfilled_dw_tx["id"]},
+ "owners_before": output["public_keys"],
+ }
# Now they create the transaction...
prepared_transfer_tx = pm_alpha.transactions.prepare(
- operation='TRANSFER',
- assets=transfer_assets,
- inputs=transfer_input,
- recipients=carol.public_key)
+ operation="TRANSFER", assets=transfer_assets, inputs=transfer_input, recipients=carol.public_key
+ )
# ... and sign it with their private keys, then send it.
fulfilled_transfer_tx = pm_alpha.transactions.fulfill(
- prepared_transfer_tx,
- private_keys=[alice.private_key, bob.private_key])
+ prepared_transfer_tx, private_keys=[alice.private_key, bob.private_key]
+ )
sent_transfer_tx = pm_alpha.transactions.send_commit(fulfilled_transfer_tx)
time.sleep(1)
# Now compare if both nodes returned the same transaction
- hosts.assert_transaction(fulfilled_transfer_tx['id'])
+ hosts.assert_transaction(fulfilled_transfer_tx["id"])
# They check if the transaction was successful.
- assert pm_alpha.transactions.retrieve(
- fulfilled_transfer_tx['id']) == sent_transfer_tx
+ assert pm_alpha.transactions.retrieve(fulfilled_transfer_tx["id"]) == sent_transfer_tx
# The owners before should include both Alice and Bob.
- assert len(
- pm_alpha.transactions.retrieve(fulfilled_transfer_tx['id'])['inputs'][0][
- 'owners_before']) == 2
+ assert len(pm_alpha.transactions.retrieve(fulfilled_transfer_tx["id"])["inputs"][0]["owners_before"]) == 2
# While the new owner is Carol.
- assert pm_alpha.transactions.retrieve(fulfilled_transfer_tx['id'])[
- 'outputs'][0]['public_keys'][0] == carol.public_key
+ assert (
+ pm_alpha.transactions.retrieve(fulfilled_transfer_tx["id"])["outputs"][0]["public_keys"][0] == carol.public_key
+ )
diff --git a/integration/python/src/test_naughty_strings.py b/integration/python/src/test_naughty_strings.py
index 700d1d0..921d718 100644
--- a/integration/python/src/test_naughty_strings.py
+++ b/integration/python/src/test_naughty_strings.py
@@ -27,6 +27,40 @@ from planetmint_driver.exceptions import BadRequest
from .helper.hosts import Hosts
naughty_strings = blns.all()
+skipped_naughty_strings = [
+ "1.00",
+ "$1.00",
+ "-1.00",
+ "-$1.00",
+ "0.00",
+ "0..0",
+ ".",
+ "0.0.0",
+ "-.",
+ ",./;'[]\\-=",
+ "ثم نفس سقطت وبالتحديد،, جزيرتي باستخدام أن دنو. إذ هنا؟ الستار وتنصيب كان. أهّل ايطاليا، بريطانيا-فرنسا قد أخذ. سليمان، إتفاقية بين ما, يذكر الحدود أي بعد, معاملة بولندا، الإطلاق عل إيو.",
+ "test\x00",
+ "Ṱ̺̺̕o͞ ̷i̲̬͇̪͙n̝̗͕v̟̜̘̦͟o̶̙̰̠kè͚̮̺̪̹̱̤ ̖t̝͕̳̣̻̪͞h̼͓̲̦̳̘̲e͇̣̰̦̬͎ ̢̼̻̱̘h͚͎͙̜̣̲ͅi̦̲̣̰̤v̻͍e̺̭̳̪̰-m̢iͅn̖̺̞̲̯̰d̵̼̟͙̩̼̘̳ ̞̥̱̳̭r̛̗̘e͙p͠r̼̞̻̭̗e̺̠̣͟s̘͇̳͍̝͉e͉̥̯̞̲͚̬͜ǹ̬͎͎̟̖͇̤t͍̬̤͓̼̭͘ͅi̪̱n͠g̴͉ ͏͉ͅc̬̟h͡a̫̻̯͘o̫̟̖͍̙̝͉s̗̦̲.̨̹͈̣",
+ "̡͓̞ͅI̗̘̦͝n͇͇͙v̮̫ok̲̫̙͈i̖͙̭̹̠̞n̡̻̮̣̺g̲͈͙̭͙̬͎ ̰t͔̦h̞̲e̢̤ ͍̬̲͖f̴̘͕̣è͖ẹ̥̩l͖͔͚i͓͚̦͠n͖͍̗͓̳̮g͍ ̨o͚̪͡f̘̣̬ ̖̘͖̟͙̮c҉͔̫͖͓͇͖ͅh̵̤̣͚͔á̗̼͕ͅo̼̣̥s̱͈̺̖̦̻͢.̛̖̞̠̫̰",
+ "̗̺͖̹̯͓Ṯ̤͍̥͇͈h̲́e͏͓̼̗̙̼̣͔ ͇̜̱̠͓͍ͅN͕͠e̗̱z̘̝̜̺͙p̤̺̹͍̯͚e̠̻̠͜r̨̤͍̺̖͔̖̖d̠̟̭̬̝͟i̦͖̩͓͔̤a̠̗̬͉̙n͚͜ ̻̞̰͚ͅh̵͉i̳̞v̢͇ḙ͎͟-҉̭̩̼͔m̤̭̫i͕͇̝̦n̗͙ḍ̟ ̯̲͕͞ǫ̟̯̰̲͙̻̝f ̪̰̰̗̖̭̘͘c̦͍̲̞͍̩̙ḥ͚a̮͎̟̙͜ơ̩̹͎s̤.̝̝ ҉Z̡̖̜͖̰̣͉̜a͖̰͙̬͡l̲̫̳͍̩g̡̟̼̱͚̞̬ͅo̗͜.̟",
+ "̦H̬̤̗̤͝e͜ ̜̥̝̻͍̟́w̕h̖̯͓o̝͙̖͎̱̮ ҉̺̙̞̟͈W̷̼̭a̺̪͍į͈͕̭͙̯̜t̶̼̮s̘͙͖̕ ̠̫̠B̻͍͙͉̳ͅe̵h̵̬͇̫͙i̹͓̳̳̮͎̫̕n͟d̴̪̜̖ ̰͉̩͇͙̲͞ͅT͖̼͓̪͢h͏͓̮̻e̬̝̟ͅ ̤̹̝W͙̞̝͔͇͝ͅa͏͓͔̹̼̣l̴͔̰̤̟͔ḽ̫.͕",
+ '">',
+ "'>",
+ ">",
+ "",
+ "< / script >< script >alert(document.title)< / script >",
+ " onfocus=alert(document.title) autofocus ",
+ '" onfocus=alert(document.title) autofocus ',
+ "' onfocus=alert(document.title) autofocus ",
+ "<script>alert(document.title)</script>",
+ "/dev/null; touch /tmp/blns.fail ; echo",
+ "../../../../../../../../../../../etc/passwd%00",
+ "../../../../../../../../../../../etc/hosts",
+ "() { 0; }; touch /tmp/blns.shellshock1.fail;",
+ "() { _; } >_[$($())] { touch /tmp/blns.shellshock2.fail; }",
+]
+
+naughty_strings = [naughty for naughty in naughty_strings if naughty not in skipped_naughty_strings]
# This is our base test case, but we'll reuse it to send naughty strings as both keys and values.
@@ -34,7 +68,7 @@ def send_naughty_tx(assets, metadata):
# ## Set up a connection to Planetmint
# Check [test_basic.py](./test_basic.html) to get some more details
# about the endpoint.
- hosts = Hosts('/shared/hostnames')
+ hosts = Hosts("/shared/hostnames")
pm = hosts.get_connection()
# Here's Alice.
@@ -42,15 +76,11 @@ def send_naughty_tx(assets, metadata):
# Alice is in a naughty mood today, so she creates a tx with some naughty strings
prepared_transaction = pm.transactions.prepare(
- operation='CREATE',
- signers=alice.public_key,
- assets=assets,
- metadata=metadata)
+ operation="CREATE", signers=alice.public_key, assets=assets, metadata=metadata
+ )
# She fulfills the transaction
- fulfilled_transaction = pm.transactions.fulfill(
- prepared_transaction,
- private_keys=alice.private_key)
+ fulfilled_transaction = pm.transactions.fulfill(prepared_transaction, private_keys=alice.private_key)
# The fulfilled tx gets sent to the pm network
try:
@@ -59,23 +89,24 @@ def send_naughty_tx(assets, metadata):
sent_transaction = e
# If her key contained a '.', began with a '$', or contained a NUL character
- regex = r'.*\..*|\$.*|.*\x00.*'
+ regex = r".*\..*|\$.*|.*\x00.*"
key = next(iter(metadata))
if re.match(regex, key):
# Then she expects a nicely formatted error code
status_code = sent_transaction.status_code
error = sent_transaction.error
regex = (
- r'\{\s*\n*'
+ r"\{\s*\n*"
r'\s*"message":\s*"Invalid transaction \(ValidationError\):\s*'
- r'Invalid key name.*The key name cannot contain characters.*\n*'
+ r"Invalid key name.*The key name cannot contain characters.*\n*"
r'\s*"status":\s*400\n*'
- r'\s*\}\n*')
+ r"\s*\}\n*"
+ )
assert status_code == 400
assert re.fullmatch(regex, error), sent_transaction
# Otherwise, she expects to see her transaction in the database
- elif 'id' in sent_transaction.keys():
- tx_id = sent_transaction['id']
+ elif "id" in sent_transaction.keys():
+ tx_id = sent_transaction["id"]
assert pm.transactions.retrieve(tx_id)
# If neither condition was true, then something weird happened...
else:
@@ -85,8 +116,8 @@ def send_naughty_tx(assets, metadata):
@pytest.mark.parametrize("naughty_string", naughty_strings, ids=naughty_strings)
def test_naughty_keys(naughty_string):
- assets = [{'data': {naughty_string: 'nice_value'}}]
- metadata = {naughty_string: 'nice_value'}
+ assets = [{"data": {naughty_string: "nice_value"}}]
+ metadata = {naughty_string: "nice_value"}
send_naughty_tx(assets, metadata)
@@ -94,7 +125,7 @@ def test_naughty_keys(naughty_string):
@pytest.mark.parametrize("naughty_string", naughty_strings, ids=naughty_strings)
def test_naughty_values(naughty_string):
- assets = [{'data': {'nice_key': naughty_string}}]
- metadata = {'nice_key': naughty_string}
+ assets = [{"data": {"nice_key": naughty_string}}]
+ metadata = {"nice_key": naughty_string}
send_naughty_tx(assets, metadata)
diff --git a/integration/python/src/test_stream.py b/integration/python/src/test_stream.py
index 3be9ec3..15e1970 100644
--- a/integration/python/src/test_stream.py
+++ b/integration/python/src/test_stream.py
@@ -35,11 +35,11 @@ def test_stream():
# ## Set up the test
# We use the env variable `BICHAINDB_ENDPOINT` to know where to connect.
# Check [test_basic.py](./test_basic.html) for more information.
- hosts = Hosts('/shared/hostnames')
+ hosts = Hosts("/shared/hostnames")
pm = hosts.get_connection()
# *That's pretty bad, but let's do like this for now.*
- WS_ENDPOINT = 'ws://{}:9985/api/v1/streams/valid_transactions'.format(hosts.hostnames[0])
+ WS_ENDPOINT = "ws://{}:9985/api/v1/streams/valid_transactions".format(hosts.hostnames[0])
# Hello to Alice again, she is pretty active in those tests, good job
# Alice!
@@ -89,11 +89,11 @@ def test_stream():
# random `uuid`.
for _ in range(10):
tx = pm.transactions.fulfill(
- pm.transactions.prepare(
- operation='CREATE',
- signers=alice.public_key,
- assets=[{'data': {'uuid': str(uuid4())}}]),
- private_keys=alice.private_key)
+ pm.transactions.prepare(
+ operation="CREATE", signers=alice.public_key, assets=[{"data": {"uuid": str(uuid4())}}]
+ ),
+ private_keys=alice.private_key,
+ )
# We don't want to wait for each transaction to be in a block. By using
# `async` mode, we make sure that the driver returns as soon as the
# transaction is pushed to the Planetmint API. Remember: we expect all
@@ -103,7 +103,7 @@ def test_stream():
pm.transactions.send_async(tx)
# The `id` of every sent transaction is then stored in a list.
- sent.append(tx['id'])
+ sent.append(tx["id"])
# ## Check the valid transactions coming from Planetmint
# Now we are ready to check if Planetmint did its job. A simple way to
@@ -117,9 +117,9 @@ def test_stream():
# the timeout, then game over ¯\\\_(ツ)\_/¯
try:
event = received.get(timeout=5)
- txid = json.loads(event)['transaction_id']
+ txid = json.loads(event)["transaction_id"]
except queue.Empty:
- assert False, 'Did not receive all expected transactions'
+ assert False, "Did not receive all expected transactions"
# Last thing is to try to remove the `txid` from the set of sent
# transactions. If this test is running in parallel with others, we
diff --git a/integration/python/src/test_threshold.py b/integration/python/src/test_threshold.py
index 8b6db4a..07c76e1 100644
--- a/integration/python/src/test_threshold.py
+++ b/integration/python/src/test_threshold.py
@@ -18,27 +18,22 @@ from .helper.hosts import Hosts
def prepare_condition_details(condition: ThresholdSha256):
- condition_details = {
- 'subconditions': [],
- 'threshold': condition.threshold,
- 'type': condition.TYPE_NAME
- }
+ condition_details = {"subconditions": [], "threshold": condition.threshold, "type": condition.TYPE_NAME}
for s in condition.subconditions:
- if (s['type'] == 'fulfillment' and s['body'].TYPE_NAME == 'ed25519-sha-256'):
- condition_details['subconditions'].append({
- 'type': s['body'].TYPE_NAME,
- 'public_key': base58.b58encode(s['body'].public_key).decode()
- })
+ if s["type"] == "fulfillment" and s["body"].TYPE_NAME == "ed25519-sha-256":
+ condition_details["subconditions"].append(
+ {"type": s["body"].TYPE_NAME, "public_key": base58.b58encode(s["body"].public_key).decode()}
+ )
else:
- condition_details['subconditions'].append(prepare_condition_details(s['body']))
+ condition_details["subconditions"].append(prepare_condition_details(s["body"]))
return condition_details
def test_threshold():
# Setup connection to test nodes
- hosts = Hosts('/shared/hostnames')
+ hosts = Hosts("/shared/hostnames")
pm = hosts.get_connection()
# Generate Keypars for Alice, Bob an Carol!
@@ -49,13 +44,7 @@ def test_threshold():
# high rents anymore. Bob suggests to get a dish washer for the
# kitchen. Alice agrees and here they go, creating the asset for their
# dish washer.
- dw_asset = [{
- 'data': {
- 'dish washer': {
- 'serial_number': 1337
- }
- }
- }]
+ dw_asset = [{"data": {"dish washer": {"serial_number": 1337}}}]
# Create subfulfillments
alice_ed25519 = Ed25519Sha256(public_key=base58.b58decode(alice.public_key))
@@ -74,37 +63,37 @@ def test_threshold():
# Assemble output and input for the handcrafted tx
output = {
- 'amount': '1',
- 'condition': {
- 'details': condition_details,
- 'uri': condition_uri,
+ "amount": "1",
+ "condition": {
+ "details": condition_details,
+ "uri": condition_uri,
},
- 'public_keys': (alice.public_key, bob.public_key, carol.public_key),
+ "public_keys": (alice.public_key, bob.public_key, carol.public_key),
}
# The yet to be fulfilled input:
input_ = {
- 'fulfillment': None,
- 'fulfills': None,
- 'owners_before': (alice.public_key, bob.public_key),
+ "fulfillment": None,
+ "fulfills": None,
+ "owners_before": (alice.public_key, bob.public_key),
}
# Assemble the handcrafted transaction
handcrafted_dw_tx = {
- 'operation': 'CREATE',
- 'assets': dw_asset,
- 'metadata': None,
- 'outputs': (output,),
- 'inputs': (input_,),
- 'version': '2.0',
- 'id': None,
+ "operation": "CREATE",
+ "asset": dw_asset,
+ "metadata": None,
+ "outputs": (output,),
+ "inputs": (input_,),
+ "version": "2.0",
+ "id": None,
}
# Create sha3-256 of message to sign
message = json.dumps(
handcrafted_dw_tx,
sort_keys=True,
- separators=(',', ':'),
+ separators=(",", ":"),
ensure_ascii=False,
)
message = sha3.sha3_256(message.encode())
@@ -121,19 +110,19 @@ def test_threshold():
fulfillment_uri = fulfillment_threshold.serialize_uri()
- handcrafted_dw_tx['inputs'][0]['fulfillment'] = fulfillment_uri
+ handcrafted_dw_tx["inputs"][0]["fulfillment"] = fulfillment_uri
# Create tx_id for handcrafted_dw_tx and send tx commit
json_str_tx = json.dumps(
handcrafted_dw_tx,
sort_keys=True,
- separators=(',', ':'),
+ separators=(",", ":"),
ensure_ascii=False,
)
dw_creation_txid = sha3.sha3_256(json_str_tx.encode()).hexdigest()
- handcrafted_dw_tx['id'] = dw_creation_txid
+ handcrafted_dw_tx["id"] = dw_creation_txid
pm.transactions.send_commit(handcrafted_dw_tx)
@@ -144,18 +133,12 @@ def test_threshold():
def test_weighted_threshold():
- hosts = Hosts('/shared/hostnames')
+ hosts = Hosts("/shared/hostnames")
pm = hosts.get_connection()
alice, bob, carol = generate_keypair(), generate_keypair(), generate_keypair()
- assets = [{
- 'data': {
- 'trashcan': {
- 'animals': ['racoon_1', 'racoon_2']
- }
- }
- }]
+ assets = [{"data": {"trashcan": {"animals": ["racoon_1", "racoon_2"]}}}]
alice_ed25519 = Ed25519Sha256(public_key=base58.b58decode(alice.public_key))
bob_ed25519 = Ed25519Sha256(public_key=base58.b58decode(bob.public_key))
@@ -175,37 +158,37 @@ def test_weighted_threshold():
# Assemble output and input for the handcrafted tx
output = {
- 'amount': '1',
- 'condition': {
- 'details': condition_details,
- 'uri': condition_uri,
+ "amount": "1",
+ "condition": {
+ "details": condition_details,
+ "uri": condition_uri,
},
- 'public_keys': (alice.public_key, bob.public_key, carol.public_key),
+ "public_keys": (alice.public_key, bob.public_key, carol.public_key),
}
# The yet to be fulfilled input:
input_ = {
- 'fulfillment': None,
- 'fulfills': None,
- 'owners_before': (alice.public_key, bob.public_key),
+ "fulfillment": None,
+ "fulfills": None,
+ "owners_before": (alice.public_key, bob.public_key),
}
# Assemble the handcrafted transaction
handcrafted_tx = {
- 'operation': 'CREATE',
- 'assets': assets,
- 'metadata': None,
- 'outputs': (output,),
- 'inputs': (input_,),
- 'version': '2.0',
- 'id': None,
+ "operation": "CREATE",
+ "asset": assets,
+ "metadata": None,
+ "outputs": (output,),
+ "inputs": (input_,),
+ "version": "2.0",
+ "id": None,
}
# Create sha3-256 of message to sign
message = json.dumps(
handcrafted_tx,
sort_keys=True,
- separators=(',', ':'),
+ separators=(",", ":"),
ensure_ascii=False,
)
message = sha3.sha3_256(message.encode())
@@ -224,19 +207,19 @@ def test_weighted_threshold():
fulfillment_uri = fulfillment_threshold.serialize_uri()
- handcrafted_tx['inputs'][0]['fulfillment'] = fulfillment_uri
+ handcrafted_tx["inputs"][0]["fulfillment"] = fulfillment_uri
# Create tx_id for handcrafted_dw_tx and send tx commit
json_str_tx = json.dumps(
handcrafted_tx,
sort_keys=True,
- separators=(',', ':'),
+ separators=(",", ":"),
ensure_ascii=False,
)
creation_tx_id = sha3.sha3_256(json_str_tx.encode()).hexdigest()
- handcrafted_tx['id'] = creation_tx_id
+ handcrafted_tx["id"] = creation_tx_id
pm.transactions.send_commit(handcrafted_tx)
@@ -254,50 +237,50 @@ def test_weighted_threshold():
# Assemble output and input for the handcrafted tx
transfer_output = {
- 'amount': '1',
- 'condition': {
- 'details': {
- 'type': alice_transfer_ed25519.TYPE_NAME,
- 'public_key': base58.b58encode(alice_transfer_ed25519.public_key).decode()
+ "amount": "1",
+ "condition": {
+ "details": {
+ "type": alice_transfer_ed25519.TYPE_NAME,
+ "public_key": base58.b58encode(alice_transfer_ed25519.public_key).decode(),
},
- 'uri': transfer_condition_uri,
+ "uri": transfer_condition_uri,
},
- 'public_keys': (alice.public_key,),
+ "public_keys": (alice.public_key,),
}
# The yet to be fulfilled input:
transfer_input_ = {
- 'fulfillment': None,
- 'fulfills': {
- 'transaction_id': creation_tx_id,
- 'output_index': 0
- },
- 'owners_before': (alice.public_key, bob.public_key, carol.public_key),
+ "fulfillment": None,
+ "fulfills": {"transaction_id": creation_tx_id, "output_index": 0},
+ "owners_before": (alice.public_key, bob.public_key, carol.public_key),
}
# Assemble the handcrafted transaction
handcrafted_transfer_tx = {
- 'operation': 'TRANSFER',
- 'assets': [{'id': creation_tx_id}],
- 'metadata': None,
- 'outputs': (transfer_output,),
- 'inputs': (transfer_input_,),
- 'version': '2.0',
- 'id': None,
+ "operation": "TRANSFER",
+ "assets": [{"id": creation_tx_id}],
+ "metadata": None,
+ "outputs": (transfer_output,),
+ "inputs": (transfer_input_,),
+ "version": "2.0",
+ "id": None,
}
# Create sha3-256 of message to sign
message = json.dumps(
handcrafted_transfer_tx,
sort_keys=True,
- separators=(',', ':'),
+ separators=(",", ":"),
ensure_ascii=False,
)
message = sha3.sha3_256(message.encode())
- message.update('{}{}'.format(
- handcrafted_transfer_tx['inputs'][0]['fulfills']['transaction_id'],
- handcrafted_transfer_tx['inputs'][0]['fulfills']['output_index']).encode())
+ message.update(
+ "{}{}".format(
+ handcrafted_transfer_tx["inputs"][0]["fulfills"]["transaction_id"],
+ handcrafted_transfer_tx["inputs"][0]["fulfills"]["output_index"],
+ ).encode()
+ )
# Sign message with Alice's und Bob's private key
bob_transfer_ed25519.sign(message.digest(), base58.b58decode(bob.private_key))
@@ -314,19 +297,19 @@ def test_weighted_threshold():
fulfillment_uri = fulfillment_threshold.serialize_uri()
- handcrafted_transfer_tx['inputs'][0]['fulfillment'] = fulfillment_uri
+ handcrafted_transfer_tx["inputs"][0]["fulfillment"] = fulfillment_uri
# Create tx_id for handcrafted_dw_tx and send tx commit
json_str_tx = json.dumps(
handcrafted_transfer_tx,
sort_keys=True,
- separators=(',', ':'),
+ separators=(",", ":"),
ensure_ascii=False,
)
transfer_tx_id = sha3.sha3_256(json_str_tx.encode()).hexdigest()
- handcrafted_transfer_tx['id'] = transfer_tx_id
+ handcrafted_transfer_tx["id"] = transfer_tx_id
pm.transactions.send_commit(handcrafted_transfer_tx)
diff --git a/integration/python/src/test_zenroom.py b/integration/python/src/test_zenroom.py
index 5fdbcea..f38db29 100644
--- a/integration/python/src/test_zenroom.py
+++ b/integration/python/src/test_zenroom.py
@@ -1,82 +1,132 @@
-# GOAL:
-# In this script I tried to implement the ECDSA signature using zenroom
-
-# However, the scripts are customizable and so with the same procedure
-# we can implement more complex smart contracts
-
-# PUBLIC IDENTITY
-# The public identity of the users in this script (Bob and Alice)
-# is the pair (ECDH public key, Testnet address)
-
import json
+import base58
+from hashlib import sha3_256
+from cryptoconditions.types.zenroom import ZenroomSha256
+from planetmint_driver.crypto import generate_keypair
-from cryptoconditions import ZenroomSha256
-from json.decoder import JSONDecodeError
+from .helper.hosts import Hosts
+from zenroom import zencode_exec
+import time
-def test_zenroom(gen_key_zencode, secret_key_to_private_key_zencode, fulfill_script_zencode,
- condition_script_zencode, zenroom_data, zenroom_house_assets):
- alice = json.loads(ZenroomSha256.run_zenroom(gen_key_zencode).output)['keys']
- bob = json.loads(ZenroomSha256.run_zenroom(gen_key_zencode).output)['keys']
+def test_zenroom_signing(
+ gen_key_zencode,
+ secret_key_to_private_key_zencode,
+ fulfill_script_zencode,
+ zenroom_data,
+ zenroom_house_assets,
+ zenroom_script_input,
+ condition_script_zencode,
+):
- zen_public_keys = json.loads(ZenroomSha256.run_zenroom(secret_key_to_private_key_zencode.format('Alice'),
- keys={'keys': alice}).output)
- zen_public_keys.update(json.loads(ZenroomSha256.run_zenroom(secret_key_to_private_key_zencode.format('Bob'),
- keys={'keys': bob}).output))
+ biolabs = generate_keypair()
+ version = "2.0"
- # CRYPTO-CONDITIONS: instantiate an Ed25519 crypto-condition for buyer
- zenSha = ZenroomSha256(script=fulfill_script_zencode, keys=zen_public_keys, data=zenroom_data)
+ alice = json.loads(zencode_exec(gen_key_zencode).output)["keyring"]
+ bob = json.loads(zencode_exec(gen_key_zencode).output)["keyring"]
+
+ zen_public_keys = json.loads(
+ zencode_exec(secret_key_to_private_key_zencode.format("Alice"), keys=json.dumps({"keyring": alice})).output
+ )
+ zen_public_keys.update(
+ json.loads(
+ zencode_exec(secret_key_to_private_key_zencode.format("Bob"), keys=json.dumps({"keyring": bob})).output
+ )
+ )
+
+ zenroomscpt = ZenroomSha256(script=fulfill_script_zencode, data=zenroom_data, keys=zen_public_keys)
+ print(f"zenroom is: {zenroomscpt.script}")
# CRYPTO-CONDITIONS: generate the condition uri
- condition_uri = zenSha.condition.serialize_uri()
+ condition_uri_zen = zenroomscpt.condition.serialize_uri()
+ print(f"\nzenroom condition URI: {condition_uri_zen}")
# CRYPTO-CONDITIONS: construct an unsigned fulfillment dictionary
- unsigned_fulfillment_dict = {
- 'type': zenSha.TYPE_NAME,
- 'script': fulfill_script_zencode,
- 'keys': zen_public_keys,
+ unsigned_fulfillment_dict_zen = {
+ "type": zenroomscpt.TYPE_NAME,
+ "public_key": base58.b58encode(biolabs.public_key).decode(),
}
-
output = {
- 'amount': '1000',
- 'condition': {
- 'details': unsigned_fulfillment_dict,
- 'uri': condition_uri,
+ "amount": "10",
+ "condition": {
+ "details": unsigned_fulfillment_dict_zen,
+ "uri": condition_uri_zen,
},
- 'data': zenroom_data,
- 'script': fulfill_script_zencode,
- 'conf': '',
- 'public_keys': (zen_public_keys['Alice']['ecdh_public_key'], ),
+ "public_keys": [
+ biolabs.public_key,
+ ],
}
-
input_ = {
- 'fulfillment': None,
- 'fulfills': None,
- 'owners_before': (zen_public_keys['Alice']['ecdh_public_key'], ),
+ "fulfillment": None,
+ "fulfills": None,
+ "owners_before": [
+ biolabs.public_key,
+ ],
+ }
+ metadata = {"result": {"output": ["ok"]}}
+
+ script_ = {
+ "code": {"type": "zenroom", "raw": "test_string", "parameters": [{"obj": "1"}, {"obj": "2"}]},
+ "state": "dd8bbd234f9869cab4cc0b84aa660e9b5ef0664559b8375804ee8dce75b10576",
+ "input": zenroom_script_input,
+ "output": ["ok"],
+ "policies": {},
}
token_creation_tx = {
- 'operation': 'CREATE',
- 'assets': zenroom_house_assets,
- 'metadata': None,
- 'outputs': (output,),
- 'inputs': (input_,),
- 'version': '2.0',
- 'id': None,
+ "operation": "CREATE",
+ "asset": {"data": {"test": "my asset"}},
+ "script": script_,
+ "metadata": metadata,
+ "outputs": [
+ output,
+ ],
+ "inputs": [
+ input_,
+ ],
+ "version": version,
+ "id": None,
}
# JSON: serialize the transaction-without-id to a json formatted string
- message = json.dumps(
+ tx = json.dumps(
token_creation_tx,
sort_keys=True,
- separators=(',', ':'),
+ separators=(",", ":"),
ensure_ascii=False,
)
+ script_ = json.dumps(script_)
+ # major workflow:
+ # we store the fulfill script in the transaction/message (zenroom-sha)
+ # the condition script is used to fulfill the transaction and create the signature
+ #
+ # the server should ick the fulfill script and recreate the zenroom-sha and verify the signature
- try:
- assert(not zenSha.validate(message=message))
- except: # noqa
- pass
+ signed_input = zenroomscpt.sign(script_, condition_script_zencode, alice)
- message = zenSha.sign(message, condition_script_zencode, alice)
- assert(zenSha.validate(message=message))
+ input_signed = json.loads(signed_input)
+ input_signed["input"]["signature"] = input_signed["output"]["signature"]
+ del input_signed["output"]["signature"]
+ del input_signed["output"]["logs"]
+ input_signed["output"] = ["ok"] # define expected output that is to be compared
+ input_msg = json.dumps(input_signed)
+
+ assert zenroomscpt.validate(message=input_msg)
+
+ tx = json.loads(tx)
+ fulfillment_uri_zen = zenroomscpt.serialize_uri()
+
+ tx["inputs"][0]["fulfillment"] = fulfillment_uri_zen
+ tx["script"] = input_signed
+ tx["id"] = None
+ json_str_tx = json.dumps(tx, sort_keys=True, skipkeys=False, separators=(",", ":"))
+ # SHA3: hash the serialized id-less transaction to generate the id
+ shared_creation_txid = sha3_256(json_str_tx.encode()).hexdigest()
+ tx["id"] = shared_creation_txid
+ hosts = Hosts("/shared/hostnames")
+ pm_alpha = hosts.get_connection()
+ sent_transfer_tx = pm_alpha.transactions.send_commit(tx)
+ time.sleep(1)
+ # Assert that transaction is stored on both planetmint nodes
+ hosts.assert_transaction(shared_creation_txid)
+ print(f"\n\nstatus and result : + {sent_transfer_tx}")
diff --git a/integration/scripts/all-in-one.bash b/integration/scripts/all-in-one.bash
index e719587..f60a581 100755
--- a/integration/scripts/all-in-one.bash
+++ b/integration/scripts/all-in-one.bash
@@ -4,14 +4,11 @@
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
-
-# MongoDB configuration
-[ "$(stat -c %U /data/db)" = mongodb ] || chown -R mongodb /data/db
-
# Planetmint configuration
/usr/src/app/scripts/planetmint-monit-config
-nohup mongod --bind_ip_all > "$HOME/.planetmint-monit/logs/mongodb_log_$(date +%Y%m%d_%H%M%S)" 2>&1 &
+# Tarantool startup and configuration
+tarantool /usr/src/app/scripts/init.lua
# Start services
monit -d 5 -I -B
\ No newline at end of file
diff --git a/integration/scripts/genesis.py b/integration/scripts/genesis.py
index 3593f34..8f21446 100755
--- a/integration/scripts/genesis.py
+++ b/integration/scripts/genesis.py
@@ -15,19 +15,19 @@ def edit_genesis() -> None:
for file_name in file_names:
file = open(file_name)
genesis = json.load(file)
- validators.extend(genesis['validators'])
+ validators.extend(genesis["validators"])
file.close()
genesis_file = open(file_names[0])
genesis_json = json.load(genesis_file)
- genesis_json['validators'] = validators
+ genesis_json["validators"] = validators
genesis_file.close()
- with open('/shared/genesis.json', 'w') as f:
+ with open("/shared/genesis.json", "w") as f:
json.dump(genesis_json, f, indent=True)
return None
-if __name__ == '__main__':
+if __name__ == "__main__":
edit_genesis()
diff --git a/integration/scripts/init.lua b/integration/scripts/init.lua
new file mode 100644
index 0000000..87fba97
--- /dev/null
+++ b/integration/scripts/init.lua
@@ -0,0 +1,86 @@
+#!/usr/bin/env tarantool
+box.cfg {
+ listen = 3303,
+ background = true,
+ log = '.planetmint-monit/logs/tarantool.log',
+ pid_file = '.planetmint-monit/monit_processes/tarantool.pid'
+}
+
+box.schema.user.grant('guest','read,write,execute,create,drop','universe')
+
+function indexed_pattern_search(space_name, field_no, pattern)
+ if (box.space[space_name] == nil) then
+ print("Error: Failed to find the specified space")
+ return nil
+ end
+ local index_no = -1
+ for i=0,box.schema.INDEX_MAX,1 do
+ if (box.space[space_name].index[i] == nil) then break end
+ if (box.space[space_name].index[i].type == "TREE"
+ and box.space[space_name].index[i].parts[1].fieldno == field_no
+ and (box.space[space_name].index[i].parts[1].type == "scalar"
+ or box.space[space_name].index[i].parts[1].type == "string")) then
+ index_no = i
+ break
+ end
+ end
+ if (index_no == -1) then
+ print("Error: Failed to find an appropriate index")
+ return nil
+ end
+ local index_search_key = ""
+ local index_search_key_length = 0
+ local last_character = ""
+ local c = ""
+ local c2 = ""
+ for i=1,string.len(pattern),1 do
+ c = string.sub(pattern, i, i)
+ if (last_character ~= "%") then
+ if (c == '^' or c == "$" or c == "(" or c == ")" or c == "."
+ or c == "[" or c == "]" or c == "*" or c == "+"
+ or c == "-" or c == "?") then
+ break
+ end
+ if (c == "%") then
+ c2 = string.sub(pattern, i + 1, i + 1)
+ if (string.match(c2, "%p") == nil) then break end
+ index_search_key = index_search_key .. c2
+ else
+ index_search_key = index_search_key .. c
+ end
+ end
+ last_character = c
+ end
+ index_search_key_length = string.len(index_search_key)
+ local result_set = {}
+ local number_of_tuples_in_result_set = 0
+ local previous_tuple_field = ""
+ while true do
+ local number_of_tuples_since_last_yield = 0
+ local is_time_for_a_yield = false
+ for _,tuple in box.space[space_name].index[index_no]:
+ pairs(index_search_key,{iterator = box.index.GE}) do
+ if (string.sub(tuple[field_no], 1, index_search_key_length)
+ > index_search_key) then
+ break
+ end
+ number_of_tuples_since_last_yield = number_of_tuples_since_last_yield + 1
+ if (number_of_tuples_since_last_yield >= 10
+ and tuple[field_no] ~= previous_tuple_field) then
+ index_search_key = tuple[field_no]
+ is_time_for_a_yield = true
+ break
+ end
+ previous_tuple_field = tuple[field_no]
+ if (string.match(tuple[field_no], pattern) ~= nil) then
+ number_of_tuples_in_result_set = number_of_tuples_in_result_set + 1
+ result_set[number_of_tuples_in_result_set] = tuple
+ end
+ end
+ if (is_time_for_a_yield ~= true) then
+ break
+ end
+ require('fiber').yield()
+ end
+ return result_set
+end
\ No newline at end of file
diff --git a/k8s/configuration/config-map.yaml b/k8s/configuration/config-map.yaml
index a284737..8d60b40 100644
--- a/k8s/configuration/config-map.yaml
+++ b/k8s/configuration/config-map.yaml
@@ -70,7 +70,7 @@ data:
openresty-backend-port: "8080"
# Planetmint configuration parameters
- # Refer https://docs.planetmint.com/projects/server/en/latest/server-reference/configuration.html
+ # Refer https://docs.planetmint.io/en/latest/node-setup/configuration.html
# planetmint-api-port is the port number on which Planetmint is listening
# for HTTP requests.
diff --git a/k8s/logging-and-monitoring/analyze.py b/k8s/logging-and-monitoring/analyze.py
index a3ca68f..d50a877 100644
--- a/k8s/logging-and-monitoring/analyze.py
+++ b/k8s/logging-and-monitoring/analyze.py
@@ -31,25 +31,27 @@ import re
from dateutil.parser import parse
-lineformat = re.compile(r'(?P\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) - - '
- r'\[(?P\d{2}\/[a-z]{3}\/\d{4}:\d{2}:\d{2}:\d{2} '
- r'(\+|\-)\d{4})\] ((\"(GET|POST) )(?P.+)(http\/1\.1")) '
- r'(?P\d{3}) '
- r'(?P\d+) '
- r'(["](?P(\-)|(.+))["]) '
- r'(["](?P.+)["])',
- re.IGNORECASE)
+lineformat = re.compile(
+ r"(?P\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) - - "
+ r"\[(?P\d{2}\/[a-z]{3}\/\d{4}:\d{2}:\d{2}:\d{2} "
+ r'(\+|\-)\d{4})\] ((\"(GET|POST) )(?P.+)(http\/1\.1")) '
+ r"(?P\d{3}) "
+ r"(?P\d+) "
+ r'(["](?P(\-)|(.+))["]) '
+ r'(["](?P.+)["])',
+ re.IGNORECASE,
+)
filepath = sys.argv[1]
logline_list = []
with open(filepath) as csvfile:
- csvreader = csv.reader(csvfile, delimiter=',')
+ csvreader = csv.reader(csvfile, delimiter=",")
for row in csvreader:
- if row and (row[8] != 'LogEntry'):
+ if row and (row[8] != "LogEntry"):
# because the first line is just the column headers, such as 'LogEntry'
logline = row[8]
- print(logline + '\n')
+ print(logline + "\n")
logline_data = re.search(lineformat, logline)
if logline_data:
logline_dict = logline_data.groupdict()
@@ -63,20 +65,19 @@ total_bytes_sent = 0
tstamp_list = []
for lldict in logline_list:
- total_bytes_sent += int(lldict['bytessent'])
- dt = lldict['dateandtime']
+ total_bytes_sent += int(lldict["bytessent"])
+ dt = lldict["dateandtime"]
# https://tinyurl.com/lqjnhot
dtime = parse(dt[:11] + " " + dt[12:])
tstamp_list.append(dtime.timestamp())
-print('Number of log lines seen: {}'.format(len(logline_list)))
+print("Number of log lines seen: {}".format(len(logline_list)))
# Time range
trange_sec = max(tstamp_list) - min(tstamp_list)
trange_days = trange_sec / 60.0 / 60.0 / 24.0
-print('Time range seen (days): {}'.format(trange_days))
+print("Time range seen (days): {}".format(trange_days))
-print('Total bytes sent: {}'.format(total_bytes_sent))
+print("Total bytes sent: {}".format(total_bytes_sent))
-print('Average bytes sent per day (out via GET): {}'.
- format(total_bytes_sent / trange_days))
+print("Average bytes sent per day (out via GET): {}".format(total_bytes_sent / trange_days))
diff --git a/k8s/nginx-https-web-proxy/nginx-https-web-proxy-conf.yaml b/k8s/nginx-https-web-proxy/nginx-https-web-proxy-conf.yaml
index 8c0c0fc..d5eedf5 100644
--- a/k8s/nginx-https-web-proxy/nginx-https-web-proxy-conf.yaml
+++ b/k8s/nginx-https-web-proxy/nginx-https-web-proxy-conf.yaml
@@ -61,10 +61,10 @@ data:
# expected-http-referer is the expected regex expression of the Referer
# header in the HTTP requests to the proxy.
- # The default below accepts the referrer value to be *.planetmint.com
- expected-http-referer: "^https://(.*)planetmint\\.com/(.*)"
+ # The default below accepts the referrer value to be *.planetmint.io
+ expected-http-referer: "^https://(.*)planetmint\\.io/(.*)"
# expected-http-origin is the expected regex expression of the Origin
# header in the HTTP requests to the proxy.
- # The default below accepts the origin value to be *.planetmint.com
- expected-http-origin: "^https://(.*)planetmint\\.com"
+ # The default below accepts the origin value to be *.planetmint.io
+ expected-http-origin: "^https://(.*)planetmint\\.io"
diff --git a/k8s/scripts/functions b/k8s/scripts/functions
index d2cc8b2..0bc37b6 100755
--- a/k8s/scripts/functions
+++ b/k8s/scripts/functions
@@ -298,7 +298,7 @@ data:
openresty-backend-port: "8080"
# Planetmint configuration parameters
- # Refer https://docs.planetmint.com/projects/server/en/latest/server-reference/configuration.html
+ # Refer https://docs.planetmint.io/en/latest/node-setup/configuration.html
# planetmint-api-port is the port number on which Planetmint is listening
# for HTTP requests.
diff --git a/k8s/scripts/vars b/k8s/scripts/vars
index f85222f..85788f4 100644
--- a/k8s/scripts/vars
+++ b/k8s/scripts/vars
@@ -1,5 +1,5 @@
# DNS name of the planetmint node
-NODE_FQDN="test.planetmint.com"
+NODE_FQDN="test.planetmint.io"
# NODE_FRONTEND_PORT is the port number on which this node's services
# are available to external clients. Default is 443(https)
diff --git a/planetmint/README.md b/planetmint/README.md
index 6ad05e4..f144c4e 100644
--- a/planetmint/README.md
+++ b/planetmint/README.md
@@ -17,7 +17,7 @@ The `Planetmint` class is defined here. Most node-level operations and database
### [`models.py`](./models.py)
-`Block`, `Transaction`, and `Asset` classes are defined here. The classes mirror the block and transaction structure from the [documentation](https://docs.planetmint.com/projects/server/en/latest/data-models/index.html), but also include methods for validation and signing.
+`Block`, `Transaction`, and `Asset` classes are defined here. The classes mirror the block and transaction structure from the documentation, but also include methods for validation and signing.
### [`validation.py`](./validation.py)
@@ -35,7 +35,7 @@ Methods for managing the configuration, including loading configuration files, a
### [`commands`](./commands)
-Contains code for the [CLI](https://docs.planetmint.com/projects/server/en/latest/server-reference/planetmint-cli.html) for Planetmint.
+Contains code for the [CLI](https://docs.planetmint.io/en/latest/tools/index.html#command-line-interface-cli) for Planetmint.
### [`db`](./db)
diff --git a/planetmint/__init__.py b/planetmint/__init__.py
index 785daef..1fa5393 100644
--- a/planetmint/__init__.py
+++ b/planetmint/__init__.py
@@ -3,104 +3,16 @@
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
-import copy
-import logging
+from transactions.common.transaction import Transaction # noqa
+from transactions.types.elections.validator_election import ValidatorElection # noqa
+from transactions.types.elections.vote import Vote # noqa
+from transactions.types.elections.chain_migration_election import ChainMigrationElection
+from planetmint.lib import Planetmint
+from planetmint.core import App
-from planetmint.log import DEFAULT_LOGGING_CONFIG as log_config
-from planetmint.lib import Planetmint # noqa
-from planetmint.migrations.chain_migration_election import ChainMigrationElection
-from planetmint.version import __version__ # noqa
-from planetmint.core import App # noqa
-# from functools import reduce
-# PORT_NUMBER = reduce(lambda x, y: x * y, map(ord, 'Planetmint')) % 2**16
-# basically, the port number is 9984
-
-# The following variable is used by `planetmint configure` to
-# prompt the user for database values. We cannot rely on
-# _base_database_localmongodb.keys() because dicts are unordered.
-# I tried to configure
-
-_database_keys_map = {
- 'localmongodb': ('host', 'port', 'name'),
-}
-
-_base_database_localmongodb = {
- 'host': 'localhost',
- 'port': 27017,
- 'name': 'bigchain',
- 'replicaset': None,
- 'login': None,
- 'password': None,
-}
-
-_database_localmongodb = {
- 'backend': 'localmongodb',
- 'connection_timeout': 5000,
- 'max_tries': 3,
- 'ssl': False,
- 'ca_cert': None,
- 'certfile': None,
- 'keyfile': None,
- 'keyfile_passphrase': None,
- 'crlfile': None,
-}
-_database_localmongodb.update(_base_database_localmongodb)
-
-_database_map = {
- 'localmongodb': _database_localmongodb,
-}
-
-config = {
- 'server': {
- # Note: this section supports all the Gunicorn settings:
- # - http://docs.gunicorn.org/en/stable/settings.html
- 'bind': 'localhost:9984',
- 'loglevel': logging.getLevelName(
- log_config['handlers']['console']['level']).lower(),
- 'workers': None, # if None, the value will be cpu_count * 2 + 1
- },
- 'wsserver': {
- 'scheme': 'ws',
- 'host': 'localhost',
- 'port': 9985,
- 'advertised_scheme': 'ws',
- 'advertised_host': 'localhost',
- 'advertised_port': 9985,
- },
- 'tendermint': {
- 'host': 'localhost',
- 'port': 26657,
- 'version': 'v0.34.15', # look for __tm_supported_versions__
- },
- # FIXME: hardcoding to localmongodb for now
- 'database': _database_map['localmongodb'],
- 'log': {
- 'file': log_config['handlers']['file']['filename'],
- 'error_file': log_config['handlers']['errors']['filename'],
- 'level_console': logging.getLevelName(
- log_config['handlers']['console']['level']).lower(),
- 'level_logfile': logging.getLevelName(
- log_config['handlers']['file']['level']).lower(),
- 'datefmt_console': log_config['formatters']['console']['datefmt'],
- 'datefmt_logfile': log_config['formatters']['file']['datefmt'],
- 'fmt_console': log_config['formatters']['console']['format'],
- 'fmt_logfile': log_config['formatters']['file']['format'],
- 'granular_levels': {},
- },
-}
-
-# We need to maintain a backup copy of the original config dict in case
-# the user wants to reconfigure the node. Check ``planetmint.config_utils``
-# for more info.
-_config = copy.deepcopy(config)
-from planetmint.transactions.common.transaction import Transaction # noqa
-from planetmint import models # noqa
-from planetmint.upsert_validator import ValidatorElection # noqa
-from planetmint.transactions.types.elections.vote import Vote # noqa
-
-Transaction.register_type(Transaction.CREATE, models.Transaction)
-Transaction.register_type(Transaction.TRANSFER, models.Transaction)
+Transaction.register_type(Transaction.CREATE, Transaction)
+Transaction.register_type(Transaction.TRANSFER, Transaction)
Transaction.register_type(ValidatorElection.OPERATION, ValidatorElection)
Transaction.register_type(ChainMigrationElection.OPERATION, ChainMigrationElection)
Transaction.register_type(Vote.OPERATION, Vote)
diff --git a/planetmint/backend/__init__.py b/planetmint/backend/__init__.py
index db1e2ac..1468dc7 100644
--- a/planetmint/backend/__init__.py
+++ b/planetmint/backend/__init__.py
@@ -12,6 +12,5 @@ configuration or the ``PLANETMINT_DATABASE_BACKEND`` environment variable.
"""
# Include the backend interfaces
-from planetmint.backend import schema, query # noqa
-
-from planetmint.backend.connection import connect # noqa
+from planetmint.backend import schema, query, convert # noqa
+from planetmint.backend.connection import connect, Connection
diff --git a/planetmint/backend/connection.py b/planetmint/backend/connection.py
index 34708ce..febc67e 100644
--- a/planetmint/backend/connection.py
+++ b/planetmint/backend/connection.py
@@ -3,103 +3,110 @@
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
+import tarantool
import logging
-from importlib import import_module
-from itertools import repeat
-import planetmint
+from itertools import repeat
+from importlib import import_module
+from transactions.common.exceptions import ConfigurationError
+from planetmint.config import Config
from planetmint.backend.exceptions import ConnectionError
-from planetmint.backend.utils import get_planetmint_config_value, get_planetmint_config_value_or_key_error
-from planetmint.transactions.common.exceptions import ConfigurationError
BACKENDS = {
- 'localmongodb': 'planetmint.backend.localmongodb.connection.LocalMongoDBConnection',
+ "tarantool_db": "planetmint.backend.tarantool.connection.TarantoolDBConnection",
+ "localmongodb": "planetmint.backend.localmongodb.connection.LocalMongoDBConnection",
}
logger = logging.getLogger(__name__)
-def connect(backend=None, host=None, port=None, name=None, max_tries=None,
- connection_timeout=None, replicaset=None, ssl=None, login=None, password=None,
- ca_cert=None, certfile=None, keyfile=None, keyfile_passphrase=None,
- crlfile=None):
- """Create a new connection to the database backend.
-
- All arguments default to the current configuration's values if not
- given.
-
- Args:
- backend (str): the name of the backend to use.
- host (str): the host to connect to.
- port (int): the port to connect to.
- name (str): the name of the database to use.
- replicaset (str): the name of the replica set (only relevant for
- MongoDB connections).
-
- Returns:
- An instance of :class:`~planetmint.backend.connection.Connection`
- based on the given (or defaulted) :attr:`backend`.
-
- Raises:
- :exc:`~ConnectionError`: If the connection to the database fails.
- :exc:`~ConfigurationError`: If the given (or defaulted) :attr:`backend`
- is not supported or could not be loaded.
- :exc:`~AuthenticationError`: If there is a OperationFailure due to
- Authentication failure after connecting to the database.
- """
-
- backend = backend or get_planetmint_config_value_or_key_error('backend')
- host = host or get_planetmint_config_value_or_key_error('host')
- port = port or get_planetmint_config_value_or_key_error('port')
- dbname = name or get_planetmint_config_value_or_key_error('name')
- # Not sure how to handle this here. This setting is only relevant for
- # mongodb.
- # I added **kwargs for both RethinkDBConnection and MongoDBConnection
- # to handle these these additional args. In case of RethinkDBConnection
- # it just does not do anything with it.
- #
- # UPD: RethinkDBConnection is not here anymore cause we no longer support RethinkDB.
- # The problem described above might be reconsidered next time we introduce a backend,
- # if it ever happens.
- replicaset = replicaset or get_planetmint_config_value('replicaset')
- ssl = ssl if ssl is not None else get_planetmint_config_value('ssl', False)
- login = login or get_planetmint_config_value('login')
- password = password or get_planetmint_config_value('password')
- ca_cert = ca_cert or get_planetmint_config_value('ca_cert')
- certfile = certfile or get_planetmint_config_value('certfile')
- keyfile = keyfile or get_planetmint_config_value('keyfile')
- keyfile_passphrase = keyfile_passphrase or get_planetmint_config_value('keyfile_passphrase', None)
- crlfile = crlfile or get_planetmint_config_value('crlfile')
-
+def connect(
+ host: str = None, port: int = None, login: str = None, password: str = None, backend: str = None, **kwargs
+):
try:
- module_name, _, class_name = BACKENDS[backend].rpartition('.')
- Class = getattr(import_module(module_name), class_name)
- except KeyError:
- raise ConfigurationError('Backend `{}` is not supported. '
- 'Planetmint currently supports {}'.format(backend, BACKENDS.keys()))
- except (ImportError, AttributeError) as exc:
- raise ConfigurationError('Error loading backend `{}`'.format(backend)) from exc
+ backend = backend
+ if not backend and kwargs and kwargs.get("backend"):
+ backend = kwargs["backend"]
- logger.debug('Connection: {}'.format(Class))
- return Class(host=host, port=port, dbname=dbname,
- max_tries=max_tries, connection_timeout=connection_timeout,
- replicaset=replicaset, ssl=ssl, login=login, password=password,
- ca_cert=ca_cert, certfile=certfile, keyfile=keyfile,
- keyfile_passphrase=keyfile_passphrase, crlfile=crlfile)
+ if backend and backend != Config().get()["database"]["backend"]:
+ Config().init_config(backend)
+ else:
+ backend = Config().get()["database"]["backend"]
+ except KeyError:
+ logger.info("Backend {} not supported".format(backend))
+ raise ConfigurationError
+
+ host = host or Config().get()["database"]["host"] if not kwargs.get("host") else kwargs["host"]
+ port = port or Config().get()["database"]["port"] if not kwargs.get("port") else kwargs["port"]
+ login = login or Config().get()["database"]["login"] if not kwargs.get("login") else kwargs["login"]
+ password = password or Config().get()["database"]["password"]
+ try:
+ if backend == "tarantool_db":
+ modulepath, _, class_name = BACKENDS[backend].rpartition(".")
+ Class = getattr(import_module(modulepath), class_name)
+ return Class(host=host, port=port, user=login, password=password, kwargs=kwargs)
+ elif backend == "localmongodb":
+ modulepath, _, class_name = BACKENDS[backend].rpartition(".")
+ Class = getattr(import_module(modulepath), class_name)
+ dbname = _kwargs_parser(key="name", kwargs=kwargs) or Config().get()["database"]["name"]
+ replicaset = _kwargs_parser(key="replicaset", kwargs=kwargs) or Config().get()["database"]["replicaset"]
+ ssl = _kwargs_parser(key="ssl", kwargs=kwargs) or Config().get()["database"]["ssl"]
+ login = (
+ login or Config().get()["database"]["login"]
+ if _kwargs_parser(key="login", kwargs=kwargs) is None
+ else _kwargs_parser(key="login", kwargs=kwargs) # noqa: E501
+ )
+ password = (
+ password or Config().get()["database"]["password"]
+ if _kwargs_parser(key="password", kwargs=kwargs) is None
+ else _kwargs_parser(key="password", kwargs=kwargs) # noqa: E501
+ )
+ ca_cert = _kwargs_parser(key="ca_cert", kwargs=kwargs) or Config().get()["database"]["ca_cert"]
+ certfile = _kwargs_parser(key="certfile", kwargs=kwargs) or Config().get()["database"]["certfile"]
+ keyfile = _kwargs_parser(key="keyfile", kwargs=kwargs) or Config().get()["database"]["keyfile"]
+ keyfile_passphrase = (
+ _kwargs_parser(key="keyfile_passphrase", kwargs=kwargs)
+ or Config().get()["database"]["keyfile_passphrase"]
+ )
+ crlfile = _kwargs_parser(key="crlfile", kwargs=kwargs) or Config().get()["database"]["crlfile"]
+ max_tries = _kwargs_parser(key="max_tries", kwargs=kwargs)
+ connection_timeout = _kwargs_parser(key="connection_timeout", kwargs=kwargs)
+
+ return Class(
+ host=host,
+ port=port,
+ dbname=dbname,
+ max_tries=max_tries,
+ connection_timeout=connection_timeout,
+ replicaset=replicaset,
+ ssl=ssl,
+ login=login,
+ password=password,
+ ca_cert=ca_cert,
+ certfile=certfile,
+ keyfile=keyfile,
+ keyfile_passphrase=keyfile_passphrase,
+ crlfile=crlfile,
+ )
+ except tarantool.error.NetworkError as network_err:
+ print(f"Host {host}:{port} can't be reached.\n{network_err}")
+ raise network_err
+
+
+def _kwargs_parser(key, kwargs):
+ if kwargs.get(key):
+ return kwargs[key]
+ return None
class Connection:
"""Connection class interface.
-
All backend implementations should provide a connection class that inherits
from and implements this class.
"""
- def __init__(self, host=None, port=None, dbname=None,
- connection_timeout=None, max_tries=None,
- **kwargs):
+ def __init__(self, host=None, port=None, dbname=None, connection_timeout=None, max_tries=None, **kwargs):
"""Create a new :class:`~.Connection` instance.
-
Args:
host (str): the host to connect to.
port (int): the port to connect to.
@@ -113,14 +120,15 @@ class Connection:
configuration's ``database`` settings
"""
- dbconf = planetmint.config['database']
+ dbconf = Config().get()["database"]
- self.host = host or dbconf['host']
- self.port = port or dbconf['port']
- self.dbname = dbname or dbconf['name']
- self.connection_timeout = connection_timeout if connection_timeout is not None \
- else dbconf['connection_timeout']
- self.max_tries = max_tries if max_tries is not None else dbconf['max_tries']
+ self.host = host or dbconf["host"]
+ self.port = port or dbconf["port"]
+ self.dbname = dbname or dbconf["name"]
+ self.connection_timeout = (
+ connection_timeout if connection_timeout is not None else dbconf["connection_timeout"]
+ )
+ self.max_tries = max_tries if max_tries is not None else dbconf["max_tries"]
self.max_tries_counter = range(self.max_tries) if self.max_tries != 0 else repeat(0)
self._conn = None
@@ -132,7 +140,6 @@ class Connection:
def run(self, query):
"""Run a query.
-
Args:
query: the query to run
Raises:
@@ -148,7 +155,6 @@ class Connection:
def connect(self):
"""Try to connect to the database.
-
Raises:
:exc:`~ConnectionError`: If the connection to the database
fails.
@@ -160,11 +166,16 @@ class Connection:
try:
self._conn = self._connect()
except ConnectionError as exc:
- logger.warning('Attempt %s/%s. Connection to %s:%s failed after %sms.',
- attempt, self.max_tries if self.max_tries != 0 else '∞',
- self.host, self.port, self.connection_timeout)
+ logger.warning(
+ "Attempt %s/%s. Connection to %s:%s failed after %sms.",
+ attempt,
+ self.max_tries if self.max_tries != 0 else "∞",
+ self.host,
+ self.port,
+ self.connection_timeout,
+ )
if attempt == self.max_tries:
- logger.critical('Cannot connect to the Database. Giving up.')
+ logger.critical("Cannot connect to the Database. Giving up.")
raise ConnectionError() from exc
else:
break
diff --git a/planetmint/backend/convert.py b/planetmint/backend/convert.py
new file mode 100644
index 0000000..6ec074f
--- /dev/null
+++ b/planetmint/backend/convert.py
@@ -0,0 +1,26 @@
+# Copyright © 2020 Interplanetary Database Association e.V.,
+# Planetmint and IPDB software contributors.
+# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+# Code is Apache-2.0 and docs are CC-BY-4.0
+
+"""Convert interfaces for backends."""
+
+from functools import singledispatch
+
+
+@singledispatch
+def prepare_asset(connection, transaction_type, transaction_id, filter_operation, asset):
+ """
+ This function is used for preparing assets,
+ before storing them to database.
+ """
+ raise NotImplementedError
+
+
+@singledispatch
+def prepare_metadata(connection, transaction_id, metadata):
+ """
+ This function is used for preparing metadata,
+ before storing them to database.
+ """
+ raise NotImplementedError
diff --git a/planetmint/backend/exceptions.py b/planetmint/backend/exceptions.py
index 2ab5ef6..cf22952 100644
--- a/planetmint/backend/exceptions.py
+++ b/planetmint/backend/exceptions.py
@@ -3,10 +3,10 @@
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
-from planetmint.exceptions import BigchainDBError
+from planetmint.exceptions import PlanetmintError
-class BackendError(BigchainDBError):
+class BackendError(PlanetmintError):
"""Top level exception for any backend exception."""
diff --git a/planetmint/backend/localmongodb/__init__.py b/planetmint/backend/localmongodb/__init__.py
index c786508..97b45cd 100644
--- a/planetmint/backend/localmongodb/__init__.py
+++ b/planetmint/backend/localmongodb/__init__.py
@@ -1,4 +1,4 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
+# Copyright © 2020 Interplanetary Database Association e.V.,conn_tarantool
# Planetmint and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
@@ -22,7 +22,7 @@ generic backend interfaces to the implementations in this module.
"""
# Register the single dispatched modules on import.
-from planetmint.backend.localmongodb import schema, query # noqa
+from planetmint.backend.localmongodb import schema, query, convert # noqa
# MongoDBConnection should always be accessed via
# ``planetmint.backend.connect()``.
diff --git a/planetmint/backend/localmongodb/connection.py b/planetmint/backend/localmongodb/connection.py
index 8ad3226..ad03455 100644
--- a/planetmint/backend/localmongodb/connection.py
+++ b/planetmint/backend/localmongodb/connection.py
@@ -5,25 +5,31 @@
import logging
from ssl import CERT_REQUIRED
-
import pymongo
-from planetmint.backend.connection import Connection
-from planetmint.backend.exceptions import (DuplicateKeyError,
- OperationError,
- ConnectionError)
-from planetmint.backend.utils import get_planetmint_config_value
-from planetmint.transactions.common.exceptions import ConfigurationError
+from planetmint.config import Config
+from planetmint.backend.exceptions import DuplicateKeyError, OperationError, ConnectionError
+from transactions.common.exceptions import ConfigurationError
from planetmint.utils import Lazy
+from planetmint.backend.connection import Connection
logger = logging.getLogger(__name__)
class LocalMongoDBConnection(Connection):
-
- def __init__(self, replicaset=None, ssl=None, login=None, password=None,
- ca_cert=None, certfile=None, keyfile=None,
- keyfile_passphrase=None, crlfile=None, **kwargs):
+ def __init__(
+ self,
+ replicaset=None,
+ ssl=None,
+ login=None,
+ password=None,
+ ca_cert=None,
+ certfile=None,
+ keyfile=None,
+ keyfile_passphrase=None,
+ crlfile=None,
+ **kwargs,
+ ):
"""Create a new Connection instance.
Args:
@@ -34,15 +40,19 @@ class LocalMongoDBConnection(Connection):
"""
super().__init__(**kwargs)
- self.replicaset = replicaset or get_planetmint_config_value('replicaset')
- self.ssl = ssl if ssl is not None else get_planetmint_config_value('ssl', False)
- self.login = login or get_planetmint_config_value('login')
- self.password = password or get_planetmint_config_value('password')
- self.ca_cert = ca_cert or get_planetmint_config_value('ca_cert')
- self.certfile = certfile or get_planetmint_config_value('certfile')
- self.keyfile = keyfile or get_planetmint_config_value('keyfile')
- self.keyfile_passphrase = keyfile_passphrase or get_planetmint_config_value('keyfile_passphrase')
- self.crlfile = crlfile or get_planetmint_config_value('crlfile')
+ self.replicaset = replicaset or Config().get()["database"]["replicaset"]
+ self.ssl = ssl if ssl is not None else Config().get()["database"]["ssl"]
+ self.login = login or Config().get()["database"]["login"]
+ self.password = password or Config().get()["database"]["password"]
+ self.ca_cert = ca_cert or Config().get()["database"]["ca_cert"]
+ self.certfile = certfile or Config().get()["database"]["certfile"]
+ self.keyfile = keyfile or Config().get()["database"]["keyfile"]
+ self.keyfile_passphrase = keyfile_passphrase or Config().get()["database"]["keyfile_passphrase"]
+ self.crlfile = crlfile or Config().get()["database"]["crlfile"]
+ if not self.ssl:
+ self.ssl = False
+ if not self.keyfile_passphrase:
+ self.keyfile_passphrase = None
@property
def db(self):
@@ -64,15 +74,14 @@ class LocalMongoDBConnection(Connection):
try:
return query.run(self.conn)
except pymongo.errors.AutoReconnect:
- logger.warning('Lost connection to the database, '
- 'retrying query.')
+ logger.warning("Lost connection to the database, " "retrying query.")
return query.run(self.conn)
except pymongo.errors.AutoReconnect as exc:
raise ConnectionError from exc
except pymongo.errors.DuplicateKeyError as exc:
raise DuplicateKeyError from exc
except pymongo.errors.OperationFailure as exc:
- print(f'DETAILS: {exc.details}')
+ print(f"DETAILS: {exc.details}")
raise OperationError from exc
def _connect(self):
@@ -93,44 +102,45 @@ class LocalMongoDBConnection(Connection):
# `ConnectionFailure`.
# The presence of ca_cert, certfile, keyfile, crlfile implies the
# use of certificates for TLS connectivity.
- if self.ca_cert is None or self.certfile is None or \
- self.keyfile is None or self.crlfile is None:
- client = pymongo.MongoClient(self.host,
- self.port,
- replicaset=self.replicaset,
- serverselectiontimeoutms=self.connection_timeout,
- ssl=self.ssl,
- **MONGO_OPTS)
+ if self.ca_cert is None or self.certfile is None or self.keyfile is None or self.crlfile is None:
+ client = pymongo.MongoClient(
+ self.host,
+ self.port,
+ replicaset=self.replicaset,
+ serverselectiontimeoutms=self.connection_timeout,
+ ssl=self.ssl,
+ **MONGO_OPTS,
+ )
if self.login is not None and self.password is not None:
client[self.dbname].authenticate(self.login, self.password)
else:
- logger.info('Connecting to MongoDB over TLS/SSL...')
- client = pymongo.MongoClient(self.host,
- self.port,
- replicaset=self.replicaset,
- serverselectiontimeoutms=self.connection_timeout,
- ssl=self.ssl,
- ssl_ca_certs=self.ca_cert,
- ssl_certfile=self.certfile,
- ssl_keyfile=self.keyfile,
- ssl_pem_passphrase=self.keyfile_passphrase,
- ssl_crlfile=self.crlfile,
- ssl_cert_reqs=CERT_REQUIRED,
- **MONGO_OPTS)
+ logger.info("Connecting to MongoDB over TLS/SSL...")
+ client = pymongo.MongoClient(
+ self.host,
+ self.port,
+ replicaset=self.replicaset,
+ serverselectiontimeoutms=self.connection_timeout,
+ ssl=self.ssl,
+ ssl_ca_certs=self.ca_cert,
+ ssl_certfile=self.certfile,
+ ssl_keyfile=self.keyfile,
+ ssl_pem_passphrase=self.keyfile_passphrase,
+ ssl_crlfile=self.crlfile,
+ ssl_cert_reqs=CERT_REQUIRED,
+ **MONGO_OPTS,
+ )
if self.login is not None:
- client[self.dbname].authenticate(self.login,
- mechanism='MONGODB-X509')
+ client[self.dbname].authenticate(self.login, mechanism="MONGODB-X509")
return client
- except (pymongo.errors.ConnectionFailure,
- pymongo.errors.OperationFailure) as exc:
- logger.info('Exception in _connect(): {}'.format(exc))
+ except (pymongo.errors.ConnectionFailure, pymongo.errors.OperationFailure) as exc:
+ logger.info("Exception in _connect(): {}".format(exc))
raise ConnectionError(str(exc)) from exc
except pymongo.errors.ConfigurationError as exc:
raise ConfigurationError from exc
MONGO_OPTS = {
- 'socketTimeoutMS': 20000,
+ "socketTimeoutMS": 20000,
}
diff --git a/planetmint/backend/localmongodb/convert.py b/planetmint/backend/localmongodb/convert.py
new file mode 100644
index 0000000..5e3aa87
--- /dev/null
+++ b/planetmint/backend/localmongodb/convert.py
@@ -0,0 +1,24 @@
+# Copyright © 2020 Interplanetary Database Association e.V.,
+# Planetmint and IPDB software contributors.
+# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+# Code is Apache-2.0 and docs are CC-BY-4.0
+
+"""Convert implementation for MongoDb"""
+
+from planetmint.backend.utils import module_dispatch_registrar
+from planetmint.backend import convert
+from planetmint.backend.localmongodb.connection import LocalMongoDBConnection
+
+register_query = module_dispatch_registrar(convert)
+
+
+@register_query(LocalMongoDBConnection)
+def prepare_asset(connection, transaction_type, transaction_id, filter_operation, asset):
+ if transaction_type in filter_operation:
+ asset["id"] = transaction_id
+ return asset
+
+
+@register_query(LocalMongoDBConnection)
+def prepare_metadata(connection, transaction_id, metadata):
+ return {"id": transaction_id, "metadata": metadata}
diff --git a/planetmint/backend/localmongodb/query.py b/planetmint/backend/localmongodb/query.py
index 64fa4ad..aa529cf 100644
--- a/planetmint/backend/localmongodb/query.py
+++ b/planetmint/backend/localmongodb/query.py
@@ -1,3 +1,5 @@
+from functools import singledispatch
+
# Copyright © 2020 Interplanetary Database Association e.V.,
# Planetmint and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
@@ -11,112 +13,88 @@ from planetmint import backend
from planetmint.backend.exceptions import DuplicateKeyError
from planetmint.backend.utils import module_dispatch_registrar
from planetmint.backend.localmongodb.connection import LocalMongoDBConnection
-from planetmint.transactions.common.transaction import Transaction
+from transactions.common.transaction import Transaction
register_query = module_dispatch_registrar(backend.query)
@register_query(LocalMongoDBConnection)
def store_transactions(conn, signed_transactions):
- return conn.run(conn.collection('transactions')
- .insert_many(signed_transactions))
+ return conn.run(conn.collection("transactions").insert_many(signed_transactions))
@register_query(LocalMongoDBConnection)
def get_transaction(conn, transaction_id):
- return conn.run(
- conn.collection('transactions')
- .find_one({'id': transaction_id}, {'_id': 0}))
+ return conn.run(conn.collection("transactions").find_one({"id": transaction_id}, {"_id": 0}))
@register_query(LocalMongoDBConnection)
def get_transactions(conn, transaction_ids):
try:
return conn.run(
- conn.collection('transactions')
- .find({'id': {'$in': transaction_ids}},
- projection={'_id': False}))
+ conn.collection("transactions").find({"id": {"$in": transaction_ids}}, projection={"_id": False})
+ )
except IndexError:
pass
@register_query(LocalMongoDBConnection)
def store_metadatas(conn, metadata):
- return conn.run(
- conn.collection('metadata')
- .insert_many(metadata, ordered=False))
+ return conn.run(conn.collection("metadata").insert_many(metadata, ordered=False))
@register_query(LocalMongoDBConnection)
def get_metadata(conn, transaction_ids):
- return conn.run(
- conn.collection('metadata')
- .find({'id': {'$in': transaction_ids}},
- projection={'_id': False}))
+ return conn.run(conn.collection("metadata").find({"id": {"$in": transaction_ids}}, projection={"_id": False}))
@register_query(LocalMongoDBConnection)
def store_asset(conn, asset):
try:
- return conn.run(
- conn.collection('assets')
- .insert_one(asset))
+ return conn.run(conn.collection("assets").insert_one(asset))
except DuplicateKeyError:
pass
@register_query(LocalMongoDBConnection)
def store_assets(conn, assets):
- return conn.run(
- conn.collection('assets')
- .insert_many(assets, ordered=False))
+ return conn.run(conn.collection("assets").insert_many(assets, ordered=False))
# TODO: pass filter/projection in function call this is not the expected behaviour for a function called get_asset
@register_query(LocalMongoDBConnection)
def get_asset(conn, asset_id):
try:
- return conn.run(
- conn.collection('assets')
- .find_one({'id': asset_id}, {'_id': 0, 'id': 0}))
+ return conn.run(conn.collection("assets").find_one({"id": asset_id}, {"_id": 0, "id": 0}))
except IndexError:
pass
# TODO: pass filter/projection in function call this is not the expected behaviour for a function called get_assets
@register_query(LocalMongoDBConnection)
def get_assets(conn, asset_ids):
- return conn.run(
- conn.collection('assets')
- .find({'id': {'$in': asset_ids}},
- projection={'_id': False, 'id': False}))
+ return conn.run(conn.collection("assets").find({"id": {"$in": asset_ids}}, projection={"_id": False}))
@register_query(LocalMongoDBConnection)
def get_spent(conn, transaction_id, output):
- query = {'inputs':
- {'$elemMatch':
- {'$and': [{'fulfills.transaction_id': transaction_id},
- {'fulfills.output_index': output}]}}}
+ query = {
+ "inputs": {
+ "$elemMatch": {"$and": [{"fulfills.transaction_id": transaction_id}, {"fulfills.output_index": output}]}
+ }
+ }
- return conn.run(
- conn.collection('transactions')
- .find(query, {'_id': 0}))
+ return conn.run(conn.collection("transactions").find(query, {"_id": 0}))
@register_query(LocalMongoDBConnection)
def get_latest_block(conn):
- return conn.run(
- conn.collection('blocks')
- .find_one(projection={'_id': False},
- sort=[('height', DESCENDING)]))
+ return conn.run(conn.collection("blocks").find_one(projection={"_id": False}, sort=[("height", DESCENDING)]))
@register_query(LocalMongoDBConnection)
def store_block(conn, block):
try:
- return conn.run(
- conn.collection('blocks')
- .insert_one(block))
+ return conn.run(conn.collection("blocks").insert_one(block))
except DuplicateKeyError:
pass
@@ -125,32 +103,47 @@ def store_block(conn, block):
def get_txids_filtered(conn, asset_ids, operation=None, last_tx=None):
match = {
- Transaction.CREATE: {'operation': 'CREATE', 'id': {'$in': asset_ids}},
- Transaction.TRANSFER: {'operation': 'TRANSFER', 'assets.id': {'$in': asset_ids}},
- None: {'$or': [{'assets.id': {'$in': asset_ids}}, {'id': {'$in': asset_ids}}]},
+ Transaction.CREATE: {"operation": "CREATE", "id": {"$in": asset_ids}},
+ Transaction.TRANSFER: {"operation": "TRANSFER", "asset.id": {"$in": asset_ids}},
+ None: {"$or": [{"assets.id": {"$in": asset_ids}}, {"id": {"$in": asset_ids}}]},
}[operation]
- cursor = conn.run(conn.collection('transactions').find(match))
+ cursor = conn.run(conn.collection("transactions").find(match))
if last_tx:
- cursor = cursor.sort([('$natural', DESCENDING)]).limit(1)
+ cursor = cursor.sort([("$natural", DESCENDING)]).limit(1)
- return (elem['id'] for elem in cursor)
+ return (elem["id"] for elem in cursor)
@register_query(LocalMongoDBConnection)
-def text_search(conn, search, *, language='english', case_sensitive=False,
- diacritic_sensitive=False, text_score=False, limit=0, table='assets'):
+def text_search(
+ conn,
+ search,
+ *,
+ language="english",
+ case_sensitive=False,
+ diacritic_sensitive=False,
+ text_score=False,
+ limit=0,
+ table="assets"
+):
cursor = conn.run(
conn.collection(table)
- .find({'$text': {
- '$search': search,
- '$language': language,
- '$caseSensitive': case_sensitive,
- '$diacriticSensitive': diacritic_sensitive}},
- {'score': {'$meta': 'textScore'}, '_id': False})
- .sort([('score', {'$meta': 'textScore'})])
- .limit(limit))
+ .find(
+ {
+ "$text": {
+ "$search": search,
+ "$language": language,
+ "$caseSensitive": case_sensitive,
+ "$diacriticSensitive": diacritic_sensitive,
+ }
+ },
+ {"score": {"$meta": "textScore"}, "_id": False},
+ )
+ .sort([("score", {"$meta": "textScore"})])
+ .limit(limit)
+ )
if text_score:
return cursor
@@ -159,58 +152,54 @@ def text_search(conn, search, *, language='english', case_sensitive=False,
def _remove_text_score(asset):
- asset.pop('score', None)
+ asset.pop("score", None)
return asset
@register_query(LocalMongoDBConnection)
def get_owned_ids(conn, owner):
cursor = conn.run(
- conn.collection('transactions').aggregate([
- {'$match': {'outputs.public_keys': owner}},
- {'$project': {'_id': False}}
- ]))
+ conn.collection("transactions").aggregate(
+ [{"$match": {"outputs.public_keys": owner}}, {"$project": {"_id": False}}]
+ )
+ )
return cursor
@register_query(LocalMongoDBConnection)
def get_spending_transactions(conn, inputs):
- transaction_ids = [i['transaction_id'] for i in inputs]
- output_indexes = [i['output_index'] for i in inputs]
- query = {'inputs':
- {'$elemMatch':
- {'$and':
- [
- {'fulfills.transaction_id': {'$in': transaction_ids}},
- {'fulfills.output_index': {'$in': output_indexes}}
- ]}}}
+ transaction_ids = [i["transaction_id"] for i in inputs]
+ output_indexes = [i["output_index"] for i in inputs]
+ query = {
+ "inputs": {
+ "$elemMatch": {
+ "$and": [
+ {"fulfills.transaction_id": {"$in": transaction_ids}},
+ {"fulfills.output_index": {"$in": output_indexes}},
+ ]
+ }
+ }
+ }
- cursor = conn.run(
- conn.collection('transactions').find(query, {'_id': False}))
+ cursor = conn.run(conn.collection("transactions").find(query, {"_id": False}))
return cursor
@register_query(LocalMongoDBConnection)
def get_block(conn, block_id):
- return conn.run(
- conn.collection('blocks')
- .find_one({'height': block_id},
- projection={'_id': False}))
+ return conn.run(conn.collection("blocks").find_one({"height": block_id}, projection={"_id": False}))
@register_query(LocalMongoDBConnection)
def get_block_with_transaction(conn, txid):
- return conn.run(
- conn.collection('blocks')
- .find({'transactions': txid},
- projection={'_id': False, 'height': True}))
+ return conn.run(conn.collection("blocks").find({"transactions": txid}, projection={"_id": False, "height": True}))
@register_query(LocalMongoDBConnection)
def delete_transactions(conn, txn_ids):
- conn.run(conn.collection('assets').delete_many({'id': {'$in': txn_ids}}))
- conn.run(conn.collection('metadata').delete_many({'id': {'$in': txn_ids}}))
- conn.run(conn.collection('transactions').delete_many({'id': {'$in': txn_ids}}))
+ conn.run(conn.collection("assets").delete_many({"id": {"$in": txn_ids}}))
+ conn.run(conn.collection("metadata").delete_many({"id": {"$in": txn_ids}}))
+ conn.run(conn.collection("transactions").delete_many({"id": {"$in": txn_ids}}))
@register_query(LocalMongoDBConnection)
@@ -218,7 +207,7 @@ def store_unspent_outputs(conn, *unspent_outputs):
if unspent_outputs:
try:
return conn.run(
- conn.collection('utxos').insert_many(
+ conn.collection("utxos").insert_many(
unspent_outputs,
ordered=False,
)
@@ -232,14 +221,19 @@ def store_unspent_outputs(conn, *unspent_outputs):
def delete_unspent_outputs(conn, *unspent_outputs):
if unspent_outputs:
return conn.run(
- conn.collection('utxos').delete_many({
- '$or': [{
- '$and': [
- {'transaction_id': unspent_output['transaction_id']},
- {'output_index': unspent_output['output_index']},
- ],
- } for unspent_output in unspent_outputs]
- })
+ conn.collection("utxos").delete_many(
+ {
+ "$or": [
+ {
+ "$and": [
+ {"transaction_id": unspent_output["transaction_id"]},
+ {"output_index": unspent_output["output_index"]},
+ ],
+ }
+ for unspent_output in unspent_outputs
+ ]
+ }
+ )
)
@@ -247,51 +241,36 @@ def delete_unspent_outputs(conn, *unspent_outputs):
def get_unspent_outputs(conn, *, query=None):
if query is None:
query = {}
- return conn.run(conn.collection('utxos').find(query,
- projection={'_id': False}))
+ return conn.run(conn.collection("utxos").find(query, projection={"_id": False}))
@register_query(LocalMongoDBConnection)
def store_pre_commit_state(conn, state):
- return conn.run(
- conn.collection('pre_commit')
- .replace_one({}, state, upsert=True)
- )
+ return conn.run(conn.collection("pre_commit").replace_one({}, state, upsert=True))
@register_query(LocalMongoDBConnection)
-def get_pre_commit_state(conn):
- return conn.run(conn.collection('pre_commit').find_one())
+def get_pre_commit_state(connection):
+ return connection.run(connection.collection("pre_commit").find_one())
@register_query(LocalMongoDBConnection)
def store_validator_set(conn, validators_update):
- height = validators_update['height']
- return conn.run(
- conn.collection('validators').replace_one(
- {'height': height},
- validators_update,
- upsert=True
- )
- )
+ height = validators_update["height"]
+ return conn.run(conn.collection("validators").replace_one({"height": height}, validators_update, upsert=True))
@register_query(LocalMongoDBConnection)
def delete_validator_set(conn, height):
- return conn.run(
- conn.collection('validators').delete_many({'height': height})
- )
+ return conn.run(conn.collection("validators").delete_many({"height": height}))
@register_query(LocalMongoDBConnection)
def store_election(conn, election_id, height, is_concluded):
return conn.run(
- conn.collection('elections').replace_one(
- {'election_id': election_id,
- 'height': height},
- {'election_id': election_id,
- 'height': height,
- 'is_concluded': is_concluded},
+ conn.collection("elections").replace_one(
+ {"election_id": election_id, "height": height},
+ {"election_id": election_id, "height": height, "is_concluded": is_concluded},
upsert=True,
)
)
@@ -299,29 +278,22 @@ def store_election(conn, election_id, height, is_concluded):
@register_query(LocalMongoDBConnection)
def store_elections(conn, elections):
- return conn.run(
- conn.collection('elections').insert_many(elections)
- )
+ return conn.run(conn.collection("elections").insert_many(elections))
@register_query(LocalMongoDBConnection)
def delete_elections(conn, height):
- return conn.run(
- conn.collection('elections').delete_many({'height': height})
- )
+ return conn.run(conn.collection("elections").delete_many({"height": height}))
@register_query(LocalMongoDBConnection)
def get_validator_set(conn, height=None):
query = {}
if height is not None:
- query = {'height': {'$lte': height}}
+ query = {"height": {"$lte": height}}
cursor = conn.run(
- conn.collection('validators')
- .find(query, projection={'_id': False})
- .sort([('height', DESCENDING)])
- .limit(1)
+ conn.collection("validators").find(query, projection={"_id": False}).sort([("height", DESCENDING)]).limit(1)
)
return next(cursor, None)
@@ -329,34 +301,27 @@ def get_validator_set(conn, height=None):
@register_query(LocalMongoDBConnection)
def get_election(conn, election_id):
- query = {'election_id': election_id}
+ query = {"election_id": election_id}
return conn.run(
- conn.collection('elections')
- .find_one(query, projection={'_id': False},
- sort=[('height', DESCENDING)])
+ conn.collection("elections").find_one(query, projection={"_id": False}, sort=[("height", DESCENDING)])
)
+
@register_query(LocalMongoDBConnection)
def get_asset_tokens_for_public_key(conn, asset_id, public_key):
- query = {'outputs.public_keys': [public_key],
- 'assets.id': asset_id}
+ query = {"outputs.public_keys": [public_key], "asset.id": asset_id}
- cursor = conn.run(
- conn.collection('transactions').aggregate([
- {'$match': query},
- {'$project': {'_id': False}}
- ]))
+ cursor = conn.run(conn.collection("transactions").aggregate([{"$match": query}, {"$project": {"_id": False}}]))
return cursor
@register_query(LocalMongoDBConnection)
def store_abci_chain(conn, height, chain_id, is_synced=True):
return conn.run(
- conn.collection('abci_chains').replace_one(
- {'height': height},
- {'height': height, 'chain_id': chain_id,
- 'is_synced': is_synced},
+ conn.collection("abci_chains").replace_one(
+ {"height": height},
+ {"height": height, "chain_id": chain_id, "is_synced": is_synced},
upsert=True,
)
)
@@ -364,14 +329,9 @@ def store_abci_chain(conn, height, chain_id, is_synced=True):
@register_query(LocalMongoDBConnection)
def delete_abci_chain(conn, height):
- return conn.run(
- conn.collection('abci_chains').delete_many({'height': height})
- )
+ return conn.run(conn.collection("abci_chains").delete_many({"height": height}))
@register_query(LocalMongoDBConnection)
def get_latest_abci_chain(conn):
- return conn.run(
- conn.collection('abci_chains')
- .find_one(projection={'_id': False}, sort=[('height', DESCENDING)])
- )
+ return conn.run(conn.collection("abci_chains").find_one(projection={"_id": False}, sort=[("height", DESCENDING)]))
diff --git a/planetmint/backend/localmongodb/schema.py b/planetmint/backend/localmongodb/schema.py
index d92d6d4..b8fd6a0 100644
--- a/planetmint/backend/localmongodb/schema.py
+++ b/planetmint/backend/localmongodb/schema.py
@@ -20,48 +20,48 @@ register_schema = module_dispatch_registrar(backend.schema)
INDEXES = {
- 'transactions': [
- ('id', dict(unique=True, name='transaction_id')),
- ('asset.id', dict(name='asset_id')),
- ('outputs.public_keys', dict(name='outputs')),
- ([('inputs.fulfills.transaction_id', ASCENDING),
- ('inputs.fulfills.output_index', ASCENDING)], dict(name='inputs')),
+ "transactions": [
+ ("id", dict(unique=True, name="transaction_id")),
+ ("asset.id", dict(name="asset_id")),
+ ("outputs.public_keys", dict(name="outputs")),
+ (
+ [("inputs.fulfills.transaction_id", ASCENDING), ("inputs.fulfills.output_index", ASCENDING)],
+ dict(name="inputs"),
+ ),
],
- 'assets': [
- ('id', dict(name='asset_id', unique=True)),
- ([('$**', TEXT)], dict(name='text')),
+ "assets": [
+ ("id", dict(name="asset_id", unique=True)),
+ ([("$**", TEXT)], dict(name="text")),
],
- 'blocks': [
- ([('height', DESCENDING)], dict(name='height', unique=True)),
+ "blocks": [
+ ([("height", DESCENDING)], dict(name="height", unique=True)),
],
- 'metadata': [
- ('id', dict(name='transaction_id', unique=True)),
- ([('$**', TEXT)], dict(name='text')),
+ "metadata": [
+ ("id", dict(name="transaction_id", unique=True)),
+ ([("$**", TEXT)], dict(name="text")),
],
- 'utxos': [
- ([('transaction_id', ASCENDING),
- ('output_index', ASCENDING)], dict(name='utxo', unique=True)),
+ "utxos": [
+ ([("transaction_id", ASCENDING), ("output_index", ASCENDING)], dict(name="utxo", unique=True)),
],
- 'pre_commit': [
- ('height', dict(name='height', unique=True)),
+ "pre_commit": [
+ ("height", dict(name="height", unique=True)),
],
- 'elections': [
- ([('height', DESCENDING), ('election_id', ASCENDING)],
- dict(name='election_id_height', unique=True)),
+ "elections": [
+ ([("height", DESCENDING), ("election_id", ASCENDING)], dict(name="election_id_height", unique=True)),
],
- 'validators': [
- ('height', dict(name='height', unique=True)),
+ "validators": [
+ ("height", dict(name="height", unique=True)),
],
- 'abci_chains': [
- ('height', dict(name='height', unique=True)),
- ('chain_id', dict(name='chain_id', unique=True)),
+ "abci_chains": [
+ ("height", dict(name="height", unique=True)),
+ ("chain_id", dict(name="chain_id", unique=True)),
],
}
@register_schema(LocalMongoDBConnection)
def create_database(conn, dbname):
- logger.info('Create database `%s`.', dbname)
+ logger.info("Create database `%s`.", dbname)
# TODO: read and write concerns can be declared here
conn.conn.get_database(dbname)
@@ -72,15 +72,15 @@ def create_tables(conn, dbname):
# create the table
# TODO: read and write concerns can be declared here
try:
- logger.info(f'Create `{table_name}` table.')
+ logger.info(f"Create `{table_name}` table.")
conn.conn[dbname].create_collection(table_name)
except CollectionInvalid:
- logger.info(f'Collection {table_name} already exists.')
+ logger.info(f"Collection {table_name} already exists.")
create_indexes(conn, dbname, table_name, INDEXES[table_name])
def create_indexes(conn, dbname, collection, indexes):
- logger.info(f'Ensure secondary indexes for `{collection}`.')
+ logger.info(f"Ensure secondary indexes for `{collection}`.")
for fields, kwargs in indexes:
conn.conn[dbname][collection].create_index(fields, **kwargs)
diff --git a/planetmint/backend/query.py b/planetmint/backend/query.py
index c9da238..3ed074f 100644
--- a/planetmint/backend/query.py
+++ b/planetmint/backend/query.py
@@ -6,12 +6,12 @@
"""Query interfaces for backends."""
from functools import singledispatch
-
from planetmint.backend.exceptions import OperationError
+# FIXME ADD HERE HINT FOR RETURNING TYPE
@singledispatch
-def store_asset(connection, asset):
+def store_asset(asset: dict, connection):
"""Write an asset to the asset table.
Args:
@@ -25,14 +25,14 @@ def store_asset(connection, asset):
@singledispatch
-def store_assets(connection, assets):
+def store_assets(assets: list, connection):
"""Write a list of assets to the assets table.
+ backend
+ Args:
+ assets (list): a list of assets to write.
- Args:
- assets (list): a list of assets to write.
-
- Returns:
- The database response.
+ Returns:
+ The database response.
"""
raise NotImplementedError
@@ -100,18 +100,6 @@ def get_asset(connection, asset_id):
raise NotImplementedError
-@singledispatch
-def get_assets(connection, asset_ids):
- """Get assets from the assets table.
-
- Args:
- asset_ids (list): list of asset ids to fetch
-
- Returns:
- The result of the operation.
- """
-
- raise NotImplementedError
@singledispatch
def get_spent(connection, transaction_id, condition_id):
@@ -203,7 +191,19 @@ def get_metadata(connection, transaction_ids):
@singledispatch
-def get_txids_filtered(connection, asset_ids, operation=None):
+def get_assets(connection, asset_ids) -> list:
+ """Get a list of assets from the assets table.
+ Args:
+ asset_ids (list): a list of ids for the assets to be retrieved from
+ the database.
+ Returns:
+ assets (list): the list of returned assets.
+ """
+ raise NotImplementedError
+
+
+@singledispatch
+def get_txids_filtered(connection, asset_id, operation=None):
"""Return all transactions for a particular asset id and optional operation.
Args:
@@ -215,8 +215,17 @@ def get_txids_filtered(connection, asset_ids, operation=None):
@singledispatch
-def text_search(conn, search, *, language='english', case_sensitive=False,
- diacritic_sensitive=False, text_score=False, limit=0, table=None):
+def text_search(
+ conn,
+ search,
+ *,
+ language="english",
+ case_sensitive=False,
+ diacritic_sensitive=False,
+ text_score=False,
+ limit=0,
+ table=None
+):
"""Return all the assets that match the text search.
The results are sorted by text score.
@@ -243,8 +252,7 @@ def text_search(conn, search, *, language='english', case_sensitive=False,
OperationError: If the backend does not support text search
"""
- raise OperationError('This query is only supported when running '
- 'Planetmint with MongoDB as the backend.')
+ raise OperationError("This query is only supported when running " "Planetmint with MongoDB as the backend.")
@singledispatch
@@ -384,8 +392,7 @@ def get_validator_set(conn, height):
@singledispatch
def get_election(conn, election_id):
- """Return the election record
- """
+ """Return the election record"""
raise NotImplementedError
@@ -428,3 +435,9 @@ def get_latest_abci_chain(conn):
None otherwise.
"""
raise NotImplementedError
+
+
+@singledispatch
+def _group_transaction_by_ids(txids: list, connection):
+ """Returns the transactions object (JSON TYPE), from list of ids."""
+ raise NotImplementedError
diff --git a/planetmint/backend/schema.py b/planetmint/backend/schema.py
index b19315b..3140b21 100644
--- a/planetmint/backend/schema.py
+++ b/planetmint/backend/schema.py
@@ -5,26 +5,81 @@
"""Database creation and schema-providing interfaces for backends."""
-from functools import singledispatch
import logging
-import planetmint
+from functools import singledispatch
+from planetmint.config import Config
from planetmint.backend.connection import connect
-from planetmint.transactions.common.exceptions import ValidationError
-from planetmint.transactions.common.utils import (
- validate_all_values_for_key_in_obj, validate_all_values_for_key_in_list)
+from transactions.common.exceptions import ValidationError
+from transactions.common.utils import (
+ validate_all_values_for_key_in_obj,
+ validate_all_values_for_key_in_list,
+)
logger = logging.getLogger(__name__)
# Tables/collections that every backend database must create
-TABLES = ('transactions', 'blocks', 'assets', 'metadata',
- 'validators', 'elections', 'pre_commit', 'utxos', 'abci_chains')
+TABLES = (
+ "transactions",
+ "blocks",
+ "assets",
+ "metadata",
+ "validators",
+ "elections",
+ "pre_commit",
+ "utxos",
+ "abci_chains",
+)
-VALID_LANGUAGES = ('danish', 'dutch', 'english', 'finnish', 'french', 'german',
- 'hungarian', 'italian', 'norwegian', 'portuguese', 'romanian',
- 'russian', 'spanish', 'swedish', 'turkish', 'none',
- 'da', 'nl', 'en', 'fi', 'fr', 'de', 'hu', 'it', 'nb', 'pt',
- 'ro', 'ru', 'es', 'sv', 'tr')
+SPACE_NAMES = (
+ "abci_chains",
+ "assets",
+ "blocks",
+ "blocks_tx",
+ "elections",
+ "meta_data",
+ "pre_commits",
+ "validators",
+ "transactions",
+ "inputs",
+ "outputs",
+ "keys",
+ "utxos",
+)
+
+VALID_LANGUAGES = (
+ "danish",
+ "dutch",
+ "english",
+ "finnish",
+ "french",
+ "german",
+ "hungarian",
+ "italian",
+ "norwegian",
+ "portuguese",
+ "romanian",
+ "russian",
+ "spanish",
+ "swedish",
+ "turkish",
+ "none",
+ "da",
+ "nl",
+ "en",
+ "fi",
+ "fr",
+ "de",
+ "hu",
+ "it",
+ "nb",
+ "pt",
+ "ro",
+ "ru",
+ "es",
+ "sv",
+ "tr",
+)
@singledispatch
@@ -80,7 +135,7 @@ def init_database(connection=None, dbname=None):
"""
connection = connection or connect()
- dbname = dbname or planetmint.config['database']['name']
+ dbname = dbname or Config().get()["database"]["name"]
create_database(connection, dbname)
create_tables(connection, dbname)
@@ -89,41 +144,43 @@ def init_database(connection=None, dbname=None):
def validate_language_key(obj, key):
"""Validate all nested "language" key in `obj`.
- Args:
- obj (dict): dictionary whose "language" key is to be validated.
+ Args:
+ obj (dict): dictionary whose "language" key is to be validated.
- Returns:
- None: validation successful
+ Returns:
+ None: validation successful
- Raises:
- ValidationError: will raise exception in case language is not valid.
+ Raises:
+ ValidationError: will raise exception in case language is not valid.
"""
- backend = planetmint.config['database']['backend']
+ backend = Config().get()["database"]["backend"]
- if backend == 'localmongodb':
+ if backend == "localmongodb":
data = obj.get(key, {})
if isinstance(data, dict):
- validate_all_values_for_key_in_obj(data, 'language', validate_language)
+ validate_all_values_for_key_in_obj(data, "language", validate_language)
elif isinstance(data, list):
- validate_all_values_for_key_in_list(data, 'language', validate_language)
+ validate_all_values_for_key_in_list(data, "language", validate_language)
def validate_language(value):
"""Check if `value` is a valid language.
- https://docs.mongodb.com/manual/reference/text-search-languages/
+ https://docs.mongodb.com/manual/reference/text-search-languages/
- Args:
- value (str): language to validated
+ Args:
+ value (str): language to validated
- Returns:
- None: validation successful
+ Returns:
+ None: validation successful
- Raises:
- ValidationError: will raise exception in case language is not valid.
+ Raises:
+ ValidationError: will raise exception in case language is not valid.
"""
if value not in VALID_LANGUAGES:
- error_str = ('MongoDB does not support text search for the '
- 'language "{}". If you do not understand this error '
- 'message then please rename key/field "language" to '
- 'something else like "lang".').format(value)
+ error_str = (
+ "MongoDB does not support text search for the "
+ 'language "{}". If you do not understand this error '
+ 'message then please rename key/field "language" to '
+ 'something else like "lang".'
+ ).format(value)
raise ValidationError(error_str)
diff --git a/planetmint/backend/tarantool/__init__.py b/planetmint/backend/tarantool/__init__.py
new file mode 100644
index 0000000..1e667c0
--- /dev/null
+++ b/planetmint/backend/tarantool/__init__.py
@@ -0,0 +1,5 @@
+# Register the single dispatched modules on import.
+from planetmint.backend.tarantool import query, connection, schema, convert # noqa
+
+# MongoDBConnection should always be accessed via
+# ``planetmint.backend.connect()``.
diff --git a/planetmint/backend/tarantool/basic.lua b/planetmint/backend/tarantool/basic.lua
new file mode 100644
index 0000000..fcc46eb
--- /dev/null
+++ b/planetmint/backend/tarantool/basic.lua
@@ -0,0 +1,78 @@
+box.cfg{listen = 3303}
+
+function indexed_pattern_search(space_name, field_no, pattern)
+ if (box.space[space_name] == nil) then
+ print("Error: Failed to find the specified space")
+ return nil
+ end
+ local index_no = -1
+ for i=0,box.schema.INDEX_MAX,1 do
+ if (box.space[space_name].index[i] == nil) then break end
+ if (box.space[space_name].index[i].type == "TREE"
+ and box.space[space_name].index[i].parts[1].fieldno == field_no
+ and (box.space[space_name].index[i].parts[1].type == "scalar"
+ or box.space[space_name].index[i].parts[1].type == "string")) then
+ index_no = i
+ break
+ end
+ end
+ if (index_no == -1) then
+ print("Error: Failed to find an appropriate index")
+ return nil
+ end
+ local index_search_key = ""
+ local index_search_key_length = 0
+ local last_character = ""
+ local c = ""
+ local c2 = ""
+ for i=1,string.len(pattern),1 do
+ c = string.sub(pattern, i, i)
+ if (last_character ~= "%") then
+ if (c == '^' or c == "$" or c == "(" or c == ")" or c == "."
+ or c == "[" or c == "]" or c == "*" or c == "+"
+ or c == "-" or c == "?") then
+ break
+ end
+ if (c == "%") then
+ c2 = string.sub(pattern, i + 1, i + 1)
+ if (string.match(c2, "%p") == nil) then break end
+ index_search_key = index_search_key .. c2
+ else
+ index_search_key = index_search_key .. c
+ end
+ end
+ last_character = c
+ end
+ index_search_key_length = string.len(index_search_key)
+ local result_set = {}
+ local number_of_tuples_in_result_set = 0
+ local previous_tuple_field = ""
+ while true do
+ local number_of_tuples_since_last_yield = 0
+ local is_time_for_a_yield = false
+ for _,tuple in box.space[space_name].index[index_no]:
+ pairs(index_search_key,{iterator = box.index.GE}) do
+ if (string.sub(tuple[field_no], 1, index_search_key_length)
+ > index_search_key) then
+ break
+ end
+ number_of_tuples_since_last_yield = number_of_tuples_since_last_yield + 1
+ if (number_of_tuples_since_last_yield >= 10
+ and tuple[field_no] ~= previous_tuple_field) then
+ index_search_key = tuple[field_no]
+ is_time_for_a_yield = true
+ break
+ end
+ previous_tuple_field = tuple[field_no]
+ if (string.match(tuple[field_no], pattern) ~= nil) then
+ number_of_tuples_in_result_set = number_of_tuples_in_result_set + 1
+ result_set[number_of_tuples_in_result_set] = tuple
+ end
+ end
+ if (is_time_for_a_yield ~= true) then
+ break
+ end
+ require('fiber').yield()
+ end
+ return result_set
+end
\ No newline at end of file
diff --git a/planetmint/backend/tarantool/connection.py b/planetmint/backend/tarantool/connection.py
new file mode 100644
index 0000000..0c719ff
--- /dev/null
+++ b/planetmint/backend/tarantool/connection.py
@@ -0,0 +1,103 @@
+# Copyright © 2020 Interplanetary Database Association e.V.,
+# Planetmint and IPDB software contributors.
+# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+# Code is Apache-2.0 and docs are CC-BY-4.0
+
+import logging
+import tarantool
+
+from planetmint.config import Config
+from transactions.common.exceptions import ConfigurationError
+from planetmint.utils import Lazy
+from planetmint.backend.connection import Connection
+
+logger = logging.getLogger(__name__)
+
+
+class TarantoolDBConnection(Connection):
+ def __init__(
+ self,
+ host: str = "localhost",
+ port: int = 3303,
+ user: str = None,
+ password: str = None,
+ **kwargs,
+ ):
+ try:
+ super().__init__(**kwargs)
+ self.host = host
+ self.port = port
+ # TODO add user support later on
+ self.init_path = Config().get()["database"]["init_config"]["absolute_path"]
+ self.drop_path = Config().get()["database"]["drop_config"]["absolute_path"]
+ self.SPACE_NAMES = [
+ "abci_chains",
+ "assets",
+ "blocks",
+ "blocks_tx",
+ "elections",
+ "meta_data",
+ "pre_commits",
+ "validators",
+ "transactions",
+ "inputs",
+ "outputs",
+ "keys",
+ ]
+ except tarantool.error.NetworkError as network_err:
+ logger.info("Host cant be reached")
+ raise network_err
+ except ConfigurationError:
+ logger.info("Exception in _connect(): {}")
+ raise ConfigurationError
+
+ def query(self):
+ return Lazy()
+
+ def _file_content_to_bytes(self, path):
+ with open(path, "r") as f:
+ execute = f.readlines()
+ f.close()
+ return "".join(execute).encode()
+
+ def _connect(self):
+ return tarantool.connect(host=self.host, port=self.port)
+
+ def get_space(self, space_name: str):
+ return self.conn.space(space_name)
+
+ def space(self, space_name: str):
+ return self.query().space(space_name)
+
+ def run(self, query, only_data=True):
+ try:
+ return query.run(self.conn).data if only_data else query.run(self.conn)
+ except tarantool.error.OperationalError as op_error:
+ raise op_error
+ except tarantool.error.NetworkError as net_error:
+ raise net_error
+
+ def get_connection(self):
+ return self.conn
+
+ def drop_database(self):
+ db_config = Config().get()["database"]
+ cmd_resp = self.run_command(command=self.drop_path, config=db_config) # noqa: F841
+
+ def init_database(self):
+ db_config = Config().get()["database"]
+ cmd_resp = self.run_command(command=self.init_path, config=db_config) # noqa: F841
+
+ def run_command(self, command: str, config: dict):
+ from subprocess import run
+
+ print(f" commands: {command}")
+ host_port = "%s:%s" % (self.host, self.port)
+ execute_cmd = self._file_content_to_bytes(path=command)
+ output = run(
+ ["tarantoolctl", "connect", host_port],
+ input=execute_cmd,
+ capture_output=True,
+ ).stderr
+ output = output.decode()
+ return output
diff --git a/planetmint/backend/tarantool/convert.py b/planetmint/backend/tarantool/convert.py
new file mode 100644
index 0000000..15ea5ef
--- /dev/null
+++ b/planetmint/backend/tarantool/convert.py
@@ -0,0 +1,25 @@
+# Copyright © 2020 Interplanetary Database Association e.V.,
+# Planetmint and IPDB software contributors.
+# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+# Code is Apache-2.0 and docs are CC-BY-4.0
+
+"""Convert implementation for Tarantool"""
+
+from planetmint.backend.utils import module_dispatch_registrar
+from planetmint.backend import convert
+from planetmint.backend.tarantool.connection import TarantoolDBConnection
+
+register_query = module_dispatch_registrar(convert)
+
+
+@register_query(TarantoolDBConnection)
+def prepare_asset(connection, transaction_type, transaction_id, filter_operation, asset):
+ asset_id = transaction_id
+ if transaction_type not in filter_operation:
+ asset_id = asset["id"]
+ return tuple([asset, transaction_id, asset_id])
+
+
+@register_query(TarantoolDBConnection)
+def prepare_metadata(connection, transaction_id, metadata):
+ return {"id": transaction_id, "metadata": metadata}
diff --git a/planetmint/backend/tarantool/drop.lua b/planetmint/backend/tarantool/drop.lua
new file mode 100644
index 0000000..da35bc6
--- /dev/null
+++ b/planetmint/backend/tarantool/drop.lua
@@ -0,0 +1,14 @@
+box.space.abci_chains:drop()
+box.space.assets:drop()
+box.space.blocks:drop()
+box.space.blocks_tx:drop()
+box.space.elections:drop()
+box.space.meta_data:drop()
+box.space.pre_commits:drop()
+box.space.utxos:drop()
+box.space.validators:drop()
+box.space.transactions:drop()
+box.space.inputs:drop()
+box.space.outputs:drop()
+box.space.keys:drop()
+box.space.scripts:drop()
diff --git a/planetmint/backend/tarantool/init.lua b/planetmint/backend/tarantool/init.lua
new file mode 100644
index 0000000..46344d9
--- /dev/null
+++ b/planetmint/backend/tarantool/init.lua
@@ -0,0 +1,74 @@
+abci_chains = box.schema.space.create('abci_chains', {engine='memtx', is_sync = false})
+abci_chains:format({{name='height' , type='integer'},{name='is_synched' , type='boolean'},{name='chain_id',type='string'}})
+abci_chains:create_index('id_search' ,{type='hash', parts={'chain_id'}})
+abci_chains:create_index('height_search' ,{type='tree',unique=false, parts={'height'}})
+
+assets = box.schema.space.create('assets' , {engine='memtx' , is_sync=false})
+assets:format({{name='data' , type='any'}, {name='tx_id', type='string'}, {name='asset_id', type='string'}})
+assets:create_index('txid_search', {type='hash', parts={'tx_id'}})
+assets:create_index('assetid_search', {type='tree',unique=false, parts={'asset_id', 'tx_id'}})
+assets:create_index('only_asset_search', {type='tree', unique=false, parts={'asset_id'}})
+
+blocks = box.schema.space.create('blocks' , {engine='memtx' , is_sync=false})
+blocks:format{{name='app_hash',type='string'},{name='height' , type='integer'},{name='block_id' , type='string'}}
+blocks:create_index('id_search' , {type='hash' , parts={'block_id'}})
+blocks:create_index('block_search' , {type='tree', unique = false, parts={'height'}})
+blocks:create_index('block_id_search', {type = 'hash', parts ={'block_id'}})
+
+blocks_tx = box.schema.space.create('blocks_tx')
+blocks_tx:format{{name='transaction_id', type = 'string'}, {name = 'block_id', type = 'string'}}
+blocks_tx:create_index('id_search',{ type = 'hash', parts={'transaction_id'}})
+blocks_tx:create_index('block_search', {type = 'tree',unique=false, parts={'block_id'}})
+
+elections = box.schema.space.create('elections',{engine = 'memtx' , is_sync = false})
+elections:format({{name='election_id' , type='string'},{name='height' , type='integer'}, {name='is_concluded' , type='boolean'}})
+elections:create_index('id_search' , {type='hash', parts={'election_id'}})
+elections:create_index('height_search' , {type='tree',unique=false, parts={'height'}})
+elections:create_index('update_search', {type='tree', unique=false, parts={'election_id', 'height'}})
+
+meta_datas = box.schema.space.create('meta_data',{engine = 'memtx' , is_sync = false})
+meta_datas:format({{name='transaction_id' , type='string'}, {name='meta_data' , type='any'}})
+meta_datas:create_index('id_search', { type='hash' , parts={'transaction_id'}})
+
+pre_commits = box.schema.space.create('pre_commits' , {engine='memtx' , is_sync=false})
+pre_commits:format({{name='commit_id', type='string'}, {name='height',type='integer'}, {name='transactions',type=any}})
+pre_commits:create_index('id_search', {type ='hash' , parts={'commit_id'}})
+pre_commits:create_index('height_search', {type ='tree',unique=true, parts={'height'}})
+
+validators = box.schema.space.create('validators' , {engine = 'memtx' , is_sync = false})
+validators:format({{name='validator_id' , type='string'},{name='height',type='integer'},{name='validators' , type='any'}})
+validators:create_index('id_search' , {type='hash' , parts={'validator_id'}})
+validators:create_index('height_search' , {type='tree', unique=true, parts={'height'}})
+
+transactions = box.schema.space.create('transactions',{engine='memtx' , is_sync=false})
+transactions:format({{name='transaction_id' , type='string'}, {name='operation' , type='string'}, {name='version' ,type='string'}, {name='dict_map', type='any'}})
+transactions:create_index('id_search' , {type = 'hash' , parts={'transaction_id'}})
+transactions:create_index('transaction_search' , {type = 'tree',unique=false, parts={'operation', 'transaction_id'}})
+
+inputs = box.schema.space.create('inputs')
+inputs:format({{name='transaction_id' , type='string'}, {name='fulfillment' , type='any'}, {name='owners_before' , type='array'}, {name='fulfills_transaction_id', type = 'string'}, {name='fulfills_output_index', type = 'string'}, {name='input_id', type='string'}, {name='input_index', type='number'}})
+inputs:create_index('delete_search' , {type = 'hash', parts={'input_id'}})
+inputs:create_index('spent_search' , {type = 'tree', unique=false, parts={'fulfills_transaction_id', 'fulfills_output_index'}})
+inputs:create_index('id_search', {type = 'tree', unique=false, parts = {'transaction_id'}})
+
+outputs = box.schema.space.create('outputs')
+outputs:format({{name='transaction_id' , type='string'}, {name='amount' , type='string'}, {name='uri', type='string'}, {name='details_type', type='string'}, {name='details_public_key', type='any'}, {name = 'output_id', type = 'string'}, {name='treshold', type='any'}, {name='subconditions', type='any'}, {name='output_index', type='number'}})
+outputs:create_index('unique_search' ,{type='hash', parts={'output_id'}})
+outputs:create_index('id_search' ,{type='tree', unique=false, parts={'transaction_id'}})
+
+keys = box.schema.space.create('keys')
+keys:format({{name = 'id', type='string'}, {name = 'transaction_id', type = 'string'} ,{name = 'output_id', type = 'string'}, {name = 'public_key', type = 'string'}, {name = 'key_index', type = 'integer'}})
+keys:create_index('id_search', {type = 'hash', parts={'id'}})
+keys:create_index('keys_search', {type = 'tree', unique=false, parts={'public_key'}})
+keys:create_index('txid_search', {type = 'tree', unique=false, parts={'transaction_id'}})
+keys:create_index('output_search', {type = 'tree', unique=false, parts={'output_id'}})
+
+utxos = box.schema.space.create('utxos', {engine = 'memtx' , is_sync = false})
+utxos:format({{name='transaction_id' , type='string'}, {name='output_index' , type='integer'}, {name='utxo_dict', type='string'}})
+utxos:create_index('id_search', {type='hash' , parts={'transaction_id', 'output_index'}})
+utxos:create_index('transaction_search', {type='tree', unique=false, parts={'transaction_id'}})
+utxos:create_index('index_search', {type='tree', unique=false, parts={'output_index'}})
+
+scripts = box.schema.space.create('scripts' , {engine='memtx' , is_sync=false})
+scripts:format({{name='transaction_id', type='string'},{name='script' , type='any'}})
+scripts:create_index('txid_search', {type='hash', parts={'transaction_id'}})
diff --git a/planetmint/backend/tarantool/query.py b/planetmint/backend/tarantool/query.py
new file mode 100644
index 0000000..588df73
--- /dev/null
+++ b/planetmint/backend/tarantool/query.py
@@ -0,0 +1,482 @@
+# Copyright © 2020 Interplanetary Database Association e.V.,
+# Planetmint and IPDB software contributors.
+# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
+# Code is Apache-2.0 and docs are CC-BY-4.0
+
+"""Query implementation for Tarantool"""
+import json
+
+from secrets import token_hex
+from hashlib import sha256
+from operator import itemgetter
+from tarantool.error import DatabaseError
+from planetmint.backend import query
+from planetmint.backend.utils import module_dispatch_registrar
+from planetmint.backend.tarantool.connection import TarantoolDBConnection
+from planetmint.backend.tarantool.transaction.tools import TransactionCompose, TransactionDecompose
+
+
+register_query = module_dispatch_registrar(query)
+
+
+@register_query(TarantoolDBConnection)
+def _group_transaction_by_ids(connection, txids: list):
+    """Reassemble full transaction dicts for the given ids.
+
+    For each txid, tuples are gathered from every related space and handed
+    to TransactionCompose. Ids with no 'transactions' tuple are silently
+    skipped, so the result may be shorter than ``txids``.
+    """
+    _transactions = []
+    for txid in txids:
+        _txobject = connection.run(connection.space("transactions").select(txid, index="id_search"))
+        if len(_txobject) == 0:
+            continue
+        _txobject = _txobject[0]
+        _txinputs = connection.run(connection.space("inputs").select(txid, index="id_search"))
+        _txoutputs = connection.run(connection.space("outputs").select(txid, index="id_search"))
+        _txkeys = connection.run(connection.space("keys").select(txid, index="txid_search"))
+        _txassets = connection.run(connection.space("assets").select(txid, index="txid_search"))
+        _txmeta = connection.run(connection.space("meta_data").select(txid, index="id_search"))
+        _txscript = connection.run(connection.space("scripts").select(txid, index="txid_search"))
+
+        # Sort by input_index (tuple field 6) / output_index (tuple field 8)
+        # so composition sees the parts in their on-chain order.
+        _txinputs = sorted(_txinputs, key=itemgetter(6), reverse=False)
+        _txoutputs = sorted(_txoutputs, key=itemgetter(8), reverse=False)
+        result_map = {
+            "transaction": _txobject,
+            "inputs": _txinputs,
+            "outputs": _txoutputs,
+            "keys": _txkeys,
+            "asset": _txassets,
+            "metadata": _txmeta,
+            "script": _txscript,
+        }
+        tx_compose = TransactionCompose(db_results=result_map)
+        _transaction = tx_compose.convert_to_dict()
+        _transactions.append(_transaction)
+    return _transactions
+
+
+@register_query(TarantoolDBConnection)
+def store_transactions(connection, signed_transactions: list):
+    """Decompose each signed transaction dict into tuples and insert them.
+
+    A duplicate primary-key insert into 'transactions' aborts the whole
+    transaction (continue), so none of its child tuples are written.
+    """
+    for transaction in signed_transactions:
+        txprepare = TransactionDecompose(transaction)
+        txtuples = txprepare.convert_to_tuple()
+        try:
+            connection.run(connection.space("transactions").insert(txtuples["transactions"]), only_data=False)
+        # NOTE(review): bare except swallows *all* errors, not just duplicates —
+        # kept as-is; see the original rationale below.
+        except: # This is used for omitting duplicate error in database for test -> test_bigchain_api::test_double_inclusion # noqa: E501, E722
+            continue
+        for _in in txtuples["inputs"]:
+            connection.run(connection.space("inputs").insert(_in), only_data=False)
+        for _out in txtuples["outputs"]:
+            connection.run(connection.space("outputs").insert(_out), only_data=False)
+
+        for _key in txtuples["keys"]:
+            connection.run(connection.space("keys").insert(_key), only_data=False)
+
+        # metadata/asset/script are optional parts of a transaction.
+        if txtuples["metadata"] is not None:
+            connection.run(connection.space("meta_data").insert(txtuples["metadata"]), only_data=False)
+
+        if txtuples["asset"] is not None:
+            connection.run(connection.space("assets").insert(txtuples["asset"]), only_data=False)
+
+        if txtuples["script"] is not None:
+            connection.run(connection.space("scripts").insert(txtuples["script"]), only_data=False)
+
+
+@register_query(TarantoolDBConnection)
+def get_transaction(connection, transaction_id: str):
+    """Return the single transaction dict for ``transaction_id``, or None."""
+    _transactions = _group_transaction_by_ids(txids=[transaction_id], connection=connection)
+    return next(iter(_transactions), None)
+
+
+@register_query(TarantoolDBConnection)
+def get_transactions(connection, transactions_ids: list):
+    """Return the list of transaction dicts for the given ids (missing ids are skipped)."""
+    _transactions = _group_transaction_by_ids(txids=transactions_ids, connection=connection)
+    return _transactions
+
+
+@register_query(TarantoolDBConnection)
+def store_metadatas(connection, metadata: list):
+    """Insert (id, json-metadata) tuples; accepts either a 'data' or 'metadata' key.
+
+    NOTE(review): the ``# noqa: E713`` sits on the closing-paren line, so it
+    does not suppress the ``not ... in`` warning on the line it targets.
+    """
+    for meta in metadata:
+        connection.run(
+            connection.space("meta_data").insert(
+                (meta["id"], json.dumps(meta["data"] if not "metadata" in meta else meta["metadata"]))
+            )  # noqa: E713
+        )
+
+
+@register_query(TarantoolDBConnection)
+def get_metadata(connection, transaction_ids: list):
+    """Return, per id, the raw select result with the JSON field decoded.
+
+    NOTE(review): each appended element is the whole tuple *list* for that id
+    (possibly empty), not a bare metadata dict — callers must unwrap.
+    """
+    _returned_data = []
+    for _id in transaction_ids:
+        metadata = connection.run(connection.space("meta_data").select(_id, index="id_search"))
+        if metadata is not None:
+            if len(metadata) > 0:
+                # decode the JSON payload (field 1) in place, keeping tuple shape
+                metadata[0] = list(metadata[0])
+                metadata[0][1] = json.loads(metadata[0][1])
+                metadata[0] = tuple(metadata[0])
+                _returned_data.append(metadata)
+    return _returned_data
+
+
+@register_query(TarantoolDBConnection)
+def store_asset(connection, asset):
+    """Insert one asset as (json_data, tx_id, asset_id).
+
+    Accepts either a pre-built tuple (first field gets JSON-encoded) or a
+    plain dict (tx_id and asset_id both default to ``asset['id']``).
+    Duplicate-key DatabaseError is deliberately swallowed (best-effort insert).
+    """
+    def convert(obj):
+        if isinstance(obj, tuple):
+            obj = list(obj)
+            obj[0] = json.dumps(obj[0])
+            return tuple(obj)
+        else:
+            return (json.dumps(obj), obj["id"], obj["id"])
+
+    try:
+        return connection.run(connection.space("assets").insert(convert(asset)), only_data=False)
+    except DatabaseError:
+        pass
+
+
+@register_query(TarantoolDBConnection)
+def store_assets(connection, assets: list):
+    """Best-effort insert of every asset via store_asset (duplicates ignored)."""
+    for asset in assets:
+        store_asset(connection, asset)
+
+
+@register_query(TarantoolDBConnection)
+def get_asset(connection, asset_id: str):
+    """Return the decoded asset data for a tx id, or [] if not found.
+
+    NOTE(review): despite the parameter name, the lookup uses 'txid_search'.
+    """
+    _data = connection.run(connection.space("assets").select(asset_id, index="txid_search"))
+
+    return json.loads(_data[0][0]) if len(_data) > 0 else []
+
+
+@register_query(TarantoolDBConnection)
+def get_assets(connection, assets_ids: list) -> list:
+    """Return (decoded_data, tx_id) pairs for the given ids, sorted by tx_id.
+
+    NOTE(review): ``res[0]`` raises IndexError when an id has no asset tuple —
+    unlike get_asset, missing ids are not tolerated here; confirm intent.
+    """
+    _returned_data = []
+    for _id in list(set(assets_ids)):
+        res = connection.run(connection.space("assets").select(_id, index="txid_search"))
+        _returned_data.append(res[0])
+
+    sorted_assets = sorted(_returned_data, key=lambda k: k[1], reverse=False)
+    return [(json.loads(asset[0]), asset[1]) for asset in sorted_assets]
+
+
+@register_query(TarantoolDBConnection)
+def get_spent(connection, fullfil_transaction_id: str, fullfil_output_index: str):
+    """Return transactions whose inputs spend the given (tx id, output index).
+
+    The output index is stringified because 'fulfills_output_index' is stored
+    as a string field. ('fullfil' spelling kept — it is the public signature.)
+    """
+    _inputs = connection.run(
+        connection.space("inputs").select([fullfil_transaction_id, str(fullfil_output_index)], index="spent_search")
+    )
+    _transactions = _group_transaction_by_ids(txids=[inp[0] for inp in _inputs], connection=connection)
+    return _transactions
+
+
+@register_query(TarantoolDBConnection)
+def get_latest_block(connection):  # TODO Here is used DESCENDING OPERATOR
+    """Return the highest-height block as a dict, or None when no blocks exist.
+
+    Sorting happens client-side (full scan + sort on height, tuple field 1)
+    instead of a descending index iterator — hence the TODO above.
+    """
+    _all_blocks = connection.run(connection.space("blocks").select())
+    block = {"app_hash": "", "height": 0, "transactions": []}
+
+    if _all_blocks is not None:
+        if len(_all_blocks) > 0:
+            _block = sorted(_all_blocks, key=itemgetter(1), reverse=True)[0]
+            _txids = connection.run(connection.space("blocks_tx").select(_block[2], index="block_search"))
+            block["app_hash"] = _block[0]
+            block["height"] = _block[1]
+            block["transactions"] = [tx[0] for tx in _txids]
+        else:
+            block = None
+    return block
+
+
+@register_query(TarantoolDBConnection)
+def store_block(connection, block: dict):
+    """Insert a block tuple plus one blocks_tx row per contained transaction.
+
+    A random 8-byte hex id links the block to its blocks_tx rows.
+    """
+    block_unique_id = token_hex(8)
+    connection.run(
+        connection.space("blocks").insert((block["app_hash"], block["height"], block_unique_id)), only_data=False
+    )
+    for txid in block["transactions"]:
+        connection.run(connection.space("blocks_tx").insert((txid, block_unique_id)), only_data=False)
+
+
+@register_query(TarantoolDBConnection)
+def get_txids_filtered(
+    connection, asset_id: str, operation: str = None, last_tx: any = None
+):  # TODO here is used 'OR' operator
+    """Return tx ids touching ``asset_id``, optionally filtered by operation.
+
+    operation == 'CREATE'  -> ids of CREATE txs with this id.
+    operation == 'TRANSFER'-> ids of TRANSFER txs of assets linked to this id.
+    operation is None      -> union of matching tx ids and asset tx ids
+                              (emulates an OR; returns *before* the last_tx
+                              check below — NOTE(review): last_tx is therefore
+                              ignored in the None branch; confirm intent).
+    """
+    actions = {
+        "CREATE": {"sets": ["CREATE", asset_id], "index": "transaction_search"},
+        # 1 - operation, 2 - id (only in transactions) +
+        "TRANSFER": {"sets": ["TRANSFER", asset_id], "index": "transaction_search"},
+        # 1 - operation, 2 - asset.id (linked mode) + OPERATOR OR
+        None: {"sets": [asset_id, asset_id]},
+    }[operation]
+    _transactions = []
+    if actions["sets"][0] == "CREATE":  # +
+        _transactions = connection.run(
+            connection.space("transactions").select([operation, asset_id], index=actions["index"])
+        )
+    elif actions["sets"][0] == "TRANSFER":  # +
+        _assets = connection.run(connection.space("assets").select([asset_id], index="only_asset_search"))
+
+        for asset in _assets:
+            _txid = asset[1]
+            _tmp_transactions = connection.run(
+                connection.space("transactions").select([operation, _txid], index=actions["index"])
+            )
+            if len(_tmp_transactions) != 0:
+                _transactions.extend(_tmp_transactions)
+    else:
+        _tx_ids = connection.run(connection.space("transactions").select([asset_id], index="id_search"))
+        _assets_ids = connection.run(connection.space("assets").select([asset_id], index="only_asset_search"))
+        return tuple(set([sublist[1] for sublist in _assets_ids] + [sublist[0] for sublist in _tx_ids]))
+
+    if last_tx:
+        # only the first matching tuple is returned when last_tx is truthy
+        return tuple(next(iter(_transactions)))
+
+    return tuple([elem[0] for elem in _transactions])
+
+
+@register_query(TarantoolDBConnection)
+def text_search(conn, search, table="assets", limit=0):
+    """Pattern-search a space via the server-side 'indexed_pattern_search' proc.
+
+    ``limit == 0`` means no limit. The pattern wraps the term in '.' — presumably
+    Lua-pattern single-char wildcards, not regex; TODO confirm against the
+    stored procedure's semantics.
+    """
+    pattern = ".{}.".format(search)
+    # field 1 holds JSON data in 'assets'; field 2 holds it in 'meta_data'
+    field_no = 1 if table == "assets" else 2  # 2 for meta_data
+    res = conn.run(conn.space(table).call("indexed_pattern_search", (table, field_no, pattern)))
+
+    to_return = []
+
+    if len(res[0]):  # NEEDS BEAUTIFICATION
+        if table == "assets":
+            for result in res[0]:
+                to_return.append({"data": json.loads(result[0])["data"], "id": result[1]})
+        else:
+            for result in res[0]:
+                to_return.append({"metadata": json.loads(result[1]), "id": result[0]})
+
+    return to_return if limit == 0 else to_return[:limit]
+
+
+def _remove_text_score(asset):
+    """Strip the text-search 'score' key (if present) from an asset dict, in place."""
+    asset.pop("score", None)
+    return asset
+
+
+@register_query(TarantoolDBConnection)
+def get_owned_ids(connection, owner: str):
+    """Return all transactions whose outputs are held by ``owner``'s public key."""
+    _keys = connection.run(connection.space("keys").select(owner, index="keys_search"))
+    if _keys is None or len(_keys) == 0:
+        return []
+    # de-duplicate: one key tuple exists per (tx, output, key) triple
+    _transactionids = list(set([key[1] for key in _keys]))
+    _transactions = _group_transaction_by_ids(txids=_transactionids, connection=connection)
+    return _transactions
+
+
+@register_query(TarantoolDBConnection)
+def get_spending_transactions(connection, inputs):
+    """Collect, via get_spent, every transaction spending any of ``inputs``.
+
+    Each input is a dict with 'transaction_id' and 'output_index' keys.
+    """
+    _transactions = []
+
+    for inp in inputs:
+        _trans_list = get_spent(
+            fullfil_transaction_id=inp["transaction_id"],
+            fullfil_output_index=inp["output_index"],
+            connection=connection,
+        )
+        _transactions.extend(_trans_list)
+
+    return _transactions
+
+
+@register_query(TarantoolDBConnection)
+def get_block(connection, block_id=[]):
+    """Return {'app_hash', 'height', 'transactions'} for a height, or [] if absent.
+
+    NOTE(review): ``block_id`` is actually a height (looked up via
+    'block_search'), the mutable [] default is shared, and the not-found
+    return type ([]) differs from the found type (dict) — confirm callers.
+    """
+    _block = connection.run(connection.space("blocks").select(block_id, index="block_search", limit=1))
+    if _block is None or len(_block) == 0:
+        return []
+    _block = _block[0]
+    _txblock = connection.run(connection.space("blocks_tx").select(_block[2], index="block_search"))
+    return {"app_hash": _block[0], "height": _block[1], "transactions": [_tx[0] for _tx in _txblock]}
+
+
+@register_query(TarantoolDBConnection)
+def get_block_with_transaction(connection, txid: str):
+    """Return [{'height': h}, ...] for blocks containing ``txid``, or [] if none.
+
+    Only the first blocks_tx match is followed up to the 'blocks' space.
+    """
+    _all_blocks_tx = connection.run(connection.space("blocks_tx").select(txid, index="id_search"))
+    if _all_blocks_tx is None or len(_all_blocks_tx) == 0:
+        return []
+    _block = connection.run(connection.space("blocks").select(_all_blocks_tx[0][1], index="block_id_search"))
+    return [{"height": _height[1]} for _height in _block]
+
+
+@register_query(TarantoolDBConnection)
+def delete_transactions(connection, txn_ids: list):
+    """Delete each transaction and all of its dependent tuples (inputs,
+    outputs, keys, metadata, assets) across the related spaces.
+
+    NOTE(review): ``delete(..., index=...)`` is passed for several spaces;
+    Tarantool deletes operate on the primary index — verify the connector
+    honors this keyword, otherwise the keyword is silently misleading.
+    """
+    for _id in txn_ids:
+        connection.run(connection.space("transactions").delete(_id), only_data=False)
+    for _id in txn_ids:
+        _inputs = connection.run(connection.space("inputs").select(_id, index="id_search"), only_data=False)
+        _outputs = connection.run(connection.space("outputs").select(_id, index="id_search"), only_data=False)
+        _keys = connection.run(connection.space("keys").select(_id, index="txid_search"), only_data=False)
+        for _kID in _keys:
+            connection.run(connection.space("keys").delete(_kID[0], index="id_search"), only_data=False)
+        for _inpID in _inputs:
+            # field 5 is input_id / output_id — the unique key of each tuple
+            connection.run(connection.space("inputs").delete(_inpID[5], index="delete_search"), only_data=False)
+        for _outpID in _outputs:
+            connection.run(connection.space("outputs").delete(_outpID[5], index="unique_search"), only_data=False)
+
+    for _id in txn_ids:
+        connection.run(connection.space("meta_data").delete(_id, index="id_search"), only_data=False)
+
+    for _id in txn_ids:
+        connection.run(connection.space("assets").delete(_id, index="txid_search"), only_data=False)
+
+
+@register_query(TarantoolDBConnection)
+def store_unspent_outputs(connection, *unspent_outputs: list):
+    """Insert each UTXO dict as (tx_id, output_index, json_dump); return the results."""
+    result = []
+    if unspent_outputs:
+        for utxo in unspent_outputs:
+            output = connection.run(
+                connection.space("utxos").insert((utxo["transaction_id"], utxo["output_index"], json.dumps(utxo)))
+            )
+            result.append(output)
+    return result
+
+
+@register_query(TarantoolDBConnection)
+def delete_unspent_outputs(connection, *unspent_outputs: list):
+    """Delete each UTXO by its (transaction_id, output_index) primary key."""
+    result = []
+    if unspent_outputs:
+        for utxo in unspent_outputs:
+            output = connection.run(connection.space("utxos").delete((utxo["transaction_id"], utxo["output_index"])))
+            result.append(output)
+    return result
+
+
+@register_query(TarantoolDBConnection)
+def get_unspent_outputs(connection, query=None):  # for now we don't have implementation for 'query'.
+    """Return every stored UTXO dict (full scan; the 'query' filter is unimplemented)."""
+    _utxos = connection.run(connection.space("utxos").select([]))
+    return [json.loads(utx[2]) for utx in _utxos]
+
+
+@register_query(TarantoolDBConnection)
+def store_pre_commit_state(connection, state: dict):
+    """Upsert the single pre-commit row with the latest height/transactions.
+
+    A fresh random commit_id is minted only when no row exists yet; otherwise
+    the existing tuple is reused so the upsert updates in place.
+    """
+    _precommit = connection.run(connection.space("pre_commits").select([], limit=1))
+    _precommitTuple = (
+        (token_hex(8), state["height"], state["transactions"])
+        if _precommit is None or len(_precommit) == 0
+        else _precommit[0]
+    )
+    connection.run(
+        connection.space("pre_commits").upsert(
+            _precommitTuple, op_list=[("=", 1, state["height"]), ("=", 2, state["transactions"])], limit=1
+        ),
+        only_data=False,
+    )
+
+
+@register_query(TarantoolDBConnection)
+def get_pre_commit_state(connection):
+    """Return {'height', 'transactions'} from the pre_commits space, or None.
+
+    NOTE(review): the ascending sort picks the *lowest*-height row if several
+    exist; presumably the space only ever holds one row — confirm.
+    """
+    _commit = connection.run(connection.space("pre_commits").select([], index="id_search"))
+    if _commit is None or len(_commit) == 0:
+        return None
+    _commit = sorted(_commit, key=itemgetter(1), reverse=False)[0]
+    return {"height": _commit[1], "transactions": _commit[2]}
+
+
+@register_query(TarantoolDBConnection)
+def store_validator_set(conn, validators_update: dict):
+    """Upsert the validator set for a height, keyed by a random validator_id.
+
+    The existing row's id is reused when one exists for this height so the
+    unique 'height_search' index is not violated.
+    """
+    _validator = conn.run(conn.space("validators").select(validators_update["height"], index="height_search", limit=1))
+    unique_id = token_hex(8) if _validator is None or len(_validator) == 0 else _validator[0][0]
+    conn.run(
+        conn.space("validators").upsert(
+            (unique_id, validators_update["height"], validators_update["validators"]),
+            op_list=[("=", 1, validators_update["height"]), ("=", 2, validators_update["validators"])],
+            limit=1,
+        ),
+        only_data=False,
+    )
+
+
+@register_query(TarantoolDBConnection)
+def delete_validator_set(connection, height: int):
+    """Delete every validator-set row recorded at ``height``."""
+    _validators = connection.run(connection.space("validators").select(height, index="height_search"))
+    for _valid in _validators:
+        connection.run(connection.space("validators").delete(_valid[0]), only_data=False)
+
+
+@register_query(TarantoolDBConnection)
+def store_election(connection, election_id: str, height: int, is_concluded: bool):
+    """Upsert a single election row keyed by election_id."""
+    connection.run(
+        connection.space("elections").upsert(
+            (election_id, height, is_concluded), op_list=[("=", 1, height), ("=", 2, is_concluded)], limit=1
+        ),
+        only_data=False,
+    )
+
+
+@register_query(TarantoolDBConnection)
+def store_elections(connection, elections: list):
+    """Insert each election dict as a tuple.
+
+    NOTE(review): unlike store_election this uses plain insert, so a duplicate
+    election_id raises instead of updating — confirm that is intended.
+    """
+    for election in elections:
+        _election = connection.run(  # noqa: F841
+            connection.space("elections").insert(
+                (election["election_id"], election["height"], election["is_concluded"])
+            ),
+            only_data=False,
+        )
+
+
+@register_query(TarantoolDBConnection)
+def delete_elections(connection, height: int):
+    """Delete every election recorded at ``height``."""
+    _elections = connection.run(connection.space("elections").select(height, index="height_search"))
+    for _elec in _elections:
+        connection.run(connection.space("elections").delete(_elec[0]), only_data=False)
+
+
+@register_query(TarantoolDBConnection)
+def get_validator_set(connection, height: int = None):
+    """Return the validator set effective at ``height`` (or the latest one).
+
+    With a height: the newest set whose height <= ``height``. Without: the
+    newest set overall. Returns None when the space yields nothing.
+    """
+    _validators = connection.run(connection.space("validators").select())
+    if height is not None and _validators is not None:
+        _validators = [
+            {"height": validator[1], "validators": validator[2]} for validator in _validators if validator[1] <= height
+        ]
+        return next(iter(sorted(_validators, key=lambda k: k["height"], reverse=True)), None)
+    elif _validators is not None:
+        _validators = [{"height": validator[1], "validators": validator[2]} for validator in _validators]
+        return next(iter(sorted(_validators, key=lambda k: k["height"], reverse=True)), None)
+    return None
+
+
+@register_query(TarantoolDBConnection)
+def get_election(connection, election_id: str):
+    """Return the election dict for ``election_id``, or None when absent."""
+    _elections = connection.run(connection.space("elections").select(election_id, index="id_search"))
+    if _elections is None or len(_elections) == 0:
+        return None
+    _election = sorted(_elections, key=itemgetter(0), reverse=True)[0]
+    return {"election_id": _election[0], "height": _election[1], "is_concluded": _election[2]}
+
+
+@register_query(TarantoolDBConnection)
+def get_asset_tokens_for_public_key(
+    connection, asset_id: str, public_key: str
+):  # FIXME Something can be wrong with this function ! (public_key) is not used # noqa: E501
+    """Return all transactions linked to ``asset_id``.
+
+    NOTE(review): per the FIXME above, ``public_key`` is accepted but never
+    used, so the result is not filtered by owner.
+    """
+    # space = connection.space("keys")
+    # _keys = space.select([public_key], index="keys_search")
+    _transactions = connection.run(connection.space("assets").select([asset_id], index="assetid_search"))
+    # _transactions = _transactions
+    # _keys = _keys.data
+    _grouped_transactions = _group_transaction_by_ids(connection=connection, txids=[_tx[1] for _tx in _transactions])
+    return _grouped_transactions
+
+
+@register_query(TarantoolDBConnection)
+def store_abci_chain(connection, height: int, chain_id: str, is_synced: bool = True):
+    """Upsert the ABCI chain record for ``height``.
+
+    The primary key is sha256 of {'height': height}, so each height maps to
+    exactly one row regardless of chain_id.
+    """
+    hash_id_primarykey = sha256(json.dumps(obj={"height": height}).encode()).hexdigest()
+    connection.run(
+        connection.space("abci_chains").upsert(
+            (height, is_synced, chain_id, hash_id_primarykey),
+            op_list=[("=", 0, height), ("=", 1, is_synced), ("=", 2, chain_id)],
+        ),
+        only_data=False,
+    )
+
+
+@register_query(TarantoolDBConnection)
+def delete_abci_chain(connection, height: int):
+    """Delete the ABCI chain row for ``height`` (same derived key as store_abci_chain)."""
+    hash_id_primarykey = sha256(json.dumps(obj={"height": height}).encode()).hexdigest()
+    connection.run(connection.space("abci_chains").delete(hash_id_primarykey), only_data=False)
+
+
+@register_query(TarantoolDBConnection)
+def get_latest_abci_chain(connection):
+    """Return the highest-height ABCI chain record, or None (client-side sort)."""
+    _all_chains = connection.run(connection.space("abci_chains").select())
+    if _all_chains is None or len(_all_chains) == 0:
+        return None
+    _chain = sorted(_all_chains, key=itemgetter(0), reverse=True)[0]
+    return {"height": _chain[0], "is_synced": _chain[1], "chain_id": _chain[2]}
diff --git a/planetmint/backend/tarantool/schema.py b/planetmint/backend/tarantool/schema.py
new file mode 100644
index 0000000..c10c6b2
--- /dev/null
+++ b/planetmint/backend/tarantool/schema.py
@@ -0,0 +1,212 @@
+import logging
+
+from planetmint.config import Config
+from planetmint.backend.utils import module_dispatch_registrar
+from planetmint import backend
+from planetmint.backend.tarantool.connection import TarantoolDBConnection
+
+logger = logging.getLogger(__name__)
+register_schema = module_dispatch_registrar(backend.schema)
+
+# All Tarantool spaces managed by this backend; drives the create/drop loops below.
+SPACE_NAMES = (
+    "abci_chains",
+    "assets",
+    "blocks",
+    "blocks_tx",
+    "elections",
+    "meta_data",
+    "pre_commits",
+    "validators",
+    "transactions",
+    "inputs",
+    "outputs",
+    "keys",
+    "utxos",
+    "scripts",
+)
+
+
+# Lua snippets executed via tarantoolctl to create each space.
+# NOTE(review): the 'meta_data' space is bound to a Lua variable named
+# 'meta_datas'; the INDEX/SCHEMA commands below depend on that exact name.
+SPACE_COMMANDS = {
+    "abci_chains": "abci_chains = box.schema.space.create('abci_chains', {engine='memtx', is_sync = false})",
+    "assets": "assets = box.schema.space.create('assets' , {engine='memtx' , is_sync=false})",
+    "blocks": "blocks = box.schema.space.create('blocks' , {engine='memtx' , is_sync=false})",
+    "blocks_tx": "blocks_tx = box.schema.space.create('blocks_tx')",
+    "elections": "elections = box.schema.space.create('elections',{engine = 'memtx' , is_sync = false})",
+    "meta_data": "meta_datas = box.schema.space.create('meta_data',{engine = 'memtx' , is_sync = false})",
+    "pre_commits": "pre_commits = box.schema.space.create('pre_commits' , {engine='memtx' , is_sync=false})",
+    "validators": "validators = box.schema.space.create('validators' , {engine = 'memtx' , is_sync = false})",
+    "transactions": "transactions = box.schema.space.create('transactions',{engine='memtx' , is_sync=false})",
+    "inputs": "inputs = box.schema.space.create('inputs')",
+    "outputs": "outputs = box.schema.space.create('outputs')",
+    "keys": "keys = box.schema.space.create('keys')",
+    "utxos": "utxos = box.schema.space.create('utxos', {engine = 'memtx' , is_sync = false})",
+    "scripts": "scripts = box.schema.space.create('scripts', {engine = 'memtx' , is_sync = false})",
+}
+
+# Lua snippets creating each index, keyed space -> index name.
+# NOTE(review): the 'text_search' entries actually create an index named
+# 'secondary', and the utxos key 'index_Search' has a stray capital S
+# (harmless — keys are only used as labels in create_indexes()).
+INDEX_COMMANDS = {
+    "abci_chains": {
+        "id_search": "abci_chains:create_index('id_search' ,{type='tree', parts={'id'}})",
+        "height_search": "abci_chains:create_index('height_search' ,{type='tree', unique=false, parts={'height'}})",
+    },
+    "assets": {
+        "txid_search": "assets:create_index('txid_search', {type='tree', parts={'tx_id'}})",
+        "assetid_search": "assets:create_index('assetid_search', {type='tree',unique=false, parts={'asset_id', 'tx_id'}})",  # noqa: E501
+        "only_asset_search": "assets:create_index('only_asset_search', {type='tree', unique=false, parts={'asset_id'}})",  # noqa: E501
+        "text_search": "assets:create_index('secondary', {unique=false,parts={1,'string'}})",
+    },
+    "blocks": {
+        "id_search": "blocks:create_index('id_search' , {type='tree' , parts={'block_id'}})",
+        "block_search": "blocks:create_index('block_search' , {type='tree', unique = false, parts={'height'}})",
+        "block_id_search": "blocks:create_index('block_id_search', {type = 'hash', parts ={'block_id'}})",
+    },
+    "blocks_tx": {
+        "id_search": "blocks_tx:create_index('id_search',{ type = 'tree', parts={'transaction_id'}})",
+        "block_search": "blocks_tx:create_index('block_search', {type = 'tree',unique=false, parts={'block_id'}})",
+    },
+    "elections": {
+        "id_search": "elections:create_index('id_search' , {type='tree', parts={'election_id'}})",
+        "height_search": "elections:create_index('height_search' , {type='tree',unique=false, parts={'height'}})",
+        "update_search": "elections:create_index('update_search', {type='tree', unique=false, parts={'election_id', 'height'}})",  # noqa: E501
+    },
+    "meta_data": {
+        "id_search": "meta_datas:create_index('id_search', { type='tree' , parts={'transaction_id'}})",
+        "text_search": "meta_datas:create_index('secondary', {unique=false,parts={2,'string'}})",
+    },
+    "pre_commits": {
+        "id_search": "pre_commits:create_index('id_search', {type ='tree' , parts={'commit_id'}})",
+        "height_search": "pre_commits:create_index('height_search', {type ='tree',unique=true, parts={'height'}})",
+    },
+    "validators": {
+        "id_search": "validators:create_index('id_search' , {type='tree' , parts={'validator_id'}})",
+        "height_search": "validators:create_index('height_search' , {type='tree', unique=true, parts={'height'}})",
+    },
+    "transactions": {
+        "id_search": "transactions:create_index('id_search' , {type = 'tree' , parts={'transaction_id'}})",
+        "transaction_search": "transactions:create_index('transaction_search' , {type = 'tree',unique=false, parts={'operation', 'transaction_id'}})",  # noqa: E501
+    },
+    "inputs": {
+        "delete_search": "inputs:create_index('delete_search' , {type = 'tree', parts={'input_id'}})",
+        "spent_search": "inputs:create_index('spent_search' , {type = 'tree', unique=false, parts={'fulfills_transaction_id', 'fulfills_output_index'}})",  # noqa: E501
+        "id_search": "inputs:create_index('id_search', {type = 'tree', unique=false, parts = {'transaction_id'}})",
+    },
+    "outputs": {
+        "unique_search": "outputs:create_index('unique_search' ,{type='tree', parts={'output_id'}})",
+        "id_search": "outputs:create_index('id_search' ,{type='tree', unique=false, parts={'transaction_id'}})",
+    },
+    "keys": {
+        "id_search": "keys:create_index('id_search', {type = 'tree', parts={'id'}})",
+        "keys_search": "keys:create_index('keys_search', {type = 'tree', unique=false, parts={'public_key'}})",
+        "txid_search": "keys:create_index('txid_search', {type = 'tree', unique=false, parts={'transaction_id'}})",
+        "output_search": "keys:create_index('output_search', {type = 'tree', unique=false, parts={'output_id'}})",
+    },
+    "utxos": {
+        "id_search": "utxos:create_index('id_search', {type='tree' , parts={'transaction_id', 'output_index'}})",
+        "transaction_search": "utxos:create_index('transaction_search', {type='tree', unique=false, parts={'transaction_id'}})",  # noqa: E501
+        "index_Search": "utxos:create_index('index_search', {type='tree', unique=false, parts={'output_index'}})",
+    },
+    "scripts": {
+        "txid_search": "scripts:create_index('txid_search', {type='tree', parts={'transaction_id'}})",
+    },
+}
+
+
+# Lua snippets declaring each space's tuple format.
+# NOTE(review): in the 'pre_commits' entry, 'type=any' is unquoted Lua ('any'
+# is an undefined global, i.e. nil) unlike every other entry — likely a bug.
+SCHEMA_COMMANDS = {
+    "abci_chains": "abci_chains:format({{name='height' , type='integer'},{name='is_synched' , type='boolean'},{name='chain_id',type='string'}, {name='id', type='string'}})",  # noqa: E501
+    "assets": "assets:format({{name='data' , type='string'}, {name='tx_id', type='string'}, {name='asset_id', type='string'}})",  # noqa: E501
+    "blocks": "blocks:format{{name='app_hash',type='string'},{name='height' , type='integer'},{name='block_id' , type='string'}}",  # noqa: E501
+    "blocks_tx": "blocks_tx:format{{name='transaction_id', type = 'string'}, {name = 'block_id', type = 'string'}}",
+    "elections": "elections:format({{name='election_id' , type='string'},{name='height' , type='integer'}, {name='is_concluded' , type='boolean'}})",  # noqa: E501
+    "meta_data": "meta_datas:format({{name='transaction_id' , type='string'}, {name='meta_data' , type='string'}})",  # noqa: E501
+    "pre_commits": "pre_commits:format({{name='commit_id', type='string'}, {name='height',type='integer'}, {name='transactions',type=any}})",  # noqa: E501
+    "validators": "validators:format({{name='validator_id' , type='string'},{name='height',type='integer'},{name='validators' , type='any'}})",  # noqa: E501
+    "transactions": "transactions:format({{name='transaction_id' , type='string'}, {name='operation' , type='string'}, {name='version' ,type='string'}, {name='dict_map', type='any'}})",  # noqa: E501
+    "inputs": "inputs:format({{name='transaction_id' , type='string'}, {name='fulfillment' , type='any'}, {name='owners_before' , type='array'}, {name='fulfills_transaction_id', type = 'string'}, {name='fulfills_output_index', type = 'string'}, {name='input_id', type='string'}, {name='input_index', type='number'}})",  # noqa: E501
+    "outputs": "outputs:format({{name='transaction_id' , type='string'}, {name='amount' , type='string'}, {name='uri', type='string'}, {name='details_type', type='string'}, {name='details_public_key', type='any'}, {name = 'output_id', type = 'string'}, {name='treshold', type='any'}, {name='subconditions', type='any'}, {name='output_index', type='number'}})",  # noqa: E501
+    "keys": "keys:format({{name = 'id', type='string'}, {name = 'transaction_id', type = 'string'} ,{name = 'output_id', type = 'string'}, {name = 'public_key', type = 'string'}, {name = 'key_index', type = 'integer'}})",  # noqa: E501
+    "utxos": "utxos:format({{name='transaction_id' , type='string'}, {name='output_index' , type='integer'}, {name='utxo_dict', type='string'}})",  # noqa: E501
+    "scripts": "scripts:format({{name='transaction_id', type='string'},{name='script' , type='any'}})",  # noqa: E501
+}
+
+# Lua snippets dropping each space (used by drop_database()).
+SCHEMA_DROP_COMMANDS = {
+    "abci_chains": "box.space.abci_chains:drop()",
+    "assets": "box.space.assets:drop()",
+    "blocks": "box.space.blocks:drop()",
+    "blocks_tx": "box.space.blocks_tx:drop()",
+    "elections": "box.space.elections:drop()",
+    "meta_data": "box.space.meta_data:drop()",
+    "pre_commits": "box.space.pre_commits:drop()",
+    "validators": "box.space.validators:drop()",
+    "transactions": "box.space.transactions:drop()",
+    "inputs": "box.space.inputs:drop()",
+    "outputs": "box.space.outputs:drop()",
+    "keys": "box.space.keys:drop()",
+    "utxos": "box.space.utxos:drop()",
+    "scripts": "box.space.scripts:drop()",
+}
+
+
+@register_schema(TarantoolDBConnection)
+def drop_database(connection, not_used=None):
+    """Drop every managed space via tarantoolctl; failures only print a warning.
+
+    NOTE(review): ``connection`` is unused — commands go through
+    run_command_with_output(), which reads host/port from Config.
+    """
+    for _space in SPACE_NAMES:
+        try:
+            cmd = SCHEMA_DROP_COMMANDS[_space].encode()
+            run_command_with_output(command=cmd)
+            print(f"Space '{_space}' was dropped succesfuly.")
+        except Exception:
+            print(f"Unexpected error while trying to drop space '{_space}'")
+
+
+@register_schema(TarantoolDBConnection)
+def create_database(connection, dbname):
+    """
+
+    For tarantool implementation, this function runs
+    create_tables, to initiate spaces, schema and indexes.
+
+    """
+    # Intentionally a no-op beyond logging: Tarantool has no separate
+    # "database" concept; actual setup happens in create_tables().
+    logger.info("Create database `%s`.", dbname)
+
+
+def run_command_with_output(command):
+    """Pipe a Lua command (bytes) into `tarantoolctl connect host:port`.
+
+    Returns captured stdout; raises on a non-zero exit code. Host/port come
+    from the global Config, not from any connection object.
+    """
+    from subprocess import run
+
+    host_port = "%s:%s" % (
+        Config().get()["database"]["host"],
+        Config().get()["database"]["port"],
+    )
+    output = run(["tarantoolctl", "connect", host_port], input=command, capture_output=True)
+    if output.returncode != 0:
+        raise Exception(f"Error while trying to execute cmd {command} on host:port {host_port}: {output.stderr}")
+    return output.stdout
+
+
+@register_schema(TarantoolDBConnection)
+def create_tables(connection, dbname):
+    """Create every space, then its tuple format and indexes.
+
+    Errors are printed, not raised, so setup continues past a failing space.
+    ``connection``/``dbname`` are unused here (commands go via tarantoolctl).
+    """
+    for _space in SPACE_NAMES:
+        try:
+            cmd = SPACE_COMMANDS[_space].encode()
+            run_command_with_output(command=cmd)
+            print(f"Space '{_space}' created.")
+        except Exception as err:
+            print(f"Unexpected error while trying to create '{_space}': {err}")
+        # format must exist before indexes referencing named fields
+        create_schema(space_name=_space)
+        create_indexes(space_name=_space)
+
+
+def create_indexes(space_name):
+    """Run every INDEX_COMMANDS entry for ``space_name``; print (not raise) failures."""
+    indexes = INDEX_COMMANDS[space_name]
+    for index_name, index_cmd in indexes.items():
+        try:
+            run_command_with_output(command=index_cmd.encode())
+            print(f"Index '{index_name}' created succesfully.")
+        except Exception as err:
+            print(f"Unexpected error while trying to create index '{index_name}': '{err}'")
+
+
+def create_schema(space_name):
+    """Apply the SCHEMA_COMMANDS tuple format for ``space_name``; print failures.
+
+    NOTE(review): the error message says "creating index" — it should say
+    "schema"/"format"; left as-is (patch fidelity).
+    """
+    try:
+        cmd = SCHEMA_COMMANDS[space_name].encode()
+        run_command_with_output(command=cmd)
+        print(f"Schema created for {space_name} succesfully.")
+    except Exception as unexpected_error:
+        print(f"Got unexpected error when creating index for '{space_name}' Space.\n {unexpected_error}")
diff --git a/planetmint/backend/tarantool/tarantool.md b/planetmint/backend/tarantool/tarantool.md
new file mode 100644
index 0000000..1379d01
--- /dev/null
+++ b/planetmint/backend/tarantool/tarantool.md
@@ -0,0 +1,31 @@
+# How to start using planetmint with tarantool
+
+First of all you have to download [Tarantool](https://www.tarantool.io/en/download/os-installation/ubuntu/).
+
+
+## How to connect tarantool to planetmint
+
+After a successful installation you should be able to run the command ```tarantool``` from your terminal. In the Tarantool CLI you need to initialize a listening port, following this example:
+```
+box.cfg{listen=3301}
+```
+[^1].
+Afterwards, quit the Tarantool CLI and scan the port to make sure that the service was created by Tarantool.
+
+### How to init spaces and indexes of tarantool[^2].
+
+For this step you need to go to the root folder of Planetmint and run from your virtual environment:
+
+```
+python planetmint init localhost 3301 admin pass
+```
+
+### In case you want to reset Tarantool, run the command above, adding `True` at the end.
+
+
+[^1]: This is example of the port address that can be used.
+
+[^2]: Not yet working
+
+
+
diff --git a/planetmint/backend/tarantool/transaction/__init__.py b/planetmint/backend/tarantool/transaction/__init__.py
new file mode 100644
index 0000000..34bd719
--- /dev/null
+++ b/planetmint/backend/tarantool/transaction/__init__.py
@@ -0,0 +1 @@
+from planetmint.backend.tarantool.transaction import tools
diff --git a/planetmint/backend/tarantool/transaction/tools.py b/planetmint/backend/tarantool/transaction/tools.py
new file mode 100644
index 0000000..f7e96cb
--- /dev/null
+++ b/planetmint/backend/tarantool/transaction/tools.py
@@ -0,0 +1,226 @@
+import copy
+import json
+
+from secrets import token_hex
+from transactions.common.memoize import HDict
+
+
+def get_items(_list):
+ for item in _list:
+ if type(item) is dict:
+ yield item
+
+
+def _save_keys_order(dictionary):
+ filter_keys = ["asset", "metadata"]
+ if type(dictionary) is dict or type(dictionary) is HDict:
+ keys = list(dictionary.keys())
+ _map = {}
+ for key in keys:
+ _map[key] = _save_keys_order(dictionary=dictionary[key]) if key not in filter_keys else None
+
+ return _map
+ elif type(dictionary) is list:
+ _maps = []
+ for _item in get_items(_list=dictionary):
+ _map = {}
+ keys = list(_item.keys())
+ for key in keys:
+ _map[key] = _save_keys_order(dictionary=_item[key]) if key not in filter_keys else None
+ _maps.append(_map)
+ return _maps
+ else:
+ return None
+
+
+class TransactionDecompose:
+ def __init__(self, _transaction):
+ self._transaction = _transaction
+ self._tuple_transaction = {
+ "transactions": (),
+ "inputs": [],
+ "outputs": [],
+ "keys": [],
+ "script": None,
+ "metadata": None,
+ "asset": None,
+ }
+
+ def get_map(self, dictionary: dict = None):
+
+ return (
+ _save_keys_order(dictionary=dictionary)
+ if dictionary is not None
+ else _save_keys_order(dictionary=self._transaction)
+ )
+
+ def __create_hash(self, n: int):
+ return token_hex(n)
+
+ def _metadata_check(self):
+ metadata = self._transaction.get("metadata")
+ if metadata is None:
+ return
+
+ self._tuple_transaction["metadata"] = (self._transaction["id"], json.dumps(metadata))
+
+ def __asset_check(self):
+ _asset = self._transaction.get("asset")
+ if _asset is None:
+ return
+ asset_id = _asset["id"] if _asset.get("id") is not None else self._transaction["id"]
+ self._tuple_transaction["asset"] = (json.dumps(_asset), self._transaction["id"], asset_id)
+
+ def __prepare_inputs(self):
+ _inputs = []
+ input_index = 0
+ for _input in self._transaction["inputs"]:
+
+ _inputs.append(
+ (
+ self._transaction["id"],
+ _input["fulfillment"],
+ _input["owners_before"],
+ _input["fulfills"]["transaction_id"] if _input["fulfills"] is not None else "",
+ str(_input["fulfills"]["output_index"]) if _input["fulfills"] is not None else "",
+ self.__create_hash(7),
+ input_index,
+ )
+ )
+ input_index = input_index + 1
+ return _inputs
+
+ def __prepare_outputs(self):
+ _outputs = []
+ _keys = []
+ output_index = 0
+ for _output in self._transaction["outputs"]:
+ output_id = self.__create_hash(7)
+ if _output["condition"]["details"].get("subconditions") is None:
+ tmp_output = (
+ self._transaction["id"],
+ _output["amount"],
+ _output["condition"]["uri"],
+ _output["condition"]["details"]["type"],
+ _output["condition"]["details"]["public_key"],
+ output_id,
+ None,
+ None,
+ output_index,
+ )
+ else:
+ tmp_output = (
+ self._transaction["id"],
+ _output["amount"],
+ _output["condition"]["uri"],
+ _output["condition"]["details"]["type"],
+ None,
+ output_id,
+ _output["condition"]["details"]["threshold"],
+ _output["condition"]["details"]["subconditions"],
+ output_index,
+ )
+
+ _outputs.append(tmp_output)
+ output_index = output_index + 1
+ key_index = 0
+ for _key in _output["public_keys"]:
+ key_id = self.__create_hash(7)
+ _keys.append((key_id, self._transaction["id"], output_id, _key, key_index))
+ key_index = key_index + 1
+ return _keys, _outputs
+
+ def __prepare_transaction(self):
+ _map = self.get_map()
+ return (self._transaction["id"], self._transaction["operation"], self._transaction["version"], _map)
+
+ def __prepare_script(self):
+ try:
+ return (self._transaction["id"], self._transaction["script"])
+ except KeyError:
+ return None
+
+ def convert_to_tuple(self):
+ self._metadata_check()
+ self.__asset_check()
+ self._tuple_transaction["transactions"] = self.__prepare_transaction()
+ self._tuple_transaction["inputs"] = self.__prepare_inputs()
+ keys, outputs = self.__prepare_outputs()
+ self._tuple_transaction["outputs"] = outputs
+ self._tuple_transaction["keys"] = keys
+ self._tuple_transaction["script"] = self.__prepare_script()
+ return self._tuple_transaction
+
+
+class TransactionCompose:
+ def __init__(self, db_results):
+ self.db_results = db_results
+ self._map = self.db_results["transaction"][3]
+
+ def _get_transaction_operation(self):
+ return self.db_results["transaction"][1]
+
+ def _get_transaction_version(self):
+ return self.db_results["transaction"][2]
+
+ def _get_transaction_id(self):
+ return self.db_results["transaction"][0]
+
+ def _get_asset(self):
+ _asset = iter(self.db_results["asset"])
+ _res_asset = next(iter(next(_asset, iter([]))), None)
+ return json.loads(_res_asset)
+
+ def _get_metadata(self):
+ return json.loads(self.db_results["metadata"][0][1]) if len(self.db_results["metadata"]) == 1 else None
+
+ def _get_inputs(self):
+ _inputs = []
+ for _input in self.db_results["inputs"]:
+ _in = copy.deepcopy(self._map["inputs"][_input[-1]])
+ _in["fulfillment"] = _input[1]
+ if _in["fulfills"] is not None:
+ _in["fulfills"]["transaction_id"] = _input[3]
+ _in["fulfills"]["output_index"] = int(_input[4])
+ _in["owners_before"] = _input[2]
+ _inputs.append(_in)
+ return _inputs
+
+ def _get_outputs(self):
+ _outputs = []
+ for _output in self.db_results["outputs"]:
+ _out = copy.deepcopy(self._map["outputs"][_output[-1]])
+ _out["amount"] = _output[1]
+ _tmp_keys = [(_key[3], _key[4]) for _key in self.db_results["keys"] if _key[2] == _output[5]]
+ _sorted_keys = sorted(_tmp_keys, key=lambda tup: (tup[1]))
+ _out["public_keys"] = [_key[0] for _key in _sorted_keys]
+
+ _out["condition"]["uri"] = _output[2]
+ if _output[7] is None:
+ _out["condition"]["details"]["type"] = _output[3]
+ _out["condition"]["details"]["public_key"] = _output[4]
+ else:
+ _out["condition"]["details"]["subconditions"] = _output[7]
+ _out["condition"]["details"]["type"] = _output[3]
+ _out["condition"]["details"]["threshold"] = _output[6]
+ _outputs.append(_out)
+ return _outputs
+
+ def _get_script(self):
+ if self.db_results["script"]:
+ return self.db_results["script"][0][1]
+ else:
+ return None
+
+ def convert_to_dict(self):
+ transaction = {k: None for k in list(self._map.keys())}
+ transaction["id"] = self._get_transaction_id()
+ transaction["asset"] = self._get_asset()
+ transaction["metadata"] = self._get_metadata()
+ transaction["version"] = self._get_transaction_version()
+ transaction["operation"] = self._get_transaction_operation()
+ transaction["inputs"] = self._get_inputs()
+ transaction["outputs"] = self._get_outputs()
+ if self._get_script():
+ transaction["script"] = self._get_script()
+ return transaction
diff --git a/planetmint/backend/tarantool/utils.py b/planetmint/backend/tarantool/utils.py
new file mode 100644
index 0000000..d5f8fc4
--- /dev/null
+++ b/planetmint/backend/tarantool/utils.py
@@ -0,0 +1,13 @@
+import subprocess
+
+
+def run_cmd(commands: list, config: dict):
+ ret = subprocess.Popen(
+ ["%s %s:%s < %s" % ("tarantoolctl connect", "localhost", "3303", "planetmint/backend/tarantool/init.lua")],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ universal_newlines=True,
+ bufsize=0,
+ shell=True,
+ )
+ return True if ret >= 0 else False
diff --git a/planetmint/backend/utils.py b/planetmint/backend/utils.py
index 4e6138a..4b76642 100644
--- a/planetmint/backend/utils.py
+++ b/planetmint/backend/utils.py
@@ -3,8 +3,6 @@
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
-import planetmint
-
class ModuleDispatchRegistrationError(Exception):
"""Raised when there is a problem registering dispatched functions for a
@@ -21,19 +19,13 @@ def module_dispatch_registrar(module):
return dispatch_registrar.register(obj_type)(func)
except AttributeError as ex:
raise ModuleDispatchRegistrationError(
- ('`{module}` does not contain a single-dispatchable '
- 'function named `{func}`. The module being registered '
- 'was not implemented correctly!').format(
- func=func_name, module=module.__name__)) from ex
+ (
+ "`{module}` does not contain a single-dispatchable "
+ "function named `{func}`. The module being registered "
+ "was not implemented correctly!"
+ ).format(func=func_name, module=module.__name__)
+ ) from ex
return wrapper
return dispatch_wrapper
-
-
-def get_planetmint_config_value(key, default_value=None):
- return planetmint.config['database'].get(key, default_value)
-
-
-def get_planetmint_config_value_or_key_error(key):
- return planetmint.config['database'][key]
diff --git a/planetmint/commands/election_types.py b/planetmint/commands/election_types.py
index cfa58b9..b6f4282 100644
--- a/planetmint/commands/election_types.py
+++ b/planetmint/commands/election_types.py
@@ -1,31 +1,28 @@
elections = {
- 'upsert-validator': {
- 'help': 'Propose a change to the validator set',
- 'args': {
- 'public_key': {
- 'help': 'Public key of the validator to be added/updated/removed.'
+ "upsert-validator": {
+ "help": "Propose a change to the validator set",
+ "args": {
+ "public_key": {"help": "Public key of the validator to be added/updated/removed."},
+ "power": {
+ "type": int,
+ "help": "The proposed power for the validator. Setting to 0 will remove the validator.",
},
- 'power': {
- 'type': int,
- 'help': 'The proposed power for the validator. Setting to 0 will remove the validator.'},
- 'node_id': {
- 'help': 'The node_id of the validator.'
+ "node_id": {"help": "The node_id of the validator."},
+ "--private-key": {
+ "dest": "sk",
+ "required": True,
+ "help": "Path to the private key of the election initiator.",
},
- '--private-key': {
- 'dest': 'sk',
- 'required': True,
- 'help': 'Path to the private key of the election initiator.'
- }
- }
+ },
},
- 'chain-migration': {
- 'help': 'Call for a halt to block production to allow for a version change across breaking changes.',
- 'args': {
- '--private-key': {
- 'dest': 'sk',
- 'required': True,
- 'help': 'Path to the private key of the election initiator.'
+ "chain-migration": {
+ "help": "Call for a halt to block production to allow for a version change across breaking changes.",
+ "args": {
+ "--private-key": {
+ "dest": "sk",
+ "required": True,
+ "help": "Path to the private key of the election initiator.",
}
- }
- }
+ },
+ },
}
diff --git a/planetmint/commands/planetmint.py b/planetmint/commands/planetmint.py
index 9d09571..bb19050 100644
--- a/planetmint/commands/planetmint.py
+++ b/planetmint/commands/planetmint.py
@@ -10,28 +10,26 @@ the command-line interface (CLI) for Planetmint Server.
import os
import logging
import argparse
-import copy
import json
import sys
+import planetmint
from planetmint.core import rollback
-from planetmint.migrations.chain_migration_election import ChainMigrationElection
from planetmint.utils import load_node_key
-from planetmint.transactions.common.transaction_mode_types import BROADCAST_TX_COMMIT
-from planetmint.transactions.common.exceptions import (
- DatabaseDoesNotExist, ValidationError)
-from planetmint.transactions.types.elections.vote import Vote
-import planetmint
-from planetmint import (backend, ValidatorElection,
- Planetmint)
+from transactions.common.transaction_mode_types import BROADCAST_TX_COMMIT
+from transactions.common.exceptions import DatabaseDoesNotExist, ValidationError
+from transactions.types.elections.vote import Vote
+from transactions.types.elections.chain_migration_election import ChainMigrationElection
+from transactions.types.elections.validator_utils import election_id_to_public_key
+from planetmint import ValidatorElection, Planetmint
from planetmint.backend import schema
from planetmint.commands import utils
-from planetmint.commands.utils import (configure_planetmint,
- input_on_stderr)
+from planetmint.commands.utils import configure_planetmint, input_on_stderr
from planetmint.log import setup_logging
from planetmint.tendermint_utils import public_key_from_base64
from planetmint.commands.election_types import elections
from planetmint.version import __tm_supported_versions__
+from planetmint.config import Config
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
@@ -49,9 +47,9 @@ def run_show_config(args):
# TODO Proposal: remove the "hidden" configuration. Only show config. If
# the system needs to be configured, then display information on how to
# configure the system.
- config = copy.deepcopy(planetmint.config)
- del config['CONFIGURED']
- print(json.dumps(config, indent=4, sort_keys=True))
+ _config = Config().get()
+ del _config["CONFIGURED"]
+ print(json.dumps(_config, indent=4, sort_keys=True))
@configure_planetmint
@@ -61,46 +59,47 @@ def run_configure(args):
config_file_exists = False
# if the config path is `-` then it's stdout
- if config_path != '-':
+ if config_path != "-":
config_file_exists = os.path.exists(config_path)
if config_file_exists and not args.yes:
- want = input_on_stderr('Config file `{}` exists, do you want to '
- 'override it? (cannot be undone) [y/N]: '.format(config_path))
- if want != 'y':
+ want = input_on_stderr(
+ "Config file `{}` exists, do you want to " "override it? (cannot be undone) [y/N]: ".format(config_path)
+ )
+ if want != "y":
return
- conf = copy.deepcopy(planetmint.config)
-
+ Config().init_config(args.backend)
+ conf = Config().get()
# select the correct config defaults based on the backend
- print('Generating default configuration for backend {}'
- .format(args.backend), file=sys.stderr)
- database_keys = planetmint._database_keys_map[args.backend]
- conf['database'] = planetmint._database_map[args.backend]
+ print("Generating default configuration for backend {}".format(args.backend), file=sys.stderr)
+ database_keys = Config().get_db_key_map(args.backend)
if not args.yes:
- for key in ('bind', ):
- val = conf['server'][key]
- conf['server'][key] = input_on_stderr('API Server {}? (default `{}`): '.format(key, val), val)
+ for key in ("bind",):
+ val = conf["server"][key]
+ conf["server"][key] = input_on_stderr("API Server {}? (default `{}`): ".format(key, val), val)
- for key in ('scheme', 'host', 'port'):
- val = conf['wsserver'][key]
- conf['wsserver'][key] = input_on_stderr('WebSocket Server {}? (default `{}`): '.format(key, val), val)
+ for key in ("scheme", "host", "port"):
+ val = conf["wsserver"][key]
+ conf["wsserver"][key] = input_on_stderr("WebSocket Server {}? (default `{}`): ".format(key, val), val)
for key in database_keys:
- val = conf['database'][key]
- conf['database'][key] = input_on_stderr('Database {}? (default `{}`): '.format(key, val), val)
+ val = conf["database"][key]
+ conf["database"][key] = input_on_stderr("Database {}? (default `{}`): ".format(key, val), val)
- for key in ('host', 'port'):
- val = conf['tendermint'][key]
- conf['tendermint'][key] = input_on_stderr('Tendermint {}? (default `{}`)'.format(key, val), val)
+ for key in ("host", "port"):
+ val = conf["tendermint"][key]
+ conf["tendermint"][key] = input_on_stderr("Tendermint {}? (default `{}`)".format(key, val), val)
- if config_path != '-':
+ if config_path != "-":
planetmint.config_utils.write_config(conf, config_path)
else:
print(json.dumps(conf, indent=4, sort_keys=True))
- print('Configuration written to {}'.format(config_path), file=sys.stderr)
- print('Ready to go!', file=sys.stderr)
+
+ Config().set(conf)
+ print("Configuration written to {}".format(config_path), file=sys.stderr)
+ print("Ready to go!", file=sys.stderr)
@configure_planetmint
@@ -110,22 +109,20 @@ def run_election(args):
b = Planetmint()
# Call the function specified by args.action, as defined above
- globals()[f'run_election_{args.action}'](args, b)
+ globals()[f"run_election_{args.action}"](args, b)
def run_election_new(args, planet):
- election_type = args.election_type.replace('-', '_')
- globals()[f'run_election_new_{election_type}'](args, planet)
+ election_type = args.election_type.replace("-", "_")
+ globals()[f"run_election_new_{election_type}"](args, planet)
def create_new_election(sk, planet, election_class, data):
try:
key = load_node_key(sk)
- voters = election_class.recipients(planet)
- election = election_class.generate([key.public_key],
- voters,
- data, None).sign([key.private_key])
- election.validate(planet)
+ voters = planet.get_recipients_list()
+ election = election_class.generate([key.public_key], voters, data, None).sign([key.private_key])
+ planet.validate_election(election)
except ValidationError as e:
logger.error(e)
return False
@@ -134,11 +131,11 @@ def create_new_election(sk, planet, election_class, data):
return False
resp = planet.write_transaction(election, BROADCAST_TX_COMMIT)
- if resp == (202, ''):
- logger.info('[SUCCESS] Submitted proposal with id: {}'.format(election.id))
+ if resp == (202, ""):
+ logger.info("[SUCCESS] Submitted proposal with id: {}".format(election.id))
return election.id
else:
- logger.error('Failed to commit election proposal')
+ logger.error("Failed to commit election proposal")
return False
@@ -157,10 +154,9 @@ def run_election_new_upsert_validator(args, planet):
"""
new_validator = {
- 'public_key': {'value': public_key_from_base64(args.public_key),
- 'type': 'ed25519-base16'},
- 'power': args.power,
- 'node_id': args.node_id
+ "public_key": {"value": public_key_from_base64(args.public_key), "type": "ed25519-base16"},
+ "power": args.power,
+ "node_id": args.node_id,
}
return create_new_election(args.sk, planet, ValidatorElection, new_validator)
@@ -198,23 +194,21 @@ def run_election_approve(args, planet):
if len(voting_powers) > 0:
voting_power = voting_powers[0]
else:
- logger.error('The key you provided does not match any of the eligible voters in this election.')
+ logger.error("The key you provided does not match any of the eligible voters in this election.")
return False
inputs = [i for i in tx.to_inputs() if key.public_key in i.owners_before]
- election_pub_key = ValidatorElection.to_public_key(tx.id)
- approval = Vote.generate(inputs,
- [([election_pub_key], voting_power)],
- tx.id).sign([key.private_key])
- approval.validate(planet)
+ election_pub_key = election_id_to_public_key(tx.id)
+ approval = Vote.generate(inputs, [([election_pub_key], voting_power)], tx.id).sign([key.private_key])
+ planet.validate_transaction(approval)
resp = planet.write_transaction(approval, BROADCAST_TX_COMMIT)
- if resp == (202, ''):
- logger.info('[SUCCESS] Your vote has been submitted')
+ if resp == (202, ""):
+ logger.info("[SUCCESS] Your vote has been submitted")
return approval.id
else:
- logger.error('Failed to commit vote')
+ logger.error("Failed to commit vote")
return False
@@ -230,10 +224,10 @@ def run_election_show(args, planet):
election = planet.get_transaction(args.election_id)
if not election:
- logger.error(f'No election found with election_id {args.election_id}')
+ logger.error(f"No election found with election_id {args.election_id}")
return
- response = election.show_election(planet)
+ response = planet.show_election_status(election)
logger.info(response)
@@ -242,7 +236,6 @@ def run_election_show(args, planet):
def _run_init():
bdb = planetmint.Planetmint()
-
schema.init_database(connection=bdb.connection)
@@ -255,18 +248,19 @@ def run_init(args):
@configure_planetmint
def run_drop(args):
"""Drop the database"""
- dbname = planetmint.config['database']['name']
if not args.yes:
- response = input_on_stderr('Do you want to drop `{}` database? [y/n]: '.format(dbname))
- if response != 'y':
+ response = input_on_stderr("Do you want to drop `{}` database? [y/n]: ")
+ if response != "y":
return
- conn = backend.connect()
+ from planetmint.backend.connection import connect
+
+ conn = connect()
try:
- schema.drop_database(conn, dbname)
+ schema.drop_database(conn)
except DatabaseDoesNotExist:
- print("Cannot drop '{name}'. The database does not exist.".format(name=dbname), file=sys.stderr)
+ print("Drop was executed, but spaces doesn't exist.", file=sys.stderr)
def run_recover(b):
@@ -280,116 +274,104 @@ def run_start(args):
# Configure Logging
setup_logging()
- logger.info('Planetmint Version %s', planetmint.__version__)
- run_recover(planetmint.lib.Planetmint())
-
if not args.skip_initialize_database:
- logger.info('Initializing database')
+ logger.info("Initializing database")
_run_init()
- logger.info('Starting Planetmint main process.')
+ logger.info("Planetmint Version %s", planetmint.version.__version__)
+ run_recover(planetmint.lib.Planetmint())
+
+ logger.info("Starting Planetmint main process.")
from planetmint.start import start
+
start(args)
def run_tendermint_version(args):
"""Show the supported Tendermint version(s)"""
supported_tm_ver = {
- 'description': 'Planetmint supports the following Tendermint version(s)',
- 'tendermint': __tm_supported_versions__,
+ "description": "Planetmint supports the following Tendermint version(s)",
+ "tendermint": __tm_supported_versions__,
}
print(json.dumps(supported_tm_ver, indent=4, sort_keys=True))
def create_parser():
- parser = argparse.ArgumentParser(
- description='Control your Planetmint node.',
- parents=[utils.base_parser])
+ parser = argparse.ArgumentParser(description="Control your Planetmint node.", parents=[utils.base_parser])
# all the commands are contained in the subparsers object,
# the command selected by the user will be stored in `args.command`
# that is used by the `main` function to select which other
# function to call.
- subparsers = parser.add_subparsers(title='Commands',
- dest='command')
+ subparsers = parser.add_subparsers(title="Commands", dest="command")
# parser for writing a config file
- config_parser = subparsers.add_parser('configure',
- help='Prepare the config file.')
+ config_parser = subparsers.add_parser("configure", help="Prepare the config file.")
- config_parser.add_argument('backend',
- choices=['localmongodb'],
- default='localmongodb',
- const='localmongodb',
- nargs='?',
- help='The backend to use. It can only be '
- '"localmongodb", currently.')
+ config_parser.add_argument(
+ "backend",
+ choices=["tarantool_db", "localmongodb"],
+ default="tarantool_db",
+ const="tarantool_db",
+ nargs="?",
+ help="The backend to use. It can only be " '"tarantool_db", currently.',
+ )
# parser for managing elections
- election_parser = subparsers.add_parser('election',
- help='Manage elections.')
+ election_parser = subparsers.add_parser("election", help="Manage elections.")
- election_subparser = election_parser.add_subparsers(title='Action',
- dest='action')
+ election_subparser = election_parser.add_subparsers(title="Action", dest="action")
- new_election_parser = election_subparser.add_parser('new',
- help='Calls a new election.')
+ new_election_parser = election_subparser.add_parser("new", help="Calls a new election.")
- new_election_subparser = new_election_parser.add_subparsers(title='Election_Type',
- dest='election_type')
+ new_election_subparser = new_election_parser.add_subparsers(title="Election_Type", dest="election_type")
# Parser factory for each type of new election, so we get a bunch of commands that look like this:
# election new ...
for name, data in elections.items():
- args = data['args']
- generic_parser = new_election_subparser.add_parser(name, help=data['help'])
+ args = data["args"]
+ generic_parser = new_election_subparser.add_parser(name, help=data["help"])
for arg, kwargs in args.items():
generic_parser.add_argument(arg, **kwargs)
- approve_election_parser = election_subparser.add_parser('approve',
- help='Approve the election.')
- approve_election_parser.add_argument('election_id',
- help='The election_id of the election.')
- approve_election_parser.add_argument('--private-key',
- dest='sk',
- required=True,
- help='Path to the private key of the election initiator.')
+ approve_election_parser = election_subparser.add_parser("approve", help="Approve the election.")
+ approve_election_parser.add_argument("election_id", help="The election_id of the election.")
+ approve_election_parser.add_argument(
+ "--private-key", dest="sk", required=True, help="Path to the private key of the election initiator."
+ )
- show_election_parser = election_subparser.add_parser('show',
- help='Provides information about an election.')
+ show_election_parser = election_subparser.add_parser("show", help="Provides information about an election.")
- show_election_parser.add_argument('election_id',
- help='The transaction id of the election you wish to query.')
+ show_election_parser.add_argument("election_id", help="The transaction id of the election you wish to query.")
# parsers for showing/exporting config values
- subparsers.add_parser('show-config',
- help='Show the current configuration')
+ subparsers.add_parser("show-config", help="Show the current configuration")
# parser for database-level commands
- subparsers.add_parser('init',
- help='Init the database')
+ subparsers.add_parser("init", help="Init the database")
- subparsers.add_parser('drop',
- help='Drop the database')
+ subparsers.add_parser("drop", help="Drop the database")
# parser for starting Planetmint
- start_parser = subparsers.add_parser('start',
- help='Start Planetmint')
+ start_parser = subparsers.add_parser("start", help="Start Planetmint")
- start_parser.add_argument('--no-init',
- dest='skip_initialize_database',
- default=False,
- action='store_true',
- help='Skip database initialization')
+ start_parser.add_argument(
+ "--no-init",
+ dest="skip_initialize_database",
+ default=False,
+ action="store_true",
+ help="Skip database initialization",
+ )
- subparsers.add_parser('tendermint-version',
- help='Show the Tendermint supported versions')
+ subparsers.add_parser("tendermint-version", help="Show the Tendermint supported versions")
- start_parser.add_argument('--experimental-parallel-validation',
- dest='experimental_parallel_validation',
- default=False,
- action='store_true',
- help='💀 EXPERIMENTAL: parallelize validation for better throughput 💀')
+ start_parser.add_argument(
+ "--experimental-parallel-validation",
+ dest="experimental_parallel_validation",
+ default=False,
+ action="store_true",
+ help="💀 EXPERIMENTAL: parallelize validation for better throughput 💀",
+ )
return parser
diff --git a/planetmint/commands/utils.py b/planetmint/commands/utils.py
index 5c8a105..6c9a9b7 100644
--- a/planetmint/commands/utils.py
+++ b/planetmint/commands/utils.py
@@ -12,9 +12,9 @@ import builtins
import functools
import multiprocessing as mp
import sys
-
import planetmint
import planetmint.config_utils
+
from planetmint.version import __version__
@@ -30,22 +30,22 @@ def configure_planetmint(command):
The command wrapper function.
"""
+
@functools.wraps(command)
def configure(args):
config_from_cmdline = None
try:
if args.log_level is not None:
config_from_cmdline = {
- 'log': {
- 'level_console': args.log_level,
- 'level_logfile': args.log_level,
+ "log": {
+ "level_console": args.log_level,
+ "level_logfile": args.log_level,
},
- 'server': {'loglevel': args.log_level},
+ "server": {"loglevel": args.log_level},
}
except AttributeError:
pass
- planetmint.config_utils.autoconfigure(
- filename=args.config, config=config_from_cmdline, force=True)
+ planetmint.config_utils.autoconfigure(filename=args.config, config=config_from_cmdline, force=True)
command(args)
return configure
@@ -53,13 +53,13 @@ def configure_planetmint(command):
def _convert(value, default=None, convert=None):
def convert_bool(value):
- if value.lower() in ('true', 't', 'yes', 'y'):
+ if value.lower() in ("true", "t", "yes", "y"):
return True
- if value.lower() in ('false', 'f', 'no', 'n'):
+ if value.lower() in ("false", "f", "no", "n"):
return False
- raise ValueError('{} cannot be converted to bool'.format(value))
+ raise ValueError("{} cannot be converted to bool".format(value))
- if value == '':
+ if value == "":
value = None
if convert is None:
@@ -80,7 +80,7 @@ def _convert(value, default=None, convert=None):
# We need this because `input` always prints on stdout, while it should print
# to stderr. It's a very old bug, check it out here:
# - https://bugs.python.org/issue1927
-def input_on_stderr(prompt='', default=None, convert=None):
+def input_on_stderr(prompt="", default=None, convert=None):
"""Output a string to stderr and wait for input.
Args:
@@ -92,7 +92,7 @@ def input_on_stderr(prompt='', default=None, convert=None):
``default`` will be used.
"""
- print(prompt, end='', file=sys.stderr)
+ print(prompt, end="", file=sys.stderr)
value = builtins.input()
return _convert(value, default, convert)
@@ -121,14 +121,13 @@ def start(parser, argv, scope):
# look up in the current scope for a function called 'run_'
# replacing all the dashes '-' with the lowercase character '_'
- func = scope.get('run_' + args.command.replace('-', '_'))
+ func = scope.get("run_" + args.command.replace("-", "_"))
# if no command has been found, raise a `NotImplementedError`
if not func:
- raise NotImplementedError('Command `{}` not yet implemented'.
- format(args.command))
+ raise NotImplementedError("Command `{}` not yet implemented".format(args.command))
- args.multiprocess = getattr(args, 'multiprocess', False)
+ args.multiprocess = getattr(args, "multiprocess", False)
if args.multiprocess is False:
args.multiprocess = 1
@@ -138,24 +137,28 @@ def start(parser, argv, scope):
return func(args)
-base_parser = argparse.ArgumentParser(add_help=False, prog='planetmint')
+base_parser = argparse.ArgumentParser(add_help=False, prog="planetmint")
-base_parser.add_argument('-c', '--config',
- help='Specify the location of the configuration file '
- '(use "-" for stdout)')
+base_parser.add_argument(
+ "-c", "--config", help="Specify the location of the configuration file " '(use "-" for stdout)'
+)
# NOTE: this flag should not have any default value because that will override
# the environment variables provided to configure the logger.
-base_parser.add_argument('-l', '--log-level',
- type=str.upper, # convert to uppercase for comparison to choices
- choices=['DEBUG', 'BENCHMARK', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
- help='Log level')
+base_parser.add_argument(
+ "-l",
+ "--log-level",
+ type=str.upper, # convert to uppercase for comparison to choices
+ choices=["DEBUG", "BENCHMARK", "INFO", "WARNING", "ERROR", "CRITICAL"],
+ help="Log level",
+)
-base_parser.add_argument('-y', '--yes', '--yes-please',
- action='store_true',
- help='Assume "yes" as answer to all prompts and run '
- 'non-interactively')
+base_parser.add_argument(
+ "-y",
+ "--yes",
+ "--yes-please",
+ action="store_true",
+ help='Assume "yes" as answer to all prompts and run ' "non-interactively",
+)
-base_parser.add_argument('-v', '--version',
- action='version',
- version='%(prog)s {}'.format(__version__))
+base_parser.add_argument("-v", "--version", action="version", version="%(prog)s {}".format(__version__))
diff --git a/planetmint/config.py b/planetmint/config.py
new file mode 100644
index 0000000..f9c89e2
--- /dev/null
+++ b/planetmint/config.py
@@ -0,0 +1,186 @@
+import copy
+import logging
+import os
+
+# from planetmint.log import DEFAULT_LOGGING_CONFIG as log_config
+from planetmint.version import __version__ # noqa
+
+
+class Singleton(type):
+ _instances = {}
+
+ def __call__(cls, *args, **kwargs):
+ if cls not in cls._instances:
+ cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
+ return cls._instances[cls]
+
+
+class Config(metaclass=Singleton):
+ def __init__(self):
+ # from functools import reduce
+ # PORT_NUMBER = reduce(lambda x, y: x * y, map(ord, 'Planetmint')) % 2**16
+ # basically, the port number is 9984
+
+ # The following variable is used by `planetmint configure` to
+ # prompt the user for database values. We cannot rely on
+ # _base_database_localmongodb.keys() because dicts are unordered.
+ # I tried to configure
+ self.log_config = DEFAULT_LOGGING_CONFIG
+ db = "tarantool_db"
+ self.__private_database_keys_map = { # TODO Check if it is working after removing 'name' field
+ "tarantool_db": ("host", "port"),
+ "localmongodb": ("host", "port", "name"),
+ }
+ self.__private_database_localmongodb = {
+ "backend": "localmongodb",
+ "host": "localhost",
+ "port": 27017,
+ "name": "bigchain",
+ "replicaset": None,
+ "login": None,
+ "password": None,
+ "connection_timeout": 5000,
+ "max_tries": 3,
+ "ssl": False,
+ "ca_cert": None,
+ "certfile": None,
+ "keyfile": None,
+ "keyfile_passphrase": None,
+ "crlfile": None,
+ }
+ self.__private_init_config = {
+ "absolute_path": os.path.dirname(os.path.abspath(__file__)) + "/backend/tarantool/init.lua"
+ }
+
+ self.__private_drop_config = {
+ "absolute_path": os.path.dirname(os.path.abspath(__file__)) + "/backend/tarantool/drop.lua"
+ }
+ self.__private_database_tarantool = {
+ "backend": "tarantool_db",
+ "connection_timeout": 5000,
+ "max_tries": 3,
+ "name": "universe",
+ "reconnect_delay": 0.5,
+ "host": "localhost",
+ "port": 3303,
+ "connect_now": True,
+ "encoding": "utf-8",
+ "login": "guest",
+ "password": "",
+ "service": "tarantoolctl connect",
+ "init_config": self.__private_init_config,
+ "drop_config": self.__private_drop_config,
+ }
+
+ self.__private_database_map = {
+ "tarantool_db": self.__private_database_tarantool,
+ "localmongodb": self.__private_database_localmongodb,
+ }
+ self.__private_config = {
+ "server": {
+ # Note: this section supports all the Gunicorn settings:
+ # - http://docs.gunicorn.org/en/stable/settings.html
+ "bind": "localhost:9984",
+ "loglevel": logging.getLevelName(self.log_config["handlers"]["console"]["level"]).lower(),
+ "workers": None, # if None, the value will be cpu_count * 2 + 1
+ },
+ "wsserver": {
+ "scheme": "ws",
+ "host": "localhost",
+ "port": 9985,
+ "advertised_scheme": "ws",
+ "advertised_host": "localhost",
+ "advertised_port": 9985,
+ },
+ "tendermint": {
+ "host": "localhost",
+ "port": 26657,
+ "version": "v0.31.5", # look for __tm_supported_versions__
+ },
+ "database": self.__private_database_map,
+ "log": {
+ "file": self.log_config["handlers"]["file"]["filename"],
+ "error_file": self.log_config["handlers"]["errors"]["filename"],
+ "level_console": logging.getLevelName(self.log_config["handlers"]["console"]["level"]).lower(),
+ "level_logfile": logging.getLevelName(self.log_config["handlers"]["file"]["level"]).lower(),
+ "datefmt_console": self.log_config["formatters"]["console"]["datefmt"],
+ "datefmt_logfile": self.log_config["formatters"]["file"]["datefmt"],
+ "fmt_console": self.log_config["formatters"]["console"]["format"],
+ "fmt_logfile": self.log_config["formatters"]["file"]["format"],
+ "granular_levels": {},
+ },
+ }
+ self._private_real_config = copy.deepcopy(self.__private_config)
+ # select the correct config defaults based on the backend
+ self._private_real_config["database"] = self.__private_database_map[db]
+
+ def init_config(self, db):
+ self._private_real_config = copy.deepcopy(self.__private_config)
+ # select the correct config defaults based on the backend
+ self._private_real_config["database"] = self.__private_database_map[db]
+ return self._private_real_config
+
+ def get(self):
+ return self._private_real_config
+
+ def set(self, config):
+ self._private_real_config = config
+
+ def get_db_key_map(self, db):
+ return self.__private_database_keys_map[db]
+
+ def get_db_map(self, db):
+ return self.__private_database_map[db]
+
+
+DEFAULT_LOG_DIR = os.getcwd()
+DEFAULT_LOGGING_CONFIG = {
+ "version": 1,
+ "disable_existing_loggers": False,
+ "formatters": {
+ "console": {
+ "class": "logging.Formatter",
+ "format": (
+ "[%(asctime)s] [%(levelname)s] (%(name)s) " "%(message)s (%(processName)-10s - pid: %(process)d)"
+ ),
+ "datefmt": "%Y-%m-%d %H:%M:%S",
+ },
+ "file": {
+ "class": "logging.Formatter",
+ "format": (
+ "[%(asctime)s] [%(levelname)s] (%(name)s) " "%(message)s (%(processName)-10s - pid: %(process)d)"
+ ),
+ "datefmt": "%Y-%m-%d %H:%M:%S",
+ },
+ },
+ "handlers": {
+ "console": {
+ "class": "logging.StreamHandler",
+ "formatter": "console",
+ "level": logging.INFO,
+ },
+ "file": {
+ "class": "logging.handlers.RotatingFileHandler",
+ "filename": os.path.join(DEFAULT_LOG_DIR, "planetmint.log"),
+ "mode": "w",
+ "maxBytes": 209715200,
+ "backupCount": 5,
+ "formatter": "file",
+ "level": logging.INFO,
+ },
+ "errors": {
+ "class": "logging.handlers.RotatingFileHandler",
+ "filename": os.path.join(DEFAULT_LOG_DIR, "planetmint-errors.log"),
+ "mode": "w",
+ "maxBytes": 209715200,
+ "backupCount": 5,
+ "formatter": "file",
+ "level": logging.ERROR,
+ },
+ },
+ "loggers": {},
+ "root": {
+ "level": logging.DEBUG,
+ "handlers": ["console", "file", "errors"],
+ },
+}
diff --git a/planetmint/config_utils.py b/planetmint/config_utils.py
index fa5d94d..465c882 100644
--- a/planetmint/config_utils.py
+++ b/planetmint/config_utils.py
@@ -21,27 +21,24 @@ import copy
import json
import logging
import collections.abc
+
from functools import lru_cache
-
from pkg_resources import iter_entry_points, ResolutionError
-
-from planetmint.transactions.common import exceptions
-
-import planetmint
-
+from planetmint.config import Config
+from transactions.common import exceptions
from planetmint.validation import BaseValidationRules
# TODO: move this to a proper configuration file for logging
-logging.getLogger('requests').setLevel(logging.WARNING)
+logging.getLogger("requests").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
CONFIG_DEFAULT_PATH = os.environ.setdefault(
- 'PLANETMINT_CONFIG_PATH',
- os.path.join(os.path.expanduser('~'), '.planetmint'),
+ "PLANETMINT_CONFIG_PATH",
+ os.path.join(os.path.expanduser("~"), ".planetmint"),
)
-CONFIG_PREFIX = 'PLANETMINT'
-CONFIG_SEP = '_'
+CONFIG_PREFIX = "PLANETMINT"
+CONFIG_SEP = "_"
def map_leafs(func, mapping):
@@ -99,21 +96,21 @@ def file_config(filename=None):
dict: The config values in the specified config file (or the
file at CONFIG_DEFAULT_PATH, if filename == None)
"""
- logger.debug('On entry into file_config(), filename = {}'.format(filename))
+ logger.debug("On entry into file_config(), filename = {}".format(filename))
if filename is None:
filename = CONFIG_DEFAULT_PATH
- logger.debug('file_config() will try to open `{}`'.format(filename))
+ logger.debug("file_config() will try to open `{}`".format(filename))
with open(filename) as f:
try:
config = json.load(f)
except ValueError as err:
raise exceptions.ConfigurationError(
- 'Failed to parse the JSON configuration from `{}`, {}'.format(filename, err)
+ "Failed to parse the JSON configuration from `{}`, {}".format(filename, err)
)
- logger.info('Configuration loaded from `{}`'.format(filename))
+ logger.info("Configuration loaded from `{}`".format(filename))
return config
@@ -139,7 +136,7 @@ def env_config(config):
return map_leafs(load_from_env, config)
-def update_types(config, reference, list_sep=':'):
+def update_types(config, reference, list_sep=":"):
"""Return a new configuration where all the values types
are aligned with the ones in the default configuration
"""
@@ -192,10 +189,11 @@ def set_config(config):
Any previous changes made to ``planetmint.config`` will be lost.
"""
# Deep copy the default config into planetmint.config
- planetmint.config = copy.deepcopy(planetmint._config)
+ _config = Config().get()
# Update the default config with whatever is in the passed config
- update(planetmint.config, update_types(config, planetmint.config))
- planetmint.config['CONFIGURED'] = True
+ update(_config, update_types(config, _config))
+ _config["CONFIGURED"] = True
+ Config().set(_config)
def update_config(config):
@@ -207,9 +205,11 @@ def update_config(config):
to the default config
"""
+ _config = Config().get()
# Update the default config with whatever is in the passed config
- update(planetmint.config, update_types(config, planetmint.config))
- planetmint.config['CONFIGURED'] = True
+ update(_config, update_types(config, _config))
+ _config["CONFIGURED"] = True
+ Config().set(_config)
def write_config(config, filename=None):
@@ -223,12 +223,12 @@ def write_config(config, filename=None):
if not filename:
filename = CONFIG_DEFAULT_PATH
- with open(filename, 'w') as f:
+ with open(filename, "w") as f:
json.dump(config, f, indent=4)
def is_configured():
- return bool(planetmint.config.get('CONFIGURED'))
+ return bool(Config().get().get("CONFIGURED"))
def autoconfigure(filename=None, config=None, force=False):
@@ -236,11 +236,11 @@ def autoconfigure(filename=None, config=None, force=False):
been initialized.
"""
if not force and is_configured():
- logger.debug('System already configured, skipping autoconfiguration')
+ logger.debug("System already configured, skipping autoconfiguration")
return
# start with the current configuration
- newconfig = planetmint.config
+ newconfig = Config().get()
# update configuration from file
try:
@@ -249,7 +249,7 @@ def autoconfigure(filename=None, config=None, force=False):
if filename:
raise
else:
- logger.info('Cannot find config file `%s`.' % e.filename)
+ logger.info("Cannot find config file `%s`." % e.filename)
# override configuration with env variables
newconfig = env_config(newconfig)
@@ -277,20 +277,20 @@ def load_validation_plugin(name=None):
# We should probably support Requirements specs in the config, e.g.
# validation_plugin: 'my-plugin-package==0.0.1;default'
plugin = None
- for entry_point in iter_entry_points('planetmint.validation', name):
+ for entry_point in iter_entry_points("planetmint.validation", name):
plugin = entry_point.load()
# No matching entry_point found
if not plugin:
- raise ResolutionError(
- 'No plugin found in group `planetmint.validation` with name `{}`'.
- format(name))
+ raise ResolutionError("No plugin found in group `planetmint.validation` with name `{}`".format(name))
# Is this strictness desireable?
# It will probably reduce developer headaches in the wild.
if not issubclass(plugin, (BaseValidationRules,)):
- raise TypeError('object of type "{}" does not implement `planetmint.'
- 'validation.BaseValidationRules`'.format(type(plugin)))
+ raise TypeError(
+ 'object of type "{}" does not implement `planetmint.'
+ "validation.BaseValidationRules`".format(type(plugin))
+ )
return plugin
@@ -302,7 +302,7 @@ def load_events_plugins(names=None):
return plugins
for name in names:
- for entry_point in iter_entry_points('planetmint.events', name):
+ for entry_point in iter_entry_points("planetmint.events", name):
plugins.append((name, entry_point.load()))
return plugins
diff --git a/planetmint/core.py b/planetmint/core.py
index 43c13f4..5f3496c 100644
--- a/planetmint/core.py
+++ b/planetmint/core.py
@@ -8,6 +8,7 @@ with Tendermint.
"""
import logging
import sys
+
from tendermint.abci import types_pb2
from abci.application import BaseApplication
from abci.application import OkCode
@@ -18,14 +19,11 @@ from tendermint.abci.types_pb2 import (
ResponseDeliverTx,
ResponseBeginBlock,
ResponseEndBlock,
- ResponseCommit
+ ResponseCommit,
)
from planetmint import Planetmint
-from planetmint.transactions.types.elections.election import Election
-from planetmint.tendermint_utils import (decode_transaction,
- calculate_hash)
+from planetmint.tendermint_utils import decode_transaction, calculate_hash, decode_validator
from planetmint.lib import Block
-import planetmint.upsert_validator.validator_utils as vutils
from planetmint.events import EventTypes, Event
@@ -42,40 +40,41 @@ class App(BaseApplication):
def __init__(self, planetmint_node=None, events_queue=None):
# super().__init__(abci)
- logger.debug('Checking values of types')
+ logger.debug("Checking values of types")
logger.debug(dir(types_pb2))
self.events_queue = events_queue
self.planetmint_node = planetmint_node or Planetmint()
self.block_txn_ids = []
- self.block_txn_hash = ''
+ self.block_txn_hash = ""
self.block_transactions = []
self.validators = None
self.new_height = None
self.chain = self.planetmint_node.get_latest_abci_chain()
def log_abci_migration_error(self, chain_id, validators):
- logger.error('An ABCI chain migration is in process. '
- 'Download theself.planetmint_node.get_latest_abci_chain new ABCI client and configure it with '
- f'chain_id={chain_id} and validators={validators}.')
+ logger.error(
+ "An ABCI chain migration is in process. "
+ "Download the new ABCI client and configure it with "
+ f"chain_id={chain_id} and validators={validators}."
+ )
def abort_if_abci_chain_is_not_synced(self):
- if self.chain is None or self.chain['is_synced']:
+ if self.chain is None or self.chain["is_synced"]:
return
validators = self.planetmint_node.get_validators()
- self.log_abci_migration_error(self.chain['chain_id'], validators)
+ self.log_abci_migration_error(self.chain["chain_id"], validators)
sys.exit(1)
def init_chain(self, genesis):
"""Initialize chain upon genesis or a migration"""
- app_hash = ''
+ app_hash = ""
height = 0
known_chain = self.planetmint_node.get_latest_abci_chain()
if known_chain is not None:
- chain_id = known_chain['chain_id']
+ chain_id = known_chain["chain_id"]
- if known_chain['is_synced']:
- msg = (f'Got invalid InitChain ABCI request ({genesis}) - '
- f'the chain {chain_id} is already synced.')
+ if known_chain["is_synced"]:
+ msg = f"Got invalid InitChain ABCI request ({genesis}) - " f"the chain {chain_id} is already synced."
logger.error(msg)
sys.exit(1)
if chain_id != genesis.chain_id:
@@ -84,22 +83,19 @@ class App(BaseApplication):
sys.exit(1)
# set migration values for app hash and height
block = self.planetmint_node.get_latest_block()
- app_hash = '' if block is None else block['app_hash']
- height = 0 if block is None else block['height'] + 1
+ app_hash = "" if block is None else block["app_hash"]
+ height = 0 if block is None else block["height"] + 1
known_validators = self.planetmint_node.get_validators()
- validator_set = [vutils.decode_validator(v)
- for v in genesis.validators]
+ validator_set = [decode_validator(v) for v in genesis.validators]
if known_validators and known_validators != validator_set:
- self.log_abci_migration_error(known_chain['chain_id'],
- known_validators)
+ self.log_abci_migration_error(known_chain["chain_id"], known_validators)
sys.exit(1)
block = Block(app_hash=app_hash, height=height, transactions=[])
self.planetmint_node.store_block(block._asdict())
self.planetmint_node.store_validator_set(height + 1, validator_set)
- abci_chain_height = 0 if known_chain is None else known_chain['height']
+ abci_chain_height = 0 if known_chain is None else known_chain["height"]
self.planetmint_node.store_abci_chain(abci_chain_height, genesis.chain_id, True)
- self.chain = {'height': abci_chain_height, 'is_synced': True,
- 'chain_id': genesis.chain_id}
+ self.chain = {"height": abci_chain_height, "is_synced": True, "chain_id": genesis.chain_id}
return ResponseInitChain()
def info(self, request):
@@ -118,12 +114,12 @@ class App(BaseApplication):
r = ResponseInfo()
block = self.planetmint_node.get_latest_block()
if block:
- chain_shift = 0 if self.chain is None else self.chain['height']
- r.last_block_height = block['height'] - chain_shift
- r.last_block_app_hash = block['app_hash'].encode('utf-8')
+ chain_shift = 0 if self.chain is None else self.chain["height"]
+ r.last_block_height = block["height"] - chain_shift
+ r.last_block_app_hash = block["app_hash"].encode("utf-8")
else:
r.last_block_height = 0
- r.last_block_app_hash = b''
+ r.last_block_app_hash = b""
return r
def check_tx(self, raw_transaction):
@@ -136,13 +132,13 @@ class App(BaseApplication):
self.abort_if_abci_chain_is_not_synced()
- logger.debug('check_tx: %s', raw_transaction)
+ logger.debug("check_tx: %s", raw_transaction)
transaction = decode_transaction(raw_transaction)
if self.planetmint_node.is_valid_transaction(transaction):
- logger.debug('check_tx: VALID')
+ logger.debug("check_tx: VALID")
return ResponseCheckTx(code=OkCode)
else:
- logger.debug('check_tx: INVALID')
+ logger.debug("check_tx: INVALID")
return ResponseCheckTx(code=CodeTypeError)
def begin_block(self, req_begin_block):
@@ -153,10 +149,9 @@ class App(BaseApplication):
"""
self.abort_if_abci_chain_is_not_synced()
- chain_shift = 0 if self.chain is None else self.chain['height']
+ chain_shift = 0 if self.chain is None else self.chain["height"]
# req_begin_block.header.num_txs not found, so removing it.
- logger.debug('BEGIN BLOCK, height:%s',
- req_begin_block.header.height + chain_shift)
+ logger.debug("BEGIN BLOCK, height:%s", req_begin_block.header.height + chain_shift)
self.block_txn_ids = []
self.block_transactions = []
@@ -171,15 +166,16 @@ class App(BaseApplication):
self.abort_if_abci_chain_is_not_synced()
- logger.debug('deliver_tx: %s', raw_transaction)
+ logger.debug("deliver_tx: %s", raw_transaction)
transaction = self.planetmint_node.is_valid_transaction(
- decode_transaction(raw_transaction), self.block_transactions)
+ decode_transaction(raw_transaction), self.block_transactions
+ )
if not transaction:
- logger.debug('deliver_tx: INVALID')
+ logger.debug("deliver_tx: INVALID")
return ResponseDeliverTx(code=CodeTypeError)
else:
- logger.debug('storing tx')
+ logger.debug("storing tx")
self.block_txn_ids.append(transaction.id)
self.block_transactions.append(transaction)
return ResponseDeliverTx(code=OkCode)
@@ -194,29 +190,25 @@ class App(BaseApplication):
self.abort_if_abci_chain_is_not_synced()
- chain_shift = 0 if self.chain is None else self.chain['height']
-
+ chain_shift = 0 if self.chain is None else self.chain["height"]
height = request_end_block.height + chain_shift
self.new_height = height
# store pre-commit state to recover in case there is a crash during
# `end_block` or `commit`
- logger.debug(f'Updating pre-commit state: {self.new_height}')
- pre_commit_state = dict(height=self.new_height,
- transactions=self.block_txn_ids)
+ logger.debug(f"Updating pre-commit state: {self.new_height}")
+ pre_commit_state = dict(height=self.new_height, transactions=self.block_txn_ids)
self.planetmint_node.store_pre_commit_state(pre_commit_state)
block_txn_hash = calculate_hash(self.block_txn_ids)
block = self.planetmint_node.get_latest_block()
if self.block_txn_ids:
- self.block_txn_hash = calculate_hash([block['app_hash'], block_txn_hash])
+ self.block_txn_hash = calculate_hash([block["app_hash"], block_txn_hash])
else:
- self.block_txn_hash = block['app_hash']
+ self.block_txn_hash = block["app_hash"]
- validator_update = Election.process_block(self.planetmint_node,
- self.new_height,
- self.block_transactions)
+ validator_update = self.planetmint_node.process_block(self.new_height, self.block_transactions)
return ResponseEndBlock(validator_updates=validator_update)
@@ -225,46 +217,52 @@ class App(BaseApplication):
self.abort_if_abci_chain_is_not_synced()
- data = self.block_txn_hash.encode('utf-8')
+ data = self.block_txn_hash.encode("utf-8")
# register a new block only when new transactions are received
if self.block_txn_ids:
self.planetmint_node.store_bulk_transactions(self.block_transactions)
- block = Block(app_hash=self.block_txn_hash,
- height=self.new_height,
- transactions=self.block_txn_ids)
+ block = Block(app_hash=self.block_txn_hash, height=self.new_height, transactions=self.block_txn_ids)
# NOTE: storing the block should be the last operation during commit
# this effects crash recovery. Refer BEP#8 for details
self.planetmint_node.store_block(block._asdict())
- logger.debug('Commit-ing new block with hash: apphash=%s ,'
- 'height=%s, txn ids=%s', data, self.new_height,
- self.block_txn_ids)
+ logger.debug(
+ "Commit-ing new block with hash: apphash=%s ," "height=%s, txn ids=%s",
+ data,
+ self.new_height,
+ self.block_txn_ids,
+ )
if self.events_queue:
- event = Event(EventTypes.BLOCK_VALID, {
- 'height': self.new_height,
- 'transactions': self.block_transactions
- })
+ event = Event(
+ EventTypes.BLOCK_VALID,
+ {"height": self.new_height, "hash": self.block_txn_hash, "transactions": self.block_transactions},
+ )
self.events_queue.put(event)
return ResponseCommit(data=data)
-def rollback(b):
- pre_commit = b.get_pre_commit_state()
+def rollback(planetmint):
+ pre_commit = None
- if pre_commit is None:
+ try:
+ pre_commit = planetmint.get_pre_commit_state()
+ except Exception as e:
+ logger.exception("Unexpected error occurred while executing get_pre_commit_state()")
+
+ if pre_commit is None or len(pre_commit) == 0:
# the pre_commit record is first stored in the first `end_block`
return
- latest_block = b.get_latest_block()
+ latest_block = planetmint.get_latest_block()
if latest_block is None:
- logger.error('Found precommit state but no blocks!')
+ logger.error("Found precommit state but no blocks!")
sys.exit(1)
# NOTE: the pre-commit state is always at most 1 block ahead of the commited state
- if latest_block['height'] < pre_commit['height']:
- Election.rollback(b, pre_commit['height'], pre_commit['transactions'])
- b.delete_transactions(pre_commit['transactions'])
+ if latest_block["height"] < pre_commit["height"]:
+ planetmint.rollback_election(pre_commit["height"], pre_commit["transactions"])
+ planetmint.delete_transactions(pre_commit["transactions"])
diff --git a/planetmint/events.py b/planetmint/events.py
index b702b4a..6157138 100644
--- a/planetmint/events.py
+++ b/planetmint/events.py
@@ -8,7 +8,7 @@ from collections import defaultdict
from multiprocessing import Queue
-POISON_PILL = 'POISON_PILL'
+POISON_PILL = "POISON_PILL"
class EventTypes:
@@ -73,7 +73,7 @@ class Exchange:
try:
self.started_queue.get(timeout=1)
- raise RuntimeError('Cannot create a new subscriber queue while Exchange is running.')
+ raise RuntimeError("Cannot create a new subscriber queue while Exchange is running.")
except Empty:
pass
@@ -99,7 +99,7 @@ class Exchange:
def run(self):
"""Start the exchange"""
- self.started_queue.put('STARTED')
+ self.started_queue.put("STARTED")
while True:
event = self.publisher_queue.get()
diff --git a/planetmint/exceptions.py b/planetmint/exceptions.py
index 9e12b7c..624f1e9 100644
--- a/planetmint/exceptions.py
+++ b/planetmint/exceptions.py
@@ -4,9 +4,9 @@
# Code is Apache-2.0 and docs are CC-BY-4.0
-class BigchainDBError(Exception):
+class PlanetmintError(Exception):
"""Base class for Planetmint exceptions."""
-class CriticalDoubleSpend(BigchainDBError):
+class CriticalDoubleSpend(PlanetmintError):
"""Data integrity error that requires attention"""
diff --git a/planetmint/fastquery.py b/planetmint/fastquery.py
index bfbb6a8..7832a5c 100644
--- a/planetmint/fastquery.py
+++ b/planetmint/fastquery.py
@@ -5,10 +5,10 @@
from planetmint.utils import condition_details_has_owner
from planetmint.backend import query
-from planetmint.transactions.common.transaction import TransactionLink
+from transactions.common.transaction import TransactionLink
-class FastQuery():
+class FastQuery:
"""Database queries that join on block results from a single node."""
def __init__(self, connection):
@@ -17,11 +17,12 @@ class FastQuery():
def get_outputs_by_public_key(self, public_key):
"""Get outputs for a public key"""
txs = list(query.get_owned_ids(self.connection, public_key))
- return [TransactionLink(tx['id'], index)
- for tx in txs
- for index, output in enumerate(tx['outputs'])
- if condition_details_has_owner(output['condition']['details'],
- public_key)]
+ return [
+ TransactionLink(tx["id"], index)
+ for tx in txs
+ for index, output in enumerate(tx["outputs"])
+ if condition_details_has_owner(output["condition"]["details"], public_key)
+ ]
def filter_spent_outputs(self, outputs):
"""Remove outputs that have been spent
@@ -31,9 +32,7 @@ class FastQuery():
"""
links = [o.to_dict() for o in outputs]
txs = list(query.get_spending_transactions(self.connection, links))
- spends = {TransactionLink.from_dict(input_['fulfills'])
- for tx in txs
- for input_ in tx['inputs']}
+ spends = {TransactionLink.from_dict(input_["fulfills"]) for tx in txs for input_ in tx["inputs"]}
return [ff for ff in outputs if ff not in spends]
def filter_unspent_outputs(self, outputs):
@@ -44,7 +43,5 @@ class FastQuery():
"""
links = [o.to_dict() for o in outputs]
txs = list(query.get_spending_transactions(self.connection, links))
- spends = {TransactionLink.from_dict(input_['fulfills'])
- for tx in txs
- for input_ in tx['inputs']}
+ spends = {TransactionLink.from_dict(input_["fulfills"]) for tx in txs for input_ in tx["inputs"]}
return [ff for ff in outputs if ff in spends]
diff --git a/planetmint/lib.py b/planetmint/lib.py
index 2f63918..feb9e77 100644
--- a/planetmint/lib.py
+++ b/planetmint/lib.py
@@ -8,32 +8,50 @@ MongoDB.
"""
import logging
-from collections import namedtuple
-from uuid import uuid4
-
+import json
import rapidjson
-
-try:
- from hashlib import sha3_256
-except ImportError:
- # NOTE: needed for Python < 3.6
- from sha3 import sha3_256
-
import requests
-
import planetmint
+
+from collections import namedtuple, OrderedDict
+from uuid import uuid4
+from hashlib import sha3_256
+from transactions import Transaction, Vote
+from transactions.common.crypto import public_key_from_ed25519_key
+from transactions.common.exceptions import (
+ SchemaValidationError,
+ ValidationError,
+ DuplicateTransaction,
+ InvalidSignature,
+ DoubleSpend,
+ InputDoesNotExist,
+ AssetIdMismatch,
+ AmountError,
+ MultipleInputsError,
+ InvalidProposer,
+ UnequalValidatorSet,
+ InvalidPowerChange,
+)
+from transactions.common.transaction import VALIDATOR_ELECTION, CHAIN_MIGRATION_ELECTION
+from transactions.common.transaction_mode_types import BROADCAST_TX_COMMIT, BROADCAST_TX_ASYNC, BROADCAST_TX_SYNC
+from transactions.types.elections.election import Election
+from transactions.types.elections.validator_utils import election_id_to_public_key
+from planetmint.config import Config
from planetmint import backend, config_utils, fastquery
-from planetmint.models import Transaction
-from planetmint.transactions.common.exceptions import (
- SchemaValidationError, ValidationError, DoubleSpend)
-from planetmint.transactions.common.transaction_mode_types import (
- BROADCAST_TX_COMMIT, BROADCAST_TX_ASYNC, BROADCAST_TX_SYNC)
-from planetmint.tendermint_utils import encode_transaction, merkleroot
+from planetmint.tendermint_utils import (
+ encode_transaction,
+ merkleroot,
+ key_from_base64,
+ public_key_to_base64,
+ encode_validator,
+ new_validator_set,
+)
from planetmint import exceptions as core_exceptions
from planetmint.validation import BaseValidationRules
logger = logging.getLogger(__name__)
+
class Planetmint(object):
"""Planetmint API
@@ -58,35 +76,26 @@ class Planetmint(object):
"""
config_utils.autoconfigure()
self.mode_commit = BROADCAST_TX_COMMIT
- self.mode_list = (BROADCAST_TX_ASYNC,
- BROADCAST_TX_SYNC,
- self.mode_commit)
- self.tendermint_host = planetmint.config['tendermint']['host']
- self.tendermint_port = planetmint.config['tendermint']['port']
- self.endpoint = 'http://{}:{}/'.format(self.tendermint_host, self.tendermint_port)
+ self.mode_list = (BROADCAST_TX_ASYNC, BROADCAST_TX_SYNC, self.mode_commit)
+ self.tendermint_host = Config().get()["tendermint"]["host"]
+ self.tendermint_port = Config().get()["tendermint"]["port"]
+ self.endpoint = "http://{}:{}/".format(self.tendermint_host, self.tendermint_port)
- validationPlugin = planetmint.config.get('validation_plugin')
+ validationPlugin = Config().get().get("validation_plugin")
if validationPlugin:
self.validation = config_utils.load_validation_plugin(validationPlugin)
else:
self.validation = BaseValidationRules
-
- self.connection = connection if connection else backend.connect(**planetmint.config['database'])
+ self.connection = connection if connection is not None else planetmint.backend.connect()
def post_transaction(self, transaction, mode):
"""Submit a valid transaction to the mempool."""
if not mode or mode not in self.mode_list:
- raise ValidationError('Mode must be one of the following {}.'
- .format(', '.join(self.mode_list)))
+ raise ValidationError("Mode must be one of the following {}.".format(", ".join(self.mode_list)))
tx_dict = transaction.tx_dict if transaction.tx_dict else transaction.to_dict()
- payload = {
- 'method': mode,
- 'jsonrpc': '2.0',
- 'params': [encode_transaction(tx_dict)],
- 'id': str(uuid4())
- }
+ payload = {"method": mode, "jsonrpc": "2.0", "params": [encode_transaction(tx_dict)], "id": str(uuid4())}
# TODO: handle connection errors!
return requests.post(self.endpoint, json=payload)
@@ -99,45 +108,55 @@ class Planetmint(object):
def _process_post_response(self, response, mode):
logger.debug(response)
- error = response.get('error')
+ error = response.get("error")
if error:
status_code = 500
- message = error.get('message', 'Internal Error')
- data = error.get('data', '')
+ message = error.get("message", "Internal Error")
+ data = error.get("data", "")
- if 'Tx already exists in cache' in data:
+ if "Tx already exists in cache" in data:
status_code = 400
- return (status_code, message + ' - ' + data)
+ return (status_code, message + " - " + data)
- result = response['result']
+ result = response["result"]
if mode == self.mode_commit:
- check_tx_code = result.get('check_tx', {}).get('code', 0)
- deliver_tx_code = result.get('deliver_tx', {}).get('code', 0)
+ check_tx_code = result.get("check_tx", {}).get("code", 0)
+ deliver_tx_code = result.get("deliver_tx", {}).get("code", 0)
error_code = check_tx_code or deliver_tx_code
else:
- error_code = result.get('code', 0)
+ error_code = result.get("code", 0)
if error_code:
- return (500, 'Transaction validation failed')
+ return (500, "Transaction validation failed")
- return (202, '')
+ return (202, "")
def store_bulk_transactions(self, transactions):
txns = []
assets = []
txn_metadatas = []
+
for t in transactions:
transaction = t.tx_dict if t.tx_dict else rapidjson.loads(rapidjson.dumps(t.to_dict()))
- if transaction['operation'] == t.CREATE:
- # Change this to use the first element of the assets list or to change to use the assets array itsel and manipulate it
- tx_assets = transaction.pop('assets')
- tx_assets[0]['id'] = transaction['id']
- assets.extend(tx_assets)
- metadata = transaction.pop('metadata')
- txn_metadatas.append({'id': transaction['id'],
- 'metadata': metadata})
+ tx_assets = transaction.pop("assets")
+ metadata = transaction.pop("metadata")
+
+ tx_assets = backend.convert.prepare_asset(
+ self.connection,
+ transaction_type=transaction["operation"],
+ transaction_id=transaction["id"],
+ filter_operation=[t.CREATE, t.VALIDATOR_ELECTION, t.CHAIN_MIGRATION_ELECTION],
+ assets=tx_assets,
+ )
+
+ metadata = backend.convert.prepare_metadata(
+ self.connection, transaction_id=transaction["id"], metadata=metadata
+ )
+
+ txn_metadatas.append(metadata)
+ assets.append(tx_assets)
txns.append(transaction)
backend.query.store_metadatas(self.connection, txn_metadatas)
@@ -149,23 +168,19 @@ class Planetmint(object):
return backend.query.delete_transactions(self.connection, txs)
def update_utxoset(self, transaction):
- """Update the UTXO set given ``transaction``. That is, remove
+ """Update the UTXO set given ``transaction``. That is, remove
the outputs that the given ``transaction`` spends, and add the
outputs that the given ``transaction`` creates.
Args:
transaction (:obj:`~planetmint.models.Transaction`): A new
- transaction incoming into the system for which the UTXO
+ transaction incoming into the system for which the UTXO
set needs to be updated.
"""
- spent_outputs = [
- spent_output for spent_output in transaction.spent_outputs
- ]
+ spent_outputs = [spent_output for spent_output in transaction.spent_outputs]
if spent_outputs:
self.delete_unspent_outputs(*spent_outputs)
- self.store_unspent_outputs(
- *[utxo._asdict() for utxo in transaction.unspent_outputs]
- )
+ self.store_unspent_outputs(*[utxo._asdict() for utxo in transaction.unspent_outputs])
def store_unspent_outputs(self, *unspent_outputs):
"""Store the given ``unspent_outputs`` (utxos).
@@ -175,8 +190,7 @@ class Planetmint(object):
length tuple or list of unspent outputs.
"""
if unspent_outputs:
- return backend.query.store_unspent_outputs(
- self.connection, *unspent_outputs)
+ return backend.query.store_unspent_outputs(self.connection, *unspent_outputs)
def get_utxoset_merkle_root(self):
"""Returns the merkle root of the utxoset. This implies that
@@ -205,9 +219,7 @@ class Planetmint(object):
# TODO Once ready, use the already pre-computed utxo_hash field.
# See common/transactions.py for details.
hashes = [
- sha3_256(
- '{}{}'.format(utxo['transaction_id'], utxo['output_index']).encode()
- ).digest() for utxo in utxoset
+ sha3_256("{}{}".format(utxo["transaction_id"], utxo["output_index"]).encode()).digest() for utxo in utxoset
]
# TODO Notice the sorted call!
return merkleroot(sorted(hashes))
@@ -229,8 +241,7 @@ class Planetmint(object):
length tuple or list of unspent outputs.
"""
if unspent_outputs:
- return backend.query.delete_unspent_outputs(
- self.connection, *unspent_outputs)
+ return backend.query.delete_unspent_outputs(self.connection, *unspent_outputs)
def is_committed(self, transaction_id):
transaction = backend.query.get_transaction(self.connection, transaction_id)
@@ -238,7 +249,6 @@ class Planetmint(object):
def get_transaction(self, transaction_id):
transaction = backend.query.get_transaction(self.connection, transaction_id)
-
if transaction:
# TODO: get_assets is used with transaction_id this will not work with the asset change
assets = backend.query.get_assets(self.connection, [transaction_id])
@@ -246,17 +256,17 @@ class Planetmint(object):
# NOTE: assets must not be replaced for transfer transactions
# TODO: check if this holds true for other tx types, some test cases connected to election and voting are still failing
# NOTE: assets should be appended for all txs that define new assets otherwise the ids are already stored in tx
- if transaction['operation'] != 'TRANSFER' and transaction['operation'] != 'VOTE' and assets:
- transaction['assets'] = list(assets)
+ if transaction["operation"] != "TRANSFER" and transaction["operation"] != "VOTE" and assets:
+ transaction["assets"] = list(assets)
- if 'metadata' not in transaction:
+ if "metadata" not in transaction:
metadata = metadata[0] if metadata else None
if metadata:
- metadata = metadata.get('metadata')
+ metadata = metadata.get("metadata")
- transaction.update({'metadata': metadata})
+ transaction.update({"metadata": metadata})
- transaction = Transaction.from_dict(transaction)
+ transaction = Transaction.from_dict(transaction, False)
return transaction
@@ -264,10 +274,8 @@ class Planetmint(object):
return backend.query.get_transactions(self.connection, txn_ids)
def get_transactions_filtered(self, asset_ids, operation=None, last_tx=None):
- """Get a list of transactions filtered on some criteria
- """
- txids = backend.query.get_txids_filtered(self.connection, asset_ids,
- operation, last_tx)
+ """Get a list of transactions filtered on some criteria"""
+ txids = backend.query.get_txids_filtered(self.connection, asset_ids, operation, last_tx)
for txid in txids:
yield self.get_transaction(txid)
@@ -293,27 +301,25 @@ class Planetmint(object):
return self.fastquery.filter_spent_outputs(outputs)
def get_spent(self, txid, output, current_transactions=[]):
- transactions = backend.query.get_spent(self.connection, txid,
- output)
+ transactions = backend.query.get_spent(self.connection, txid, output)
transactions = list(transactions) if transactions else []
if len(transactions) > 1:
raise core_exceptions.CriticalDoubleSpend(
- '`{}` was spent more than once. There is a problem'
- ' with the chain'.format(txid))
+ "`{}` was spent more than once. There is a problem" " with the chain".format(txid)
+ )
current_spent_transactions = []
for ctxn in current_transactions:
for ctxn_input in ctxn.inputs:
- if ctxn_input.fulfills and\
- ctxn_input.fulfills.txid == txid and\
- ctxn_input.fulfills.output == output:
+ if ctxn_input.fulfills and ctxn_input.fulfills.txid == txid and ctxn_input.fulfills.output == output:
current_spent_transactions.append(ctxn)
transaction = None
if len(transactions) + len(current_spent_transactions) > 1:
raise DoubleSpend('tx "{}" spends inputs twice'.format(txid))
elif transactions:
- transaction = Transaction.from_db(self, transactions[0])
+ transaction = backend.query.get_transactions(self.connection, [transactions[0]["id"]])
+ transaction = Transaction.from_dict(transaction[0], False)
elif current_spent_transactions:
transaction = current_spent_transactions[0]
@@ -341,17 +347,16 @@ class Planetmint(object):
block = backend.query.get_block(self.connection, block_id)
latest_block = self.get_latest_block()
- latest_block_height = latest_block['height'] if latest_block else 0
+ latest_block_height = latest_block["height"] if latest_block else 0
if not block and block_id > latest_block_height:
return
- result = {'height': block_id,
- 'transactions': []}
+ result = {"height": block_id, "transactions": []}
if block:
- transactions = backend.query.get_transactions(self.connection, block['transactions'])
- result['transactions'] = [t.to_dict() for t in Transaction.from_db(self, transactions)]
+ transactions = backend.query.get_transactions(self.connection, block["transactions"])
+ result["transactions"] = [t.to_dict() for t in self.tx_from_db(transactions)]
return result
@@ -367,9 +372,9 @@ class Planetmint(object):
"""
blocks = list(backend.query.get_block_with_transaction(self.connection, txid))
if len(blocks) > 1:
- logger.critical('Transaction id %s exists in multiple blocks', txid)
+ logger.critical("Transaction id %s exists in multiple blocks", txid)
- return [block['height'] for block in blocks]
+ return [block["height"] for block in blocks]
def validate_transaction(self, tx, current_transactions=[]):
"""Validate a transaction against the current status of the database."""
@@ -381,14 +386,70 @@ class Planetmint(object):
# throught the code base.
if isinstance(transaction, dict):
try:
- transaction = Transaction.from_dict(tx)
+ transaction = Transaction.from_dict(tx, False)
except SchemaValidationError as e:
- logger.warning('Invalid transaction schema: %s', e.__cause__.message)
+ logger.warning("Invalid transaction schema: %s", e.__cause__.message)
return False
except ValidationError as e:
- logger.warning('Invalid transaction (%s): %s', type(e).__name__, e)
+ logger.warning("Invalid transaction (%s): %s", type(e).__name__, e)
return False
- return transaction.validate(self, current_transactions)
+
+ if transaction.operation == Transaction.CREATE:
+ duplicates = any(txn for txn in current_transactions if txn.id == transaction.id)
+ if self.is_committed(transaction.id) or duplicates:
+ raise DuplicateTransaction("transaction `{}` already exists".format(transaction.id))
+ elif transaction.operation in [Transaction.TRANSFER, Transaction.VOTE]:
+ self.validate_transfer_inputs(transaction, current_transactions)
+
+ return transaction
+
+ def validate_transfer_inputs(self, tx, current_transactions=[]):
+ # store the inputs so that we can check if the asset ids match
+ input_txs = []
+ input_conditions = []
+ for input_ in tx.inputs:
+ input_txid = input_.fulfills.txid
+ input_tx = self.get_transaction(input_txid)
+ if input_tx is None:
+ for ctxn in current_transactions:
+ if ctxn.id == input_txid:
+ input_tx = ctxn
+
+ if input_tx is None:
+ raise InputDoesNotExist("input `{}` doesn't exist".format(input_txid))
+
+ spent = self.get_spent(input_txid, input_.fulfills.output, current_transactions)
+ if spent:
+ raise DoubleSpend("input `{}` was already spent".format(input_txid))
+
+ output = input_tx.outputs[input_.fulfills.output]
+ input_conditions.append(output)
+ input_txs.append(input_tx)
+
+ # Validate that all inputs are distinct
+ links = [i.fulfills.to_uri() for i in tx.inputs]
+ if len(links) != len(set(links)):
+ raise DoubleSpend('tx "{}" spends inputs twice'.format(tx.id))
+
+ # validate asset id
+ asset_id = tx.get_asset_id(input_txs)
+ if asset_id != tx.asset["id"]:
+ raise AssetIdMismatch(("The asset id of the input does not" " match the asset id of the" " transaction"))
+
+ if not tx.inputs_valid(input_conditions):
+ raise InvalidSignature("Transaction signature is invalid.")
+
+ input_amount = sum([input_condition.amount for input_condition in input_conditions])
+ output_amount = sum([output_condition.amount for output_condition in tx.outputs])
+
+ if output_amount != input_amount:
+ raise AmountError(
+ (
+ "The amount used in the inputs `{}`" " needs to be same as the amount used" " in the outputs `{}`"
+ ).format(input_amount, output_amount)
+ )
+
+ return True
def is_valid_transaction(self, tx, current_transactions=[]):
# NOTE: the function returns the Transaction object in case
@@ -396,10 +457,10 @@ class Planetmint(object):
try:
return self.validate_transaction(tx, current_transactions)
except ValidationError as e:
- logger.warning('Invalid transaction (%s): %s', type(e).__name__, e)
+ logger.warning("Invalid transaction (%s): %s", type(e).__name__, e)
return False
- def text_search(self, search, *, limit=0, table='assets'):
+ def text_search(self, search, *, limit=0, table="assets"):
"""Return an iterator of assets that match the text search
Args:
@@ -409,8 +470,7 @@ class Planetmint(object):
Returns:
iter: An iterator of assets that match the text search.
"""
- return backend.query.text_search(self.connection, search, limit=limit,
- table=table)
+ return backend.query.text_search(self.connection, search, limit=limit, table=table)
def get_assets(self, asset_ids):
"""Return a list of assets that match the asset_ids
@@ -440,12 +500,12 @@ class Planetmint(object):
def fastquery(self):
return fastquery.FastQuery(self.connection)
- def get_validator_change(self, height=None):
+ def get_validator_set(self, height=None):
return backend.query.get_validator_set(self.connection, height)
def get_validators(self, height=None):
- result = self.get_validator_change(height)
- return [] if result is None else result['validators']
+ result = self.get_validator_set(height)
+ return [] if result is None else result["validators"]
def get_election(self, election_id):
return backend.query.get_election(self.connection, election_id)
@@ -458,18 +518,16 @@ class Planetmint(object):
def store_validator_set(self, height, validators):
"""Store validator set at a given `height`.
- NOTE: If the validator set already exists at that `height` then an
- exception will be raised.
+ NOTE: If the validator set already exists at that `height` then an
+ exception will be raised.
"""
- return backend.query.store_validator_set(self.connection, {'height': height,
- 'validators': validators})
+ return backend.query.store_validator_set(self.connection, {"height": height, "validators": validators})
def delete_validator_set(self, height):
return backend.query.delete_validator_set(self.connection, height)
def store_abci_chain(self, height, chain_id, is_synced=True):
- return backend.query.store_abci_chain(self.connection, height,
- chain_id, is_synced)
+ return backend.query.store_abci_chain(self.connection, height, chain_id, is_synced)
def delete_abci_chain(self, height):
return backend.query.delete_abci_chain(self.connection, height)
@@ -494,16 +552,15 @@ class Planetmint(object):
block = self.get_latest_block()
- suffix = '-migrated-at-height-'
- chain_id = latest_chain['chain_id']
- block_height_str = str(block['height'])
+ suffix = "-migrated-at-height-"
+ chain_id = latest_chain["chain_id"]
+ block_height_str = str(block["height"])
new_chain_id = chain_id.split(suffix)[0] + suffix + block_height_str
- self.store_abci_chain(block['height'] + 1, new_chain_id, False)
+ self.store_abci_chain(block["height"] + 1, new_chain_id, False)
def store_election(self, election_id, height, is_concluded):
- return backend.query.store_election(self.connection, election_id,
- height, is_concluded)
+ return backend.query.store_election(self.connection, election_id, height, is_concluded)
def store_elections(self, elections):
return backend.query.store_elections(self.connection, elections)
@@ -511,5 +568,398 @@ class Planetmint(object):
def delete_elections(self, height):
return backend.query.delete_elections(self.connection, height)
+ def tx_from_db(self, tx_dict_list):
+ """Helper method that reconstructs a transaction dict that was returned
+ from the database. It checks what asset_id to retrieve, retrieves the
+ asset from the asset table and reconstructs the transaction.
-Block = namedtuple('Block', ('app_hash', 'height', 'transactions'))
+ Args:
+ tx_dict_list (:list:`dict` or :obj:`dict`): The transaction dict or
+ list of transaction dict as returned from the database.
+
+ Returns:
+ :class:`~Transaction`
+
+ """
+ return_list = True
+ if isinstance(tx_dict_list, dict):
+ tx_dict_list = [tx_dict_list]
+ return_list = False
+
+ tx_map = {}
+ tx_ids = []
+ for tx in tx_dict_list:
+ tx.update({"metadata": None})
+ tx_map[tx["id"]] = tx
+ tx_ids.append(tx["id"])
+
+ assets = list(self.get_assets(tx_ids))
+ for asset in assets:
+ if asset is not None:
+ # This is tarantool-specific behaviour and needs to be addressed
+ tx = tx_map[asset[1]]
+ tx["asset"] = asset[0]
+
+ tx_ids = list(tx_map.keys())
+ metadata_list = list(self.get_metadata(tx_ids))
+ for metadata in metadata_list:
+ if "id" in metadata:
+ tx = tx_map[metadata["id"]]
+ tx.update({"metadata": metadata.get("metadata")})
+
+ if return_list:
+ tx_list = []
+ for tx_id, tx in tx_map.items():
+ tx_list.append(Transaction.from_dict(tx))
+ return tx_list
+ else:
+ tx = list(tx_map.values())[0]
+ return Transaction.from_dict(tx)
+
+ # NOTE: moved here from Election needs to be placed somewhere else
+ def get_validators_dict(self, height=None):
+ """Return a dictionary of validators with key as `public_key` and
+ value as the `voting_power`
+ """
+ validators = {}
+ for validator in self.get_validators(height):
+ # NOTE: we assume that Tendermint encodes public key in base64
+ public_key = public_key_from_ed25519_key(key_from_base64(validator["public_key"]["value"]))
+ validators[public_key] = validator["voting_power"]
+
+ return validators
+
+ def validate_election(self, transaction, current_transactions=[]): # TODO: move somewhere else
+ """Validate election transaction
+
+ NOTE:
+ * A valid election is initiated by an existing validator.
+
+ * A valid election is one where voters are validators and votes are
+ allocated according to the voting power of each validator node.
+
+ Args:
+ :param planet: (Planetmint) an instantiated planetmint.lib.Planetmint object.
+ :param current_transactions: (list) A list of transactions to be validated along with the election
+
+ Returns:
+ Election: a Election object or an object of the derived Election subclass.
+
+ Raises:
+ ValidationError: If the election is invalid
+ """
+
+ duplicates = any(txn for txn in current_transactions if txn.id == transaction.id)
+ if self.is_committed(transaction.id) or duplicates:
+ raise DuplicateTransaction("transaction `{}` already exists".format(transaction.id))
+
+ current_validators = self.get_validators_dict()
+
+ # NOTE: Proposer should be a single node
+ if len(transaction.inputs) != 1 or len(transaction.inputs[0].owners_before) != 1:
+ raise MultipleInputsError("`tx_signers` must be a list instance of length one")
+
+ # NOTE: Check if the proposer is a validator.
+ [election_initiator_node_pub_key] = transaction.inputs[0].owners_before
+ if election_initiator_node_pub_key not in current_validators.keys():
+ raise InvalidProposer("Public key is not a part of the validator set")
+
+ # NOTE: Check if all validators have been assigned votes equal to their voting power
+ if not self.is_same_topology(current_validators, transaction.outputs):
+ raise UnequalValidatorSet("Validator set must be exactly the same as the outputs of the election")
+
+ if transaction.operation == VALIDATOR_ELECTION:
+ self.validate_validator_election(transaction)
+
+ return transaction
+
+ def validate_validator_election(self, transaction): # TODO: move somewhere else
+ """For more details refer BEP-21: https://github.com/planetmint/BEPs/tree/master/21"""
+
+ current_validators = self.get_validators_dict()
+
+ # NOTE: change more than 1/3 of the current power is not allowed
+ if transaction.asset["data"]["power"] >= (1 / 3) * sum(current_validators.values()):
+ raise InvalidPowerChange("`power` change must be less than 1/3 of total power")
+
+ def get_election_status(self, transaction):
+ election = self.get_election(transaction.id)
+ if election and election["is_concluded"]:
+ return Election.CONCLUDED
+
+ return Election.INCONCLUSIVE if self.has_validator_set_changed(transaction) else Election.ONGOING
+
+ def has_validator_set_changed(self, transaction): # TODO: move somewhere else
+ latest_change = self.get_validator_change()
+ if latest_change is None:
+ return False
+
+ latest_change_height = latest_change["height"]
+
+ election = self.get_election(transaction.id)
+
+ return latest_change_height > election["height"]
+
+ def get_validator_change(self): # TODO: move somewhere else
+ """Return the validator set from the most recent approved block
+
+ :return: {
+ 'height': ,
+ 'validators':
+ }
+ """
+ latest_block = self.get_latest_block()
+ if latest_block is None:
+ return None
+ return self.get_validator_set(latest_block["height"])
+
+ def get_validator_dict(self, height=None):
+ """Return a dictionary of validators with key as `public_key` and
+ value as the `voting_power`
+ """
+ validators = {}
+ for validator in self.get_validators(height):
+ # NOTE: we assume that Tendermint encodes public key in base64
+ public_key = public_key_from_ed25519_key(key_from_base64(validator["public_key"]["value"]))
+ validators[public_key] = validator["voting_power"]
+
+ return validators
+
+ def get_recipients_list(self):
+ """Convert validator dictionary to a recipient list for `Transaction`"""
+
+ recipients = []
+ for public_key, voting_power in self.get_validator_dict().items():
+ recipients.append(([public_key], voting_power))
+
+ return recipients
+
+ def show_election_status(self, transaction):
+ data = transaction.asset["data"]
+ if "public_key" in data.keys():
+ data["public_key"] = public_key_to_base64(data["public_key"]["value"])
+ response = ""
+ for k, v in data.items():
+ if k != "seed":
+ response += f"{k}={v}\n"
+ response += f"status={self.get_election_status(transaction)}"
+
+ if transaction.operation == CHAIN_MIGRATION_ELECTION:
+ response = self.append_chain_migration_status(response)
+
+ return response
+
+ def append_chain_migration_status(self, status):
+ chain = self.get_latest_abci_chain()
+ if chain is None or chain["is_synced"]:
+ return status
+
+ status += f'\nchain_id={chain["chain_id"]}'
+ block = self.get_latest_block()
+ status += f'\napp_hash={block["app_hash"]}'
+ validators = [
+ {
+ "pub_key": {
+ "type": "tendermint/PubKeyEd25519",
+ "value": k,
+ },
+ "power": v,
+ }
+ for k, v in self.get_validator_dict().items()
+ ]
+ status += f"\nvalidators={json.dumps(validators, indent=4)}"
+ return status
+
+ def is_same_topology(cls, current_topology, election_topology):
+ voters = {}
+ for voter in election_topology:
+ if len(voter.public_keys) > 1:
+ return False
+
+ [public_key] = voter.public_keys
+ voting_power = voter.amount
+ voters[public_key] = voting_power
+
+ # Check whether the voters and their votes is same to that of the
+ # validators and their voting power in the network
+ return current_topology == voters
+
+ def count_votes(self, election_pk, transactions, getter=getattr):
+ votes = 0
+ for txn in transactions:
+ if getter(txn, "operation") == Vote.OPERATION:
+ for output in getter(txn, "outputs"):
+ # NOTE: We enforce that a valid vote to election id will have only
+ # election_pk in the output public keys, including any other public key
+ # along with election_pk will lead to vote being not considered valid.
+ if len(getter(output, "public_keys")) == 1 and [election_pk] == getter(output, "public_keys"):
+ votes = votes + int(getter(output, "amount"))
+ return votes
+
+ def get_commited_votes(self, transaction, election_pk=None): # TODO: move somewhere else
+ if election_pk is None:
+ election_pk = election_id_to_public_key(transaction.id)
+ txns = list(backend.query.get_asset_tokens_for_public_key(self.connection, transaction.id, election_pk))
+ return self.count_votes(election_pk, txns, dict.get)
+
+ def _get_initiated_elections(self, height, txns): # TODO: move somewhere else
+ elections = []
+ for tx in txns:
+ if not isinstance(tx, Election):
+ continue
+
+ elections.append({"election_id": tx.id, "height": height, "is_concluded": False})
+ return elections
+
+ def _get_votes(self, txns): # TODO: move somewhere else
+ elections = OrderedDict()
+ for tx in txns:
+ if not isinstance(tx, Vote):
+ continue
+
+ election_id = tx.asset["id"]
+ if election_id not in elections:
+ elections[election_id] = []
+ elections[election_id].append(tx)
+ return elections
+
+ def process_block(self, new_height, txns): # TODO: move somewhere else
+ """Looks for election and vote transactions inside the block, records
+ and processes elections.
+
+ Every election is recorded in the database.
+
+ Every vote has a chance to conclude the corresponding election. When
+ an election is concluded, the corresponding database record is
+ marked as such.
+
+ Elections and votes are processed in the order in which they
+ appear in the block. Elections are concluded in the order of
+ appearance of their first votes in the block.
+
+ For every election concluded in the block, calls its `on_approval`
+ method. The returned value of the last `on_approval`, if any,
+ is a validator set update to be applied in one of the following blocks.
+
+ `on_approval` methods are implemented by elections of particular type.
+ The method may contain side effects but should be idempotent. To account
+ for other concluded elections, if it requires so, the method should
+ rely on the database state.
+ """
+ # elections initiated in this block
+ initiated_elections = self._get_initiated_elections(new_height, txns)
+
+ if initiated_elections:
+ self.store_elections(initiated_elections)
+
+ # elections voted for in this block and their votes
+ elections = self._get_votes(txns)
+
+ validator_update = None
+ for election_id, votes in elections.items():
+ election = self.get_transaction(election_id)
+ if election is None:
+ continue
+
+ if not self.has_election_concluded(election, votes):
+ continue
+
+ validator_update = self.approve_election(election, new_height)
+ self.store_election(election.id, new_height, is_concluded=True)
+
+ return [validator_update] if validator_update else []
+
+ def has_election_concluded(self, transaction, current_votes=[]): # TODO: move somewhere else
+ """Check if the election can be concluded or not.
+
+ * Elections can only be concluded if the validator set has not changed
+ since the election was initiated.
+ * Elections can be concluded only if the current votes form a supermajority.
+
+ Custom elections may override this function and introduce additional checks.
+ """
+ if self.has_validator_set_changed(transaction):
+ return False
+
+ if transaction.operation == VALIDATOR_ELECTION:
+ if not self.has_validator_election_concluded():
+ return False
+
+ if transaction.operation == CHAIN_MIGRATION_ELECTION:
+ if not self.has_chain_migration_concluded():
+ return False
+
+ election_pk = election_id_to_public_key(transaction.id)
+ votes_committed = self.get_commited_votes(transaction, election_pk)
+ votes_current = self.count_votes(election_pk, current_votes)
+
+ total_votes = sum(output.amount for output in transaction.outputs)
+ if (votes_committed < (2 / 3) * total_votes) and (votes_committed + votes_current >= (2 / 3) * total_votes):
+ return True
+
+ return False
+
+ def has_validator_election_concluded(self): # TODO: move somewhere else
+ latest_block = self.get_latest_block()
+ if latest_block is not None:
+ latest_block_height = latest_block["height"]
+ latest_validator_change = self.get_validator_set()["height"]
+
+ # TODO change to `latest_block_height + 3` when upgrading to Tendermint 0.24.0.
+ if latest_validator_change == latest_block_height + 2:
+ # do not conclude the election if there is a change assigned already
+ return False
+
+ return True
+
+ def has_chain_migration_concluded(self): # TODO: move somewhere else
+ chain = self.get_latest_abci_chain()
+ if chain is not None and not chain["is_synced"]:
+ # do not conclude the migration election if
+ # there is another migration in progress
+ return False
+
+ return True
+
+ def rollback_election(self, new_height, txn_ids): # TODO: move somewhere else
+ """Looks for election and vote transactions inside the block and
+ cleans up the database artifacts possibly created in `process_blocks`.
+
+ Part of the `end_block`/`commit` crash recovery.
+ """
+
+ # delete election records for elections initiated at this height and
+ # elections concluded at this height
+ self.delete_elections(new_height)
+
+ txns = [self.get_transaction(tx_id) for tx_id in txn_ids]
+
+ elections = self._get_votes(txns)
+ for election_id in elections:
+ election = self.get_transaction(election_id)
+ if election.operation == VALIDATOR_ELECTION:
+ # TODO change to `new_height + 2` when upgrading to Tendermint 0.24.0.
+ self.delete_validator_set(new_height + 1)
+ if election.operation == CHAIN_MIGRATION_ELECTION:
+ self.delete_abci_chain(new_height)
+
+ def approve_election(self, election, new_height):
+ """Override to update the database state according to the
+ election rules. Consider the current database state to account for
+ other concluded elections, if required.
+ """
+ if election.operation == CHAIN_MIGRATION_ELECTION:
+ self.migrate_abci_chain()
+ if election.operation == VALIDATOR_ELECTION:
+ validator_updates = [election.asset["data"]]
+ curr_validator_set = self.get_validators(new_height)
+ updated_validator_set = new_validator_set(curr_validator_set, validator_updates)
+
+ updated_validator_set = [v for v in updated_validator_set if v["voting_power"] > 0]
+
+ # TODO change to `new_height + 2` when upgrading to Tendermint 0.24.0.
+ self.store_validator_set(new_height + 1, updated_validator_set)
+ return encode_validator(election.asset["data"])
+
+
+Block = namedtuple("Block", ("app_hash", "height", "transactions"))
diff --git a/planetmint/log.py b/planetmint/log.py
index 091fe8e..07d95f8 100644
--- a/planetmint/log.py
+++ b/planetmint/log.py
@@ -3,71 +3,16 @@
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
-import planetmint
-import logging
-
-from planetmint.transactions.common.exceptions import ConfigurationError
+from transactions.common.exceptions import ConfigurationError
from logging.config import dictConfig as set_logging_config
-import os
-
-
-DEFAULT_LOG_DIR = os.getcwd()
-
-DEFAULT_LOGGING_CONFIG = {
- 'version': 1,
- 'disable_existing_loggers': False,
- 'formatters': {
- 'console': {
- 'class': 'logging.Formatter',
- 'format': ('[%(asctime)s] [%(levelname)s] (%(name)s) '
- '%(message)s (%(processName)-10s - pid: %(process)d)'),
- 'datefmt': '%Y-%m-%d %H:%M:%S',
- },
- 'file': {
- 'class': 'logging.Formatter',
- 'format': ('[%(asctime)s] [%(levelname)s] (%(name)s) '
- '%(message)s (%(processName)-10s - pid: %(process)d)'),
- 'datefmt': '%Y-%m-%d %H:%M:%S',
- }
- },
- 'handlers': {
- 'console': {
- 'class': 'logging.StreamHandler',
- 'formatter': 'console',
- 'level': logging.INFO,
- },
- 'file': {
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join(DEFAULT_LOG_DIR, 'planetmint.log'),
- 'mode': 'w',
- 'maxBytes': 209715200,
- 'backupCount': 5,
- 'formatter': 'file',
- 'level': logging.INFO,
- },
- 'errors': {
- 'class': 'logging.handlers.RotatingFileHandler',
- 'filename': os.path.join(DEFAULT_LOG_DIR, 'planetmint-errors.log'),
- 'mode': 'w',
- 'maxBytes': 209715200,
- 'backupCount': 5,
- 'formatter': 'file',
- 'level': logging.ERROR,
- }
- },
- 'loggers': {},
- 'root': {
- 'level': logging.DEBUG,
- 'handlers': ['console', 'file', 'errors'],
- },
-}
+from planetmint.config import Config, DEFAULT_LOGGING_CONFIG
def _normalize_log_level(level):
try:
return level.upper()
except AttributeError as exc:
- raise ConfigurationError('Log level must be a string!') from exc
+ raise ConfigurationError("Log level must be a string!") from exc
def setup_logging():
@@ -84,47 +29,47 @@ def setup_logging():
"""
logging_configs = DEFAULT_LOGGING_CONFIG
- new_logging_configs = planetmint.config['log']
+ new_logging_configs = Config().get()["log"]
- if 'file' in new_logging_configs:
- filename = new_logging_configs['file']
- logging_configs['handlers']['file']['filename'] = filename
+ if "file" in new_logging_configs:
+ filename = new_logging_configs["file"]
+ logging_configs["handlers"]["file"]["filename"] = filename
- if 'error_file' in new_logging_configs:
- error_filename = new_logging_configs['error_file']
- logging_configs['handlers']['errors']['filename'] = error_filename
+ if "error_file" in new_logging_configs:
+ error_filename = new_logging_configs["error_file"]
+ logging_configs["handlers"]["errors"]["filename"] = error_filename
- if 'level_console' in new_logging_configs:
- level = _normalize_log_level(new_logging_configs['level_console'])
- logging_configs['handlers']['console']['level'] = level
+ if "level_console" in new_logging_configs:
+ level = _normalize_log_level(new_logging_configs["level_console"])
+ logging_configs["handlers"]["console"]["level"] = level
- if 'level_logfile' in new_logging_configs:
- level = _normalize_log_level(new_logging_configs['level_logfile'])
- logging_configs['handlers']['file']['level'] = level
+ if "level_logfile" in new_logging_configs:
+ level = _normalize_log_level(new_logging_configs["level_logfile"])
+ logging_configs["handlers"]["file"]["level"] = level
- if 'fmt_console' in new_logging_configs:
- fmt = new_logging_configs['fmt_console']
- logging_configs['formatters']['console']['format'] = fmt
+ if "fmt_console" in new_logging_configs:
+ fmt = new_logging_configs["fmt_console"]
+ logging_configs["formatters"]["console"]["format"] = fmt
- if 'fmt_logfile' in new_logging_configs:
- fmt = new_logging_configs['fmt_logfile']
- logging_configs['formatters']['file']['format'] = fmt
+ if "fmt_logfile" in new_logging_configs:
+ fmt = new_logging_configs["fmt_logfile"]
+ logging_configs["formatters"]["file"]["format"] = fmt
- if 'datefmt_console' in new_logging_configs:
- fmt = new_logging_configs['datefmt_console']
- logging_configs['formatters']['console']['datefmt'] = fmt
+ if "datefmt_console" in new_logging_configs:
+ fmt = new_logging_configs["datefmt_console"]
+ logging_configs["formatters"]["console"]["datefmt"] = fmt
- if 'datefmt_logfile' in new_logging_configs:
- fmt = new_logging_configs['datefmt_logfile']
- logging_configs['formatters']['file']['datefmt'] = fmt
+ if "datefmt_logfile" in new_logging_configs:
+ fmt = new_logging_configs["datefmt_logfile"]
+ logging_configs["formatters"]["file"]["datefmt"] = fmt
- log_levels = new_logging_configs.get('granular_levels', {})
+ log_levels = new_logging_configs.get("granular_levels", {})
for logger_name, level in log_levels.items():
level = _normalize_log_level(level)
try:
- logging_configs['loggers'][logger_name]['level'] = level
+ logging_configs["loggers"][logger_name]["level"] = level
except KeyError:
- logging_configs['loggers'][logger_name] = {'level': level}
+ logging_configs["loggers"][logger_name] = {"level": level}
set_logging_config(logging_configs)
diff --git a/planetmint/migrations/chain_migration_election.py b/planetmint/migrations/chain_migration_election.py
deleted file mode 100644
index 5e23e40..0000000
--- a/planetmint/migrations/chain_migration_election.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import json
-
-from planetmint.transactions.common.schema import TX_SCHEMA_CHAIN_MIGRATION_ELECTION
-from planetmint.transactions.types.elections.election import Election
-
-
-class ChainMigrationElection(Election):
-
- OPERATION = 'CHAIN_MIGRATION_ELECTION'
- CREATE = OPERATION
- ALLOWED_OPERATIONS = (OPERATION,)
- TX_SCHEMA_CUSTOM = TX_SCHEMA_CHAIN_MIGRATION_ELECTION
-
- def has_concluded(self, planetmint, *args, **kwargs):
- chain = planetmint.get_latest_abci_chain()
- if chain is not None and not chain['is_synced']:
- # do not conclude the migration election if
- # there is another migration in progress
- return False
-
- return super().has_concluded(planetmint, *args, **kwargs)
-
- def on_approval(self, planet, *args, **kwargs):
- planet.migrate_abci_chain()
-
- def show_election(self, planet):
- output = super().show_election(planet)
- chain = planet.get_latest_abci_chain()
- if chain is None or chain['is_synced']:
- return output
-
- output += f'\nchain_id={chain["chain_id"]}'
- block = planet.get_latest_block()
- output += f'\napp_hash={block["app_hash"]}'
- validators = [
- {
- 'pub_key': {
- 'type': 'tendermint/PubKeyEd25519',
- 'value': k,
- },
- 'power': v,
- } for k, v in self.get_validators(planet).items()
- ]
- output += f'\nvalidators={json.dumps(validators, indent=4)}'
- return output
-
- def on_rollback(self, planet, new_height):
- planet.delete_abci_chain(new_height)
diff --git a/planetmint/models.py b/planetmint/models.py
index d57f9b2..bea5b39 100644
--- a/planetmint/models.py
+++ b/planetmint/models.py
@@ -3,57 +3,6 @@
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
-from planetmint.backend.schema import validate_language_key
-from planetmint.transactions.common.exceptions import (InvalidSignature, DuplicateTransaction)
-from planetmint.transactions.common.schema import validate_transaction_schema
-from planetmint.transactions.common.transaction import Transaction
-from planetmint.transactions.common.utils import (validate_txn_obj, validate_key)
-
-
-class Transaction(Transaction):
- ASSETS = 'assets'
- METADATA = 'metadata'
- DATA = 'data'
-
- def validate(self, planet, current_transactions=[]):
- """Validate transaction spend
- Args:
- planet (Planetmint): an instantiated planetmint.Planetmint object.
- Returns:
- The transaction (Transaction) if the transaction is valid else it
- raises an exception describing the reason why the transaction is
- invalid.
- Raises:
- ValidationError: If the transaction is invalid
- """
- input_conditions = []
-
- if self.operation == Transaction.CREATE:
- duplicates = any(txn for txn in current_transactions if txn.id == self.id)
- if planet.is_committed(self.id) or duplicates:
- raise DuplicateTransaction('transaction `{}` already exists'
- .format(self.id))
-
- if not self.inputs_valid(input_conditions):
- raise InvalidSignature('Transaction signature is invalid.')
-
- elif self.operation == Transaction.TRANSFER:
- self.validate_transfer_inputs(planet, current_transactions)
-
- return self
-
- @classmethod
- def from_dict(cls, tx_body):
- return super().from_dict(tx_body, False)
-
- @classmethod
- def validate_schema(cls, tx_body):
- validate_transaction_schema(tx_body)
- validate_txn_obj(cls.ASSETS, tx_body, cls.ASSETS, validate_key)
- validate_txn_obj(cls.METADATA, tx_body, cls.METADATA, validate_key)
- validate_language_key(tx_body, cls.ASSETS)
- validate_language_key(tx_body, cls.METADATA)
-
class FastTransaction:
"""A minimal wrapper around a transaction dictionary. This is useful for
@@ -68,7 +17,7 @@ class FastTransaction:
@property
def id(self):
- return self.data['id']
+ return self.data["id"]
def to_dict(self):
return self.data
diff --git a/planetmint/parallel_validation.py b/planetmint/parallel_validation.py
index 0062a99..e33436d 100644
--- a/planetmint/parallel_validation.py
+++ b/planetmint/parallel_validation.py
@@ -4,8 +4,8 @@
# Code is Apache-2.0 and docs are CC-BY-4.0
import multiprocessing as mp
-from collections import defaultdict
+from collections import defaultdict
from planetmint import App
from planetmint.lib import Planetmint
from planetmint.tendermint_utils import decode_transaction
@@ -39,8 +39,8 @@ class ParallelValidationApp(App):
return super().end_block(request_end_block)
-RESET = 'reset'
-EXIT = 'exit'
+RESET = "reset"
+EXIT = "exit"
class ParallelValidator:
@@ -64,7 +64,7 @@ class ParallelValidator:
def validate(self, raw_transaction):
dict_transaction = decode_transaction(raw_transaction)
- index = int(dict_transaction['id'], 16) % self.number_of_workers
+ index = int(dict_transaction["id"], 16) % self.number_of_workers
self.routing_queues[index].put((self.transaction_index, dict_transaction))
self.transaction_index += 1
@@ -106,13 +106,13 @@ class ValidationWorker:
def validate(self, dict_transaction):
# TODO: this will only work for now, no multiasset support => needs to be refactored for COMPOSE/DECOMPOSE
try:
- asset_id = dict_transaction['assets'][0]['id']
+ asset_id = dict_transaction["assets"][0]["id"]
except KeyError:
- asset_id = dict_transaction['id']
+ asset_id = dict_transaction["id"]
+ except TypeError:
+ asset_id = dict_transaction["id"]
- transaction = self.planetmint.is_valid_transaction(
- dict_transaction,
- self.validated_transactions[asset_id])
+ transaction = self.planetmint.is_valid_transaction(dict_transaction, self.validated_transactions[asset_id])
if transaction:
self.validated_transactions[asset_id].append(transaction)
diff --git a/planetmint/start.py b/planetmint/start.py
index 24dc356..906ca45 100644
--- a/planetmint/start.py
+++ b/planetmint/start.py
@@ -6,27 +6,27 @@
import logging
import setproctitle
-import planetmint
+from planetmint.config import Config
from planetmint.lib import Planetmint
from planetmint.core import App
from planetmint.parallel_validation import ParallelValidationApp
from planetmint.web import server, websocket_server
from planetmint.events import Exchange, EventTypes
from planetmint.utils import Process
-
+from planetmint.version import __version__
logger = logging.getLogger(__name__)
BANNER = """
****************************************************************************
* *
-* Planetmint 2.2.2 *
+* Planetmint {} *
* codename "jumping sloth" *
* Initialization complete. Planetmint Server is ready and waiting. *
* *
* You can send HTTP requests via the HTTP API documented in the *
* Planetmint Server docs at: *
-* https://planetmint.com/http-api *
+* https://planetmint.io/http-api *
* *
* Listening to client connections on: {:<15} *
* *
@@ -36,26 +36,27 @@ BANNER = """
def start(args):
# Exchange object for event stream api
- logger.info('Starting Planetmint')
+ logger.info("Starting Planetmint")
exchange = Exchange()
# start the web api
app_server = server.create_server(
- settings=planetmint.config['server'],
- log_config=planetmint.config['log'],
- planetmint_factory=Planetmint)
- p_webapi = Process(name='planetmint_webapi', target=app_server.run, daemon=True)
+ settings=Config().get()["server"], log_config=Config().get()["log"], planetmint_factory=Planetmint
+ )
+ p_webapi = Process(name="planetmint_webapi", target=app_server.run, daemon=True)
p_webapi.start()
- logger.info(BANNER.format(planetmint.config['server']['bind']))
+ logger.info(BANNER.format(__version__, Config().get()["server"]["bind"]))
# start websocket server
- p_websocket_server = Process(name='planetmint_ws',
- target=websocket_server.start,
- daemon=True,
- args=(exchange.get_subscriber_queue(EventTypes.BLOCK_VALID),))
+ p_websocket_server = Process(
+ name="planetmint_ws",
+ target=websocket_server.start,
+ daemon=True,
+ args=(exchange.get_subscriber_queue(EventTypes.BLOCK_VALID),),
+ )
p_websocket_server.start()
- p_exchange = Process(name='planetmint_exchange', target=exchange.run, daemon=True)
+ p_exchange = Process(name="planetmint_exchange", target=exchange.run, daemon=True)
p_exchange.start()
# We need to import this after spawning the web server
@@ -63,10 +64,9 @@ def start(args):
# for gevent.
from abci.server import ABCIServer
- setproctitle.setproctitle('planetmint')
+ setproctitle.setproctitle("planetmint")
# Start the ABCIServer
- # abci = ABCI(TmVersion(planetmint.config['tendermint']['version']))
if args.experimental_parallel_validation:
app = ABCIServer(
app=ParallelValidationApp(
@@ -82,5 +82,5 @@ def start(args):
app.run()
-if __name__ == '__main__':
+if __name__ == "__main__":
start()
diff --git a/planetmint/tendermint_utils.py b/planetmint/tendermint_utils.py
index 84d967e..28528d7 100644
--- a/planetmint/tendermint_utils.py
+++ b/planetmint/tendermint_utils.py
@@ -6,39 +6,91 @@
import base64
import hashlib
import json
-from binascii import hexlify
+import codecs
-try:
- from hashlib import sha3_256
-except ImportError:
- from sha3 import sha3_256
+from binascii import hexlify
+from tendermint.abci import types_pb2
+from tendermint.crypto import keys_pb2
+from hashlib import sha3_256
+from transactions.common.exceptions import InvalidPublicKey
+
+
+def encode_validator(v):
+ ed25519_public_key = v["public_key"]["value"]
+ pub_key = keys_pb2.PublicKey(ed25519=bytes.fromhex(ed25519_public_key))
+
+ return types_pb2.ValidatorUpdate(pub_key=pub_key, power=v["power"])
+
+
+def decode_validator(v):
+ return {
+ "public_key": {
+ "type": "ed25519-base64",
+ "value": codecs.encode(v.pub_key.ed25519, "base64").decode().rstrip("\n"),
+ },
+ "voting_power": v.power,
+ }
+
+
+def new_validator_set(validators, updates):
+ validators_dict = {}
+ for v in validators:
+ validators_dict[v["public_key"]["value"]] = v
+
+ updates_dict = {}
+ for u in updates:
+ decoder = get_public_key_decoder(u["public_key"])
+ public_key64 = base64.b64encode(decoder(u["public_key"]["value"])).decode("utf-8")
+ updates_dict[public_key64] = {
+ "public_key": {"type": "ed25519-base64", "value": public_key64},
+ "voting_power": u["power"],
+ }
+
+ new_validators_dict = {**validators_dict, **updates_dict}
+ return list(new_validators_dict.values())
+
+
+def get_public_key_decoder(pk):
+ encoding = pk["type"]
+ decoder = base64.b64decode
+
+ if encoding == "ed25519-base16":
+ decoder = base64.b16decode
+ elif encoding == "ed25519-base32":
+ decoder = base64.b32decode
+ elif encoding == "ed25519-base64":
+ decoder = base64.b64decode
+ else:
+ raise InvalidPublicKey("Invalid `type` specified for public key `value`")
+
+ return decoder
def encode_transaction(value):
"""Encode a transaction (dict) to Base64."""
- return base64.b64encode(json.dumps(value).encode('utf8')).decode('utf8')
+ return base64.b64encode(json.dumps(value).encode("utf8")).decode("utf8")
def decode_transaction(raw):
"""Decode a transaction from bytes to a dict."""
- return json.loads(raw.decode('utf8'))
+ return json.loads(raw.decode("utf8"))
def decode_transaction_base64(value):
"""Decode a transaction from Base64."""
- return json.loads(base64.b64decode(value.encode('utf8')).decode('utf8'))
+ return json.loads(base64.b64decode(value.encode("utf8")).decode("utf8"))
def calculate_hash(key_list):
if not key_list:
- return ''
+ return ""
full_hash = sha3_256()
for key in key_list:
- full_hash.update(key.encode('utf8'))
+ full_hash.update(key.encode("utf8"))
return full_hash.hexdigest()
@@ -59,24 +111,23 @@ def merkleroot(hashes):
# i.e. an empty list, then the hash of the empty string is returned.
# This seems too easy but maybe that is good enough? TO REVIEW!
if not hashes:
- return sha3_256(b'').hexdigest()
+ return sha3_256(b"").hexdigest()
# XXX END TEMPORARY -- MUST REVIEW ...
if len(hashes) == 1:
return hexlify(hashes[0]).decode()
if len(hashes) % 2 == 1:
hashes.append(hashes[-1])
- parent_hashes = [
- sha3_256(hashes[i] + hashes[i + 1]).digest()
- for i in range(0, len(hashes) - 1, 2)
- ]
+ parent_hashes = [sha3_256(hashes[i] + hashes[i + 1]).digest() for i in range(0, len(hashes) - 1, 2)]
return merkleroot(parent_hashes)
+# ripemd160 is only available below python 3.9.13
+@DeprecationWarning
def public_key64_to_address(base64_public_key):
"""Note this only compatible with Tendermint 0.19.x"""
ed25519_public_key = public_key_from_base64(base64_public_key)
encoded_public_key = amino_encoded_public_key(ed25519_public_key)
- return hashlib.new('ripemd160', encoded_public_key).hexdigest().upper()
+ return hashlib.new("ripemd160", encoded_public_key).hexdigest().upper()
def public_key_from_base64(base64_public_key):
@@ -93,8 +144,8 @@ def public_key_to_base64(ed25519_public_key):
def key_to_base64(ed25519_key):
ed25519_key = bytes.fromhex(ed25519_key)
- return base64.b64encode(ed25519_key).decode('utf-8')
+ return base64.b64encode(ed25519_key).decode("utf-8")
def amino_encoded_public_key(ed25519_public_key):
- return bytes.fromhex('1624DE6220{}'.format(ed25519_public_key))
+ return bytes.fromhex("1624DE6220{}".format(ed25519_public_key))
diff --git a/planetmint/transactions/common/__init__.py b/planetmint/transactions/common/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/planetmint/transactions/common/crypto.py b/planetmint/transactions/common/crypto.py
deleted file mode 100644
index 9205c27..0000000
--- a/planetmint/transactions/common/crypto.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
-# Separate all crypto code so that we can easily test several implementations
-from collections import namedtuple
-
-try:
- from hashlib import sha3_256
-except ImportError:
- from sha3 import sha3_256
-
-from cryptoconditions import crypto
-
-
-CryptoKeypair = namedtuple('CryptoKeypair', ('private_key', 'public_key'))
-
-
-def hash_data(data):
- """Hash the provided data using SHA3-256"""
- return sha3_256(data.encode()).hexdigest()
-
-
-def generate_key_pair():
- """Generates a cryptographic key pair.
-
- Returns:
- :class:`~planetmint.transactions.common.crypto.CryptoKeypair`: A
- :obj:`collections.namedtuple` with named fields
- :attr:`~planetmint.transactions.common.crypto.CryptoKeypair.private_key` and
- :attr:`~planetmint.transactions.common.crypto.CryptoKeypair.public_key`.
-
- """
- # TODO FOR CC: Adjust interface so that this function becomes unnecessary
- return CryptoKeypair(
- *(k.decode() for k in crypto.ed25519_generate_key_pair()))
-
-
-PrivateKey = crypto.Ed25519SigningKey
-PublicKey = crypto.Ed25519VerifyingKey
-
-
-def key_pair_from_ed25519_key(hex_private_key):
- """Generate base58 encode public-private key pair from a hex encoded private key"""
- priv_key = crypto.Ed25519SigningKey(bytes.fromhex(hex_private_key)[:32], encoding='bytes')
- public_key = priv_key.get_verifying_key()
- return CryptoKeypair(private_key=priv_key.encode(encoding='base58').decode('utf-8'),
- public_key=public_key.encode(encoding='base58').decode('utf-8'))
-
-
-def public_key_from_ed25519_key(hex_public_key):
- """Generate base58 public key from hex encoded public key"""
- public_key = crypto.Ed25519VerifyingKey(bytes.fromhex(hex_public_key), encoding='bytes')
- return public_key.encode(encoding='base58').decode('utf-8')
diff --git a/planetmint/transactions/common/exceptions.py b/planetmint/transactions/common/exceptions.py
deleted file mode 100644
index ed0c307..0000000
--- a/planetmint/transactions/common/exceptions.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
-"""Custom exceptions used in the `planetmint` package.
-"""
-from planetmint.exceptions import BigchainDBError
-
-
-class ConfigurationError(BigchainDBError):
- """Raised when there is a problem with server configuration"""
-
-
-class DatabaseDoesNotExist(BigchainDBError):
- """Raised when trying to delete the database but the db is not there"""
-
-
-class StartupError(BigchainDBError):
- """Raised when there is an error starting up the system"""
-
-
-class CyclicBlockchainError(BigchainDBError):
- """Raised when there is a cycle in the blockchain"""
-
-
-class KeypairMismatchException(BigchainDBError):
- """Raised if the private key(s) provided for signing don't match any of the
- current owner(s)
- """
-
-
-class OperationError(BigchainDBError):
- """Raised when an operation cannot go through"""
-
-
-################################################################################
-# Validation errors
-#
-# All validation errors (which are handleable errors, not faults) should
-# subclass ValidationError. However, where possible they should also have their
-# own distinct type to differentiate them from other validation errors,
-# especially for the purposes of testing.
-
-
-class ValidationError(BigchainDBError):
- """Raised if there was an error in validation"""
-
-
-class DoubleSpend(ValidationError):
- """Raised if a double spend is found"""
-
-
-class InvalidHash(ValidationError):
- """Raised if there was an error checking the hash for a particular
- operation
- """
-
-
-class SchemaValidationError(ValidationError):
- """Raised if there was any error validating an object's schema"""
-
-
-class InvalidSignature(ValidationError):
- """Raised if there was an error checking the signature for a particular
- operation
- """
-
-
-class AssetIdMismatch(ValidationError):
- """Raised when multiple transaction inputs related to different assets"""
-
-
-class AmountError(ValidationError):
- """Raised when there is a problem with a transaction's output amounts"""
-
-
-class InputDoesNotExist(ValidationError):
- """Raised if a transaction input does not exist"""
-
-
-class TransactionOwnerError(ValidationError):
- """Raised if a user tries to transfer a transaction they don't own"""
-
-
-class DuplicateTransaction(ValidationError):
- """Raised if a duplicated transaction is found"""
-
-
-class ThresholdTooDeep(ValidationError):
- """Raised if threshold condition is too deep"""
-
-
-class MultipleValidatorOperationError(ValidationError):
- """Raised when a validator update pending but new request is submited"""
-
-
-class MultipleInputsError(ValidationError):
- """Raised if there were multiple inputs when only one was expected"""
-
-
-class InvalidProposer(ValidationError):
- """Raised if the public key is not a part of the validator set"""
-
-
-class UnequalValidatorSet(ValidationError):
- """Raised if the validator sets differ"""
-
-
-class InvalidPowerChange(ValidationError):
- """Raised if proposed power change in validator set is >=1/3 total power"""
-
-
-class InvalidPublicKey(ValidationError):
- """Raised if public key doesn't match the encoding type"""
diff --git a/planetmint/transactions/common/input.py b/planetmint/transactions/common/input.py
deleted file mode 100644
index ab123cb..0000000
--- a/planetmint/transactions/common/input.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
-from cryptoconditions import Fulfillment
-from cryptoconditions.exceptions import ASN1DecodeError, ASN1EncodeError
-
-from planetmint.transactions.common.exceptions import InvalidSignature
-from .utils import _fulfillment_to_details, _fulfillment_from_details
-from .output import Output
-from .transaction_link import TransactionLink
-
-class Input(object):
- """A Input is used to spend assets locked by an Output.
-
- Wraps around a Crypto-condition Fulfillment.
-
- Attributes:
- fulfillment (:class:`cryptoconditions.Fulfillment`): A Fulfillment
- to be signed with a private key.
- owners_before (:obj:`list` of :obj:`str`): A list of owners after a
- Transaction was confirmed.
- fulfills (:class:`~planetmint.transactions.common.transaction. TransactionLink`,
- optional): A link representing the input of a `TRANSFER`
- Transaction.
- """
-
- def __init__(self, fulfillment, owners_before, fulfills=None):
- """Create an instance of an :class:`~.Input`.
-
- Args:
- fulfillment (:class:`cryptoconditions.Fulfillment`): A
- Fulfillment to be signed with a private key.
- owners_before (:obj:`list` of :obj:`str`): A list of owners
- after a Transaction was confirmed.
- fulfills (:class:`~planetmint.transactions.common.transaction.
- TransactionLink`, optional): A link representing the input
- of a `TRANSFER` Transaction.
- """
- if fulfills is not None and not isinstance(fulfills, TransactionLink):
- raise TypeError('`fulfills` must be a TransactionLink instance')
- if not isinstance(owners_before, list):
- raise TypeError('`owners_before` must be a list instance')
-
- self.fulfillment = fulfillment
- self.fulfills = fulfills
- self.owners_before = owners_before
-
- def __eq__(self, other):
- # TODO: If `other !== Fulfillment` return `False`
- return self.to_dict() == other.to_dict()
-
- # NOTE: This function is used to provide a unique key for a given
- # Input to suppliment memoization
- def __hash__(self):
- return hash((self.fulfillment, self.fulfills))
-
- def to_dict(self):
- """Transforms the object to a Python dictionary.
-
- Note:
- If an Input hasn't been signed yet, this method returns a
- dictionary representation.
-
- Returns:
- dict: The Input as an alternative serialization format.
- """
- try:
- fulfillment = self.fulfillment.serialize_uri()
- except (TypeError, AttributeError, ASN1EncodeError, ASN1DecodeError):
- fulfillment = _fulfillment_to_details(self.fulfillment)
-
- try:
- # NOTE: `self.fulfills` can be `None` and that's fine
- fulfills = self.fulfills.to_dict()
- except AttributeError:
- fulfills = None
-
- input_ = {
- 'owners_before': self.owners_before,
- 'fulfills': fulfills,
- 'fulfillment': fulfillment,
- }
- return input_
-
- @classmethod
- def generate(cls, public_keys):
- # TODO: write docstring
- # The amount here does not really matter. It is only use on the
- # output data model but here we only care about the fulfillment
- output = Output.generate(public_keys, 1)
- return cls(output.fulfillment, public_keys)
-
- @classmethod
- def from_dict(cls, data):
- """Transforms a Python dictionary to an Input object.
-
- Note:
- Optionally, this method can also serialize a Cryptoconditions-
- Fulfillment that is not yet signed.
-
- Args:
- data (dict): The Input to be transformed.
-
- Returns:
- :class:`~planetmint.transactions.common.transaction.Input`
-
- Raises:
- InvalidSignature: If an Input's URI couldn't be parsed.
- """
- fulfillment = data['fulfillment']
- if not isinstance(fulfillment, (Fulfillment, type(None))):
- try:
- fulfillment = Fulfillment.from_uri(data['fulfillment'])
- except ASN1DecodeError:
- # TODO Remove as it is legacy code, and simply fall back on
- # ASN1DecodeError
- raise InvalidSignature("Fulfillment URI couldn't been parsed")
- except TypeError:
- # NOTE: See comment about this special case in
- # `Input.to_dict`
- fulfillment = _fulfillment_from_details(data['fulfillment'])
- fulfills = TransactionLink.from_dict(data['fulfills'])
- return cls(fulfillment, data['owners_before'], fulfills)
diff --git a/planetmint/transactions/common/memoize.py b/planetmint/transactions/common/memoize.py
deleted file mode 100644
index b814e51..0000000
--- a/planetmint/transactions/common/memoize.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import functools
-import codecs
-from functools import lru_cache
-
-
-class HDict(dict):
- def __hash__(self):
- return hash(codecs.decode(self['id'], 'hex'))
-
-
-@lru_cache(maxsize=16384)
-def from_dict(func, *args, **kwargs):
- return func(*args, **kwargs)
-
-
-def memoize_from_dict(func):
-
- @functools.wraps(func)
- def memoized_func(*args, **kwargs):
-
- if args[1].get('id', None):
- args = list(args)
- args[1] = HDict(args[1])
- new_args = tuple(args)
- return from_dict(func, *new_args, **kwargs)
- else:
- return func(*args, **kwargs)
-
- return memoized_func
-
-
-class ToDictWrapper():
- def __init__(self, tx):
- self.tx = tx
-
- def __eq__(self, other):
- return self.tx.id == other.tx.id
-
- def __hash__(self):
- return hash(self.tx.id)
-
-
-@lru_cache(maxsize=16384)
-def to_dict(func, tx_wrapped):
- return func(tx_wrapped.tx)
-
-
-def memoize_to_dict(func):
-
- @functools.wraps(func)
- def memoized_func(*args, **kwargs):
-
- if args[0].id:
- return to_dict(func, ToDictWrapper(args[0]))
- else:
- return func(*args, **kwargs)
-
- return memoized_func
diff --git a/planetmint/transactions/common/output.py b/planetmint/transactions/common/output.py
deleted file mode 100644
index 6462941..0000000
--- a/planetmint/transactions/common/output.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
-from functools import reduce
-
-import base58
-from cryptoconditions import Fulfillment, ThresholdSha256, Ed25519Sha256
-
-from planetmint.transactions.common.exceptions import AmountError
-from .utils import _fulfillment_to_details, _fulfillment_from_details
-
-class Output(object):
- """An Output is used to lock an asset.
-
- Wraps around a Crypto-condition Condition.
-
- Attributes:
- fulfillment (:class:`cryptoconditions.Fulfillment`): A Fulfillment
- to extract a Condition from.
- public_keys (:obj:`list` of :obj:`str`, optional): A list of
- owners before a Transaction was confirmed.
- """
-
- MAX_AMOUNT = 9 * 10 ** 18
-
- def __init__(self, fulfillment, public_keys=None, amount=1):
- """Create an instance of a :class:`~.Output`.
-
- Args:
- fulfillment (:class:`cryptoconditions.Fulfillment`): A
- Fulfillment to extract a Condition from.
- public_keys (:obj:`list` of :obj:`str`, optional): A list of
- owners before a Transaction was confirmed.
- amount (int): The amount of Assets to be locked with this
- Output.
-
- Raises:
- TypeError: if `public_keys` is not instance of `list`.
- """
- if not isinstance(public_keys, list) and public_keys is not None:
- raise TypeError('`public_keys` must be a list instance or None')
- if not isinstance(amount, int):
- raise TypeError('`amount` must be an int')
- if amount < 1:
- raise AmountError('`amount` must be greater than 0')
- if amount > self.MAX_AMOUNT:
- raise AmountError('`amount` must be <= %s' % self.MAX_AMOUNT)
-
- self.fulfillment = fulfillment
- self.amount = amount
- self.public_keys = public_keys
-
- def __eq__(self, other):
- # TODO: If `other !== Condition` return `False`
- return self.to_dict() == other.to_dict()
-
- def to_dict(self):
- """Transforms the object to a Python dictionary.
-
- Note:
- A dictionary serialization of the Input the Output was
- derived from is always provided.
-
- Returns:
- dict: The Output as an alternative serialization format.
- """
- # TODO FOR CC: It must be able to recognize a hashlock condition
- # and fulfillment!
- condition = {}
- try:
- condition['details'] = _fulfillment_to_details(self.fulfillment)
- except AttributeError:
- pass
-
- try:
- condition['uri'] = self.fulfillment.condition_uri
- except AttributeError:
- condition['uri'] = self.fulfillment
-
- output = {
- 'public_keys': self.public_keys,
- 'condition': condition,
- 'amount': str(self.amount),
- }
- return output
-
- @classmethod
- def generate(cls, public_keys, amount):
- """Generates a Output from a specifically formed tuple or list.
-
- Note:
- If a ThresholdCondition has to be generated where the threshold
- is always the number of subconditions it is split between, a
- list of the following structure is sufficient:
-
- [(address|condition)*, [(address|condition)*, ...], ...]
-
- Args:
- public_keys (:obj:`list` of :obj:`str`): The public key of
- the users that should be able to fulfill the Condition
- that is being created.
- amount (:obj:`int`): The amount locked by the Output.
-
- Returns:
- An Output that can be used in a Transaction.
-
- Raises:
- TypeError: If `public_keys` is not an instance of `list`.
- ValueError: If `public_keys` is an empty list.
- """
- threshold = len(public_keys)
- if not isinstance(amount, int):
- raise TypeError('`amount` must be a int')
- if amount < 1:
- raise AmountError('`amount` needs to be greater than zero')
- if not isinstance(public_keys, list):
- raise TypeError('`public_keys` must be an instance of list')
- if len(public_keys) == 0:
- raise ValueError('`public_keys` needs to contain at least one'
- 'owner')
- elif len(public_keys) == 1 and not isinstance(public_keys[0], list):
- if isinstance(public_keys[0], Fulfillment):
- ffill = public_keys[0]
- else:
- ffill = Ed25519Sha256(
- public_key=base58.b58decode(public_keys[0]))
- return cls(ffill, public_keys, amount=amount)
- else:
- initial_cond = ThresholdSha256(threshold=threshold)
- threshold_cond = reduce(cls._gen_condition, public_keys,
- initial_cond)
- return cls(threshold_cond, public_keys, amount=amount)
-
- @classmethod
- def _gen_condition(cls, initial, new_public_keys):
- """Generates ThresholdSha256 conditions from a list of new owners.
-
- Note:
- This method is intended only to be used with a reduce function.
- For a description on how to use this method, see
- :meth:`~.Output.generate`.
-
- Args:
- initial (:class:`cryptoconditions.ThresholdSha256`):
- A Condition representing the overall root.
- new_public_keys (:obj:`list` of :obj:`str`|str): A list of new
- owners or a single new owner.
-
- Returns:
- :class:`cryptoconditions.ThresholdSha256`:
- """
- try:
- threshold = len(new_public_keys)
- except TypeError:
- threshold = None
-
- if isinstance(new_public_keys, list) and len(new_public_keys) > 1:
- ffill = ThresholdSha256(threshold=threshold)
- reduce(cls._gen_condition, new_public_keys, ffill)
- elif isinstance(new_public_keys, list) and len(new_public_keys) <= 1:
- raise ValueError('Sublist cannot contain single owner')
- else:
- try:
- new_public_keys = new_public_keys.pop()
- except AttributeError:
- pass
- # NOTE: Instead of submitting base58 encoded addresses, a user
- # of this class can also submit fully instantiated
- # Cryptoconditions. In the case of casting
- # `new_public_keys` to a Ed25519Fulfillment with the
- # result of a `TypeError`, we're assuming that
- # `new_public_keys` is a Cryptocondition then.
- if isinstance(new_public_keys, Fulfillment):
- ffill = new_public_keys
- else:
- ffill = Ed25519Sha256(
- public_key=base58.b58decode(new_public_keys))
- initial.add_subfulfillment(ffill)
- return initial
-
- @classmethod
- def from_dict(cls, data):
- """Transforms a Python dictionary to an Output object.
-
- Note:
- To pass a serialization cycle multiple times, a
- Cryptoconditions Fulfillment needs to be present in the
- passed-in dictionary, as Condition URIs are not serializable
- anymore.
-
- Args:
- data (dict): The dict to be transformed.
-
- Returns:
- :class:`~planetmint.transactions.common.transaction.Output`
- """
- try:
- fulfillment = _fulfillment_from_details(data['condition']['details'])
- except KeyError:
- # NOTE: Hashlock condition case
- fulfillment = data['condition']['uri']
- try:
- amount = int(data['amount'])
- except ValueError:
- raise AmountError('Invalid amount: %s' % data['amount'])
- return cls(fulfillment, data['public_keys'], amount)
diff --git a/planetmint/transactions/common/schema/README.md b/planetmint/transactions/common/schema/README.md
deleted file mode 100644
index cb8db1f..0000000
--- a/planetmint/transactions/common/schema/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-
-
-# Introduction
-
-This directory contains the schemas for the different JSON documents Planetmint uses.
-
-The aim is to provide:
-
-- a strict definition of the data structures used in Planetmint,
-- a language-independent tool to validate the structure of incoming/outcoming
- data. (There are several ready to use
- [implementations](http://json-schema.org/implementations.html) written in
- different languages.)
-
-## Sources
-
-The files defining the JSON Schema for transactions (`transaction_*.yaml`)
-are based on the [Planetmint Transactions Specs](https://github.com/planetmint/BEPs/tree/master/tx-specs).
-If you want to add a new transaction version,
-you must write a spec for it first.
-(You can't change the JSON Schema files for old versions.
-Those were used to validate old transactions
-and are needed to re-check those transactions.)
-
-There used to be a file defining the JSON Schema for votes, named `vote.yaml`.
-It was used by Planetmint version 1.3.0 and earlier.
-If you want a copy of the latest `vote.yaml` file,
-then you can get it from the version 1.3.0 release on GitHub, at
-[https://github.com/planetmint/planetmint/blob/v1.3.0/planetmint/common/schema/vote.yaml](https://github.com/planetmint/planetmint/blob/v1.3.0/planetmint/common/schema/vote.yaml).
-
-## Learn about JSON Schema
-
-A good resource is [Understanding JSON Schema](http://spacetelescope.github.io/understanding-json-schema/index.html).
-It provides a *more accessible documentation for JSON schema* than the [specs](http://json-schema.org/documentation.html).
-
-## If it's supposed to be JSON, why's everything in YAML D:?
-
-YAML is great for its conciseness and friendliness towards human-editing in comparision to JSON.
-
-Although YAML is a superset of JSON, at the end of the day, JSON Schema processors, like
-[json-schema](http://python-jsonschema.readthedocs.io/en/latest/), take in a native object (e.g.
-Python dicts or JavaScript objects) as the schema used for validation. As long as we can serialize
-the YAML into what the JSON Schema processor expects (almost always as simple as loading the YAML
-like you would with a JSON file), it's the same as using JSON.
-
-Specific advantages of using YAML:
- - Legibility, especially when nesting
- - Multi-line string literals, that make it easy to include descriptions that can be [auto-generated
- into Sphinx documentation](/docs/server/generate_schema_documentation.py)
diff --git a/planetmint/transactions/common/schema/__init__.py b/planetmint/transactions/common/schema/__init__.py
deleted file mode 100644
index 041df5f..0000000
--- a/planetmint/transactions/common/schema/__init__.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
-"""Schema validation related functions and data"""
-import os.path
-import logging
-
-import jsonschema
-import yaml
-import rapidjson
-
-from planetmint.transactions.common.exceptions import SchemaValidationError
-
-
-logger = logging.getLogger(__name__)
-
-
-def _load_schema(name, version, path=__file__):
- """Load a schema from disk"""
- path = os.path.join(os.path.dirname(path), version, name + '.yaml')
- with open(path) as handle:
- schema = yaml.safe_load(handle)
- fast_schema = rapidjson.Validator(rapidjson.dumps(schema))
- return path, (schema, fast_schema)
-
-
-# TODO: make this an env var from a config file
-TX_SCHEMA_VERSION = 'v3.0'
-
-TX_SCHEMA_PATH, TX_SCHEMA_COMMON = _load_schema('transaction',
- TX_SCHEMA_VERSION)
-_, TX_SCHEMA_CREATE = _load_schema('transaction_create',
- TX_SCHEMA_VERSION)
-_, TX_SCHEMA_TRANSFER = _load_schema('transaction_transfer',
- TX_SCHEMA_VERSION)
-
-_, TX_SCHEMA_VALIDATOR_ELECTION = _load_schema('transaction_validator_election',
- TX_SCHEMA_VERSION)
-
-_, TX_SCHEMA_CHAIN_MIGRATION_ELECTION = _load_schema('transaction_chain_migration_election',
- TX_SCHEMA_VERSION)
-
-_, TX_SCHEMA_VOTE = _load_schema('transaction_vote', TX_SCHEMA_VERSION)
-
-
-def _validate_schema(schema, body):
- """Validate data against a schema"""
-
- # Note
- #
- # Schema validation is currently the major CPU bottleneck of
- # Planetmint. the `jsonschema` library validates python data structures
- # directly and produces nice error messages, but validation takes 4+ ms
- # per transaction which is pretty slow. The rapidjson library validates
- # much faster at 1.5ms, however it produces _very_ poor error messages.
- # For this reason we use both, rapidjson as an optimistic pathway and
- # jsonschema as a fallback in case there is a failure, so we can produce
- # a helpful error message.
-
- try:
- schema[1](rapidjson.dumps(body))
- except ValueError as exc:
- try:
- jsonschema.validate(body, schema[0])
- except jsonschema.ValidationError as exc2:
- raise SchemaValidationError(str(exc2)) from exc2
- logger.warning('code problem: jsonschema did not raise an exception, wheras rapidjson raised %s', exc)
- raise SchemaValidationError(str(exc)) from exc
-
-
-def validate_transaction_schema(tx):
- """Validate a transaction dict.
-
- TX_SCHEMA_COMMON contains properties that are common to all types of
- transaction. TX_SCHEMA_[TRANSFER|CREATE] add additional constraints on top.
- """
- _validate_schema(TX_SCHEMA_COMMON, tx)
- if tx['operation'] == 'TRANSFER':
- _validate_schema(TX_SCHEMA_TRANSFER, tx)
- else:
- _validate_schema(TX_SCHEMA_CREATE, tx)
diff --git a/planetmint/transactions/common/schema/v1.0/transaction.yaml b/planetmint/transactions/common/schema/v1.0/transaction.yaml
deleted file mode 100644
index 3546d78..0000000
--- a/planetmint/transactions/common/schema/v1.0/transaction.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-additionalProperties: false
-title: Transaction Schema
-required:
-- id
-- inputs
-- outputs
-- operation
-- metadata
-- asset
-- version
-properties:
- id:
- anyOf:
- - "$ref": "#/definitions/sha3_hexdigest"
- - type: 'null'
- operation:
- "$ref": "#/definitions/operation"
- asset:
- "$ref": "#/definitions/asset"
- inputs:
- type: array
- title: "Transaction inputs"
- items:
- "$ref": "#/definitions/input"
- outputs:
- type: array
- items:
- "$ref": "#/definitions/output"
- metadata:
- "$ref": "#/definitions/metadata"
- version:
- type: string
- pattern: "^1\\.0$"
-definitions:
- offset:
- type: integer
- minimum: 0
- base58:
- pattern: "[1-9a-zA-Z^OIl]{43,44}"
- type: string
- public_keys:
- anyOf:
- - type: array
- items:
- "$ref": "#/definitions/base58"
- - type: 'null'
- sha3_hexdigest:
- pattern: "[0-9a-f]{64}"
- type: string
- uuid4:
- pattern: "[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}"
- type: string
- operation:
- type: string
- enum:
- - CREATE
- - TRANSFER
- - GENESIS
- asset:
- type: object
- additionalProperties: false
- properties:
- id:
- "$ref": "#/definitions/sha3_hexdigest"
- data:
- anyOf:
- - type: object
- additionalProperties: true
- - type: 'null'
- output:
- type: object
- additionalProperties: false
- required:
- - amount
- - condition
- - public_keys
- properties:
- amount:
- type: string
- pattern: "^[0-9]{1,20}$"
- condition:
- type: object
- additionalProperties: false
- required:
- - details
- - uri
- properties:
- details:
- "$ref": "#/definitions/condition_details"
- uri:
- type: string
- pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
- (fpt=(ed25519|threshold)-sha-256(&)?|cost=[0-9]+(&)?|\
- subtypes=ed25519-sha-256(&)?){2,3}$"
- public_keys:
- "$ref": "#/definitions/public_keys"
- input:
- type: "object"
- additionalProperties: false
- required:
- - owners_before
- - fulfillment
- properties:
- owners_before:
- "$ref": "#/definitions/public_keys"
- fulfillment:
- anyOf:
- - type: string
- pattern: "^[a-zA-Z0-9_-]*$"
- - "$ref": "#/definitions/condition_details"
- fulfills:
- anyOf:
- - type: 'object'
- additionalProperties: false
- required:
- - output_index
- - transaction_id
- properties:
- output_index:
- "$ref": "#/definitions/offset"
- transaction_id:
- "$ref": "#/definitions/sha3_hexdigest"
- - type: 'null'
- metadata:
- anyOf:
- - type: object
- additionalProperties: true
- minProperties: 1
- - type: 'null'
- condition_details:
- anyOf:
- - type: object
- additionalProperties: false
- required:
- - type
- - public_key
- properties:
- type:
- type: string
- pattern: "^ed25519-sha-256$"
- public_key:
- "$ref": "#/definitions/base58"
- - type: object
- additionalProperties: false
- required:
- - type
- - threshold
- - subconditions
- properties:
- type:
- type: "string"
- pattern: "^threshold-sha-256$"
- threshold:
- type: integer
- minimum: 1
- maximum: 100
- subconditions:
- type: array
- items:
- "$ref": "#/definitions/condition_details"
diff --git a/planetmint/transactions/common/schema/v1.0/transaction_create.yaml b/planetmint/transactions/common/schema/v1.0/transaction_create.yaml
deleted file mode 100644
index d43b543..0000000
--- a/planetmint/transactions/common/schema/v1.0/transaction_create.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-title: Transaction Schema - CREATE/GENESIS specific constraints
-required:
-- asset
-- inputs
-properties:
- asset:
- additionalProperties: false
- properties:
- data:
- anyOf:
- - type: object
- additionalProperties: true
- - type: 'null'
- required:
- - data
- inputs:
- type: array
- title: "Transaction inputs"
- maxItems: 1
- minItems: 1
- items:
- type: "object"
- required:
- - fulfills
- properties:
- fulfills:
- type: "null"
diff --git a/planetmint/transactions/common/schema/v1.0/transaction_transfer.yaml b/planetmint/transactions/common/schema/v1.0/transaction_transfer.yaml
deleted file mode 100644
index 0ac4023..0000000
--- a/planetmint/transactions/common/schema/v1.0/transaction_transfer.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-title: Transaction Schema - TRANSFER specific properties
-required:
-- asset
-properties:
- asset:
- additionalProperties: false
- properties:
- id:
- "$ref": "#/definitions/sha3_hexdigest"
- required:
- - id
- inputs:
- type: array
- title: "Transaction inputs"
- minItems: 1
- items:
- type: "object"
- required:
- - fulfills
- properties:
- fulfills:
- type: "object"
-definitions:
- sha3_hexdigest:
- pattern: "[0-9a-f]{64}"
- type: string
diff --git a/planetmint/transactions/common/schema/v2.0/transaction.yaml b/planetmint/transactions/common/schema/v2.0/transaction.yaml
deleted file mode 100644
index 604302f..0000000
--- a/planetmint/transactions/common/schema/v2.0/transaction.yaml
+++ /dev/null
@@ -1,170 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-additionalProperties: false
-title: Transaction Schema
-required:
-- id
-- inputs
-- outputs
-- operation
-- metadata
-- asset
-- version
-properties:
- id:
- anyOf:
- - "$ref": "#/definitions/sha3_hexdigest"
- - type: 'null'
- operation:
- "$ref": "#/definitions/operation"
- asset:
- "$ref": "#/definitions/asset"
- inputs:
- type: array
- title: "Transaction inputs"
- items:
- "$ref": "#/definitions/input"
- outputs:
- type: array
- items:
- "$ref": "#/definitions/output"
- metadata:
- "$ref": "#/definitions/metadata"
- version:
- type: string
- pattern: "^2\\.0$"
-definitions:
- offset:
- type: integer
- minimum: 0
- base58:
- pattern: "[1-9a-zA-Z^OIl]{43,44}"
- type: string
- public_keys:
- anyOf:
- - type: array
- items:
- "$ref": "#/definitions/base58"
- - type: 'null'
- sha3_hexdigest:
- pattern: "[0-9a-f]{64}"
- type: string
- uuid4:
- pattern: "[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}"
- type: string
- operation:
- type: string
- enum:
- - CREATE
- - TRANSFER
- - VALIDATOR_ELECTION
- - CHAIN_MIGRATION_ELECTION
- - VOTE
- asset:
- type: object
- additionalProperties: false
- properties:
- id:
- "$ref": "#/definitions/sha3_hexdigest"
- data:
- anyOf:
- - type: object
- additionalProperties: true
- - type: 'null'
- output:
- type: object
- additionalProperties: false
- required:
- - amount
- - condition
- - public_keys
- properties:
- amount:
- type: string
- pattern: "^[0-9]{1,20}$"
- condition:
- type: object
- additionalProperties: false
- required:
- - details
- - uri
- properties:
- details:
- "$ref": "#/definitions/condition_details"
- uri:
- type: string
- pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
- (fpt=(ed25519|threshold)-sha-256(&)?|cost=[0-9]+(&)?|\
- subtypes=ed25519-sha-256(&)?){2,3}$"
- public_keys:
- "$ref": "#/definitions/public_keys"
- input:
- type: "object"
- additionalProperties: false
- required:
- - owners_before
- - fulfillment
- properties:
- owners_before:
- "$ref": "#/definitions/public_keys"
- fulfillment:
- anyOf:
- - type: string
- pattern: "^[a-zA-Z0-9_-]*$"
- - "$ref": "#/definitions/condition_details"
- fulfills:
- anyOf:
- - type: 'object'
- additionalProperties: false
- required:
- - output_index
- - transaction_id
- properties:
- output_index:
- "$ref": "#/definitions/offset"
- transaction_id:
- "$ref": "#/definitions/sha3_hexdigest"
- - type: 'null'
- metadata:
- anyOf:
- - type: object
- additionalProperties: true
- minProperties: 1
- - type: 'null'
- condition_details:
- anyOf:
- - type: object
- additionalProperties: false
- required:
- - type
- - public_key
- properties:
- type:
- type: string
- pattern: "^ed25519-sha-256$"
- public_key:
- "$ref": "#/definitions/base58"
- - type: object
- additionalProperties: false
- required:
- - type
- - threshold
- - subconditions
- properties:
- type:
- type: "string"
- pattern: "^threshold-sha-256$"
- threshold:
- type: integer
- minimum: 1
- maximum: 100
- subconditions:
- type: array
- items:
- "$ref": "#/definitions/condition_details"
diff --git a/planetmint/transactions/common/schema/v2.0/transaction_chain_migration_election.yaml b/planetmint/transactions/common/schema/v2.0/transaction_chain_migration_election.yaml
deleted file mode 100644
index d5c5f4a..0000000
--- a/planetmint/transactions/common/schema/v2.0/transaction_chain_migration_election.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-title: Chain Migration Election Schema - Propose a halt in block production to allow for a version change
-required:
-- operation
-- asset
-- outputs
-properties:
- operation:
- type: string
- value: "CHAIN_MIGRATION_ELECTION"
- asset:
- additionalProperties: false
- properties:
- data:
- additionalProperties: false
- properties:
- seed:
- type: string
- required:
- - data
- outputs:
- type: array
- items:
- "$ref": "#/definitions/output"
-definitions:
- output:
- type: object
- properties:
- condition:
- type: object
- required:
- - uri
- properties:
- uri:
- type: string
- pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
- (fpt=ed25519-sha-256(&)?|cost=[0-9]+(&)?|\
- subtypes=ed25519-sha-256(&)?){2,3}$"
diff --git a/planetmint/transactions/common/schema/v2.0/transaction_create.yaml b/planetmint/transactions/common/schema/v2.0/transaction_create.yaml
deleted file mode 100644
index d3c7ea2..0000000
--- a/planetmint/transactions/common/schema/v2.0/transaction_create.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-title: Transaction Schema - CREATE specific constraints
-required:
-- asset
-- inputs
-properties:
- asset:
- additionalProperties: false
- properties:
- data:
- anyOf:
- - type: object
- additionalProperties: true
- - type: 'null'
- required:
- - data
- inputs:
- type: array
- title: "Transaction inputs"
- maxItems: 1
- minItems: 1
- items:
- type: "object"
- required:
- - fulfills
- properties:
- fulfills:
- type: "null"
diff --git a/planetmint/transactions/common/schema/v2.0/transaction_transfer.yaml b/planetmint/transactions/common/schema/v2.0/transaction_transfer.yaml
deleted file mode 100644
index 0ac4023..0000000
--- a/planetmint/transactions/common/schema/v2.0/transaction_transfer.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-title: Transaction Schema - TRANSFER specific properties
-required:
-- asset
-properties:
- asset:
- additionalProperties: false
- properties:
- id:
- "$ref": "#/definitions/sha3_hexdigest"
- required:
- - id
- inputs:
- type: array
- title: "Transaction inputs"
- minItems: 1
- items:
- type: "object"
- required:
- - fulfills
- properties:
- fulfills:
- type: "object"
-definitions:
- sha3_hexdigest:
- pattern: "[0-9a-f]{64}"
- type: string
diff --git a/planetmint/transactions/common/schema/v2.0/transaction_validator_election.yaml b/planetmint/transactions/common/schema/v2.0/transaction_validator_election.yaml
deleted file mode 100644
index f93353c..0000000
--- a/planetmint/transactions/common/schema/v2.0/transaction_validator_election.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-title: Validator Election Schema - Propose a change to validator set
-required:
-- operation
-- asset
-- outputs
-properties:
- operation:
- type: string
- value: "VALIDATOR_ELECTION"
- asset:
- additionalProperties: false
- properties:
- data:
- additionalProperties: false
- properties:
- node_id:
- type: string
- seed:
- type: string
- public_key:
- type: object
- additionalProperties: false
- required:
- - value
- - type
- properties:
- value:
- type: string
- type:
- type: string
- enum:
- - ed25519-base16
- - ed25519-base32
- - ed25519-base64
- power:
- "$ref": "#/definitions/positiveInteger"
- required:
- - node_id
- - public_key
- - power
- required:
- - data
- outputs:
- type: array
- items:
- "$ref": "#/definitions/output"
-definitions:
- output:
- type: object
- properties:
- condition:
- type: object
- required:
- - uri
- properties:
- uri:
- type: string
- pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
- (fpt=ed25519-sha-256(&)?|cost=[0-9]+(&)?|\
- subtypes=ed25519-sha-256(&)?){2,3}$"
diff --git a/planetmint/transactions/common/schema/v2.0/transaction_vote.yaml b/planetmint/transactions/common/schema/v2.0/transaction_vote.yaml
deleted file mode 100644
index 64ed6ee..0000000
--- a/planetmint/transactions/common/schema/v2.0/transaction_vote.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-title: Vote Schema - Vote on an election
-required:
-- operation
-- outputs
-properties:
- operation:
- type: string
- value: "VOTE"
- outputs:
- type: array
- items:
- "$ref": "#/definitions/output"
-definitions:
- output:
- type: object
- properties:
- condition:
- type: object
- required:
- - uri
- properties:
- uri:
- type: string
- pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
- (fpt=ed25519-sha-256(&)?|cost=[0-9]+(&)?|\
- subtypes=ed25519-sha-256(&)?){2,3}$"
diff --git a/planetmint/transactions/common/schema/v3.0/transaction.yaml b/planetmint/transactions/common/schema/v3.0/transaction.yaml
deleted file mode 100644
index ca64ce9..0000000
--- a/planetmint/transactions/common/schema/v3.0/transaction.yaml
+++ /dev/null
@@ -1,174 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-additionalProperties: false
-title: Transaction Schema
-required:
-- id
-- inputs
-- outputs
-- operation
-- metadata
-- assets
-- version
-properties:
- id:
- anyOf:
- - "$ref": "#/definitions/sha3_hexdigest"
- - type: 'null'
- operation:
- "$ref": "#/definitions/operation"
- assets:
- type: array
- items:
- "$ref": "#/definitions/asset"
- inputs:
- type: array
- title: "Transaction inputs"
- items:
- "$ref": "#/definitions/input"
- outputs:
- type: array
- items:
- "$ref": "#/definitions/output"
- metadata:
- "$ref": "#/definitions/metadata"
- version:
- type: string
- pattern: "^2\\.0$"
-definitions:
- offset:
- type: integer
- minimum: 0
- base58:
- pattern: "[1-9a-zA-Z^OIl]{43,44}"
- type: string
- public_keys:
- anyOf:
- - type: array
- items:
- "$ref": "#/definitions/base58"
- - type: 'null'
- sha3_hexdigest:
- pattern: "[0-9a-f]{64}"
- type: string
- uuid4:
- pattern: "[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}"
- type: string
- operation:
- type: string
- enum:
- - CREATE
- - TRANSFER
- - VALIDATOR_ELECTION
- - CHAIN_MIGRATION_ELECTION
- - VOTE
- - COMPOSE
- - DECOMPOSE
- asset:
- type: object
- additionalProperties: false
- properties:
- id:
- "$ref": "#/definitions/sha3_hexdigest"
- data:
- anyOf:
- - type: object
- additionalProperties: true
- - type: 'null'
- output:
- type: object
- additionalProperties: false
- required:
- - amount
- - condition
- - public_keys
- properties:
- amount:
- type: string
- pattern: "^[0-9]{1,20}$"
- condition:
- type: object
- additionalProperties: false
- required:
- - details
- - uri
- properties:
- details:
- "$ref": "#/definitions/condition_details"
- uri:
- type: string
- pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
- (fpt=(ed25519|threshold)-sha-256(&)?|cost=[0-9]+(&)?|\
- subtypes=ed25519-sha-256(&)?){2,3}$"
- public_keys:
- "$ref": "#/definitions/public_keys"
- input:
- type: "object"
- additionalProperties: false
- required:
- - owners_before
- - fulfillment
- properties:
- owners_before:
- "$ref": "#/definitions/public_keys"
- fulfillment:
- anyOf:
- - type: string
- pattern: "^[a-zA-Z0-9_-]*$"
- - "$ref": "#/definitions/condition_details"
- fulfills:
- anyOf:
- - type: 'object'
- additionalProperties: false
- required:
- - output_index
- - transaction_id
- properties:
- output_index:
- "$ref": "#/definitions/offset"
- transaction_id:
- "$ref": "#/definitions/sha3_hexdigest"
- - type: 'null'
- metadata:
- anyOf:
- - type: object
- additionalProperties: true
- minProperties: 1
- - type: 'null'
- condition_details:
- anyOf:
- - type: object
- additionalProperties: false
- required:
- - type
- - public_key
- properties:
- type:
- type: string
- pattern: "^ed25519-sha-256$"
- public_key:
- "$ref": "#/definitions/base58"
- - type: object
- additionalProperties: false
- required:
- - type
- - threshold
- - subconditions
- properties:
- type:
- type: "string"
- pattern: "^threshold-sha-256$"
- threshold:
- type: integer
- minimum: 1
- maximum: 100
- subconditions:
- type: array
- items:
- "$ref": "#/definitions/condition_details"
diff --git a/planetmint/transactions/common/schema/v3.0/transaction_chain_migration_election.yaml b/planetmint/transactions/common/schema/v3.0/transaction_chain_migration_election.yaml
deleted file mode 100644
index 932c7b1..0000000
--- a/planetmint/transactions/common/schema/v3.0/transaction_chain_migration_election.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-title: Chain Migration Election Schema - Propose a halt in block production to allow for a version change
-required:
-- operation
-- assets
-- outputs
-properties:
- operation:
- type: string
- value: "CHAIN_MIGRATION_ELECTION"
- assets:
- type: array
- minItems: 1
- maxItems: 1
- items:
- "$ref": "#/definitions/asset"
- outputs:
- type: array
- items:
- "$ref": "#/definitions/output"
-definitions:
- asset:
- additionalProperties: false
- properties:
- data:
- additionalProperties: false
- properties:
- seed:
- type: string
- required:
- - data
- output:
- type: object
- properties:
- condition:
- type: object
- required:
- - uri
- properties:
- uri:
- type: string
- pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
- (fpt=ed25519-sha-256(&)?|cost=[0-9]+(&)?|\
- subtypes=ed25519-sha-256(&)?){2,3}$"
diff --git a/planetmint/transactions/common/schema/v3.0/transaction_create.yaml b/planetmint/transactions/common/schema/v3.0/transaction_create.yaml
deleted file mode 100644
index 3a34a46..0000000
--- a/planetmint/transactions/common/schema/v3.0/transaction_create.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-title: Transaction Schema - CREATE specific constraints
-required:
-- assets
-- inputs
-properties:
- assets:
- type: array
- minItems: 1
- maxItems: 1
- items:
- "$ref": "#/definitions/asset"
- inputs:
- type: array
- title: "Transaction inputs"
- maxItems: 1
- minItems: 1
- items:
- type: "object"
- required:
- - fulfills
- properties:
- fulfills:
- type: "null"
-definitions:
- asset:
- additionalProperties: false
- properties:
- data:
- anyOf:
- - type: object
- additionalProperties: true
- - type: 'null'
- required:
- - data
\ No newline at end of file
diff --git a/planetmint/transactions/common/schema/v3.0/transaction_transfer.yaml b/planetmint/transactions/common/schema/v3.0/transaction_transfer.yaml
deleted file mode 100644
index 1bc74e5..0000000
--- a/planetmint/transactions/common/schema/v3.0/transaction_transfer.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-title: Transaction Schema - TRANSFER specific properties
-required:
-- assets
-properties:
- assets:
- type: array
- minItems: 1
- items:
- "$ref": "#/definitions/asset"
- inputs:
- type: array
- title: "Transaction inputs"
- minItems: 1
- items:
- type: "object"
- required:
- - fulfills
- properties:
- fulfills:
- type: "object"
-definitions:
- sha3_hexdigest:
- pattern: "[0-9a-f]{64}"
- type: string
- asset:
- additionalProperties: false
- properties:
- id:
- "$ref": "#/definitions/sha3_hexdigest"
- required:
- - id
diff --git a/planetmint/transactions/common/schema/v3.0/transaction_validator_election.yaml b/planetmint/transactions/common/schema/v3.0/transaction_validator_election.yaml
deleted file mode 100644
index 0d7c93b..0000000
--- a/planetmint/transactions/common/schema/v3.0/transaction_validator_election.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-title: Validator Election Schema - Propose a change to validator set
-required:
-- operation
-- assets
-- outputs
-properties:
- operation:
- type: string
- value: "VALIDATOR_ELECTION"
- assets:
- type: array
- minItems: 1
- maxItems: 1
- items:
- "$ref": "#/definitions/asset"
- outputs:
- type: array
- items:
- "$ref": "#/definitions/output"
-definitions:
- output:
- type: object
- properties:
- condition:
- type: object
- required:
- - uri
- properties:
- uri:
- type: string
- pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
- (fpt=ed25519-sha-256(&)?|cost=[0-9]+(&)?|\
- subtypes=ed25519-sha-256(&)?){2,3}$"
- asset:
- additionalProperties: false
- properties:
- data:
- additionalProperties: false
- properties:
- node_id:
- type: string
- seed:
- type: string
- public_key:
- type: object
- additionalProperties: false
- required:
- - value
- - type
- properties:
- value:
- type: string
- type:
- type: string
- enum:
- - ed25519-base16
- - ed25519-base32
- - ed25519-base64
- power:
- "$ref": "#/definitions/positiveInteger"
- required:
- - node_id
- - public_key
- - power
- required:
- - data
diff --git a/planetmint/transactions/common/schema/v3.0/transaction_vote.yaml b/planetmint/transactions/common/schema/v3.0/transaction_vote.yaml
deleted file mode 100644
index 64ed6ee..0000000
--- a/planetmint/transactions/common/schema/v3.0/transaction_vote.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
----
-"$schema": "http://json-schema.org/draft-04/schema#"
-type: object
-title: Vote Schema - Vote on an election
-required:
-- operation
-- outputs
-properties:
- operation:
- type: string
- value: "VOTE"
- outputs:
- type: array
- items:
- "$ref": "#/definitions/output"
-definitions:
- output:
- type: object
- properties:
- condition:
- type: object
- required:
- - uri
- properties:
- uri:
- type: string
- pattern: "^ni:///sha-256;([a-zA-Z0-9_-]{0,86})[?]\
- (fpt=ed25519-sha-256(&)?|cost=[0-9]+(&)?|\
- subtypes=ed25519-sha-256(&)?){2,3}$"
diff --git a/planetmint/transactions/common/transaction.py b/planetmint/transactions/common/transaction.py
deleted file mode 100644
index 45d1051..0000000
--- a/planetmint/transactions/common/transaction.py
+++ /dev/null
@@ -1,761 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
-"""Transaction related models to parse and construct transaction
-payloads.
-
-Attributes:
- UnspentOutput (namedtuple): Object holding the information
- representing an unspent output.
-
-"""
-from collections import namedtuple
-from copy import deepcopy
-from functools import lru_cache
-import rapidjson
-
-import base58
-from cryptoconditions import Fulfillment, ThresholdSha256, Ed25519Sha256
-from cryptoconditions.exceptions import (
- ParsingError, ASN1DecodeError, ASN1EncodeError)
-try:
- from hashlib import sha3_256
-except ImportError:
- from sha3 import sha3_256
-
-from planetmint.transactions.common.crypto import PrivateKey, hash_data
-from planetmint.transactions.common.exceptions import (
- KeypairMismatchException, InputDoesNotExist, DoubleSpend,
- InvalidHash, InvalidSignature, AmountError, AssetIdMismatch)
-from planetmint.transactions.common.utils import serialize
-from .memoize import memoize_from_dict, memoize_to_dict
-from .input import Input
-from .output import Output
-from .transaction_link import TransactionLink
-
-UnspentOutput = namedtuple(
- 'UnspentOutput', (
- # TODO 'utxo_hash': sha3_256(f'{txid}{output_index}'.encode())
- # 'utxo_hash', # noqa
- 'transaction_id',
- 'output_index',
- 'amount',
- 'asset_id',
- 'condition_uri',
- )
-)
-
-class Transaction(object):
- """A Transaction is used to create and transfer assets.
-
- Note:
- For adding Inputs and Outputs, this class provides methods
- to do so.
-
- Attributes:
- operation (str): Defines the operation of the Transaction.
- inputs (:obj:`list` of :class:`~planetmint.transactions.common.
- transaction.Input`, optional): Define the assets to
- spend.
- outputs (:obj:`list` of :class:`~planetmint.transactions.common.
- transaction.Output`, optional): Define the assets to lock.
- assets (:obj:`list` of :obj:`dict`): Asset payload for this Transaction. ``CREATE``
- Transactions require a list containing exactly one dict with a ``data``
- property while ``TRANSFER`` Transactions require a list containing a dict with a
- ``id`` property.
- metadata (dict):
- Metadata to be stored along with the Transaction.
- version (string): Defines the version number of a Transaction.
- """
-
- CREATE = 'CREATE'
- TRANSFER = 'TRANSFER'
- ALLOWED_OPERATIONS = (CREATE, TRANSFER)
- VERSION = '2.0'
-
- def __init__(self, operation, assets, inputs=None, outputs=None,
- metadata=None, version=None, hash_id=None, tx_dict=None):
- """The constructor allows to create a customizable Transaction.
-
- Note:
- When no `version` is provided, one is being
- generated by this method.
-
- Args:
- operation (str): Defines the operation of the Transaction.
- assets (:obj:`list` of :obj:`dict`): Asset payload for this Transaction.
- inputs (:obj:`list` of :class:`~planetmint.transactions.common.
- transaction.Input`, optional): Define the assets to
- outputs (:obj:`list` of :class:`~planetmint.transactions.common.
- transaction.Output`, optional): Define the assets to
- lock.
- metadata (dict): Metadata to be stored along with the
- Transaction.
- version (string): Defines the version number of a Transaction.
- hash_id (string): Hash id of the transaction.
- """
- if operation not in self.ALLOWED_OPERATIONS:
- allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS)
- raise ValueError('`operation` must be one of {}'
- .format(allowed_ops))
-
- # Asset payloads for 'CREATE' operations must be None or
- # dicts holding a `data` property. Asset payloads for 'TRANSFER'
- # operations must be dicts holding an `id` property.
-
- if (operation == self.CREATE and
- assets is not None and not (isinstance(assets, list) and 'data' in assets[0])):
- raise TypeError(('`asset` must be None or a list of length 1 with a dict holding a `data` '
- " property instance for '{}' Transactions".format(operation)))
- elif (operation == self.TRANSFER and
- assets is not None and not (isinstance(assets, list) and all('id' in asset for asset in assets))):
- raise TypeError(('`asset` must be a list containing dicts holding an `id` property'))
-
- if outputs and not isinstance(outputs, list):
- raise TypeError('`outputs` must be a list instance or None')
-
- if inputs and not isinstance(inputs, list):
- raise TypeError('`inputs` must be a list instance or None')
-
- if metadata is not None and not isinstance(metadata, dict):
- raise TypeError('`metadata` must be a dict or None')
-
- self.version = version if version is not None else self.VERSION
- self.operation = operation
- self.assets = assets
- self.inputs = inputs or []
- self.outputs = outputs or []
- self.metadata = metadata
- self._id = hash_id
- self.tx_dict = tx_dict
-
- @property
- def unspent_outputs(self):
- """UnspentOutput: The outputs of this transaction, in a data
- structure containing relevant information for storing them in
- a UTXO set, and performing validation.
- """
- # TODO: figure out how these must be structured for multi asset support
- if self.operation == self.CREATE:
- self._asset_id = self._id
- elif self.operation == self.TRANSFER:
- # TODO: check if this will also work for multiple assets per tx
- self._asset_id = [asset['id'] for asset in self.assets][0]
- return (UnspentOutput(
- transaction_id=self._id,
- output_index=output_index,
- amount=output.amount,
- asset_id=self._asset_id,
- condition_uri=output.fulfillment.condition_uri,
- ) for output_index, output in enumerate(self.outputs))
-
- @property
- def spent_outputs(self):
- """Tuple of :obj:`dict`: Inputs of this transaction. Each input
- is represented as a dictionary containing a transaction id and
- output index.
- """
- return (
- input_.fulfills.to_dict()
- for input_ in self.inputs if input_.fulfills
- )
-
- @property
- def serialized(self):
- return Transaction._to_str(self.to_dict())
-
- def _hash(self):
- self._id = hash_data(self.serialized)
-
- def __eq__(self, other):
- try:
- other = other.to_dict()
- except AttributeError:
- return False
- return self.to_dict() == other
-
- def to_inputs(self, indices=None):
- """Converts a Transaction's outputs to spendable inputs.
-
- Note:
- Takes the Transaction's outputs and derives inputs
- from that can then be passed into `Transaction.transfer` as
- `inputs`.
- A list of integers can be passed to `indices` that
- defines which outputs should be returned as inputs.
- If no `indices` are passed (empty list or None) all
- outputs of the Transaction are returned.
-
- Args:
- indices (:obj:`list` of int): Defines which
- outputs should be returned as inputs.
-
- Returns:
- :obj:`list` of :class:`~planetmint.transactions.common.transaction.
- Input`
- """
- # NOTE: If no indices are passed, we just assume to take all outputs
- # as inputs.
- indices = indices or range(len(self.outputs))
- return [
- Input(self.outputs[idx].fulfillment,
- self.outputs[idx].public_keys,
- TransactionLink(self.id, idx))
- for idx in indices
- ]
-
- def add_input(self, input_):
- """Adds an input to a Transaction's list of inputs.
-
- Args:
- input_ (:class:`~planetmint.transactions.common.transaction.
- Input`): An Input to be added to the Transaction.
- """
- if not isinstance(input_, Input):
- raise TypeError('`input_` must be a Input instance')
- self.inputs.append(input_)
-
- def add_output(self, output):
- """Adds an output to a Transaction's list of outputs.
-
- Args:
- output (:class:`~planetmint.transactions.common.transaction.
- Output`): An Output to be added to the
- Transaction.
- """
- if not isinstance(output, Output):
- raise TypeError('`output` must be an Output instance or None')
- self.outputs.append(output)
-
- def sign(self, private_keys):
- """Fulfills a previous Transaction's Output by signing Inputs.
-
- Note:
- This method works only for the following Cryptoconditions
- currently:
- - Ed25519Fulfillment
- - ThresholdSha256
- Furthermore, note that all keys required to fully sign the
- Transaction have to be passed to this method. A subset of all
- will cause this method to fail.
-
- Args:
- private_keys (:obj:`list` of :obj:`str`): A complete list of
- all private keys needed to sign all Fulfillments of this
- Transaction.
-
- Returns:
- :class:`~planetmint.transactions.common.transaction.Transaction`
- """
- # TODO: Singing should be possible with at least one of all private
- # keys supplied to this method.
- if private_keys is None or not isinstance(private_keys, list):
- raise TypeError('`private_keys` must be a list instance')
-
- # NOTE: Generate public keys from private keys and match them in a
- # dictionary:
- # key: public_key
- # value: private_key
- def gen_public_key(private_key):
- # TODO FOR CC: Adjust interface so that this function becomes
- # unnecessary
-
- # cc now provides a single method `encode` to return the key
- # in several different encodings.
- public_key = private_key.get_verifying_key().encode()
- # Returned values from cc are always bytestrings so here we need
- # to decode to convert the bytestring into a python str
- return public_key.decode()
-
- key_pairs = {gen_public_key(PrivateKey(private_key)):
- PrivateKey(private_key) for private_key in private_keys}
-
- tx_dict = self.to_dict()
- tx_dict = Transaction._remove_signatures(tx_dict)
- tx_serialized = Transaction._to_str(tx_dict)
- for i, input_ in enumerate(self.inputs):
- self.inputs[i] = self._sign_input(input_, tx_serialized, key_pairs)
-
- self._hash()
-
- return self
-
- @classmethod
- def _sign_input(cls, input_, message, key_pairs):
- """Signs a single Input.
-
- Note:
- This method works only for the following Cryptoconditions
- currently:
- - Ed25519Fulfillment
- - ThresholdSha256.
-
- Args:
- input_ (:class:`~planetmint.transactions.common.transaction.
- Input`) The Input to be signed.
- message (str): The message to be signed
- key_pairs (dict): The keys to sign the Transaction with.
- """
- if isinstance(input_.fulfillment, Ed25519Sha256):
- return cls._sign_simple_signature_fulfillment(input_, message,
- key_pairs)
- elif isinstance(input_.fulfillment, ThresholdSha256):
- return cls._sign_threshold_signature_fulfillment(input_, message,
- key_pairs)
- else:
- raise ValueError(
- 'Fulfillment couldn\'t be matched to '
- 'Cryptocondition fulfillment type.')
-
- @classmethod
- def _sign_simple_signature_fulfillment(cls, input_, message, key_pairs):
- """Signs a Ed25519Fulfillment.
-
- Args:
- input_ (:class:`~planetmint.transactions.common.transaction.
- Input`) The input to be signed.
- message (str): The message to be signed
- key_pairs (dict): The keys to sign the Transaction with.
- """
- # NOTE: To eliminate the dangers of accidentally signing a condition by
- # reference, we remove the reference of input_ here
- # intentionally. If the user of this class knows how to use it,
- # this should never happen, but then again, never say never.
- input_ = deepcopy(input_)
- public_key = input_.owners_before[0]
- message = sha3_256(message.encode())
- if input_.fulfills:
- message.update('{}{}'.format(
- input_.fulfills.txid, input_.fulfills.output).encode())
-
- try:
- # cryptoconditions makes no assumptions of the encoding of the
- # message to sign or verify. It only accepts bytestrings
- input_.fulfillment.sign(
- message.digest(), base58.b58decode(key_pairs[public_key].encode()))
- except KeyError:
- raise KeypairMismatchException('Public key {} is not a pair to '
- 'any of the private keys'
- .format(public_key))
- return input_
-
- @classmethod
- def _sign_threshold_signature_fulfillment(cls, input_, message, key_pairs):
- """Signs a ThresholdSha256.
-
- Args:
- input_ (:class:`~planetmint.transactions.common.transaction.
- Input`) The Input to be signed.
- message (str): The message to be signed
- key_pairs (dict): The keys to sign the Transaction with.
- """
- input_ = deepcopy(input_)
- message = sha3_256(message.encode())
- if input_.fulfills:
- message.update('{}{}'.format(
- input_.fulfills.txid, input_.fulfills.output).encode())
-
- for owner_before in set(input_.owners_before):
- # TODO: CC should throw a KeypairMismatchException, instead of
- # our manual mapping here
-
- # TODO FOR CC: Naming wise this is not so smart,
- # `get_subcondition` in fact doesn't return a
- # condition but a fulfillment
-
- # TODO FOR CC: `get_subcondition` is singular. One would not
- # expect to get a list back.
- ccffill = input_.fulfillment
- subffills = ccffill.get_subcondition_from_vk(
- base58.b58decode(owner_before))
- if not subffills:
- raise KeypairMismatchException('Public key {} cannot be found '
- 'in the fulfillment'
- .format(owner_before))
- try:
- private_key = key_pairs[owner_before]
- except KeyError:
- raise KeypairMismatchException('Public key {} is not a pair '
- 'to any of the private keys'
- .format(owner_before))
-
- # cryptoconditions makes no assumptions of the encoding of the
- # message to sign or verify. It only accepts bytestrings
- for subffill in subffills:
- subffill.sign(
- message.digest(), base58.b58decode(private_key.encode()))
- return input_
-
- def inputs_valid(self, outputs=None):
- """Validates the Inputs in the Transaction against given
- Outputs.
-
- Note:
- Given a `CREATE` Transaction is passed,
- dummy values for Outputs are submitted for validation that
- evaluate parts of the validation-checks to `True`.
-
- Args:
- outputs (:obj:`list` of :class:`~planetmint.transactions.common.
- transaction.Output`): A list of Outputs to check the
- Inputs against.
-
- Returns:
- bool: If all Inputs are valid.
- """
- if self.operation == self.CREATE:
- # NOTE: Since in the case of a `CREATE`-transaction we do not have
- # to check for outputs, we're just submitting dummy
- # values to the actual method. This simplifies it's logic
- # greatly, as we do not have to check against `None` values.
- return self._inputs_valid(['dummyvalue'
- for _ in self.inputs])
- elif self.operation == self.TRANSFER:
- return self._inputs_valid([output.fulfillment.condition_uri
- for output in outputs])
- else:
- allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS)
- raise TypeError('`operation` must be one of {}'
- .format(allowed_ops))
-
- def _inputs_valid(self, output_condition_uris):
- """Validates an Input against a given set of Outputs.
-
- Note:
- The number of `output_condition_uris` must be equal to the
- number of Inputs a Transaction has.
-
- Args:
- output_condition_uris (:obj:`list` of :obj:`str`): A list of
- Outputs to check the Inputs against.
-
- Returns:
- bool: If all Outputs are valid.
- """
-
- if len(self.inputs) != len(output_condition_uris):
- raise ValueError('Inputs and '
- 'output_condition_uris must have the same count')
-
- tx_dict = self.tx_dict if self.tx_dict else self.to_dict()
- tx_dict = Transaction._remove_signatures(tx_dict)
- tx_dict['id'] = None
- tx_serialized = Transaction._to_str(tx_dict)
-
- def validate(i, output_condition_uri=None):
- """Validate input against output condition URI"""
- return self._input_valid(self.inputs[i], self.operation,
- tx_serialized, output_condition_uri)
-
- return all(validate(i, cond)
- for i, cond in enumerate(output_condition_uris))
-
- @lru_cache(maxsize=16384)
- def _input_valid(self, input_, operation, message, output_condition_uri=None):
- """Validates a single Input against a single Output.
-
- Note:
- In case of a `CREATE` Transaction, this method
- does not validate against `output_condition_uri`.
-
- Args:
- input_ (:class:`~planetmint.transactions.common.transaction.
- Input`) The Input to be signed.
- operation (str): The type of Transaction.
- message (str): The fulfillment message.
- output_condition_uri (str, optional): An Output to check the
- Input against.
-
- Returns:
- bool: If the Input is valid.
- """
- ccffill = input_.fulfillment
- try:
- parsed_ffill = Fulfillment.from_uri(ccffill.serialize_uri())
- except (TypeError, ValueError,
- ParsingError, ASN1DecodeError, ASN1EncodeError):
- return False
-
- if operation == self.CREATE:
- # NOTE: In the case of a `CREATE` transaction, the
- # output is always valid.
- output_valid = True
- else:
- output_valid = output_condition_uri == ccffill.condition_uri
-
- message = sha3_256(message.encode())
- if input_.fulfills:
- message.update('{}{}'.format(
- input_.fulfills.txid, input_.fulfills.output).encode())
-
- # NOTE: We pass a timestamp to `.validate`, as in case of a timeout
- # condition we'll have to validate against it
-
- # cryptoconditions makes no assumptions of the encoding of the
- # message to sign or verify. It only accepts bytestrings
- ffill_valid = parsed_ffill.validate(message=message.digest())
- return output_valid and ffill_valid
-
- # This function is required by `lru_cache` to create a key for memoization
- def __hash__(self):
- return hash(self.id)
-
- @memoize_to_dict
- def to_dict(self):
- """Transforms the object to a Python dictionary.
-
- Returns:
- dict: The Transaction as an alternative serialization format.
- """
- return {
- 'inputs': [input_.to_dict() for input_ in self.inputs],
- 'outputs': [output.to_dict() for output in self.outputs],
- 'operation': str(self.operation),
- 'metadata': self.metadata,
- 'assets': [asset for asset in self.assets],
- 'version': self.version,
- 'id': self._id,
- }
-
- @staticmethod
- # TODO: Remove `_dict` prefix of variable.
- def _remove_signatures(tx_dict):
- """Takes a Transaction dictionary and removes all signatures.
-
- Args:
- tx_dict (dict): The Transaction to remove all signatures from.
-
- Returns:
- dict
-
- """
- # NOTE: We remove the reference since we need `tx_dict` only for the
- # transaction's hash
- tx_dict = deepcopy(tx_dict)
- for input_ in tx_dict['inputs']:
- # NOTE: Not all Cryptoconditions return a `signature` key (e.g.
- # ThresholdSha256), so setting it to `None` in any
- # case could yield incorrect signatures. This is why we only
- # set it to `None` if it's set in the dict.
- input_['fulfillment'] = None
- return tx_dict
-
- @staticmethod
- def _to_hash(value):
- return hash_data(value)
-
- @property
- def id(self):
- return self._id
-
- def to_hash(self):
- return self.to_dict()['id']
-
- @staticmethod
- def _to_str(value):
- return serialize(value)
-
- # TODO: This method shouldn't call `_remove_signatures`
- def __str__(self):
- tx = Transaction._remove_signatures(self.to_dict())
- return Transaction._to_str(tx)
-
- @classmethod
- def get_asset_ids(cls, transactions):
- """Get the asset id from a list of :class:`~.Transactions`.
-
- This is useful when we want to check if the multiple inputs of a
- transaction are related to the same asset id.
-
- Args:
- transactions (:obj:`list` of :class:`~planetmint.transactions.common.
- transaction.Transaction`): A list of Transactions.
- Usually input Transactions that should have a matching
- asset ID.
-
- Returns:
- str: ID of the asset.
-
- Raises:
- :exc:`AssetIdMismatch`: If the inputs are related to different
- assets.
- """
-
- if not isinstance(transactions, list):
- transactions = [transactions]
-
- # create a set of the transactions' asset ids
- asset_ids = []
- for tx in transactions:
- if tx.operation == tx.CREATE:
- asset_ids.append(tx.id)
- else:
- asset_ids.extend([asset['id'] for asset in tx.assets])
-
- return asset_ids
-
- @staticmethod
- def validate_id(tx_body):
- """Validate the transaction ID of a transaction
-
- Args:
- tx_body (dict): The Transaction to be transformed.
- """
- # NOTE: Remove reference to avoid side effects
- # tx_body = deepcopy(tx_body)
- tx_body = rapidjson.loads(rapidjson.dumps(tx_body))
-
- try:
- proposed_tx_id = tx_body['id']
- except KeyError:
- raise InvalidHash('No transaction id found!')
-
- tx_body['id'] = None
-
- tx_body_serialized = Transaction._to_str(tx_body)
- valid_tx_id = Transaction._to_hash(tx_body_serialized)
-
- if proposed_tx_id != valid_tx_id:
- err_msg = ("The transaction's id '{}' isn't equal to "
- "the hash of its body, i.e. it's not valid.")
- raise InvalidHash(err_msg.format(proposed_tx_id))
-
- @classmethod
- @memoize_from_dict
- def from_dict(cls, tx, skip_schema_validation=True):
- """Transforms a Python dictionary to a Transaction object.
-
- Args:
- tx_body (dict): The Transaction to be transformed.
-
- Returns:
- :class:`~planetmint.transactions.common.transaction.Transaction`
- """
- operation = tx.get('operation', Transaction.CREATE) if isinstance(tx, dict) else Transaction.CREATE
- cls = Transaction.resolve_class(operation)
-
- if not skip_schema_validation:
- cls.validate_id(tx)
- cls.validate_schema(tx)
-
- inputs = [Input.from_dict(input_) for input_ in tx['inputs']]
- outputs = [Output.from_dict(output) for output in tx['outputs']]
- return cls(tx['operation'], tx['assets'], inputs, outputs,
- tx['metadata'], tx['version'], hash_id=tx['id'], tx_dict=tx)
-
- @classmethod
- def from_db(cls, planet, tx_dict_list):
- """Helper method that reconstructs a transaction dict that was returned
- from the database. It checks what asset_id to retrieve, retrieves the
- asset from the asset table and reconstructs the transaction.
-
- Args:
- planet (:class:`~planetmint.tendermint.Planetmint`): An instance
- of Planetmint used to perform database queries.
- tx_dict_list (:list:`dict` or :obj:`dict`): The transaction dict or
- list of transaction dict as returned from the database.
-
- Returns:
- :class:`~Transaction`
-
- """
- return_list = True
- if isinstance(tx_dict_list, dict):
- tx_dict_list = [tx_dict_list]
- return_list = False
-
- tx_map = {}
- tx_ids = []
- for tx in tx_dict_list:
- tx.update({'metadata': None})
- tx_map[tx['id']] = tx
- tx_ids.append(tx['id'])
-
- # TODO: Find occurences of get_assets and refactor
- # NOTE: Open issue for get_assets and this logic, it won't hold up for COMPOSE/DECOMPOSE
- assets = list(planet.get_assets(tx_ids))
- for asset in assets:
- if asset is not None:
- if tx_map.get(asset.get('id', None), None) is not None:
- tx = tx_map[asset['id']]
- del asset['id']
- tx['assets'] = [asset]
-
- tx_ids = list(tx_map.keys())
- metadata_list = list(planet.get_metadata(tx_ids))
- for metadata in metadata_list:
- tx = tx_map[metadata['id']]
- tx.update({'metadata': metadata.get('metadata')})
-
- if return_list:
- tx_list = []
- for tx_id, tx in tx_map.items():
- tx_list.append(cls.from_dict(tx))
- return tx_list
- else:
- tx = list(tx_map.values())[0]
- return cls.from_dict(tx)
-
- type_registry = {}
-
- @staticmethod
- def register_type(tx_type, tx_class):
- Transaction.type_registry[tx_type] = tx_class
-
- def resolve_class(operation):
- """For the given `tx` based on the `operation` key return its implementation class"""
-
- create_txn_class = Transaction.type_registry.get(Transaction.CREATE)
- return Transaction.type_registry.get(operation, create_txn_class)
-
- @classmethod
- def validate_schema(cls, tx):
- pass
-
- def validate_transfer_inputs(self, planet, current_transactions=[]):
- # store the inputs so that we can check if the asset ids match
- input_txs = []
- input_conditions = []
- for input_ in self.inputs:
- input_txid = input_.fulfills.txid
- input_tx = planet.get_transaction(input_txid)
-
- if input_tx is None:
- for ctxn in current_transactions:
- if ctxn.id == input_txid:
- input_tx = ctxn
-
- if input_tx is None:
- raise InputDoesNotExist("input `{}` doesn't exist"
- .format(input_txid))
-
- spent = planet.get_spent(input_txid, input_.fulfills.output,
- current_transactions)
- if spent:
- raise DoubleSpend('input `{}` was already spent'
- .format(input_txid))
-
- output = input_tx.outputs[input_.fulfills.output]
- input_conditions.append(output)
- input_txs.append(input_tx)
-
- # Validate that all inputs are distinct
- links = [i.fulfills.to_uri() for i in self.inputs]
- if len(links) != len(set(links)):
- raise DoubleSpend('tx "{}" spends inputs twice'.format(self.id))
-
- input_amount = sum([input_condition.amount for input_condition in input_conditions])
- output_amount = sum([output_condition.amount for output_condition in self.outputs])
-
- if output_amount != input_amount:
- raise AmountError(('The amount used in the inputs `{}`'
- ' needs to be same as the amount used'
- ' in the outputs `{}`')
- .format(input_amount, output_amount))
-
- if not self.inputs_valid(input_conditions):
- raise InvalidSignature('Transaction signature is invalid.')
-
- return True
diff --git a/planetmint/transactions/common/transaction_link.py b/planetmint/transactions/common/transaction_link.py
deleted file mode 100644
index fcdbeb1..0000000
--- a/planetmint/transactions/common/transaction_link.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright © 2020 Interplanetary Database Association e.V.,
-# Planetmint and IPDB software contributors.
-# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
-# Code is Apache-2.0 and docs are CC-BY-4.0
-
-class TransactionLink(object):
- """An object for unidirectional linking to a Transaction's Output.
-
- Attributes:
- txid (str, optional): A Transaction to link to.
- output (int, optional): An output's index in a Transaction with id
- `txid`.
- """
-
- def __init__(self, txid=None, output=None):
- """Create an instance of a :class:`~.TransactionLink`.
-
- Note:
- In an IPLD implementation, this class is not necessary anymore,
- as an IPLD link can simply point to an object, as well as an
- objects properties. So instead of having a (de)serializable
- class, we can have a simple IPLD link of the form:
- `//transaction/outputs/