Mirror of https://github.com/bigchaindb/bigchaindb.git (synced 2024-10-13 13:34:05 +00:00)

Commit 841cabe8c6 ("Add code, have fun!"), parent 15d88d3dd9

.gitignore (vendored), 66 lines changed
@@ -1 +1,67 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Sphinx documentation
docs/build/

# PyBuilder
target/

# Ipython Notebook
.ipynb_checkpoints

# pyenv
.python-version

.travis.yml (new file), 15 lines
@@ -0,0 +1,15 @@
sudo: false
language: python
python: 3.5

env:
  - TOX_ENV=py34
  - TOX_ENV=py35

install:
  - pip install tox

script: tox -e $TOX_ENV

after_success:
  - codecov

CODE_OF_CONDUCT.md (new file), 50 lines
@@ -0,0 +1,50 @@
# Contributor Code of Conduct

As contributors and maintainers of this project, and in the interest of
fostering an open and welcoming community, we pledge to respect all people who
contribute to the project.

We are committed to making participation in this project a harassment-free
experience for everyone, regardless of level of experience, gender, gender
identity and expression, sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information, such as physical or electronic
  addresses, without explicit permission
* Deliberate intimidation
* Other unethical or unprofessional conduct

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

By adopting this Code of Conduct, project maintainers commit themselves to
fairly and consistently applying these principles to every aspect of managing
this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior directed at you or another community member may be
reported by contacting a project maintainer at [conduct@bigchaindb.com](mailto:conduct@bigchaindb.com). All
complaints will be reviewed and investigated and will result in a response that
is appropriate to the circumstances. Maintainers are
obligated to maintain confidentiality with regard to the reporter of an
incident.

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 1.3.0, available at
[http://contributor-covenant.org/version/1/3/0/][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/3/0/

CONTRIBUTING.md (new file), 109 lines
@@ -0,0 +1,109 @@
# How to Contribute to the BigchainDB Project

There are many ways you can contribute to the BigchainDB project, some very easy and others more involved. We want to be friendly and welcoming to all potential contributors, so we ask that everyone involved abide by some simple guidelines outlined in our [Code of Conduct](./CODE_OF_CONDUCT.md).

## Easy Ways to Contribute

The BigchainDB community has a Google Group and a Slack chat. Our [Community page](https://www.bigchaindb.com/community) has more information about those.

You can also follow us on Twitter [@BigchainDB](https://twitter.com/BigchainDB).

If you want to file a bug report, suggest a feature, or ask a code-related question, please go to the `BigchainDB/bigchaindb` repository on GitHub and [create a new Issue](https://github.com/bigchaindb/bigchaindb/issues/new). (You will need a free [GitHub account](https://github.com/signup/free).) Please describe the issue clearly, including steps to reproduce if it is a bug.

## How to Contribute Code or Documentation

### Step 0 - Prepare and Familiarize Yourself

To contribute code or documentation, you need a [GitHub account](https://github.com/signup/free).

Familiarize yourself with how we do coding and documentation in the BigchainDB project, including:

* our Python Style Guide (coming soon)
* [our documentation strategy](./docs/README.md) (including code documentation)
* our Documentation Style Guide (coming soon)
* the Gitflow Git workflow (also called git-flow):
    * [DataSift's introduction](https://datasift.github.io/gitflow/IntroducingGitFlow.html)
    * [Atlassian's tutorial](https://www.atlassian.com/git/tutorials/comparing-workflows/gitflow-workflow)
    * [the original blog post](http://nvie.com/posts/a-successful-git-branching-model/)
* [semantic versioning](http://semver.org/)

### Step 1 - Fork bigchaindb on GitHub

In your web browser, go to [the BigchainDB repository on GitHub](https://github.com/bigchaindb/bigchaindb) and click the `Fork` button in the top right corner. This creates a new Git repository named `bigchaindb` in _your_ GitHub account.

### Step 2 - Clone Your Fork

(This only has to be done once.) In your local terminal, use Git to clone _your_ `bigchaindb` repository to your local computer. Also add the original GitHub bigchaindb/bigchaindb repository as a remote named `upstream` (a convention):
```bash
git clone git@github.com:your-github-username/bigchaindb.git
cd bigchaindb
git remote add upstream git@github.com:BigchainDB/bigchaindb.git
```

### Step 3 - Fetch and Merge the Latest from `upstream/develop`

Switch to the `develop` branch locally, fetch all `upstream` branches, and merge the just-fetched `upstream/develop` branch with the local `develop` branch:
```bash
git checkout develop
git fetch upstream
git merge upstream/develop
```

### Step 4 - Create a New Branch for Each Bug/Feature

If your new branch is to **fix a bug** identified in a specific GitHub Issue with number `ISSNO`, then name your new branch `bug/ISSNO/short-description-here`. For example, `bug/67/fix-leap-year-crash`.

If your new branch is to **add a feature** requested in a specific GitHub Issue with number `ISSNO`, then name your new branch `feat/ISSNO/short-description-here`. For example, `feat/135/blue-background-on-mondays`.

Otherwise, please give your new branch a short, descriptive, all-lowercase name.
```bash
git checkout -b new-branch-name
```

### Step 5 - Make Edits, git add, git commit

With your new branch checked out locally, make changes or additions to the code or documentation, git add them, and git commit them.
```bash
git add new-or-changed-file
git commit -m "Short description of new or changed things"
```

Remember to write tests for new code. If you don't, our code (test) coverage will go down, and we won't be able to accept your code. (We have some hard checks that run on all new pull requests, and code coverage is one of them.)

Please run all existing tests to make sure you didn't break anything.

Remember to write or modify documentation to reflect your additions or changes.

You will want to merge changes from upstream (i.e. the original repository) into your new branch from time to time, using something like:
```bash
git fetch upstream
git merge upstream/develop
```

### Step 6 - Push Your New Branch to origin

Make sure you've committed all the additions or changes you want to include in your pull request. Then push your new branch to origin (i.e. _your_ remote bigchaindb repository).
```bash
git push origin new-branch-name
```

### Step 7 - Create a Pull Request

Go to the GitHub website and to _your_ remote bigchaindb repository (i.e. something like https://github.com/your-user-name/bigchaindb).

See [GitHub's documentation on how to initiate and send a pull request](https://help.github.com/articles/using-pull-requests/). Note that the destination repository should be `BigchainDB/bigchaindb` and the destination branch will typically be `develop` (because we use the Gitflow workflow).

If this is the first time you've submitted a pull request to BigchainDB, then you must read and accept the Contributor License Agreement (CLA) before we can merge your contributions. It can be found at [https://www.bigchaindb.com/cla](https://www.bigchaindb.com/cla).

Once you accept and submit the CLA, we'll email you with further instructions. (We will send you a long random string to put in the comments section of your pull request, along with the text, "I have read and agree to the terms of the BigchainDB Contributor License Agreement.")

Someone will then merge your branch or suggest changes. If we suggest changes, you won't have to open a new pull request; you can just push new code to the same branch (on `origin`) as you did before creating the pull request.

## Quick Links

* [BigchainDB Community links](https://www.bigchaindb.com/community) (e.g. mailing list, Slack)
* [General GitHub Documentation](https://help.github.com/)
* [Code of Conduct](./CODE_OF_CONDUCT.md)
* [Contributor License Agreement](https://www.bigchaindb.com/cla)

(Note: GitHub automatically links to CONTRIBUTING.md when a contributor creates an Issue or opens a Pull Request.)

Dockerfile (new file), 14 lines
@@ -0,0 +1,14 @@
FROM python:3.5

RUN apt-get update && apt-get -y install vim

RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app

RUN pip install --upgrade pip
RUN pip install --no-cache-dir pytest pytest-cov
RUN pip install --no-cache-dir ipython ipdb

COPY . /usr/src/app/

RUN python setup.py develop

README.md, 76 lines changed
@@ -1 +1,75 @@
-# bigchaindb
+# BigchainDB

[build status badge](https://codeship.com/projects/115150) [coverage badge](https://codecov.io/github/ascribe/bigchain?branch=master)

## Documentation

Documentation is available at https://bigchaindb.readthedocs.org/

## Getting started

#### Install RethinkDB on Ubuntu

```sh
# install rethinkdb https://rethinkdb.com/docs/install/ubuntu/
$ source /etc/lsb-release && echo "deb http://download.rethinkdb.com/apt $DISTRIB_CODENAME main" | sudo tee /etc/apt/sources.list.d/rethinkdb.list
$ wget -qO- http://download.rethinkdb.com/apt/pubkey.gpg | sudo apt-key add -
$ sudo apt-get update
$ sudo apt-get install rethinkdb

# start rethinkdb
$ rethinkdb
```

#### Install BigchainDB
```sh
$ pip install bigchaindb
```

#### Running BigchainDB
Note: BigchainDB currently supports only Python 3.4+.

Start the main process. If this is the first run, `bigchaindb` will generate a default
configuration file for you.
```sh
$ bigchaindb start
```

Generate some test transactions:

```sh
$ bigchaindb-benchmark load # add '-m' if you want to use all your cores
```

To learn more about the `bigchaindb` command, run
```sh
$ bigchaindb -h
```

#### Importing `BigchainDB` from the interpreter (python/ipython)
Make sure your `rethinkdb` process is running.

```python
>>> from bigchaindb import Bigchain
>>> b = Bigchain()
>>> b.me
'2B8C8PJxhycFzn4wncRhBNmMWwE5Frr9nLBUa1dGGxj5W'
```

#### Configuration

BigchainDB creates a default configuration file at `$HOME/.bigchaindb` on the
first run. You can inspect it with:

```sh
$ bigchaindb show-config
```
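
For reference, a sketch of what the generated `$HOME/.bigchaindb` file might look like, based on the defaults in `bigchaindb/__init__.py` below; the keypair values here are placeholders, not real keys:

```json
{
    "database": {
        "host": "localhost",
        "port": 28015,
        "name": "bigchain"
    },
    "keypair": {
        "public": "<base58-encoded public key>",
        "private": "<base58-encoded private key>"
    },
    "keyring": []
}
```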

#### Testing

```sh
$ py.test -v
```

bigchaindb/__init__.py (new file), 44 lines
@@ -0,0 +1,44 @@
import os
import copy

from bigchaindb.core import Bigchain  # noqa


def e(key, default=None, conv=None):
    '''Get the environment variable `key`, falling back to `default`
    if nothing is found.

    Keyword arguments:
    key -- the key to look for in the environment
    default -- the default value if nothing is found (default: None)
    conv -- a callable used to convert the value (default: use the type of the
            default value)
    '''

    val = os.environ.get(key, default)

    if conv or default is not None:
        conv = conv or type(default)
        return conv(val)

    return val


config = {
    'database': {
        'host': e('BIGCHAIN_DATABASE_HOST', default='localhost'),
        'port': e('BIGCHAIN_DATABASE_PORT', default=28015),
        'name': e('BIGCHAIN_DATABASE_NAME', default='bigchain')
    },
    'keypair': {
        'public': e('BIGCHAIN_KEYPAIR_PUBLIC'),
        'private': e('BIGCHAIN_KEYPAIR_PRIVATE')
    },
    'keyring': [
    ]
}

# We need to maintain a backup copy of the original config dict in case
# the user wants to reconfigure the node. Check ``bigchaindb.config_utils``
# for more info.
_config = copy.deepcopy(config)
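
A quick sketch of how `e` converts environment values using the type of the default; the environment variable below is one of the keys the config above actually reads:

```python
import os
import bigchaindb

os.environ['BIGCHAIN_DATABASE_PORT'] = '28016'

# The default 28015 is an int, so the string read from the environment
# is converted with int(...), yielding the integer 28016.
port = bigchaindb.e('BIGCHAIN_DATABASE_PORT', default=28015)
assert port == 28016
```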

bigchaindb/block.py (new file), 214 lines
@@ -0,0 +1,214 @@
import logging
import multiprocessing as mp
import queue

import rethinkdb as r

from bigchaindb import Bigchain


logger = logging.getLogger(__name__)


class Block(object):

    def __init__(self, q_new_transaction):
        """
        Initialize the class with the needed queue of incoming transactions.
        """
        self._q_new_transaction = q_new_transaction
        self.q_new_transaction = None
        self.q_tx_to_validate = mp.Queue()
        self.q_tx_validated = mp.Queue()
        self.q_tx_delete = mp.Queue()
        self.q_block = mp.Queue()

    def filter_by_assignee(self):
        """
        Handle transactions that are assigned to me
        """

        # create a bigchain instance
        b = Bigchain()

        while True:
            tx = self.q_new_transaction.get()

            # poison pill
            if tx == 'stop':
                self.q_tx_to_validate.put('stop')
                return

            if tx['assignee'] == b.me:
                tx.pop('assignee')
                self.q_tx_to_validate.put(tx)

    def validate_transactions(self):
        """
        Check whether the incoming transactions are valid
        """

        # create a bigchain instance
        b = Bigchain()

        while True:
            tx = self.q_tx_to_validate.get()

            # poison pill
            if tx == 'stop':
                self.q_tx_delete.put('stop')
                self.q_tx_validated.put('stop')
                return

            self.q_tx_delete.put(tx['id'])
            if b.is_valid_transaction(tx):
                self.q_tx_validated.put(tx)

    def create_blocks(self):
        """
        Create a block with valid transactions
        """

        # create a bigchain instance
        b = Bigchain()
        stop = False

        while True:

            # read up to 1000 transactions
            validated_transactions = []
            for i in range(1000):
                try:
                    tx = self.q_tx_validated.get(timeout=5)
                except queue.Empty:
                    break

                # poison pill
                if tx == 'stop':
                    stop = True
                    break

                validated_transactions.append(tx)

            # if there are no transactions, skip block creation
            if validated_transactions:
                # create block
                block = b.create_block(validated_transactions)
                self.q_block.put(block)

            if stop:
                self.q_block.put('stop')
                return

    def write_blocks(self):
        """
        Write blocks to the bigchain
        """

        # create bigchain instance
        b = Bigchain()

        # Write blocks
        while True:
            block = self.q_block.get()

            # poison pill
            if block == 'stop':
                return

            b.write_block(block)

    def delete_transactions(self):
        """
        Delete transactions from the backlog
        """
        # create bigchain instance
        b = Bigchain()
        stop = False

        while True:
            # try to delete in batches to reduce I/O
            tx_to_delete = []
            for i in range(1000):
                try:
                    tx = self.q_tx_delete.get(timeout=5)
                except queue.Empty:
                    break

                # poison pill
                if tx == 'stop':
                    stop = True
                    break

                tx_to_delete.append(tx)

            if tx_to_delete:
                r.table('backlog').get_all(*tx_to_delete).delete(durability='soft').run(b.conn)

            if stop:
                return

    def bootstrap(self):
        """
        Get transactions from the backlog that may have been assigned to this node while it was
        offline (i.e. not listening to the changefeed)
        """
        # create bigchain instance
        b = Bigchain()

        # create a queue to store initial results
        q_initial = mp.Queue()

        # get initial results
        initial_results = r.table('backlog')\
            .between([b.me, r.minval], [b.me, r.maxval], index='assignee__transaction_timestamp')\
            .order_by(index=r.asc('assignee__transaction_timestamp'))\
            .run(b.conn)

        # add results to the queue
        for result in initial_results:
            q_initial.put(result)
        q_initial.put('stop')

        return q_initial

    def start(self):
        """
        Bootstrap and start the processes
        """
        logger.info('bootstrapping block module...')
        self.q_new_transaction = self.bootstrap()
        logger.info('finished reading past transactions')
        self._start()
        logger.info('finished bootstrapping block module...')

        logger.info('starting block module...')
        self.q_new_transaction = self._q_new_transaction
        self._start()
        logger.info('exiting block module...')

    def _start(self):
        """
        Initialize, spawn, and start the processes
        """

        # initialize the processes
        p_filter = mp.Process(name='filter_transactions', target=self.filter_by_assignee)
        p_validate = mp.Process(name='validate_transactions', target=self.validate_transactions)
        p_blocks = mp.Process(name='create_blocks', target=self.create_blocks)
        p_write = mp.Process(name='write_blocks', target=self.write_blocks)
        p_delete = mp.Process(name='delete_transactions', target=self.delete_transactions)

        # start the processes
        p_filter.start()
        p_validate.start()
        p_blocks.start()
        p_write.start()
        p_delete.start()

        # join the processes
        p_filter.join()
        p_validate.join()
        p_blocks.join()
        p_write.join()
        p_delete.join()
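
A minimal sketch of wiring up the pipeline above by hand. It assumes a running RethinkDB node and a configured keypair, since every stage opens its own `Bigchain()` connection:

```python
import multiprocessing as mp

from bigchaindb.block import Block

# In production the queue would be fed by the RethinkDB changefeed; here we
# only send the poison pill, so each stage starts up and then shuts down cleanly.
q_new_transactions = mp.Queue()
q_new_transactions.put('stop')

block = Block(q_new_transactions)
block.start()
```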

bigchaindb/commands/__init__.py (new, empty file)

bigchaindb/commands/bigchain.py (new file), 117 lines
@@ -0,0 +1,117 @@
"""Command line interface for the `bigchain` command."""


import os
import logging
import argparse

import bigchaindb
import bigchaindb.config_utils
from bigchaindb import db
from bigchaindb.commands.utils import base_parser, start
from bigchaindb.processes import Processes
from bigchaindb.crypto import generate_key_pair


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def run_show_config(args):
    """Show the current configuration"""
    from pprint import pprint

    bigchaindb.config_utils.file_config(args.config)
    pprint(bigchaindb.config)


def run_configure(args, skip_if_exists=False):
    """Run a script to configure the current node.

    Args:
        skip_if_exists (bool): skip the function if a config file already exists
    """
    config_path = args.config or bigchaindb.config_utils.CONFIG_DEFAULT_PATH
    proceed = args.yes
    config_file_exists = os.path.exists(config_path)

    if config_file_exists and skip_if_exists:
        return

    if config_file_exists and not proceed:
        want = input('Config file `{}` exists, do you want to override it? '
                     '(cannot be undone) [y/n]: '.format(config_path))
        # abort unless the user explicitly answers 'y'
        if want != 'y':
            return

    # Patch the default configuration with the new values
    conf = bigchaindb._config
    print('Generating keypair')
    conf['keypair']['private'], conf['keypair']['public'] = generate_key_pair()

    for key in ('host', 'port', 'name'):
        val = conf['database'][key]
        conf['database'][key] = input('Database {}? (default `{}`): '.format(key, val)) or val

    bigchaindb.config_utils.write_config(conf, config_path)
    print('Ready to go!')


def run_init(args):
    """Initialize the database"""
    bigchaindb.config_utils.file_config(args.config)
    db.init()


def run_drop(args):
    """Drop the database"""
    bigchaindb.config_utils.file_config(args.config)
    db.drop(assume_yes=args.yes)


def run_start(args):
    """Start the processes to run the node"""
    run_configure(args, skip_if_exists=True)
    bigchaindb.config_utils.file_config(args.config)
    try:
        db.init()
    except db.DatabaseAlreadyExistsException:
        pass
    p = Processes()
    logger.info('Start bigchaindb main process')
    p.start()


def main():
    parser = argparse.ArgumentParser(description='Control your bigchain node.',
                                     parents=[base_parser])

    # all the commands are contained in the subparsers object,
    # the command selected by the user will be stored in `args.command`,
    # which is used by the `main` function to select which other
    # function to call.
    subparsers = parser.add_subparsers(title='Commands',
                                       dest='command')

    subparsers.add_parser('configure',
                          help='Prepare the config file and create the node keypair')

    # parsers for database-level commands
    subparsers.add_parser('init',
                          help='Init the database')

    subparsers.add_parser('drop',
                          help='Drop the database')

    # TODO how about just config, or info?
    subparsers.add_parser('show-config',
                          help='Show the current configuration')

    subparsers.add_parser('start',
                          help='Start bigchain')

    start(parser, globals())


if __name__ == '__main__':
    main()

bigchaindb/commands/bigchain_benchmark.py (new file), 85 lines
@@ -0,0 +1,85 @@
'''Command line interface for the `bigchain-benchmark` command.'''
import logging
import argparse

import logstats

import bigchaindb
import bigchaindb.config_utils
from bigchaindb.util import ProcessGroup
from bigchaindb.commands.utils import base_parser, start


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

USER_PUBLIC_KEY = 'qZAN9Ngs1v4qP1T5UBYw75M5f2ej7mAJx8gBMF4BBWtZ'


def _run_load(tx_left, stats):
    logstats.thread.start(stats)
    b = bigchaindb.Bigchain()

    while True:
        tx = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
        tx_signed = b.sign_transaction(tx, b.me_private)
        b.write_transaction(tx_signed)

        stats['transactions'] += 1

        if tx_left is not None:
            tx_left -= 1
            if tx_left == 0:
                break


def run_load(args):
    bigchaindb.config_utils.file_config(args.config)
    logger.info('Starting %s processes', args.multiprocess)
    stats = logstats.Logstats()
    logstats.thread.start(stats)

    tx_left = None
    if args.count > 0:
        tx_left = int(args.count / args.multiprocess)

    workers = ProcessGroup(concurrency=args.multiprocess,
                           target=_run_load,
                           args=(tx_left, stats.get_child()))
    workers.start()


def main():
    parser = argparse.ArgumentParser(description='Benchmark your bigchain federation.',
                                     parents=[base_parser])

    # all the commands are contained in the subparsers object,
    # the command selected by the user will be stored in `args.command`,
    # which is used by the `main` function to select which other
    # function to call.
    subparsers = parser.add_subparsers(title='Commands',
                                       dest='command')

    # parser for database-level commands
    load_parser = subparsers.add_parser('load',
                                        help='Write transactions to the backlog')

    load_parser.add_argument('-m', '--multiprocess',
                             nargs='?',
                             type=int,
                             default=False,
                             help='Spawn multiple processes to run the command; '
                                  'if no value is provided, the number of processes '
                                  'is equal to the number of cores of the host machine')

    load_parser.add_argument('-c', '--count',
                             default=0,
                             type=int,
                             help='Number of transactions to push. If the parameter -m '
                                  'is set, the count is distributed equally among all the '
                                  'processes')

    start(parser, globals())


if __name__ == '__main__':
    main()
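
A short sketch of what the `load` subcommand ends up doing for, say, `-c 1000 -m 4`; `ProcessGroup` lives in `bigchaindb.util` (not shown here), and its API is inferred from the call above:

```python
import logstats

from bigchaindb.commands.bigchain_benchmark import _run_load
from bigchaindb.util import ProcessGroup

stats = logstats.Logstats()

# 1000 transactions split across 4 worker processes: 250 each.
workers = ProcessGroup(concurrency=4,
                       target=_run_load,
                       args=(250, stats.get_child()))
workers.start()
```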

bigchaindb/commands/utils.py (new file), 52 lines
@@ -0,0 +1,52 @@
"""Utility functions and basic common arguments for ``argparse.ArgumentParser``."""

import argparse
import multiprocessing as mp


def start(parser, scope):
    """Utility function to execute a subcommand.

    The function will look up in the ``scope`` whether there is a function called ``run_<parser.args.command>``
    and will run it using ``parser.args`` as first positional argument.

    Args:
        parser: an ArgumentParser instance.
        scope (dict): map possibly containing the functions to be called.

    Raises:
        NotImplementedError: if ``scope`` doesn't contain a function called ``run_<parser.args.command>``.
    """
    args = parser.parse_args()

    if not args.command:
        parser.print_help()
        return

    # look up in the current scope for a function called 'run_<command>',
    # replacing all the dashes '-' with underscores '_'
    func = scope.get('run_' + args.command.replace('-', '_'))

    # if no command has been found, raise a `NotImplementedError`
    if not func:
        raise NotImplementedError('Command `{}` not yet implemented'.format(args.command))

    args.multiprocess = getattr(args, 'multiprocess', False)

    if args.multiprocess is False:
        args.multiprocess = 1
    elif args.multiprocess is None:
        args.multiprocess = mp.cpu_count()

    func(args)


base_parser = argparse.ArgumentParser(add_help=False)

base_parser.add_argument('-c', '--config',
                         help='Specify the location of the configuration file')

base_parser.add_argument('-y', '--yes', '--yes-please',
                         action='store_true',
                         help='Assume "yes" as answer to all prompts and run '
                              'non-interactively')
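
A minimal sketch of the `run_<command>` dispatch convention that `start` relies on; the `greet` command here is purely hypothetical:

```python
import argparse

from bigchaindb.commands.utils import base_parser, start


def run_greet(args):
    print('hello from the `greet` subcommand')


def main():
    parser = argparse.ArgumentParser(parents=[base_parser])
    subparsers = parser.add_subparsers(title='Commands', dest='command')
    subparsers.add_parser('greet', help='Say hello')

    # `start` looks up `run_greet` in this module's globals and calls it
    # with the parsed arguments.
    start(parser, globals())


if __name__ == '__main__':
    main()
```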

bigchaindb/config_utils.py (new file), 99 lines
@@ -0,0 +1,99 @@
"""Utils to configure Bigchain.

By calling `file_config`, the global configuration (stored in
`bigchain.config`) will be updated with the values contained in the
configuration file.

Note that there is a precedence in reading configuration values:
 - [not yet] command line;
 - local config file;
 - environment vars;
 - default config file (contained in `bigchain.__init__`).
"""

import os
import copy
import json
import logging
import collections

import bigchaindb

logger = logging.getLogger(__name__)
CONFIG_DEFAULT_PATH = os.path.join(os.path.expanduser('~'), '.bigchaindb')


# Thanks Alex <3
# http://stackoverflow.com/a/3233356/597097
def update(d, u):
    """Recursively update a mapping."""
    for k, v in u.items():
        if isinstance(v, collections.Mapping):
            r = update(d.get(k, {}), v)
            d[k] = r
        else:
            d[k] = u[k]
    return d


def file_config(filename=None):
    """Read a configuration file and merge it with the default configuration.

    Args:
        filename (str): the JSON file with the configuration. Defaults to ``None``.
            If ``None``, the HOME of the current user and the string ``.bigchaindb`` will be used.

    Note:
        The function merges the values in ``filename`` with the **default configuration**,
        so any update made to ``bigchaindb.config`` will be lost.
    """
    if not filename:
        filename = CONFIG_DEFAULT_PATH

    with open(filename) as f:
        newconfig = json.load(f)

    dict_config(newconfig)
    logger.info('Configuration loaded from `{}`'.format(filename))


def dict_config(newconfig):
    """Merge the provided configuration with the default one.

    Args:
        newconfig (dict): a dictionary with the configuration to load.

    Note:
        The function merges ``newconfig`` with the **default configuration**, so any
        update made to ``bigchaindb.config`` will be lost.
    """
    bigchaindb.config = copy.deepcopy(bigchaindb._config)
    update(bigchaindb.config, newconfig)
    bigchaindb.config['CONFIGURED'] = True


def write_config(newconfig, filename=None):
    """Write the provided configuration to a specific location.

    Args:
        newconfig (dict): a dictionary with the configuration to load.
        filename (str): the name of the file that will store the new configuration. Defaults to ``None``.
            If ``None``, the HOME of the current user and the string ``.bigchaindb`` will be used.
    """
    if not filename:
        filename = CONFIG_DEFAULT_PATH

    with open(filename, 'w') as f:
        json.dump(newconfig, f)


def autoconfigure():
    """Run ``file_config`` if the module has not been initialized."""
    if bigchaindb.config.get('CONFIGURED'):
        return
    try:
        file_config()
    except FileNotFoundError:
        logger.warning('Cannot find your config file. Run `bigchaindb configure` to create one')
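
A small sketch of how the recursive `update` differs from plain `dict.update`; the values are illustrative:

```python
from bigchaindb.config_utils import update

defaults = {'database': {'host': 'localhost', 'port': 28015}}
overrides = {'database': {'port': 28016}}

update(defaults, overrides)
# -> {'database': {'host': 'localhost', 'port': 28016}}
# Only 'port' is replaced; the sibling 'host' key survives, whereas
# defaults.update(overrides) would have thrown the whole nested dict away.
```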

bigchaindb/core.py (new file), 653 lines
@@ -0,0 +1,653 @@
import rethinkdb as r
import calendar
import random
import json
import rapidjson

from datetime import datetime

import bigchaindb
import bigchaindb.config_utils
from bigchaindb import exceptions
from bigchaindb.crypto import hash_data, PublicKey, PrivateKey, generate_key_pair


class GenesisBlockAlreadyExistsError(Exception):
    pass

class KeypairNotFoundException(Exception):
    pass


class Bigchain(object):
    """Bigchain API

    Create, read, sign, and write transactions to the database
    """

    def __init__(self, host=None, port=None, dbname=None,
                 public_key=None, private_key=None, keyring=[]):
        """Initialize the Bigchain instance

        There are three ways in which the Bigchain instance can get its parameters.
        The order in which the parameters are chosen is:

        1. Setting them by passing them to the `__init__` method itself.
        2. Setting them as environment variables.
        3. Reading them from the `config.json` file.

        Args:
            host (str): hostname where rethinkdb is running.
            port (int): port in which rethinkdb is running (usually 28015).
            dbname (str): the name of the database to connect to (usually bigchain).
            public_key (str): the base58 encoded public key for the ECDSA secp256k1 curve.
            private_key (str): the base58 encoded private key for the ECDSA secp256k1 curve.
            keyring (list[str]): list of base58 encoded public keys of the federation nodes.

        """
        bigchaindb.config_utils.autoconfigure()
        self.host = host or bigchaindb.config['database']['host']
        self.port = port or bigchaindb.config['database']['port']
        self.dbname = dbname or bigchaindb.config['database']['name']
        self.me = public_key or bigchaindb.config['keypair']['public']
        self.me_private = private_key or bigchaindb.config['keypair']['private']
        self.federation_nodes = keyring or bigchaindb.config['keyring']

        if not self.me or not self.me_private:
            raise KeypairNotFoundException()

        self._conn = None

    @property
    def conn(self):
        if not self._conn:
            self._conn = self.reconnect()
        return self._conn

    def reconnect(self):
        return r.connect(host=self.host, port=self.port, db=self.dbname)

    def create_transaction(self, current_owner, new_owner, tx_input, operation, payload=None):
        """Create a new transaction

        A transaction in the bigchain is a transfer of a digital asset between two entities represented
        by public keys.

        Currently the bigchain supports two types of operations:

        `CREATE` - Only federation nodes are allowed to use this operation. In a create operation
        a federation node creates a digital asset in the bigchain and assigns that asset to a public
        key. The owner of the private key can then decide to transfer this digital asset by using the
        `transaction id` of the transaction as an input in a `TRANSFER` transaction.

        `TRANSFER` - A transfer operation allows for a transfer of the digital assets between entities.

        Args:
            current_owner (str): base58 encoded public key of the current owner of the asset.
            new_owner (str): base58 encoded public key of the new owner of the digital asset.
            tx_input (str): id of the transaction to use as input.
            operation (str): Either `CREATE` or `TRANSFER` operation.
            payload (Optional[dict]): dictionary with information about the asset.

        Returns:
            dict: unsigned transaction.


        Raises:
            TypeError: if the optional ``payload`` argument is not a ``dict``.
        """
        data = None
        if payload is not None:
            if isinstance(payload, dict):
                hash_payload = hash_data(self.serialize(payload))
                data = {
                    'hash': hash_payload,
                    'payload': payload
                }
            else:
                raise TypeError('`payload` must be a dict instance')

        tx = {
            'current_owner': current_owner,
            'new_owner': new_owner,
            'input': tx_input,
            'operation': operation,
            'timestamp': self.timestamp(),
            'data': data
        }

        # serialize the transaction and compute its hash
        tx_serialized = self.serialize(tx)
        tx_hash = hash_data(tx_serialized)

        # create the transaction
        transaction = {
            'id': tx_hash,
            'transaction': tx
        }

        return transaction

    def sign_transaction(self, transaction, private_key):
        """Sign a transaction

        A transaction is signed with the private key corresponding to the `current_owner`.

        Args:
            transaction (dict): transaction to sign.
            private_key (str): base58 encoded private key used to create a signature of the transaction.

        Returns:
            dict: transaction with the `signature` field included.

        """
        private_key = PrivateKey(private_key)
        signature = private_key.sign(self.serialize(transaction))
        signed_transaction = transaction.copy()
        signed_transaction.update({'signature': signature})
        return signed_transaction

    def verify_signature(self, signed_transaction):
        """Verify the signature of a transaction

        A valid transaction should have been signed with the private key corresponding to the `current_owner`.

        Args:
            signed_transaction (dict): a transaction with the `signature` included.

        Returns:
            bool: True if the signature is correct, False otherwise.

        """
        data = signed_transaction.copy()

        # if the assignee field is in the transaction, remove it
        if 'assignee' in data:
            data.pop('assignee')

        signature = data.pop('signature')
        public_key_base58 = signed_transaction['transaction']['current_owner']
        public_key = PublicKey(public_key_base58)
        return public_key.verify(self.serialize(data), signature)

    def write_transaction(self, signed_transaction):
        """Write the transaction to bigchain.

        When a transaction is first written to the bigchain it will be kept in a backlog until
        it has been validated by the nodes of the federation.

        Args:
            signed_transaction (dict): transaction with the `signature` included.

        Returns:
            dict: database response
        """

        # we will assign this transaction to *one* node. This way we make sure that there are no duplicate
        # transactions on the bigchain

        if self.federation_nodes:
            assignee = random.choice(self.federation_nodes)
        else:
            # I am the only node
            assignee = self.me

        # update the transaction
        signed_transaction.update({'assignee': assignee})

        # write to the backlog
        response = r.table('backlog').insert(signed_transaction, durability='soft').run(self.conn)
        return response

    # TODO: the same `txid` can be in two different blocks
    def get_transaction(self, txid):
        """Retrieve a transaction with `txid` from bigchain.

        Queries the bigchain for a transaction that was already included in a block.

        Args:
            txid (str): transaction id of the transaction to query

        Returns:
            A dict with the transaction details if the transaction was found.

            If no transaction with that `txid` was found it returns `None`

        """
        response = r.table('bigchain').concat_map(lambda doc: doc['block']['transactions'])\
            .filter(lambda transaction: transaction['id'] == txid).run(self.conn)

        # transaction ids should be unique
        transactions = list(response)
        if transactions:
            if len(transactions) != 1:
                raise Exception('Transaction ids should be unique. There is a problem with the chain')
            else:
                return transactions[0]
        else:
            return None

    def get_tx_by_payload_hash(self, payload_hash):
        """Retrieve transactions related to a digital asset.

        When creating a transaction, one of the optional arguments is the `payload`. The payload is a generic
        dict that contains information about the digital asset.

        To make it easy to query the bigchain for a digital asset, we create a sha3-256 hash of the
        serialized payload and store it with the transaction. This makes it easy for developers to keep track
        of their digital assets in bigchain.

        Args:
            payload_hash (str): sha3-256 hash of the serialized payload.

        Returns:
            A list of transactions containing that payload. If no transaction exists with that payload it
            returns an empty list.

        """
        cursor = r.table('bigchain')\
            .get_all(payload_hash, index='payload_hash')\
            .run(self.conn)

        transactions = list(cursor)
        return transactions

    def get_spent(self, txid):
        """Check if a `txid` was already used as an input.

        A transaction can be used as an input for another transaction. Bigchain needs to make sure that a
        given `txid` is only used once.

        Args:
            txid (str): transaction id.

        Returns:
            The transaction that used the `txid` as an input if it exists, else `None`

        """

        # checks if an input was already spent
        # checks if the bigchain has any transaction with input `transaction_id`
        response = r.table('bigchain').concat_map(lambda doc: doc['block']['transactions'])\
            .filter(lambda transaction: transaction['transaction']['input'] == txid).run(self.conn)

        # a transaction_id should have been spent at most one time
        transactions = list(response)
        if transactions:
            if len(transactions) != 1:
                raise Exception('`{}` was spent more than once. There is a problem with the chain'.format(
                    txid))
            else:
                return transactions[0]
        else:
            return None

    def get_owned_ids(self, owner):
        """Retrieve a list of `txids` that can be used as inputs.

        Args:
            owner (str): base58 encoded public key.

        Returns:
            list: list of `txids` currently owned by `owner`

        """
        response = r.table('bigchain')\
            .concat_map(lambda doc: doc['block']['transactions'])\
            .filter({'transaction': {'new_owner': owner}})\
            .pluck('id')['id']\
            .run(self.conn)
        owned = []

        # remove all inputs already spent
        for tx_input in list(response):
            if not self.get_spent(tx_input):
                owned.append(tx_input)

        return owned

    def validate_transaction(self, transaction):
        """Validate a transaction.

        Args:
            transaction (dict): transaction to validate.

        Returns:
            The transaction if the transaction is valid; otherwise it raises an exception
            describing the reason why the transaction is invalid.

        Raises:
            OperationError: if the transaction operation is not supported
            TransactionDoesNotExist: if the input of the transaction is not found
            TransactionOwnerError: if the new transaction is using an input it doesn't own
            DoubleSpend: if the transaction is a double spend
            InvalidHash: if the hash of the transaction is wrong
            InvalidSignature: if the signature of the transaction is wrong
        """
        # If the operation is CREATE the transaction should have no inputs and should be signed by a
        # federation node
        if transaction['transaction']['operation'] == 'CREATE':
            if transaction['transaction']['input']:
                raise ValueError('A CREATE operation has no inputs')
            if transaction['transaction']['current_owner'] not in self.federation_nodes + [self.me]:
                raise exceptions.OperationError('Only federation nodes can use the operation `CREATE`')

        else:
            # check if the input exists and is owned by the current_owner
            if not transaction['transaction']['input']:
                raise ValueError('Only `CREATE` transactions can have null inputs')

            tx_input = self.get_transaction(transaction['transaction']['input'])
            if not tx_input:
                raise exceptions.TransactionDoesNotExist('input `{}` does not exist in the bigchain'.format(
                    transaction['transaction']['input']))

            if tx_input['transaction']['new_owner'] != transaction['transaction']['current_owner']:
                raise exceptions.TransactionOwnerError('current_owner `{}` does not own the input `{}`'.format(
                    transaction['transaction']['current_owner'], transaction['transaction']['input']))

            # check if the input was already spent
            spent = self.get_spent(tx_input['id'])
            if spent:
                raise exceptions.DoubleSpend('input `{}` was already spent'.format(
                    transaction['transaction']['input']))

        # Check the hash of the transaction
        calculated_hash = hash_data(self.serialize(transaction['transaction']))
        if calculated_hash != transaction['id']:
            raise exceptions.InvalidHash()

        # Check the signature
        if not self.verify_signature(transaction):
            raise exceptions.InvalidSignature()

        return transaction

    def is_valid_transaction(self, transaction):
        """Check whether a transaction is valid or invalid.

        Similar to `validate_transaction` but does not raise an exception if the transaction is invalid.

        Args:
            transaction (dict): transaction to check.

        Returns:
            The transaction if the transaction is valid, `False` otherwise.

        """
        try:
            self.validate_transaction(transaction)
            return transaction
        except (ValueError, exceptions.OperationError, exceptions.TransactionDoesNotExist,
                exceptions.TransactionOwnerError, exceptions.DoubleSpend,
                exceptions.InvalidHash, exceptions.InvalidSignature):
            return False

    def create_block(self, validated_transactions):
        """Create a block given a list of `validated_transactions`.

        Note that this method does not validate the transactions. Transactions should be validated before
        calling create_block.

        Args:
            validated_transactions (list): list of validated transactions.

        Returns:
            dict: created block.

        """
        # Create the new block
        block = {
            'timestamp': self.timestamp(),
            'transactions': validated_transactions,
            'node_pubkey': self.me,
            'voters': self.federation_nodes + [self.me]
        }

        # Calculate the hash of the new block
        block_data = self.serialize(block)
        block_hash = hash_data(block_data)
        block_signature = PrivateKey(self.me_private).sign(block_data)

        block = {
            'id': block_hash,
            'block': block,
            'signature': block_signature,
            'votes': []
        }

        return block

    # TODO: check that the voting structure is correctly constructed
    def validate_block(self, block):
        """Validate a block.

        Args:
            block (dict): block to validate.

        Returns:
            The block if the block is valid; otherwise it raises an exception
            describing the reason why the block is invalid.

        """

        # 1. Check if the current hash is correct
        calculated_hash = hash_data(self.serialize(block['block']))
        if calculated_hash != block['id']:
            raise exceptions.InvalidHash()

        # 2. Validate all transactions in the block
        for transaction in block['block']['transactions']:
            if not self.is_valid_transaction(transaction):
                # this will raise the exception
                self.validate_transaction(transaction)

        return block

    def is_valid_block(self, block):
        """Check whether a block is valid or invalid.

        Similar to `validate_block` but does not raise an exception if the block is invalid.

        Args:
            block (dict): block to check.

        Returns:
            bool: `True` if the block is valid, `False` otherwise.

        """
        try:
            self.validate_block(block)
            return True
        except Exception:
            return False

    def write_block(self, block, durability='soft'):
        """Write a block to bigchain.

        Args:
            block (dict): block to write to bigchain.

        """
        block_serialized = rapidjson.dumps(block)
        r.table('bigchain').insert(r.json(block_serialized), durability=durability).run(self.conn)

    # TODO: Decide if we need this method
    def transaction_exists(self, transaction_id):
        response = r.table('bigchain').get_all(transaction_id, index='transaction_id').run(self.conn)
        return True if len(response.items) > 0 else False

    def create_genesis_block(self):
        """Create the genesis block

        The genesis block is created when bigchain is first initialized. This method is not atomic: there
        might be concurrency problems if multiple instances try to write the genesis block when the
        BigchainDB Federation is started, but it's a highly unlikely scenario.
        """

        # 1. create one transaction
        # 2. create the block with one transaction
        # 3. write the block to the bigchain

        blocks_count = r.table('bigchain').count().run(self.conn)

        if blocks_count:
            raise GenesisBlockAlreadyExistsError('Cannot create the Genesis block')

        payload = {'message': 'Hello World from the Bigchain'}
        transaction = self.create_transaction(self.me, self.me, None, 'GENESIS', payload=payload)
        transaction_signed = self.sign_transaction(transaction, self.me_private)

        # create the block
        block = self.create_block([transaction_signed])
        # add the block number before writing
        block['block_number'] = 0
        self.write_block(block, durability='hard')

        return block

    def vote(self, block, previous_block_id, decision, invalid_reason=None):
        """Cast a vote on the block, given the id of the previous block and the decision (valid/invalid),
        and return the signed vote ready to be written to the database.

        Args:
            block (dict): Block to vote on.
            previous_block_id (str): The id of the previous block.
            decision (bool): Whether the block is valid or invalid.
            invalid_reason (Optional[str]): Reason the block is invalid

        """
        vote = {
            'voting_for_block': block['id'],
            'previous_block': previous_block_id,
            'is_block_valid': decision,
            'invalid_reason': invalid_reason,
            'timestamp': self.timestamp()
        }

        vote_data = self.serialize(vote)
        signature = PrivateKey(self.me_private).sign(vote_data)

        vote_signed = {
            'node_pubkey': self.me,
            'signature': signature,
            'vote': vote
        }

        return vote_signed

    def write_vote(self, block, vote, block_number):
        """
        Write the vote to the database
        """
        update = {'votes': r.row['votes'].append(vote)}

        # We need to *not* override the existing block_number, if any
        # FIXME: MIGHT HAVE RACE CONDITIONS WITH THE OTHER NODES IN THE FEDERATION
        if 'block_number' not in block:
            update['block_number'] = block_number

        r.table('bigchain')\
            .get(vote['vote']['voting_for_block'])\
            .update(update)\
            .run(self.conn)

    def get_last_voted_block(self):
        """
        Return the last block that this node voted on
        """
        # query bigchain for all blocks where this node is a voter and has already voted
        last_voted = r.table('bigchain')\
            .filter(r.row['block']['voters'].contains(self.me))\
            .filter(lambda doc: doc['votes'].contains(lambda vote: vote['node_pubkey'] == self.me))\
            .order_by(r.desc('block_number'))\
            .limit(1)\
            .run(self.conn)

        # return the last voted block if it exists, else return the Genesis block
        last_voted = list(last_voted)
        if not last_voted:
            return list(r.table('bigchain')
                        .filter(r.row['block_number'] == 0)
                        .run(self.conn))[0]

        return last_voted[0]

    def get_unvoted_blocks(self):
        """
        Return all the blocks that have not been voted on by this node.
        """

        unvoted = r.table('bigchain')\
            .filter(lambda doc: doc['votes'].contains(lambda vote: vote['node_pubkey'] == self.me).not_())\
            .order_by(r.asc((r.row['block']['timestamp'])))\
            .run(self.conn)

        if unvoted and unvoted[0].get('block_number') == 0:
            unvoted.pop(0)

        return unvoted

    @staticmethod
    def serialize(data):
        """Static method used to serialize a dict into a JSON formatted string.

        This method enforces rules like the separator and order of keys. This ensures that all dicts
        are serialized in the same way.

        This is especially important for hashing data. We need to make sure that everyone serializes their data
        in the same way so that we do not have hash mismatches for the same structure due to serialization
        differences.

        Args:
            data (dict): dict to serialize

        Returns:
            str: JSON formatted string

        """
        return json.dumps(data, skipkeys=False, ensure_ascii=False,
                          separators=(',', ':'), sort_keys=True)

    @staticmethod
    def deserialize(data):
        """Static method used to deserialize a JSON formatted string into a dict.

        Args:
            data (str): JSON formatted string.

        Returns:
            dict: dict resulting from the deserialization of a JSON formatted string.

        """
        return json.loads(data, encoding="utf-8")

    @staticmethod
    def timestamp():
        """Static method to calculate a UTC timestamp with microsecond precision.

        Returns:
            str: UTC timestamp.

        """
        dt = datetime.utcnow()
        # use timegm (not mktime) so the timetuple from utcnow() is interpreted as UTC
        return "{0:.6f}".format(calendar.timegm(dt.timetuple()) + dt.microsecond / 1e6)

    @staticmethod
    def generate_keys():
        """Generate a key pair.

        Returns:
            tuple: `(private_key, public_key)`. ECDSA key pair using the secp256k1 curve encoded
            in base58.

        """
        # generates and returns the keys serialized in base58
        return generate_key_pair()
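
A hedged end-to-end sketch of the transaction API above; it assumes a configured node (keypair plus a reachable RethinkDB), and `user_public_key` is a placeholder taken from the benchmark script:

```python
from bigchaindb import Bigchain

b = Bigchain()
user_public_key = 'qZAN9Ngs1v4qP1T5UBYw75M5f2ej7mAJx8gBMF4BBWtZ'  # placeholder

# The canonical serializer is what makes hashes reproducible:
assert Bigchain.serialize({'b': 1, 'a': 2}) == Bigchain.serialize({'a': 2, 'b': 1})

# CREATE: only federation nodes may issue this operation.
tx = b.create_transaction(b.me, user_public_key, None, 'CREATE',
                          payload={'msg': 'hello'})
tx_signed = b.sign_transaction(tx, b.me_private)
assert b.verify_signature(tx_signed)

# The signed transaction lands in the backlog until the federation validates it.
b.write_transaction(tx_signed)
```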
151
bigchaindb/crypto.py
Normal file
151
bigchaindb/crypto.py
Normal file
@ -0,0 +1,151 @@
# Separate all crypto code so that we can easily test several implementations

import hashlib
import sha3
import binascii
import base58
import bitcoin

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import hashes
from cryptography.exceptions import InvalidSignature


class PrivateKey(object):
    """
    PrivateKey instance
    """

    def __init__(self, key):
        """
        Instantiate the private key with the private_value encoded in base58
        """
        private_value = self.decode(key)
        private_numbers = self._private_value_to_cryptography_private_numbers(private_value)
        self.private_key = self._cryptography_private_key_from_private_numbers(private_numbers)

    def sign(self, data):
        """
        Sign data with the private key
        """
        signer = self.private_key.signer(ec.ECDSA(hashes.SHA256()))
        signer.update(data.encode('utf-8'))
        signature = signer.finalize()
        return binascii.hexlify(signature).decode('utf-8')

    @staticmethod
    def encode(private_value):
        """
        Encode the decimal number private_value to base58
        """
        private_value_hex = bitcoin.encode_privkey(private_value, 'hex')
        private_value_base58 = base58.b58encode(bytes.fromhex(private_value_hex))
        return private_value_base58

    @staticmethod
    def decode(key):
        """
        Decode the base58 private_value to decimal
        """
        private_value_hex = binascii.hexlify(base58.b58decode(key))
        private_value = bitcoin.decode_privkey(private_value_hex)
        return private_value

    def _private_value_to_public_values(self, private_value):
        """
        Return the public values from the private value
        """
        public_value_x, public_value_y = bitcoin.privkey_to_pubkey(private_value)
        return (public_value_x, public_value_y)

    def _private_value_to_cryptography_private_numbers(self, private_value):
        """
        Return an instance of cryptography PrivateNumbers from the decimal private_value
        """
        public_value_x, public_value_y = self._private_value_to_public_values(private_value)
        public_numbers = PublicKey._public_values_to_cryptography_public_numbers(public_value_x, public_value_y)
        private_numbers = ec.EllipticCurvePrivateNumbers(private_value, public_numbers)
        return private_numbers

    @staticmethod
    def _cryptography_private_key_from_private_numbers(private_numbers):
        """
        Return an instance of cryptography PrivateKey from a cryptography instance of PrivateNumbers
        """
        return private_numbers.private_key(default_backend())


class PublicKey(object):

    def __init__(self, key):
        """
        Instantiate the public key with the compressed public value encoded in base58
        """
        public_value_x, public_value_y = self.decode(key)
        public_numbers = self._public_values_to_cryptography_public_numbers(public_value_x, public_value_y)
        self.public_key = self._cryptography_public_key_from_public_numbers(public_numbers)

    def verify(self, data, signature):
        verifier = self.public_key.verifier(binascii.unhexlify(signature), ec.ECDSA(hashes.SHA256()))
        verifier.update(data.encode('utf-8'))
        try:
            verifier.verify()
        except InvalidSignature:
            return False

        return True

    @staticmethod
    def encode(public_value_x, public_value_y):
        """
        Encode the public key represented by the decimal values x and y to base58
        """
        public_value_compressed_hex = bitcoin.encode_pubkey([public_value_x, public_value_y], 'hex_compressed')
        public_value_compressed_base58 = base58.b58encode(bytes.fromhex(public_value_compressed_hex))
        return public_value_compressed_base58

    @staticmethod
    def decode(public_value_compressed_base58):
        """
        Decode the base58 public_value to the decimal x and y values
        """
        public_value_compressed_hex = binascii.hexlify(base58.b58decode(public_value_compressed_base58))
        public_value_x, public_value_y = bitcoin.decode_pubkey(public_value_compressed_hex.decode())
        return (public_value_x, public_value_y)

    @staticmethod
    def _public_values_to_cryptography_public_numbers(public_value_x, public_value_y):
        """
        Return an instance of cryptography PublicNumbers from the decimal x and y values
        """
        public_numbers = ec.EllipticCurvePublicNumbers(public_value_x, public_value_y, ec.SECP256K1())
        return public_numbers

    def _cryptography_public_key_from_public_numbers(self, public_numbers):
        """
        Return an instance of cryptography PublicKey from a cryptography instance of PublicNumbers
        """
        return public_numbers.public_key(default_backend())


def generate_key_pair():
    """
    Generate a new key pair and return the pair encoded in base58
    """
    # Private key
    private_key = ec.generate_private_key(ec.SECP256K1, default_backend())
    private_value = private_key.private_numbers().private_value
    private_value_base58 = PrivateKey.encode(private_value)

    # Public key
    public_key = private_key.public_key()
    public_value_x, public_value_y = public_key.public_numbers().x, public_key.public_numbers().y
    public_value_compressed_base58 = PublicKey.encode(public_value_x, public_value_y)

    return (private_value_base58, public_value_compressed_base58)


def hash_data(data):
    # SHA3-256 digest (hashlib is patched by the sha3 import above)
    return hashlib.sha3_256(data.encode()).hexdigest()
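A minimal round-trip sketch for the module above (an illustration, not part of the commit; it assumes the file is importable as `bigchaindb.crypto` and its dependencies are installed):

from bigchaindb.crypto import PrivateKey, PublicKey, generate_key_pair, hash_data

private_base58, public_base58 = generate_key_pair()

# sign with the private key, verify with the matching public key
signature = PrivateKey(private_base58).sign('hello bigchain')
assert PublicKey(public_base58).verify('hello bigchain', signature)

# hash_data is the SHA3-256 digest used for transaction and block ids
print(hash_data('hello bigchain'))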
2
bigchaindb/db/__init__.py
Normal file
@ -0,0 +1,2 @@
# TODO can we use explicit imports?
from bigchaindb.db.utils import *
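For reference, the explicit form the TODO above is asking for would be (a sketch; it assumes only the public helpers in `utils` need re-exporting):

from bigchaindb.db.utils import get_conn, init, drop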
89
bigchaindb/db/utils.py
Normal file
@ -0,0 +1,89 @@
"""Utils to initialize and drop the database."""

import logging

import rethinkdb as r

import bigchaindb


logger = logging.getLogger(__name__)


class DatabaseAlreadyExistsException(Exception):
    pass


def get_conn():
    '''Get the connection to the database.'''
    return r.connect(bigchaindb.config['database']['host'],
                     bigchaindb.config['database']['port'])


def init():
    conn = get_conn()
    dbname = bigchaindb.config['database']['name']

    if r.db_list().contains(dbname).run(conn):
        raise DatabaseAlreadyExistsException('Database `{}` already exists'.format(dbname))

    logger.info('Create:')
    logger.info(' - database `%s`', dbname)
    try:
        r.db_create(dbname).run(conn)
    except r.ReqlOpFailedError as e:
        logger.info(e.message)
        return

    logger.info(' - tables')
    # create the tables
    r.db(dbname).table_create('bigchain').run(conn)
    r.db(dbname).table_create('backlog').run(conn)

    logger.info(' - indexes')
    # create the secondary indexes
    # to order blocks by timestamp
    r.db(dbname).table('bigchain').index_create('block_timestamp', r.row['block']['timestamp']).run(conn)
    # to order blocks by block number
    r.db(dbname).table('bigchain').index_create('block_number', r.row['block']['block_number']).run(conn)
    # to order transactions by timestamp
    r.db(dbname).table('backlog').index_create('transaction_timestamp', r.row['transaction']['timestamp']).run(conn)
    # to query the bigchain for a transaction id
    r.db(dbname).table('bigchain').index_create('transaction_id',
                                                r.row['block']['transactions']['id'], multi=True).run(conn)
    # compound index to read transactions from the backlog per assignee
    r.db(dbname).table('backlog')\
        .index_create('assignee__transaction_timestamp', [r.row['assignee'], r.row['transaction']['timestamp']])\
        .run(conn)
    # secondary index for payload hash
    r.db(dbname).table('bigchain')\
        .index_create('payload_hash', r.row['block']['transactions']['transaction']['data']['hash'], multi=True)\
        .run(conn)

    # wait for rethinkdb to finish creating secondary indexes
    r.db(dbname).table('backlog').index_wait().run(conn)
    r.db(dbname).table('bigchain').index_wait().run(conn)

    logger.info(' - genesis block')
    b = bigchaindb.Bigchain()
    b.create_genesis_block()
    logger.info('Done, have fun!')


def drop(assume_yes=False):
    conn = get_conn()
    dbname = bigchaindb.config['database']['name']

    if assume_yes:
        response = 'y'
    else:
        response = input('Do you want to drop `{}` database? [y/n]: '.format(dbname))

    if response == 'y':
        try:
            logger.info('Drop database `%s`', dbname)
            r.db_drop(dbname).run(conn)
            logger.info('Done.')
        except r.ReqlOpFailedError as e:
            logger.info(e.message)
    else:
        logger.info('Drop aborted')
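As a sketch of what these indexes buy us (not part of the commit; the transaction id is a placeholder), the multi-index `transaction_id` lets a node find the block containing a transaction without scanning the table:

import rethinkdb as r

conn = r.connect('localhost', 28015)
blocks = r.db('bigchain')\
    .table('bigchain')\
    .get_all('<some transaction id>', index='transaction_id')\
    .run(conn)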
21
bigchaindb/exceptions.py
Normal file
@ -0,0 +1,21 @@
"""Custom exceptions used in the `bigchaindb` package.
"""


class OperationError(Exception):
    """Raised when an operation cannot go through"""


class TransactionDoesNotExist(Exception):
    """Raised if the transaction is not in the database"""


class TransactionOwnerError(Exception):
    """Raised if a user tries to transfer a transaction they don't own"""


class DoubleSpend(Exception):
    """Raised if a double spend is found"""


class InvalidHash(Exception):
    """Raised if there was an error checking the hash for a particular operation"""


class InvalidSignature(Exception):
    """Raised if there was an error checking the signature for a particular operation"""
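A hedged sketch of how these exceptions are meant to be consumed (the validation call follows the tests later in this commit; the receiver key is a placeholder):

from bigchaindb import Bigchain, exceptions

b = Bigchain()
tx = b.create_transaction(b.me, '<receiver public key>', None, 'CREATE')
try:
    b.validate_transaction(tx)
except (exceptions.OperationError,
        exceptions.InvalidSignature,
        exceptions.DoubleSpend) as exc:
    # validation surfaces the failure reason as the exception type
    print('rejected:', exc)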
82
bigchaindb/processes.py
Normal file
@ -0,0 +1,82 @@
import logging
import multiprocessing as mp

import rethinkdb as r

from bigchaindb import Bigchain
from bigchaindb.voter import Voter
from bigchaindb.block import Block


logger = logging.getLogger(__name__)


class Processes(object):

    def __init__(self):
        # initialize the class
        self.q_new_block = mp.Queue()
        self.q_new_transaction = mp.Queue()

    def map_backlog(self):
        # listen to changes on the backlog and redirect the changes
        # to the correct queues

        # create a bigchain instance
        b = Bigchain()

        for change in r.table('backlog').changes().run(b.conn):

            # insert
            if change['old_val'] is None:
                self.q_new_transaction.put(change['new_val'])

            # delete
            if change['new_val'] is None:
                pass

            # update
            if change['new_val'] is not None and change['old_val'] is not None:
                pass

    def map_bigchain(self):
        # listen to changes on the bigchain and redirect the changes
        # to the correct queues

        # create a bigchain instance
        b = Bigchain()

        for change in r.table('bigchain').changes().run(b.conn):

            # insert
            if change['old_val'] is None:
                self.q_new_block.put(change['new_val'])

            # delete
            elif change['new_val'] is None:
                pass

            # update
            elif change['new_val'] is not None and change['old_val'] is not None:
                pass

    def start(self):
        # instantiate block and voter
        block = Block(self.q_new_transaction)

        # initialize the processes
        p_map_bigchain = mp.Process(name='bigchain_mapper', target=self.map_bigchain)
        p_map_backlog = mp.Process(name='backlog_mapper', target=self.map_backlog)
        p_block = mp.Process(name='block', target=block.start)
        p_voter = Voter(self.q_new_block)

        # start the processes
        logger.info('starting bigchain mapper')
        p_map_bigchain.start()
        logger.info('starting backlog mapper')
        p_map_backlog.start()
        logger.info('starting block')
        p_block.start()

        logger.info('starting voter')
        p_voter.start()
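Presumably this is what the `bigchain start` console script boils down to; a sketch of driving it directly:

from bigchaindb.processes import Processes

if __name__ == '__main__':
    # spawns the two changefeed mappers, the block writer, and the voter
    Processes().start()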
24
bigchaindb/util.py
Normal file
@ -0,0 +1,24 @@
import multiprocessing as mp


class ProcessGroup(object):

    def __init__(self, concurrency=None, group=None, target=None, name=None,
                 args=None, kwargs=None, daemon=None):
        self.concurrency = concurrency or mp.cpu_count()
        self.group = group
        self.target = target
        self.name = name
        self.args = args or ()
        self.kwargs = kwargs or {}
        self.daemon = daemon
        self.processes = []

    def start(self):
        for i in range(self.concurrency):
            proc = mp.Process(group=self.group, target=self.target,
                              name=self.name, args=self.args,
                              kwargs=self.kwargs, daemon=self.daemon)
            proc.start()
            self.processes.append(proc)
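A usage sketch for `ProcessGroup` (illustrative names, not part of the commit): fan one target function out over all available cores.

from bigchaindb.util import ProcessGroup

def worker(label):
    print('hello from', label)

if __name__ == '__main__':
    group = ProcessGroup(target=worker, args=('bigchain',))
    group.start()  # starts cpu_count() processes running `worker`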
188
bigchaindb/voter.py
Normal file
@ -0,0 +1,188 @@
import logging
import multiprocessing as mp
import ctypes

from bigchaindb import Bigchain


logger = logging.getLogger(__name__)


class BlockStream(object):
    """
    Combine the stream of new blocks coming from the changefeed with the list of unvoted blocks.

    This is a utility class that abstracts the source of data for the `Voter`.
    """

    def __init__(self, new_blocks):
        """
        Create a new BlockStream instance.

        Args:
            new_blocks (queue): a queue of new blocks
        """

        b = Bigchain()
        self.new_blocks = new_blocks
        # TODO: there might be duplicate blocks since we *first* get the changefeed and only *then* we query the
        #       database to get the old blocks.

        # TODO how about a one liner, something like:
        # self.unvoted_blocks = b.get_unvoted_blocks() if not b.federation_nodes else []
        self.unvoted_blocks = []
        if not b.federation_nodes:
            self.unvoted_blocks = b.get_unvoted_blocks()

    def get(self):
        """
        Return the next block to be processed.
        """
        try:
            # FIXME: apparently RethinkDB returns a list instead of a cursor when using `order_by`.
            #        We might change the `pop` in the future, when the driver will return a cursor.
            #        We have a test for this, so if the driver implementation changes we will get a failure:
            #        - tests/test_voter.py::TestBlockStream::test_if_old_blocks_get_should_return_old_block_first
            return self.unvoted_blocks.pop(0)
        except IndexError:
            return self.new_blocks.get()


class Voter(object):

    def __init__(self, q_new_block):
        """
        Initialize the class with the needed queues.

        Initialize with a queue where new blocks added to the bigchain will be put
        """
        self.q_new_block = q_new_block
        self.q_blocks_to_validate = mp.Queue()
        self.q_validated_block = mp.Queue()
        self.q_voted_block = mp.Queue()
        self.v_previous_block_id = mp.Value(ctypes.c_char_p)
        self.v_previous_block_number = mp.Value(ctypes.c_uint64)

    def feed_blocks(self):
        """
        Prepare the queue with blocks to validate
        """

        block_stream = BlockStream(self.q_new_block)
        while True:
            block = block_stream.get()

            # poison pill
            if block == 'stop':
                self.q_blocks_to_validate.put('stop')
                return

            self.q_blocks_to_validate.put(block)

    def validate(self):
        """
        Check whether incoming blocks are valid or not
        """

        # create a bigchain instance. All processes should create their own bigchain instance so that they all
        # have their own connection to the database
        b = Bigchain()

        logger.info('voter waiting for new blocks')
        while True:
            new_block = self.q_blocks_to_validate.get()

            # poison pill
            if new_block == 'stop':
                self.q_validated_block.put('stop')
                return

            logger.info('new_block arrived to voter')
            block_number = self.v_previous_block_number.value + 1
            validity = b.is_valid_block(new_block)

            self.q_validated_block.put((new_block,
                                        self.v_previous_block_id.value.decode(),
                                        block_number,
                                        validity))

            self.v_previous_block_id.value = new_block['id'].encode()
            self.v_previous_block_number.value = block_number

    def vote(self):
        """
        Vote on the block based on the decision of the validation
        """

        # create a bigchain instance
        b = Bigchain()

        while True:
            elem = self.q_validated_block.get()

            # poison pill
            if elem == 'stop':
                self.q_voted_block.put('stop')
                return

            validated_block, previous_block_id, block_number, decision = elem
            vote = b.vote(validated_block, previous_block_id, decision)
            self.q_voted_block.put((validated_block, vote, block_number))

    def update_block(self):
        """
        Append the vote in the bigchain table
        """

        # create a bigchain instance
        b = Bigchain()

        while True:
            elem = self.q_voted_block.get()

            # poison pill
            if elem == 'stop':
                logger.info('clean exit')
                return

            block, vote, block_number = elem
            logger.info('updating block %s with number %s and with vote %s', block['id'], block_number, vote)
            b.write_vote(block, vote, block_number)

    def bootstrap(self):
        """
        Before we start handling the new blocks received by the changefeed we need to handle unvoted blocks
        added to the bigchain while the process was down.

        We also need to set the previous_block_id and the previous block_number.
        """

        b = Bigchain()
        last_voted = b.get_last_voted_block()

        self.v_previous_block_number.value = last_voted['block_number']
        self.v_previous_block_id.value = last_voted['id'].encode()

    def kill(self):
        """
        Terminate the processes
        """
        self.q_new_block.put('stop')

    def start(self):
        """
        Initialize, spawn, and start the processes
        """

        self.bootstrap()

        # initialize the processes
        p_feed_blocks = mp.Process(name='block_feeder', target=self.feed_blocks)
        p_validate = mp.Process(name='block_validator', target=self.validate)
        p_vote = mp.Process(name='block_voter', target=self.vote)
        p_update = mp.Process(name='block_updater', target=self.update_block)

        # start the processes
        p_feed_blocks.start()
        p_validate.start()
        p_vote.start()
        p_update.start()
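The voter's lifecycle, as exercised by the tests later in this commit, is a sketch like the following: start the four pipeline stages, give them time to drain, then push the 'stop' poison pill through `kill()` so it cascades downstream.

import multiprocessing as mp
import time

from bigchaindb.voter import Voter

q_new_block = mp.Queue()
voter = Voter(q_new_block)
voter.start()   # bootstrap, then spawn feeder/validator/voter/updater
time.sleep(1)   # let pending blocks flow through the queues
voter.kill()    # puts 'stop' on q_new_block; it cascades downstream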
23
docker-compose.yml
Normal file
@ -0,0 +1,23 @@
rethinkdb:
  image: rethinkdb
  ports:
    - "9999:8080"
    - "28015"
  volumes_from:
    - rethinkdb-data

rethinkdb-data:
  image: rethinkdb
  volumes:
    - /data
  command: "true"

bigchain:
  build: .
  volumes:
    - ./:/usr/src/app/
  links:
    - rethinkdb
  environment:
    BIGCHAIN_DATABASE_HOST: rethinkdb
  command: bigchain start
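To reach the composed database from the host (a sketch, not part of the commit): the web UI on container port 8080 is published on host port 9999, while the driver port 28015 is published on a host port Docker picks, which `docker-compose port rethinkdb 28015` reveals.

import rethinkdb as r

# adjust the port to whatever `docker-compose port rethinkdb 28015` reports
conn = r.connect('localhost', 28015)
print(r.db_list().run(conn))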
3
pytest.ini
Normal file
@ -0,0 +1,3 @@
[pytest]
testpaths = tests
norecursedirs = .* *.egg *.egg-info env* devenv* docs
1
requirements.txt
Normal file
@ -0,0 +1 @@
-r requirements/common.txt
9
requirements/ci.txt
Normal file
@ -0,0 +1,9 @@
-r common.txt

pytest==2.8.2
pytest-cov
coverage
codecov
pep8
pyflakes
pylint
12
requirements/common.txt
Normal file
@ -0,0 +1,12 @@
rethinkdb==2.2.0.post1
pysha3==0.3
pytz==2015.7
repoze.lru==0.6
fake-factory==0.5.3
tornado==4.3
cryptography==1.2.1
statsd==3.2.1
python-rapidjson==0.0.6
logstats==0.2.1
base58==0.2.2
bitcoin==1.1.42
6
setup.cfg
Normal file
@ -0,0 +1,6 @@
[aliases]
test=pytest

[coverage:run]
source = .
omit = *test*
25
setup.py
@ -8,7 +8,7 @@ from setuptools import setup
setup(
    name='BigchainDB',
    version='0.0.0.dev1',
    version='0.0.0',
    description='BigchainDB: A Scalable Blockchain Database',
    long_description=__doc__,
    url='https://github.com/BigchainDB/bigchaindb/',
@ -25,5 +25,26 @@ setup(
        'Programming Language :: Python :: 3.5',
    ],

    packages=[],
    packages=['bigchaindb', 'bigchaindb.commands', 'bigchaindb.db'],

    entry_points={
        'console_scripts': [
            'bigchaindb=bigchaindb.commands.bigchain:main',
            'bigchaindb-benchmark=bigchaindb.commands.bigchain_benchmark:main'
        ],
    },
    install_requires=[
        'rethinkdb==2.2.0.post1',
        'pysha3==0.3',
        'pytz==2015.7',
        'tornado==4.3',
        'cryptography==1.2.1',
        'statsd==3.2.1',
        'python-rapidjson==0.0.6',
        'logstats==0.2.1',
        'base58==0.2.2',
        'bitcoin==1.1.42',
    ],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
)
0
tests/__init__.py
Normal file
49
tests/conftest.py
Normal file
@ -0,0 +1,49 @@
"""
Fixtures and setup / teardown functions

Tasks:
1. setup the test database before starting the tests
2. delete the test database after running the tests
"""

import pytest

import bigchaindb
import bigchaindb.config_utils


config = {
    'database': {
        'name': 'bigchain_test'
    },
    'keypair': {
        'private': '3i2FDXp87N9ExXSvWxqBAw9EgzoxxGTQNKbtxmWBpTyL',
        'public': '29Tw3ozmSRtN8XNofvsu5RdoQRk9gAonfpkFvRZDmhTPo'
    }
}

# Test user. Inputs will be created for this user. Cryptography keys.
USER_PRIVATE_KEY = 'GmRZxQdQv7tooMijXytQkexKuFN6mJocciJarAmMwTX2'
USER_PUBLIC_KEY = 'r3cEu8GNoz8rYpNJ61k7GqfR8VEvdUbtyHce8u1kaYwh'


@pytest.fixture(scope='function', autouse=True)
def restore_config(request):
    bigchaindb.config_utils.dict_config(config)


# FIXME: make these fixtures work :)
# @pytest.fixture
# def config():
#     return config
#
#
# @pytest.fixture
# def user_private_key():
#     return USER_PRIVATE_KEY
#
#
# @pytest.fixture
# def user_public_key():
#     return USER_PUBLIC_KEY
#
0
tests/db/__init__.py
Normal file
74
tests/db/conftest.py
Normal file
@ -0,0 +1,74 @@
"""
Fixtures and setup / teardown functions

Tasks:
1. setup the test database before starting the tests
2. delete the test database after running the tests
"""

import pytest
import rethinkdb as r

from bigchaindb import Bigchain
from bigchaindb.db import get_conn

from ..conftest import config, USER_PRIVATE_KEY, USER_PUBLIC_KEY


NOOP = None


@pytest.fixture(scope='module', autouse=True)
def setup_database(request):
    print('Initializing test db')
    get_conn().repl()
    try:
        r.db_create('bigchain_test').run()
    except r.ReqlOpFailedError as e:
        if e.message == 'Database `bigchain_test` already exists.':
            print(e.message)
            print('Deleting `bigchain_test` database.')
            r.db_drop('bigchain_test').run()
            r.db_create('bigchain_test').run()
        else:
            raise

    print('Finished initializing test db')

    # setup tables
    r.db('bigchain_test').table_create('bigchain').run()
    r.db('bigchain_test').table_create('backlog').run()
    # create the secondary indexes
    # to order blocks by timestamp
    r.db('bigchain_test').table('bigchain').index_create('block_timestamp', r.row['block']['timestamp']).run()
    # to order blocks by block number
    r.db('bigchain_test').table('bigchain').index_create('block_number', r.row['block']['block_number']).run()
    # to order transactions by timestamp
    r.db('bigchain_test').table('backlog').index_create('transaction_timestamp', r.row['transaction']['timestamp']).run()
    # compound index to read transactions from the backlog per assignee
    r.db('bigchain_test').table('backlog')\
        .index_create('assignee__transaction_timestamp', [r.row['assignee'], r.row['transaction']['timestamp']])\
        .run()

    def fin():
        print('Deleting `bigchain_test` database')
        get_conn().repl()
        r.db_drop('bigchain_test').run()
        print('Finished deleting `bigchain_test`')

    request.addfinalizer(fin)


@pytest.fixture(scope='function', autouse=True)
def cleanup_tables(request):
    def fin():
        get_conn().repl()
        r.db('bigchain_test').table('bigchain').delete().run()
        r.db('bigchain_test').table('backlog').delete().run()

    request.addfinalizer(fin)


@pytest.fixture
def b():
    return Bigchain()
766
tests/db/test_bigchain_api.py
Normal file
@ -0,0 +1,766 @@
import multiprocessing as mp
import random
import time

import pytest
import rethinkdb as r

import bigchaindb
from bigchaindb import exceptions
from bigchaindb import Bigchain
from bigchaindb.crypto import hash_data, PrivateKey, PublicKey, generate_key_pair
from bigchaindb.voter import Voter
from bigchaindb.block import Block

import bigchaindb.config_utils
from .conftest import USER_PUBLIC_KEY, USER_PRIVATE_KEY


def create_inputs(amount=1, b=None):
    # 1. create the genesis block
    b = b or Bigchain()
    try:
        b.create_genesis_block()
    except bigchaindb.core.GenesisBlockAlreadyExistsError:
        pass

    # 2. create a block with transactions for `USER` to spend
    transactions = []
    for i in range(amount):
        tx = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
        tx_signed = b.sign_transaction(tx, b.me_private)
        transactions.append(tx_signed)
        b.write_transaction(tx_signed)

    block = b.create_block(transactions)
    b.write_block(block, durability='hard')
    return block


@pytest.mark.skipif(reason='Some tests throw a ResourceWarning that might result in some weird '
                           'exceptions while running the tests. The problem seems to *not* '
                           'interfere with the correctness of the tests. ')
def test_remove_unclosed_sockets():
    pass


class TestBigchainApi(object):

    def test_create_transaction(self, b):
        tx = b.create_transaction('a', 'b', 'c', 'd')

        assert sorted(tx) == sorted(['id', 'transaction'])
        assert sorted(tx['transaction']) == sorted(['current_owner', 'new_owner', 'input', 'operation',
                                                    'timestamp', 'data'])

    def test_create_transaction_with_unsupported_payload_raises(self, b):
        with pytest.raises(TypeError):
            b.create_transaction('a', 'b', 'c', 'd', payload=[])

    def test_transaction_hash(self, b):
        payload = {'cats': 'are awesome'}
        tx = b.create_transaction('a', 'b', 'c', 'd', payload)
        tx_calculated = {
            'current_owner': 'a',
            'new_owner': 'b',
            'input': 'c',
            'operation': 'd',
            'timestamp': tx['transaction']['timestamp'],
            'data': {
                'hash': hash_data(b.serialize(payload)),
                'payload': payload
            }
        }
        assert tx['transaction']['data'] == tx_calculated['data']
        # assert tx_hash == tx_calculated_hash

    def test_transaction_signature(self, b):
        sk, vk = b.generate_keys()
        tx = b.create_transaction(vk, 'b', 'c', 'd')
        tx_signed = b.sign_transaction(tx, sk)

        assert 'signature' in tx_signed
        assert b.verify_signature(tx_signed)

    def test_serializer(self, b):
        tx = b.create_transaction('a', 'b', 'c', 'd')
        assert b.deserialize(b.serialize(tx)) == tx

    def test_write_transaction(self, b):
        create_inputs()
        input_tx = b.get_owned_ids(USER_PUBLIC_KEY).pop()
        tx = b.create_transaction(USER_PUBLIC_KEY, 'b', input_tx, 'd')
        tx_signed = b.sign_transaction(tx, USER_PRIVATE_KEY)
        response = b.write_transaction(tx_signed)

        assert response['skipped'] == 0
        assert response['deleted'] == 0
        assert response['unchanged'] == 0
        assert response['errors'] == 0
        assert response['replaced'] == 0
        assert response['inserted'] == 1

    def test_read_transaction(self, b):
        create_inputs()
        input_tx = b.get_owned_ids(USER_PUBLIC_KEY).pop()
        tx = b.create_transaction(USER_PUBLIC_KEY, 'b', input_tx, 'd')
        tx_signed = b.sign_transaction(tx, USER_PRIVATE_KEY)
        b.write_transaction(tx_signed)

        # create a block and write it to the bigchain before retrieving the transaction
        block = b.create_block([tx_signed])
        b.write_block(block, durability='hard')

        response = b.get_transaction(tx_signed["id"])
        assert b.serialize(tx_signed) == b.serialize(response)

    def test_assign_transaction_one_node(self, b):
        create_inputs()
        input_tx = b.get_owned_ids(USER_PUBLIC_KEY).pop()
        tx = b.create_transaction(USER_PUBLIC_KEY, 'b', input_tx, 'd')
        tx_signed = b.sign_transaction(tx, USER_PRIVATE_KEY)
        b.write_transaction(tx_signed)

        # retrieve the transaction
        response = r.table('backlog').get(tx_signed['id']).run(b.conn)

        # check if the assignee is the current node
        assert response['assignee'] == b.me

    def test_assign_transaction_multiple_nodes(self, b):
        # create 5 federation nodes
        for _ in range(5):
            b.federation_nodes.append(b.generate_keys()[1])
        create_inputs(20, b=b)

        # test the assignee for several transactions
        for _ in range(20):
            input_tx = b.get_owned_ids(USER_PUBLIC_KEY).pop()
            tx = b.create_transaction(USER_PUBLIC_KEY, 'b', input_tx, 'd')
            tx_signed = b.sign_transaction(tx, USER_PRIVATE_KEY)
            b.write_transaction(tx_signed)

            # retrieve the transaction
            response = r.table('backlog').get(tx_signed['id']).run(b.conn)

            # check if the assignee is one of the federation nodes
            assert response['assignee'] in b.federation_nodes

    def test_genesis_block(self, b):
        create_inputs()
        response = list(r.table('bigchain')
                        .filter(r.row['block_number'] == 0)
                        .run(b.conn))[0]

        assert response['block_number'] == 0
        assert len(response['block']['transactions']) == 1
        assert response['block']['transactions'][0]['transaction']['operation'] == 'GENESIS'
        assert response['block']['transactions'][0]['transaction']['input'] is None

    def test_create_genesis_block_fails_if_table_not_empty(self, b):
        b.create_genesis_block()

        with pytest.raises(bigchaindb.core.GenesisBlockAlreadyExistsError):
            b.create_genesis_block()

        genesis_blocks = list(r.table('bigchain')
                              .filter(r.row['block_number'] == 0)
                              .run(b.conn))

        assert len(genesis_blocks) == 1

    @pytest.mark.skipif(reason='This test may not make sense after changing the chainification mode')
    def test_get_last_block(self, b):
        # get the number of blocks
        num_blocks = r.table('bigchain').count().run(b.conn)

        # get the last block
        last_block = b.get_last_block()

        assert last_block['block']['block_number'] == num_blocks - 1

    @pytest.mark.skipif(reason='This test may not make sense after changing the chainification mode')
    def test_get_last_block_id(self, b):
        last_block = b.get_last_block()
        last_block_id = b.get_last_block_id()

        assert last_block_id == last_block['id']

    @pytest.mark.skipif(reason='This test may not make sense after changing the chainification mode')
    def test_get_previous_block(self, b):
        last_block = b.get_last_block()
        new_block = b.create_block([])
        b.write_block(new_block, durability='hard')

        prev_block = b.get_previous_block(new_block)

        assert prev_block == last_block

    @pytest.mark.skipif(reason='This test may not make sense after changing the chainification mode')
    def test_get_previous_block_id(self, b):
        last_block = b.get_last_block()
        new_block = b.create_block([])
        b.write_block(new_block, durability='hard')

        prev_block_id = b.get_previous_block_id(new_block)

        assert prev_block_id == last_block['id']

    def test_create_new_block(self, b):
        new_block = b.create_block([])
        block_hash = hash_data(b.serialize(new_block['block']))

        assert new_block['block']['voters'] == [b.me]
        assert new_block['block']['node_pubkey'] == b.me
        assert PublicKey(b.me).verify(b.serialize(new_block['block']), new_block['signature']) == True
        assert new_block['id'] == block_hash
        assert new_block['votes'] == []

    def test_get_last_voted_block_returns_genesis_if_no_votes_has_been_casted(self, b):
        b.create_genesis_block()
        genesis = list(r.table('bigchain')
                       .filter(r.row['block_number'] == 0)
                       .run(b.conn))[0]
        assert b.get_last_voted_block() == genesis

    def test_get_last_voted_block_returns_the_correct_block(self, b):
        genesis = b.create_genesis_block()

        assert b.get_last_voted_block() == genesis

        block_1 = b.create_block([])
        block_2 = b.create_block([])
        block_3 = b.create_block([])

        b.write_block(block_1, durability='hard')
        b.write_block(block_2, durability='hard')
        b.write_block(block_3, durability='hard')

        b.write_vote(block_1, b.vote(block_1, b.get_last_voted_block(), True), 1)
        assert b.get_last_voted_block()['id'] == block_1['id']

        b.write_vote(block_2, b.vote(block_2, b.get_last_voted_block(), True), 2)
        assert b.get_last_voted_block()['id'] == block_2['id']

        b.write_vote(block_3, b.vote(block_3, b.get_last_voted_block(), True), 3)
        assert b.get_last_voted_block()['id'] == block_3['id']
class TestTransactionValidation(object):

    def test_create_operation_with_inputs(self, b):
        tx = b.create_transaction('a', 'b', 'c', 'CREATE')
        with pytest.raises(ValueError) as excinfo:
            b.validate_transaction(tx)

        assert excinfo.value.args[0] == 'A CREATE operation has no inputs'
        assert b.is_valid_transaction(tx) == False

    def test_create_operation_not_federation_node(self, b):
        tx = b.create_transaction('a', 'b', None, 'CREATE')
        with pytest.raises(exceptions.OperationError) as excinfo:
            b.validate_transaction(tx)

        assert excinfo.value.args[0] == 'Only federation nodes can use the operation `CREATE`'
        assert b.is_valid_transaction(tx) == False

    def test_non_create_operation_no_inputs(self, b):
        tx = b.create_transaction('a', 'b', None, 'd')
        with pytest.raises(ValueError) as excinfo:
            b.validate_transaction(tx)

        assert excinfo.value.args[0] == 'Only `CREATE` transactions can have null inputs'
        assert b.is_valid_transaction(tx) == False

    def test_non_create_input_not_found(self, b):
        tx = b.create_transaction('a', 'b', 'c', 'd')
        with pytest.raises(exceptions.TransactionDoesNotExist) as excinfo:
            b.validate_transaction(tx)

        assert excinfo.value.args[0] == 'input `c` does not exist in the bigchain'
        assert b.is_valid_transaction(tx) == False

    def test_non_create_valid_input_wrong_owner(self, b):
        create_inputs()
        valid_input = b.get_owned_ids(USER_PUBLIC_KEY).pop()
        tx = b.create_transaction('a', 'b', valid_input, 'c')
        with pytest.raises(exceptions.TransactionOwnerError) as excinfo:
            b.validate_transaction(tx)

        assert excinfo.value.args[0] == 'current_owner `a` does not own the input `{}`'.format(valid_input)
        assert b.is_valid_transaction(tx) == False

    def test_non_create_double_spend(self, b):
        create_inputs()
        input_valid = b.get_owned_ids(USER_PUBLIC_KEY).pop()
        tx_valid = b.create_transaction(USER_PUBLIC_KEY, 'b', input_valid, 'd')
        tx_valid_signed = b.sign_transaction(tx_valid, USER_PRIVATE_KEY)
        b.write_transaction(tx_valid_signed)

        # create a block and write it to the bigchain
        block = b.create_block([tx_valid_signed])
        b.write_block(block, durability='hard')

        # create another transaction with the same input
        tx_double_spend = b.create_transaction(USER_PUBLIC_KEY, 'd', input_valid, 'd')
        with pytest.raises(exceptions.DoubleSpend) as excinfo:
            b.validate_transaction(tx_double_spend)

        assert excinfo.value.args[0] == 'input `{}` was already spent'.format(input_valid)
        assert b.is_valid_transaction(tx_double_spend) == False

    def test_wrong_transaction_hash(self, b):
        create_inputs()
        input_valid = b.get_owned_ids(USER_PUBLIC_KEY).pop()
        tx_valid = b.create_transaction(USER_PUBLIC_KEY, 'b', input_valid, 'd')

        # change the transaction hash
        tx_valid.update({'id': 'abcd'})
        with pytest.raises(exceptions.InvalidHash):
            b.validate_transaction(tx_valid)
        assert b.is_valid_transaction(tx_valid) == False

    def test_wrong_signature(self, b):
        create_inputs()
        input_valid = b.get_owned_ids(USER_PUBLIC_KEY).pop()
        tx_valid = b.create_transaction(USER_PUBLIC_KEY, 'b', input_valid, 'd')

        wrong_private_key = '4fyvJe1aw2qHZ4UNRYftXK7JU7zy9bCqoU5ps6Ne3xrY'

        tx_invalid_signed = b.sign_transaction(tx_valid, wrong_private_key)
        with pytest.raises(exceptions.InvalidSignature):
            b.validate_transaction(tx_invalid_signed)
        assert b.is_valid_transaction(tx_invalid_signed) == False

    def test_valid_create_transaction(self, b):
        tx = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
        tx_signed = b.sign_transaction(tx, b.me_private)
        assert tx_signed == b.validate_transaction(tx_signed)
        assert tx_signed == b.is_valid_transaction(tx_signed)

    def test_valid_non_create_transaction(self, b):
        create_inputs()
        input_valid = b.get_owned_ids(USER_PUBLIC_KEY).pop()
        tx_valid = b.create_transaction(USER_PUBLIC_KEY, 'b', input_valid, 'd')

        tx_valid_signed = b.sign_transaction(tx_valid, USER_PRIVATE_KEY)
        assert tx_valid_signed == b.validate_transaction(tx_valid_signed)
        assert tx_valid_signed == b.is_valid_transaction(tx_valid_signed)


class TestBlockValidation(object):

    def test_wrong_block_hash(self, b):
        block = b.create_block([])

        # change the block hash
        block.update({'id': 'abc'})
        with pytest.raises(exceptions.InvalidHash) as excinfo:
            b.validate_block(block)

    @pytest.mark.skipif(reason='Separated tx validation from block creation.')
    def test_invalid_transactions_in_block(self, b):
        # invalid transaction
        create_inputs()
        valid_input = b.get_owned_ids(USER_PUBLIC_KEY).pop()
        tx_invalid = b.create_transaction('a', 'b', valid_input, 'c')

        block = b.create_block([tx_invalid])
        # NOTE: `invalid_transactions` is left over from the old API; this test is skipped
        assert invalid_transactions == [tx_invalid]

        # create a block with invalid transactions
        block = {
            'timestamp': b.timestamp(),
            'transactions': [tx_invalid],
            'node_pubkey': b.me,
            'voters': b.federation_nodes
        }

        block_data = b.serialize(block)
        block_hash = hash_data(block_data)
        block_signature = PrivateKey(b.me_private).sign(block_data)

        block = {
            'id': block_hash,
            'block': block,
            'signature': block_signature,
            'votes': []
        }

        with pytest.raises(exceptions.TransactionOwnerError) as excinfo:
            b.validate_block(block)

        assert excinfo.value.args[0] == 'current_owner `a` does not own the input `{}`'.format(valid_input)

    def test_invalid_block_id(self, b):
        block = b.create_block([])

        # change the block hash
        block.update({'id': 'abc'})
        with pytest.raises(exceptions.InvalidHash):
            b.validate_block(block)

    def test_valid_block(self, b):
        create_inputs()
        # create a valid transaction
        input_valid = b.get_owned_ids(USER_PUBLIC_KEY).pop()
        tx_valid = b.create_transaction(USER_PUBLIC_KEY, 'b', input_valid, 'd')
        tx_valid_signed = b.sign_transaction(tx_valid, USER_PRIVATE_KEY)

        # create a valid block
        block = b.create_block([tx_valid_signed])

        assert block == b.validate_block(block)
        assert b.is_valid_block(block)
class TestBigchainCrypto(object):
    PRIVATE_VALUE = 64328150571824492670917070117568709277186368319388887463636481841106388379832
    PUBLIC_VALUE_X = 48388170575736684074633245566225141536152842355597159440179742847497614196929
    PUBLIC_VALUE_Y = 65233479152484407841598798165960909560839872511163322973341535484598825150846

    PRIVATE_VALUE_B58 = 'AaAp4xBavbe6VGeQF2mWdSKNM1r6HfR2Z1tAY6aUkwdq'
    PUBLIC_VALUE_COMPRESSED_B58 = 'ifEi3UuTDT4CqUUKiS5omgeDodhu2aRFHVp6LoahbEVe'

    def test_private_key_encode(self):
        private_value_base58 = PrivateKey.encode(self.PRIVATE_VALUE)
        assert private_value_base58 == self.PRIVATE_VALUE_B58

    def test_private_key_decode(self):
        private_value = PrivateKey.decode(self.PRIVATE_VALUE_B58)
        assert private_value == self.PRIVATE_VALUE

    def test_public_key_encode(self):
        public_value_compressed_base58 = PublicKey.encode(self.PUBLIC_VALUE_X, self.PUBLIC_VALUE_Y)
        assert public_value_compressed_base58 == self.PUBLIC_VALUE_COMPRESSED_B58

    def test_public_key_decode(self):
        public_value_x, public_value_y = PublicKey.decode(self.PUBLIC_VALUE_COMPRESSED_B58)
        assert public_value_x == self.PUBLIC_VALUE_X
        assert public_value_y == self.PUBLIC_VALUE_Y

    def test_sign_verify(self):
        message = 'Hello World!'
        public_key = PublicKey(self.PUBLIC_VALUE_COMPRESSED_B58)
        private_key = PrivateKey(self.PRIVATE_VALUE_B58)
        assert public_key.verify(message, private_key.sign(message)) == True

    def test_generate_key_pair(self):
        private_value_base58, public_value_compressed_base58 = generate_key_pair()
        assert PrivateKey.encode(
            PrivateKey.decode(private_value_base58)) == private_value_base58
        assert PublicKey.encode(
            *PublicKey.decode(public_value_compressed_base58)) == public_value_compressed_base58
class TestBigchainVoter(object):

    def test_valid_block_voting(self, b):
        # create the queue and voter
        q_new_block = mp.Queue()
        voter = Voter(q_new_block)

        genesis = b.create_genesis_block()
        # create a valid block
        block = b.create_block([])
        # assert the block is valid
        assert b.is_valid_block(block)
        b.write_block(block, durability='hard')

        # insert into the queue
        # FIXME: we disable this because the voter can currently vote more than one time for a block
        # q_new_block.put(block)

        # vote
        voter.start()
        # wait for the vote to be written
        time.sleep(1)
        voter.kill()

        # retrieve the block from the bigchain
        bigchain_block = r.table('bigchain').get(block['id']).run(b.conn)

        # validate the vote
        assert len(bigchain_block['votes']) == 1
        vote = bigchain_block['votes'][0]

        assert vote['vote']['voting_for_block'] == block['id']
        assert vote['vote']['previous_block'] == genesis['id']
        assert vote['vote']['is_block_valid'] == True
        assert vote['vote']['invalid_reason'] == None
        assert vote['node_pubkey'] == b.me
        assert PublicKey(b.me).verify(b.serialize(vote['vote']), vote['signature']) == True

    def test_invalid_block_voting(self, b):
        # create the queue and voter
        q_new_block = mp.Queue()
        voter = Voter(q_new_block)

        # create a transaction
        transaction = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
        transaction_signed = b.sign_transaction(transaction, b.me_private)

        genesis = b.create_genesis_block()
        # create an invalid block
        block = b.create_block([transaction_signed])
        # change the transaction id to make it invalid
        block['block']['transactions'][0]['id'] = 'abc'
        assert b.is_valid_block(block) == False
        b.write_block(block, durability='hard')

        # insert into the queue
        # FIXME: we disable this because the voter can currently vote more than one time for a block
        # q_new_block.put(block)

        # vote
        voter.start()
        # wait for the vote to be written
        time.sleep(1)
        voter.kill()

        # retrieve the block from the bigchain
        bigchain_block = r.table('bigchain').get(block['id']).run(b.conn)

        # validate the vote
        assert len(bigchain_block['votes']) == 1
        vote = bigchain_block['votes'][0]

        assert vote['vote']['voting_for_block'] == block['id']
        assert vote['vote']['previous_block'] == genesis['id']
        assert vote['vote']['is_block_valid'] == False
        assert vote['vote']['invalid_reason'] == None
        assert vote['node_pubkey'] == b.me
        assert PublicKey(b.me).verify(b.serialize(vote['vote']), vote['signature']) == True

    def test_vote_creation_valid(self, b):
        # create a valid block
        block = b.create_block([])
        # retrieve the vote
        vote = b.vote(block, 'abc', True)

        # assert the vote is correct
        assert vote['vote']['voting_for_block'] == block['id']
        assert vote['vote']['previous_block'] == 'abc'
        assert vote['vote']['is_block_valid'] == True
        assert vote['vote']['invalid_reason'] == None
        assert vote['node_pubkey'] == b.me
        assert PublicKey(b.me).verify(b.serialize(vote['vote']), vote['signature']) == True

    def test_vote_creation_invalid(self, b):
        # create a valid block
        block = b.create_block([])
        # retrieve the vote
        vote = b.vote(block, 'abc', False)

        # assert the vote is correct
        assert vote['vote']['voting_for_block'] == block['id']
        assert vote['vote']['previous_block'] == 'abc'
        assert vote['vote']['is_block_valid'] == False
        assert vote['vote']['invalid_reason'] == None
        assert vote['node_pubkey'] == b.me
        assert PublicKey(b.me).verify(b.serialize(vote['vote']), vote['signature']) == True
class TestBigchainBlock(object):

    def test_by_assignee(self, b):
        # create transactions and randomly assign them
        transactions = mp.Queue()
        count_assigned_to_me = 0
        for i in range(100):
            tx = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
            assignee = random.choice([b.me, 'aaa', 'bbb', 'ccc'])
            if assignee == b.me:
                count_assigned_to_me += 1

            tx['assignee'] = assignee
            transactions.put(tx)
        transactions.put('stop')

        # create a block instance
        block = Block(transactions)
        block.q_new_transaction = transactions
        # filter the transactions
        block.filter_by_assignee()

        # check if the number of transactions assigned to the node is the same as the number in
        # the queue minus 'stop'
        assert block.q_tx_to_validate.qsize() - 1 == count_assigned_to_me

    def test_validate_transactions(self, b):
        # create transactions and randomly invalidate some of them by changing the hash
        transactions = mp.Queue()
        count_valid = 0
        for i in range(100):
            valid = random.choice([True, False])
            tx = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
            tx = b.sign_transaction(tx, b.me_private)
            if not valid:
                tx['id'] = 'a' * 64
            else:
                count_valid += 1
            transactions.put(tx)
        transactions.put('stop')

        # create a block instance
        block = Block(transactions)
        block.q_tx_to_validate = transactions
        # validate the transactions
        block.validate_transactions()

        # check the number of valid transactions
        assert block.q_tx_validated.qsize() - 1 == count_valid
        assert block.q_tx_delete.qsize() - 1 == 100

    def test_create_block(self, b):
        # create transactions
        transactions = mp.Queue()
        for i in range(100):
            tx = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
            tx = b.sign_transaction(tx, b.me_private)
            transactions.put(tx)
        transactions.put('stop')

        # create a block instance
        block = Block(transactions)
        block.q_tx_validated = transactions
        # create blocks
        block.create_blocks()

        # check the number of created blocks
        assert block.q_block.qsize() - 1 == 1

    def test_write_block(self, b):
        # create transactions
        transactions = []
        blocks = mp.Queue()
        for i in range(100):
            tx = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
            tx = b.sign_transaction(tx, b.me_private)
            transactions.append(tx)

        # create a block
        block = b.create_block(transactions)
        blocks.put(block)
        blocks.put('stop')

        # create a block instance
        block = Block(transactions)
        block.q_block = blocks

        # make sure that we only have the genesis block in the bigchain
        r.table('bigchain').delete().run(b.conn)
        b.create_genesis_block()

        # write the blocks
        block.write_blocks()
        # let's give it some time for the block to be written
        time.sleep(1)

        # check if the number of blocks in the bigchain increased
        assert r.table('bigchain').count().run(b.conn) == 2

    def test_delete_transactions(self, b):
        # make sure that there are no transactions in the backlog
        r.table('backlog').delete().run(b.conn)

        # create and write transactions to the backlog
        transactions = mp.Queue()
        for i in range(100):
            tx = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
            tx = b.sign_transaction(tx, b.me_private)
            b.write_transaction(tx)
            transactions.put(tx['id'])
        transactions.put('stop')

        # create a block instance
        block = Block(transactions)
        block.q_tx_delete = transactions

        # make sure that there are transactions in the backlog
        assert r.table('backlog').count().run(b.conn) == 100

        # run the delete process
        block.delete_transactions()
        # give the db time
        time.sleep(1)

        # check if all transactions were deleted from the backlog
        assert r.table('backlog').count().run(b.conn) == 0

    def test_bootstrap(self, b):
        # make sure that there are no transactions in the backlog
        r.table('backlog').delete().run(b.conn)

        # create and write transactions to the backlog
        for i in range(100):
            tx = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
            tx = b.sign_transaction(tx, b.me_private)
            b.write_transaction(tx)

        # create a block instance
        block = Block(None)

        # run bootstrap
        initial_results = block.bootstrap()

        # we should have gotten a queue with 100 results
        assert initial_results.qsize() - 1 == 100

    def test_start(self, b):
        # start with 100 transactions in the backlog and 100 in the changefeed

        # make sure that there are no transactions in the backlog
        r.table('backlog').delete().run(b.conn)

        # create and write transactions to the backlog
        for i in range(100):
            tx = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
            tx = b.sign_transaction(tx, b.me_private)
            b.write_transaction(tx)

        # create 100 more transactions to emulate the changefeed
        new_transactions = mp.Queue()
        for i in range(100):
            tx = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
            tx = b.sign_transaction(tx, b.me_private)
            b.write_transaction(tx)
            new_transactions.put(tx)
        new_transactions.put('stop')

        # create a block instance
        block = Block(new_transactions)

        # start the block processes
        block.start()

        assert new_transactions.qsize() == 0
        assert r.table('backlog').count().run(b.conn) == 0
        assert r.table('bigchain').count().run(b.conn) == 2

    def test_empty_queues(self, b):
        # create an empty queue
        new_transactions = mp.Queue()

        # create a block instance
        block = Block(new_transactions)

        # create the block process
        p_block = mp.Process(target=block.start)

        # start the block process
        p_block.start()

        # wait for 6 seconds to give it time for an empty queue exception to occur
        time.sleep(6)

        # send the poison pill
        new_transactions.put('stop')

        # join the process
        p_block.join()

    def test_duplicated_transactions(self):
        pytest.skip('We may have duplicates in the initial_results and changefeed')
262
tests/db/test_voter.py
Normal file
@ -0,0 +1,262 @@
|
||||
import pytest
|
||||
import time
|
||||
import rethinkdb as r
|
||||
import multiprocessing as mp
|
||||
|
||||
from bigchaindb import Bigchain
|
||||
from bigchaindb.voter import Voter, BlockStream
|
||||
from bigchaindb.crypto import PublicKey
|
||||
|
||||
from .conftest import USER_PUBLIC_KEY
|
||||
|
||||
|
||||
class TestBigchainVoter(object):

    def test_valid_block_voting(self, b):
        q_new_block = mp.Queue()

        genesis = b.create_genesis_block()

        # create a valid block
        block = b.create_block([])
        # assert the block is valid
        assert b.is_valid_block(block)
        b.write_block(block, durability='hard')

        # create the voter for the queue
        voter = Voter(q_new_block)

        # vote
        voter.start()
        # wait for the vote to be written
        time.sleep(1)
        voter.kill()

        # retrieve the blocks from bigchain, ordered by timestamp
        blocks = list(r.table('bigchain')
                       .order_by(r.asc(r.row['block']['timestamp']))
                       .run(b.conn))

        # validate the vote
        assert len(blocks[1]['votes']) == 1
        vote = blocks[1]['votes'][0]

        assert vote['vote']['voting_for_block'] == block['id']
        assert vote['vote']['previous_block'] == genesis['id']
        assert vote['vote']['is_block_valid'] is True
        assert vote['vote']['invalid_reason'] is None
        assert vote['node_pubkey'] == b.me
        assert PublicKey(b.me).verify(b.serialize(vote['vote']), vote['signature']) is True
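For reference, the assertions above pin down the shape of a vote record. Reconstructed from the test, with placeholder values rather than real data:

vote = {
    'node_pubkey': '<public key of the voting node>',
    'signature': '<signature over the serialized inner vote>',
    'vote': {
        'voting_for_block': '<id of the block being voted on>',
        'previous_block': '<id of the block that precedes it>',
        'is_block_valid': True,
        'invalid_reason': None,
    },
}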
    def test_invalid_block_voting(self, b):
        # create the queue and the voter
        q_new_block = mp.Queue()
        voter = Voter(q_new_block)

        # create a transaction
        transaction = b.create_transaction(b.me, USER_PUBLIC_KEY, None, 'CREATE')
        transaction_signed = b.sign_transaction(transaction, b.me_private)

        genesis = b.create_genesis_block()

        # create an invalid block
        block = b.create_block([transaction_signed])
        # change the transaction id to make the block invalid
        block['block']['transactions'][0]['id'] = 'abc'
        assert not b.is_valid_block(block)
        b.write_block(block, durability='hard')

        # vote
        voter.start()
        time.sleep(1)
        voter.kill()

        # retrieve the blocks from bigchain, ordered by timestamp
        blocks = list(r.table('bigchain')
                       .order_by(r.asc(r.row['block']['timestamp']))
                       .run(b.conn))

        # validate the vote
        assert len(blocks[1]['votes']) == 1
        vote = blocks[1]['votes'][0]

        assert vote['vote']['voting_for_block'] == block['id']
        assert vote['vote']['previous_block'] == genesis['id']
        assert vote['vote']['is_block_valid'] is False
        # even for an invalid block no reason is recorded here
        assert vote['vote']['invalid_reason'] is None
        assert vote['node_pubkey'] == b.me
        assert PublicKey(b.me).verify(b.serialize(vote['vote']), vote['signature']) is True
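Both voting tests close by checking the vote signature. A sketch of the round trip behind that last assertion, assuming a PrivateKey counterpart with a sign method; only PublicKey.verify actually appears in this diff:

from bigchaindb.crypto import PrivateKey, PublicKey  # PrivateKey is assumed here

data = b.serialize(vote['vote'])
signature = PrivateKey(b.me_private).sign(data)   # what the voting node would do
assert PublicKey(b.me).verify(data, signature) is True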
    def test_vote_creation_valid(self, b):
        # create a valid block
        block = b.create_block([])
        # retrieve the vote
        vote = b.vote(block, 'abc', True)

        # assert the vote is correct
        assert vote['vote']['voting_for_block'] == block['id']
        assert vote['vote']['previous_block'] == 'abc'
        assert vote['vote']['is_block_valid'] is True
        assert vote['vote']['invalid_reason'] is None
        assert vote['node_pubkey'] == b.me
        assert PublicKey(b.me).verify(b.serialize(vote['vote']), vote['signature']) is True
    def test_vote_creation_invalid(self, b):
        # create a valid block
        block = b.create_block([])
        # retrieve the vote
        vote = b.vote(block, 'abc', False)

        # assert the vote is correct
        assert vote['vote']['voting_for_block'] == block['id']
        assert vote['vote']['previous_block'] == 'abc'
        assert vote['vote']['is_block_valid'] is False
        assert vote['vote']['invalid_reason'] is None
        assert vote['node_pubkey'] == b.me
        assert PublicKey(b.me).verify(b.serialize(vote['vote']), vote['signature']) is True
    def test_voter_considers_unvoted_blocks_when_single_node(self, b):
        # simulate a voter going down in a single node environment
        b.create_genesis_block()

        # insert blocks in the database while the voter process is not listening
        # (these blocks won't appear in the changefeed)
        block_1 = b.create_block([])
        b.write_block(block_1, durability='hard')
        block_2 = b.create_block([])
        b.write_block(block_2, durability='hard')

        # the voter is back online; we simulate that by creating a queue and a
        # Voter instance
        q_new_block = mp.Queue()
        voter = Voter(q_new_block)

        # create a new block that will appear in the changefeed
        block_3 = b.create_block([])
        b.write_block(block_3, durability='hard')

        # put the last block in the queue
        q_new_block.put(block_3)

        # vote
        voter.start()
        time.sleep(1)
        voter.kill()

        # retrieve the blocks from bigchain, ordered by timestamp
        blocks = list(r.table('bigchain')
                       .order_by(r.asc(r.row['block']['timestamp']))
                       .run(b.conn))

        # FIXME: remove the genesis block; we don't vote on it (might change in the future)
        blocks.pop(0)

        # every block, including the ones written while the voter was down,
        # must carry this node's vote
        assert all(block['votes'][0]['node_pubkey'] == b.me for block in blocks)
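To pass this test the Voter has to discover blocks that predate its changefeed subscription. One way to express that lookup in ReQL, offered as an illustrative sketch rather than the actual implementation:

import rethinkdb as r

# blocks in the bigchain table that carry no vote from this node
unvoted = list(r.table('bigchain')
                .filter(lambda block: block['votes']
                        .filter(lambda vote: vote['node_pubkey'] == b.me)
                        .is_empty())
                .run(b.conn))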
    def test_voter_chains_blocks_with_the_previous_ones(self, b):
        b.create_genesis_block()
        block_1 = b.create_block([])
        b.write_block(block_1, durability='hard')
        block_2 = b.create_block([])
        b.write_block(block_2, durability='hard')

        q_new_block = mp.Queue()

        voter = Voter(q_new_block)
        voter.start()
        time.sleep(1)
        voter.kill()

        # retrieve the blocks from bigchain, ordered by timestamp
        blocks = list(r.table('bigchain')
                       .order_by(r.asc(r.row['block']['timestamp']))
                       .run(b.conn))

        assert blocks[0]['block_number'] == 0
        assert blocks[1]['block_number'] == 1
        assert blocks[2]['block_number'] == 2

        # we don't vote on the genesis block right now
        # assert blocks[0]['votes'][0]['vote']['voting_for_block'] == genesis['id']
        assert blocks[1]['votes'][0]['vote']['voting_for_block'] == block_1['id']
        assert blocks[2]['votes'][0]['vote']['voting_for_block'] == block_2['id']

    # `skipif` needs a condition; use `skip` to skip unconditionally
    @pytest.mark.skip(reason='Updating the block_number must be atomic')
    def test_updating_block_number_must_be_atomic(self):
        pass

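The assertions above spell out the chaining invariant one pair at a time. Stated generically over the blocks list retrieved in the test (a sketch of the invariant the voter maintains, not implementation code):

# consecutive blocks: numbers increase by one, and each block's first vote
# points back at the block that precedes it
for prev, cur in zip(blocks, blocks[1:]):
    assert cur['block_number'] == prev['block_number'] + 1
    if cur['votes']:
        assert cur['votes'][0]['vote']['previous_block'] == prev['id']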
class TestBlockStream(object):

    def test_if_federation_size_is_greater_than_one_ignore_past_blocks(self, b):
        for _ in range(5):
            b.federation_nodes.append(b.generate_keys()[1])
        new_blocks = mp.Queue()
        bs = BlockStream(new_blocks)
        block_1 = b.create_block([])
        new_blocks.put(block_1)
        assert block_1 == bs.get()
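What this test implies about BlockStream's startup logic: recovering past blocks is only safe when this node is the whole federation, since otherwise other nodes vote while we are down. A sketch of that guard; the attribute follows the tests, while the helper name is hypothetical:

# inside BlockStream.__init__ (sketch; helper name assumed)
if len(b.federation_nodes) > 1:
    self.unvoted_blocks = []                     # let the rest of the federation handle them
else:
    self.unvoted_blocks = get_unvoted_blocks(b)  # hypothetical recovery helper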
    def test_if_no_old_blocks_get_should_return_new_blocks(self, b):
        new_blocks = mp.Queue()
        bs = BlockStream(new_blocks)

        # create two blocks
        block_1 = b.create_block([])
        block_2 = b.create_block([])

        # write the blocks
        b.write_block(block_1, durability='hard')
        b.write_block(block_2, durability='hard')

        # simulate a changefeed
        new_blocks.put(block_1)
        new_blocks.put(block_2)

        # and check that we get exactly these two blocks
        assert bs.get() == block_1
        assert bs.get() == block_2
    def test_if_old_blocks_get_should_return_old_block_first(self, b):
        # create two blocks
        block_1 = b.create_block([])
        block_2 = b.create_block([])

        # write the blocks
        b.write_block(block_1, durability='hard')
        b.write_block(block_2, durability='hard')

        new_blocks = mp.Queue()
        bs = BlockStream(new_blocks)

        # create two new blocks that will appear in the changefeed
        block_3 = b.create_block([])
        block_4 = b.create_block([])

        # simulate a changefeed
        new_blocks.put(block_3)
        new_blocks.put(block_4)

        # the two blocks written before the BlockStream was created are
        # waiting as unvoted blocks
        assert len(bs.unvoted_blocks) == 2

        # and check that we get the old blocks first
        assert bs.get() == block_1
        assert bs.get() == block_2
        assert bs.get() == block_3
        assert bs.get() == block_4

    # `skipif` needs a condition; use `skip` to skip unconditionally
    @pytest.mark.skip(reason='We may have duplicated blocks when retrieving the BlockStream')
    def test_ignore_duplicated_blocks_when_retrieving_the_blockstream(self):
        pass
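Taken together, these tests pin down the contract of BlockStream.get: serve blocks written before the stream existed first, then fall through to the live changefeed. A minimal sketch of that behaviour; unvoted_blocks follows the tests, while the queue attribute name and list storage are assumptions:

def get(self):
    # sketch only: drain the recovered backlog before touching the changefeed
    try:
        return self.unvoted_blocks.pop(0)   # blocks recovered at startup
    except IndexError:
        return self.new_blocks.get()        # live changefeed queue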
0
tests/utils/__init__.py
Normal file
38
tests/utils/test_config_utils.py
Normal file
@ -0,0 +1,38 @@
import copy

import pytest

import bigchaindb
from bigchaindb import config_utils


ORIGINAL_CONFIG = copy.deepcopy(bigchaindb.config)


@pytest.fixture(scope='function', autouse=True)
def clean_config():
    # restore a pristine configuration before every test in this module
    bigchaindb.config = copy.deepcopy(ORIGINAL_CONFIG)


def test_bigchain_instance_is_initialized_when_conf_provided():
    assert 'CONFIGURED' not in bigchaindb.config

    config_utils.dict_config({'keypair': {'public': 'a', 'private': 'b'}})

    assert bigchaindb.config['CONFIGURED'] is True
    b = bigchaindb.Bigchain()

    assert b.me
    assert b.me_private


def test_bigchain_instance_raises_when_not_configured(monkeypatch):
    assert 'CONFIGURED' not in bigchaindb.config

    # We need to disable ``bigchaindb.config_utils.autoconfigure`` to avoid
    # reading from existing configurations
    monkeypatch.setattr(config_utils, 'autoconfigure', lambda: 0)

    with pytest.raises(bigchaindb.core.KeypairNotFoundException):
        bigchaindb.Bigchain()
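The two tests bracket the configuration behaviour: dict_config fills in bigchaindb.config and marks the process as configured, after which Bigchain() can pick up the keypair, while an unconfigured process raises KeypairNotFoundException. A usage sketch of the happy path, with placeholder key values taken from the test:

import bigchaindb
from bigchaindb import config_utils

config_utils.dict_config({'keypair': {'public': 'a', 'private': 'b'}})

b = bigchaindb.Bigchain()
assert b.me and b.me_private  # the keypair comes from bigchaindb.config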