Merge branch 'master' into change-federation-to-consortium
Commit da92d1e1c2
@ -15,6 +15,13 @@ For reference, the possible headings are:
* **External Contributors** to list contributors outside of BigchainDB GmbH.
* **Notes**

## [0.9.4] - 2017-03-16
Tag name: v0.9.4

### Fixed
Fixed #1271 (false double spend error). Thanks to @jmduque for reporting the
problem along with a very detailed diagnosis and useful recommendations.

## [0.9.3] - 2017-03-06
Tag name: v0.9.3

Dockerfile | 39 lines changed
@ -1,33 +1,32 @@
|
||||
FROM ubuntu:xenial
|
||||
|
||||
# From http://stackoverflow.com/a/38553499
|
||||
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y locales
|
||||
|
||||
RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
|
||||
echo 'LANG="en_US.UTF-8"'>/etc/default/locale && \
|
||||
dpkg-reconfigure --frontend=noninteractive locales && \
|
||||
update-locale LANG=en_US.UTF-8
|
||||
|
||||
ENV LANG en_US.UTF-8
|
||||
|
||||
# The `apt-get update` command executed with the install instructions should
|
||||
# not use a locally cached storage layer. Force update the cache again.
|
||||
# https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#run
|
||||
RUN apt-get update && apt-get -y install python3 python3-pip libffi-dev \
|
||||
&& pip3 install --upgrade pip \
|
||||
&& pip3 install --upgrade setuptools
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
RUN mkdir -p /usr/src/app
|
||||
|
||||
COPY . /usr/src/app/
|
||||
|
||||
WORKDIR /usr/src/app
|
||||
|
||||
RUN pip3 install --no-cache-dir -e .
|
||||
RUN locale-gen en_US.UTF-8 && \
|
||||
apt-get -q update && \
|
||||
apt-get install -qy --no-install-recommends \
|
||||
python3 \
|
||||
python3-pip \
|
||||
libffi-dev \
|
||||
python3-dev \
|
||||
build-essential && \
|
||||
\
|
||||
pip3 install --upgrade --no-cache-dir pip setuptools && \
|
||||
\
|
||||
pip3 install --no-cache-dir -e . && \
|
||||
\
|
||||
apt-get remove -qy --purge gcc cpp binutils perl && \
|
||||
apt-get -qy autoremove && \
|
||||
apt-get -q clean all && \
|
||||
rm -rf /usr/share/perl /usr/share/perl5 /usr/share/man /usr/share/info /usr/share/doc && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
VOLUME ["/data"]
|
||||
|
||||
WORKDIR /data
|
||||
|
||||
ENV BIGCHAINDB_CONFIG_PATH /data/.bigchaindb
|
||||
|
@ -12,7 +12,7 @@ The `Bigchain` class is defined here. Most operations outlined in the [whitepap
### [`models.py`](./models.py)

`Block`, `Transaction`, and `Asset` classes are defined here. The classes mirror the block and transaction structure from the [documentation](https://docs.bigchaindb.com/projects/server/en/latest/topic-guides/models.html), but also include methods for validation and signing.
`Block`, `Transaction`, and `Asset` classes are defined here. The classes mirror the block and transaction structure from the [documentation](https://docs.bigchaindb.com/projects/server/en/latest/data-models/index.html), but also include methods for validation and signing.

### [`consensus.py`](./consensus.py)
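
As a quick illustration of the validation and signing methods mentioned above, here is a minimal sketch mirroring the `run_load` helper elsewhere in this diff (it assumes a configured node keypair is available through `Bigchain`):

import bigchaindb
from bigchaindb.models import Transaction

b = bigchaindb.Bigchain()
# Create a CREATE transaction owned by this node's key, sign it and submit it.
tx = Transaction.create([b.me], [([b.me], 1)])
tx = tx.sign([b.me_private])
b.write_transaction(tx)
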
@ -15,7 +15,3 @@ class OperationError(BackendError):

class DuplicateKeyError(OperationError):
    """Exception raised when an insert fails because the key is not unique"""


class BigchainDBCritical(Exception):
    """Unhandleable error that requires attention"""

@ -153,14 +153,22 @@ def get_spent(conn, transaction_id, output):
|
||||
cursor = conn.run(
|
||||
conn.collection('bigchain').aggregate([
|
||||
{'$match': {
|
||||
'block.transactions.inputs.fulfills.txid': transaction_id,
|
||||
'block.transactions.inputs.fulfills.output': output
|
||||
'block.transactions.inputs': {
|
||||
'$elemMatch': {
|
||||
'fulfills.txid': transaction_id,
|
||||
'fulfills.output': output,
|
||||
},
|
||||
},
|
||||
}},
|
||||
{'$unwind': '$block.transactions'},
|
||||
{'$match': {
|
||||
'block.transactions.inputs.fulfills.txid': transaction_id,
|
||||
'block.transactions.inputs.fulfills.output': output
|
||||
}}
|
||||
'block.transactions.inputs': {
|
||||
'$elemMatch': {
|
||||
'fulfills.txid': transaction_id,
|
||||
'fulfills.output': output,
|
||||
},
|
||||
},
|
||||
}},
|
||||
]))
|
||||
# we need to access some nested fields before returning so lets use a
|
||||
# generator to avoid having to read all records on the cursor at this point
|
||||
|
@ -9,15 +9,11 @@ import copy
|
||||
import json
|
||||
import sys
|
||||
|
||||
import logstats
|
||||
|
||||
from bigchaindb.common import crypto
|
||||
from bigchaindb.common.exceptions import (StartupError,
|
||||
DatabaseAlreadyExists,
|
||||
KeypairNotFoundException)
|
||||
import bigchaindb
|
||||
from bigchaindb.models import Transaction
|
||||
from bigchaindb.utils import ProcessGroup
|
||||
from bigchaindb import backend, processes
|
||||
from bigchaindb.backend import schema
|
||||
from bigchaindb.backend.admin import (set_replicas, set_shards, add_replicas,
|
||||
@ -206,39 +202,6 @@ def run_start(args):
|
||||
processes.start()
|
||||
|
||||
|
||||
def _run_load(tx_left, stats):
|
||||
logstats.thread.start(stats)
|
||||
b = bigchaindb.Bigchain()
|
||||
|
||||
while True:
|
||||
tx = Transaction.create([b.me], [([b.me], 1)])
|
||||
tx = tx.sign([b.me_private])
|
||||
b.write_transaction(tx)
|
||||
|
||||
stats['transactions'] += 1
|
||||
|
||||
if tx_left is not None:
|
||||
tx_left -= 1
|
||||
if tx_left == 0:
|
||||
break
|
||||
|
||||
|
||||
@configure_bigchaindb
|
||||
def run_load(args):
|
||||
logger.info('Starting %s processes', args.multiprocess)
|
||||
stats = logstats.Logstats()
|
||||
logstats.thread.start(stats)
|
||||
|
||||
tx_left = None
|
||||
if args.count > 0:
|
||||
tx_left = int(args.count / args.multiprocess)
|
||||
|
||||
workers = ProcessGroup(concurrency=args.multiprocess,
|
||||
target=_run_load,
|
||||
args=(tx_left, stats.get_child()))
|
||||
workers.start()
|
||||
|
||||
|
||||
@configure_bigchaindb
|
||||
def run_set_shards(args):
|
||||
conn = backend.connect()
|
||||
@ -373,25 +336,6 @@ def create_parser():
|
||||
help='A list of space separated hosts to '
|
||||
'remove from the replicaset. Each host '
|
||||
'should be in the form `host:port`.')
|
||||
|
||||
load_parser = subparsers.add_parser('load',
|
||||
help='Write transactions to the backlog')
|
||||
|
||||
load_parser.add_argument('-m', '--multiprocess',
|
||||
nargs='?',
|
||||
type=int,
|
||||
default=False,
|
||||
help='Spawn multiple processes to run the command, '
|
||||
'if no value is provided, the number of processes '
|
||||
'is equal to the number of cores of the host machine')
|
||||
|
||||
load_parser.add_argument('-c', '--count',
|
||||
default=0,
|
||||
type=int,
|
||||
help='Number of transactions to push. If the parameter -m '
|
||||
'is set, the count is distributed equally to all the '
|
||||
'processes')
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
|
@ -1,18 +1,31 @@
# Separate all crypto code so that we can easily test several implementations
from collections import namedtuple

import sha3
from cryptoconditions import crypto


CryptoKeypair = namedtuple('CryptoKeypair', ('private_key', 'public_key'))


def hash_data(data):
    """Hash the provided data using SHA3-256"""
    return sha3.sha3_256(data.encode()).hexdigest()


def generate_key_pair():
    """Generates a cryptographic key pair.

    Returns:
        :class:`~bigchaindb.common.crypto.CryptoKeypair`: A
        :obj:`collections.namedtuple` with named fields
        :attr:`~bigchaindb.common.crypto.CryptoKeypair.private_key` and
        :attr:`~bigchaindb.common.crypto.CryptoKeypair.public_key`.

    """
    # TODO FOR CC: Adjust interface so that this function becomes unnecessary
    private_key, public_key = crypto.ed25519_generate_key_pair()
    return private_key.decode(), public_key.decode()
    return CryptoKeypair(
        *(k.decode() for k in crypto.ed25519_generate_key_pair()))


PrivateKey = crypto.Ed25519SigningKey

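For reference, a minimal sketch of how callers consume the new namedtuple return value (the attribute access shown is just what `collections.namedtuple` provides; the printed output is illustrative only):

from bigchaindb.common.crypto import generate_key_pair

keypair = generate_key_pair()
# Named access is now possible...
print(keypair.public_key)
# ...and positional unpacking, as used by existing callers, still works,
# because a namedtuple is still a tuple.
private_key, public_key = keypair
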
|
@ -7,40 +7,6 @@ class ConfigurationError(BigchainDBError):
|
||||
"""Raised when there is a problem with server configuration"""
|
||||
|
||||
|
||||
class OperationError(BigchainDBError):
|
||||
"""Raised when an operation cannot go through"""
|
||||
|
||||
|
||||
class TransactionDoesNotExist(BigchainDBError):
|
||||
"""Raised if the transaction is not in the database"""
|
||||
|
||||
|
||||
class TransactionOwnerError(BigchainDBError):
|
||||
"""Raised if a user tries to transfer a transaction they don't own"""
|
||||
|
||||
|
||||
class DoubleSpend(BigchainDBError):
|
||||
"""Raised if a double spend is found"""
|
||||
|
||||
|
||||
class ValidationError(BigchainDBError):
|
||||
"""Raised if there was an error in validation"""
|
||||
|
||||
|
||||
class InvalidHash(ValidationError):
|
||||
"""Raised if there was an error checking the hash for a particular
|
||||
operation"""
|
||||
|
||||
|
||||
class SchemaValidationError(ValidationError):
|
||||
"""Raised if there was any error validating an object's schema"""
|
||||
|
||||
|
||||
class InvalidSignature(BigchainDBError):
|
||||
"""Raised if there was an error checking the signature for a particular
|
||||
operation"""
|
||||
|
||||
|
||||
class DatabaseAlreadyExists(BigchainDBError):
|
||||
"""Raised when trying to create the database but the db is already there"""
|
||||
|
||||
@ -49,6 +15,18 @@ class DatabaseDoesNotExist(BigchainDBError):
|
||||
"""Raised when trying to delete the database but the db is not there"""
|
||||
|
||||
|
||||
class StartupError(BigchainDBError):
|
||||
"""Raised when there is an error starting up the system"""
|
||||
|
||||
|
||||
class GenesisBlockAlreadyExistsError(BigchainDBError):
|
||||
"""Raised when trying to create the already existing genesis block"""
|
||||
|
||||
|
||||
class CyclicBlockchainError(BigchainDBError):
|
||||
"""Raised when there is a cycle in the blockchain"""
|
||||
|
||||
|
||||
class KeypairNotFoundException(BigchainDBError):
|
||||
"""Raised if operation cannot proceed because the keypair was not given"""
|
||||
|
||||
@ -58,34 +36,73 @@ class KeypairMismatchException(BigchainDBError):
|
||||
current owner(s)"""
|
||||
|
||||
|
||||
class StartupError(BigchainDBError):
|
||||
"""Raised when there is an error starting up the system"""
|
||||
class OperationError(BigchainDBError):
|
||||
"""Raised when an operation cannot go through"""
|
||||
|
||||
|
||||
class ImproperVoteError(BigchainDBError):
|
||||
################################################################################
|
||||
# Validation errors
|
||||
#
|
||||
# All validation errors (which are handleable errors, not faults) should
|
||||
# subclass ValidationError. However, where possible they should also have their
|
||||
# own distinct type to differentiate them from other validation errors,
|
||||
# especially for the purposes of testing.
|
||||
|
||||
|
||||
class ValidationError(BigchainDBError):
|
||||
"""Raised if there was an error in validation"""
|
||||
|
||||
|
||||
class DoubleSpend(ValidationError):
|
||||
"""Raised if a double spend is found"""
|
||||
|
||||
|
||||
class InvalidHash(ValidationError):
|
||||
"""Raised if there was an error checking the hash for a particular
|
||||
operation"""
|
||||
|
||||
|
||||
class SchemaValidationError(ValidationError):
|
||||
"""Raised if there was any error validating an object's schema"""
|
||||
|
||||
|
||||
class InvalidSignature(ValidationError):
|
||||
"""Raised if there was an error checking the signature for a particular
|
||||
operation"""
|
||||
|
||||
|
||||
class ImproperVoteError(ValidationError):
|
||||
"""Raised if a vote is not constructed correctly, or signed incorrectly"""
|
||||
|
||||
|
||||
class MultipleVotesError(BigchainDBError):
|
||||
class MultipleVotesError(ValidationError):
|
||||
"""Raised if a voter has voted more than once"""
|
||||
|
||||
|
||||
class GenesisBlockAlreadyExistsError(BigchainDBError):
|
||||
"""Raised when trying to create the already existing genesis block"""
|
||||
|
||||
|
||||
class CyclicBlockchainError(BigchainDBError):
|
||||
"""Raised when there is a cycle in the blockchain"""
|
||||
|
||||
|
||||
class TransactionNotInValidBlock(BigchainDBError):
|
||||
class TransactionNotInValidBlock(ValidationError):
|
||||
"""Raised when a transfer transaction is attempting to fulfill the
|
||||
outputs of a transaction that is in an invalid or undecided block"""
|
||||
|
||||
|
||||
class AssetIdMismatch(BigchainDBError):
|
||||
class AssetIdMismatch(ValidationError):
|
||||
"""Raised when multiple transaction inputs related to different assets"""
|
||||
|
||||
|
||||
class AmountError(BigchainDBError):
|
||||
class AmountError(ValidationError):
|
||||
"""Raised when there is a problem with a transaction's output amounts"""
|
||||
|
||||
|
||||
class InputDoesNotExist(ValidationError):
|
||||
"""Raised if a transaction input does not exist"""
|
||||
|
||||
|
||||
class TransactionOwnerError(ValidationError):
|
||||
"""Raised if a user tries to transfer a transaction they don't own"""
|
||||
|
||||
|
||||
class SybilError(ValidationError):
|
||||
"""If a block or vote comes from an unidentifiable node"""
|
||||
|
||||
|
||||
class DuplicateTransaction(ValidationError):
|
||||
"""Raised if a duplicated transaction is found"""
|
||||
|
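The practical effect of the hierarchy sketched in the comment block above is that callers can catch the single ValidationError base class instead of enumerating subclasses, as the web and pipeline code later in this diff does. A hedged sketch (tx and bigchain are assumed to be a Transaction instance and a Bigchain instance, as elsewhere in this commit):

from bigchaindb.common.exceptions import ValidationError

try:
    tx.validate(bigchain)
except ValidationError as exc:
    # DoubleSpend, InvalidHash, InvalidSignature, AmountError, ... all land here.
    print('Invalid transaction ({}): {}'.format(type(exc).__name__, exc))
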
@ -28,7 +28,9 @@ def _load_schema(name):
    return path, schema


TX_SCHEMA_PATH, TX_SCHEMA = _load_schema('transaction')
TX_SCHEMA_PATH, TX_SCHEMA_COMMON = _load_schema('transaction')
_, TX_SCHEMA_CREATE = _load_schema('transaction_create')
_, TX_SCHEMA_TRANSFER = _load_schema('transaction_transfer')
VOTE_SCHEMA_PATH, VOTE_SCHEMA = _load_schema('vote')

@ -41,8 +43,17 @@ def _validate_schema(schema, body):

def validate_transaction_schema(tx):
    """ Validate a transaction dict """
    _validate_schema(TX_SCHEMA, tx)
    """
    Validate a transaction dict.

    TX_SCHEMA_COMMON contains properties that are common to all types of
    transaction. TX_SCHEMA_[TRANSFER|CREATE] add additional constraints on top.
    """
    _validate_schema(TX_SCHEMA_COMMON, tx)
    if tx['operation'] == 'TRANSFER':
        _validate_schema(TX_SCHEMA_TRANSFER, tx)
    else:
        _validate_schema(TX_SCHEMA_CREATE, tx)


def validate_vote_schema(vote):
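
A small, hedged example of the layered validation described in the docstring above (it assumes the common transaction schema marks fields such as id, inputs and outputs as required, so the deliberately incomplete dict below fails):

from bigchaindb.common.schema import SchemaValidationError, validate_transaction_schema

# Common checks run first, then the TRANSFER-specific schema is applied.
tx = {'operation': 'TRANSFER'}
try:
    validate_transaction_schema(tx)
except SchemaValidationError as exc:
    print('Schema validation failed:', exc)
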
bigchaindb/common/schema/transaction_create.yaml | 28 (new file)
@ -0,0 +1,28 @@
|
||||
---
|
||||
"$schema": "http://json-schema.org/draft-04/schema#"
|
||||
type: object
|
||||
title: Transaction Schema - CREATE/GENESIS specific constraints
|
||||
required:
|
||||
- asset
|
||||
- inputs
|
||||
properties:
|
||||
asset:
|
||||
additionalProperties: false
|
||||
properties:
|
||||
data:
|
||||
anyOf:
|
||||
- type: object
|
||||
additionalProperties: true
|
||||
- type: 'null'
|
||||
inputs:
|
||||
type: array
|
||||
title: "Transaction inputs"
|
||||
maxItems: 1
|
||||
minItems: 1
|
||||
items:
|
||||
type: "object"
|
||||
required:
|
||||
- fulfills
|
||||
properties:
|
||||
fulfills:
|
||||
type: "null"
|
bigchaindb/common/schema/transaction_transfer.yaml | 29 (new file)
@ -0,0 +1,29 @@
|
||||
---
|
||||
"$schema": "http://json-schema.org/draft-04/schema#"
|
||||
type: object
|
||||
title: Transaction Schema - TRANSFER specific properties
|
||||
required:
|
||||
- asset
|
||||
properties:
|
||||
asset:
|
||||
additionalProperties: false
|
||||
properties:
|
||||
id:
|
||||
"$ref": "#/definitions/sha3_hexdigest"
|
||||
description: |
|
||||
ID of the transaction that created the asset.
|
||||
inputs:
|
||||
type: array
|
||||
title: "Transaction inputs"
|
||||
minItems: 1
|
||||
items:
|
||||
type: "object"
|
||||
required:
|
||||
- fulfills
|
||||
properties:
|
||||
fulfills:
|
||||
type: "object"
|
||||
definitions:
|
||||
sha3_hexdigest:
|
||||
pattern: "[0-9a-f]{64}"
|
||||
type: string
|
@ -768,20 +768,19 @@ class Transaction(object):
|
||||
key_pairs (dict): The keys to sign the Transaction with.
|
||||
"""
|
||||
input_ = deepcopy(input_)
|
||||
for owner_before in input_.owners_before:
|
||||
try:
|
||||
# TODO: CC should throw a KeypairMismatchException, instead of
|
||||
# our manual mapping here
|
||||
for owner_before in set(input_.owners_before):
|
||||
# TODO: CC should throw a KeypairMismatchException, instead of
|
||||
# our manual mapping here
|
||||
|
||||
# TODO FOR CC: Naming wise this is not so smart,
|
||||
# `get_subcondition` in fact doesn't return a
|
||||
# condition but a fulfillment
|
||||
# TODO FOR CC: Naming wise this is not so smart,
|
||||
# `get_subcondition` in fact doesn't return a
|
||||
# condition but a fulfillment
|
||||
|
||||
# TODO FOR CC: `get_subcondition` is singular. One would not
|
||||
# expect to get a list back.
|
||||
ccffill = input_.fulfillment
|
||||
subffill = ccffill.get_subcondition_from_vk(owner_before)[0]
|
||||
except IndexError:
|
||||
# TODO FOR CC: `get_subcondition` is singular. One would not
|
||||
# expect to get a list back.
|
||||
ccffill = input_.fulfillment
|
||||
subffills = ccffill.get_subcondition_from_vk(owner_before)
|
||||
if not subffills:
|
||||
raise KeypairMismatchException('Public key {} cannot be found '
|
||||
'in the fulfillment'
|
||||
.format(owner_before))
|
||||
@ -794,7 +793,8 @@ class Transaction(object):
|
||||
|
||||
# cryptoconditions makes no assumptions of the encoding of the
|
||||
# message to sign or verify. It only accepts bytestrings
|
||||
subffill.sign(tx_serialized.encode(), private_key)
|
||||
for subffill in subffills:
|
||||
subffill.sign(tx_serialized.encode(), private_key)
|
||||
self.inputs[index] = input_
|
||||
|
||||
def inputs_valid(self, outputs=None):
|
||||
@ -999,7 +999,8 @@ class Transaction(object):
|
||||
transactions = [transactions]
|
||||
|
||||
# create a set of the transactions' asset ids
|
||||
asset_ids = {tx.id if tx.operation == Transaction.CREATE else tx.asset['id']
|
||||
asset_ids = {tx.id if tx.operation == Transaction.CREATE
|
||||
else tx.asset['id']
|
||||
for tx in transactions}
|
||||
|
||||
# check that all the transactions have the same asset id
|
||||
@ -1009,7 +1010,7 @@ class Transaction(object):
|
||||
return asset_ids.pop()
|
||||
|
||||
@staticmethod
|
||||
def validate_structure(tx_body):
|
||||
def validate_id(tx_body):
|
||||
"""Validate the transaction ID of a transaction
|
||||
|
||||
Args:
|
||||
@ -1041,7 +1042,7 @@ class Transaction(object):
|
||||
Returns:
|
||||
:class:`~bigchaindb.common.transaction.Transaction`
|
||||
"""
|
||||
cls.validate_structure(tx)
|
||||
cls.validate_id(tx)
|
||||
inputs = [Input.from_dict(input_) for input_ in tx['inputs']]
|
||||
outputs = [Output.from_dict(output) for output in tx['outputs']]
|
||||
return cls(tx['operation'], tx['asset'], inputs, outputs,
|
||||
|
@ -1,11 +1,4 @@
|
||||
import logging
|
||||
|
||||
from bigchaindb.utils import verify_vote_signature
|
||||
from bigchaindb.common.schema import (SchemaValidationError,
|
||||
validate_vote_schema)
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
from bigchaindb.voting import Voting
|
||||
|
||||
|
||||
class BaseConsensusRules():
|
||||
@ -16,34 +9,15 @@ class BaseConsensusRules():
|
||||
All methods listed below must be implemented.
|
||||
|
||||
"""
|
||||
voting = Voting
|
||||
|
||||
@staticmethod
|
||||
def validate_transaction(bigchain, transaction):
|
||||
"""See :meth:`bigchaindb.models.Transaction.validate`
|
||||
for documentation.
|
||||
|
||||
"""
|
||||
for documentation."""
|
||||
return transaction.validate(bigchain)
|
||||
|
||||
@staticmethod
|
||||
def validate_block(bigchain, block):
|
||||
"""See :meth:`bigchaindb.models.Block.validate` for documentation."""
|
||||
return block.validate(bigchain)
|
||||
|
||||
@staticmethod
|
||||
def verify_vote(voters, signed_vote):
|
||||
"""Verify the signature of a vote.
|
||||
|
||||
Refer to the documentation of
|
||||
:func:`bigchaindb.utils.verify_signature`.
|
||||
"""
|
||||
if verify_vote_signature(voters, signed_vote):
|
||||
try:
|
||||
validate_vote_schema(signed_vote)
|
||||
return True
|
||||
except SchemaValidationError as exc:
|
||||
logger.warning(exc)
|
||||
else:
|
||||
logger.warning('Vote failed signature verification: '
|
||||
'%s with voters: %s', signed_vote, voters)
|
||||
return False
|
||||
|
@ -1,9 +1,7 @@
|
||||
import random
|
||||
import math
|
||||
import collections
|
||||
from time import time
|
||||
|
||||
from itertools import compress
|
||||
from bigchaindb import exceptions as core_exceptions
|
||||
from bigchaindb.common import crypto, exceptions
|
||||
from bigchaindb.common.utils import gen_timestamp, serialize
|
||||
from bigchaindb.common.transaction import TransactionLink
|
||||
@ -11,7 +9,6 @@ from bigchaindb.common.transaction import TransactionLink
|
||||
import bigchaindb
|
||||
|
||||
from bigchaindb import backend, config_utils, utils
|
||||
from bigchaindb.backend import exceptions as backend_exceptions
|
||||
from bigchaindb.consensus import BaseConsensusRules
|
||||
from bigchaindb.models import Block, Transaction
|
||||
|
||||
@ -72,6 +69,9 @@ class Bigchain(object):
|
||||
if not self.me or not self.me_private:
|
||||
raise exceptions.KeypairNotFoundException()
|
||||
|
||||
federation = property(lambda self: set(self.nodes_except_me + [self.me]))
|
||||
""" Set of federation member public keys """
|
||||
|
||||
def write_transaction(self, signed_transaction):
|
||||
"""Write the transaction to bigchain.
|
||||
|
||||
@ -110,19 +110,10 @@ class Bigchain(object):
|
||||
dict: database response or None if no reassignment is possible
|
||||
"""
|
||||
|
||||
if self.nodes_except_me:
|
||||
try:
|
||||
federation_nodes = self.nodes_except_me + [self.me]
|
||||
index_current_assignee = federation_nodes.index(transaction['assignee'])
|
||||
new_assignee = random.choice(federation_nodes[:index_current_assignee] +
|
||||
federation_nodes[index_current_assignee + 1:])
|
||||
except ValueError:
|
||||
# current assignee not in federation
|
||||
new_assignee = random.choice(self.nodes_except_me)
|
||||
|
||||
else:
|
||||
# There is no other node to assign to
|
||||
new_assignee = self.me
|
||||
other_nodes = tuple(
|
||||
self.federation.difference([transaction['assignee']])
|
||||
)
|
||||
new_assignee = random.choice(other_nodes) if other_nodes else self.me
|
||||
|
||||
return backend.query.update_transaction(
|
||||
self.connection, transaction['id'],
|
||||
@ -162,31 +153,6 @@ class Bigchain(object):
|
||||
|
||||
return self.consensus.validate_transaction(self, transaction)
|
||||
|
||||
def is_valid_transaction(self, transaction):
|
||||
"""Check whether a transaction is valid or invalid.
|
||||
|
||||
Similar to :meth:`~bigchaindb.Bigchain.validate_transaction`
|
||||
but never raises an exception. It returns :obj:`False` if
|
||||
the transaction is invalid.
|
||||
|
||||
Args:
|
||||
transaction (:Class:`~bigchaindb.models.Transaction`): transaction
|
||||
to check.
|
||||
|
||||
Returns:
|
||||
The :class:`~bigchaindb.models.Transaction` instance if valid,
|
||||
otherwise :obj:`False`.
|
||||
"""
|
||||
|
||||
try:
|
||||
return self.validate_transaction(transaction)
|
||||
except (ValueError, exceptions.OperationError,
|
||||
exceptions.TransactionDoesNotExist,
|
||||
exceptions.TransactionOwnerError, exceptions.DoubleSpend,
|
||||
exceptions.InvalidHash, exceptions.InvalidSignature,
|
||||
exceptions.TransactionNotInValidBlock, exceptions.AmountError):
|
||||
return False
|
||||
|
||||
def is_new_transaction(self, txid, exclude_block_id=None):
|
||||
"""
|
||||
Return True if the transaction does not exist in any
|
||||
@ -219,8 +185,7 @@ class Bigchain(object):
|
||||
|
||||
if include_status:
|
||||
if block:
|
||||
status = self.block_election_status(block_id,
|
||||
block['block']['voters'])
|
||||
status = self.block_election_status(block)
|
||||
return block, status
|
||||
else:
|
||||
return block
|
||||
@ -321,19 +286,15 @@ class Bigchain(object):
|
||||
blocks = backend.query.get_blocks_status_from_transaction(self.connection, txid)
|
||||
if blocks:
|
||||
# Determine the election status of each block
|
||||
validity = {
|
||||
block['id']: self.block_election_status(
|
||||
block['id'],
|
||||
block['block']['voters']
|
||||
) for block in blocks
|
||||
}
|
||||
validity = {block['id']: self.block_election_status(block)
|
||||
for block in blocks}
|
||||
|
||||
# NOTE: If there are multiple valid blocks with this transaction,
|
||||
# something has gone wrong
|
||||
if list(validity.values()).count(Bigchain.BLOCK_VALID) > 1:
|
||||
block_ids = str([block for block in validity
|
||||
if validity[block] == Bigchain.BLOCK_VALID])
|
||||
raise backend_exceptions.BigchainDBCritical(
|
||||
raise core_exceptions.CriticalDoubleInclusion(
|
||||
'Transaction {tx} is present in '
|
||||
'multiple valid blocks: {block_ids}'
|
||||
.format(tx=txid, block_ids=block_ids))
|
||||
@ -386,10 +347,9 @@ class Bigchain(object):
|
||||
if self.get_transaction(transaction['id']):
|
||||
num_valid_transactions += 1
|
||||
if num_valid_transactions > 1:
|
||||
raise exceptions.DoubleSpend(('`{}` was spent more than'
|
||||
' once. There is a problem'
|
||||
' with the chain')
|
||||
.format(txid))
|
||||
raise core_exceptions.CriticalDoubleSpend(
|
||||
'`{}` was spent more than once. There is a problem'
|
||||
' with the chain'.format(txid))
|
||||
|
||||
if num_valid_transactions:
|
||||
return Transaction.from_dict(transactions[0])
|
||||
@ -490,7 +450,7 @@ class Bigchain(object):
|
||||
raise exceptions.OperationError('Empty block creation is not '
|
||||
'allowed')
|
||||
|
||||
voters = self.nodes_except_me + [self.me]
|
||||
voters = list(self.federation)
|
||||
block = Block(validated_transactions, self.me, gen_timestamp(), voters)
|
||||
block = block.sign(self.me_private)
|
||||
|
||||
@ -509,36 +469,20 @@ class Bigchain(object):
|
||||
"""
|
||||
return self.consensus.validate_block(self, block)
|
||||
|
||||
def has_previous_vote(self, block_id, voters):
|
||||
def has_previous_vote(self, block_id):
|
||||
"""Check for previous votes from this node
|
||||
|
||||
Args:
|
||||
block_id (str): the id of the block to check
|
||||
voters (list(str)): the voters of the block to check
|
||||
|
||||
Returns:
|
||||
bool: :const:`True` if this block already has a
|
||||
valid vote from this node, :const:`False` otherwise.
|
||||
|
||||
Raises:
|
||||
ImproperVoteError: If there is already a vote,
|
||||
but the vote is invalid.
|
||||
|
||||
"""
|
||||
votes = list(backend.query.get_votes_by_block_id_and_voter(self.connection, block_id, self.me))
|
||||
|
||||
if len(votes) > 1:
|
||||
raise exceptions.MultipleVotesError('Block {block_id} has {n_votes} votes from public key {me}'
|
||||
.format(block_id=block_id, n_votes=str(len(votes)), me=self.me))
|
||||
has_previous_vote = False
|
||||
if votes:
|
||||
if utils.verify_vote_signature(voters, votes[0]):
|
||||
has_previous_vote = True
|
||||
else:
|
||||
raise exceptions.ImproperVoteError('Block {block_id} already has an incorrectly signed vote '
|
||||
'from public key {me}'.format(block_id=block_id, me=self.me))
|
||||
|
||||
return has_previous_vote
|
||||
el, _ = self.consensus.voting.partition_eligible_votes(votes, [self.me])
|
||||
return bool(el)
|
||||
|
||||
def write_block(self, block):
|
||||
"""Write a block to bigchain.
|
||||
@ -638,69 +582,15 @@ class Bigchain(object):
|
||||
# XXX: should this return instances of Block?
|
||||
return backend.query.get_unvoted_blocks(self.connection, self.me)
|
||||
|
||||
def block_election_status(self, block_id, voters):
|
||||
"""Tally the votes on a block, and return the status: valid, invalid, or undecided."""
|
||||
def block_election(self, block):
|
||||
if type(block) != dict:
|
||||
block = block.to_dict()
|
||||
votes = list(backend.query.get_votes_by_block_id(self.connection,
|
||||
block['id']))
|
||||
return self.consensus.voting.block_election(block, votes,
|
||||
self.federation)
|
||||
|
||||
votes = list(backend.query.get_votes_by_block_id(self.connection, block_id))
|
||||
n_voters = len(voters)
|
||||
|
||||
voter_counts = collections.Counter([vote['node_pubkey'] for vote in votes])
|
||||
for node in voter_counts:
|
||||
if voter_counts[node] > 1:
|
||||
raise exceptions.MultipleVotesError(
|
||||
'Block {block_id} has multiple votes ({n_votes}) from voting node {node_id}'
|
||||
.format(block_id=block_id, n_votes=str(voter_counts[node]), node_id=node))
|
||||
|
||||
if len(votes) > n_voters:
|
||||
raise exceptions.MultipleVotesError('Block {block_id} has {n_votes} votes cast, but only {n_voters} voters'
|
||||
.format(block_id=block_id, n_votes=str(len(votes)),
|
||||
n_voters=str(n_voters)))
|
||||
|
||||
# vote_cast is the list of votes e.g. [True, True, False]
|
||||
vote_cast = [vote['vote']['is_block_valid'] for vote in votes]
|
||||
# prev_block are the ids of the nominal prev blocks e.g.
|
||||
# ['block1_id', 'block1_id', 'block2_id']
|
||||
prev_block = [vote['vote']['previous_block'] for vote in votes]
|
||||
# vote_validity checks whether a vote is valid
|
||||
# or invalid, e.g. [False, True, True]
|
||||
vote_validity = [self.consensus.verify_vote(voters, vote) for vote in votes]
|
||||
|
||||
# element-wise product of stated vote and validity of vote
|
||||
# vote_cast = [True, True, False] and
|
||||
# vote_validity = [False, True, True] gives
|
||||
# [True, False]
|
||||
# Only the correctly signed votes are tallied.
|
||||
vote_list = list(compress(vote_cast, vote_validity))
|
||||
|
||||
# Total the votes. Here, valid and invalid refer
|
||||
# to the vote cast, not whether the vote itself
|
||||
# is valid or invalid.
|
||||
n_valid_votes = sum(vote_list)
|
||||
n_invalid_votes = len(vote_cast) - n_valid_votes
|
||||
|
||||
# The use of ceiling and floor is to account for the case of an
|
||||
# even number of voters where half the voters have voted 'invalid'
|
||||
# and half 'valid'. In this case, the block should be marked invalid
|
||||
# to avoid a tie. In the case of an odd number of voters this is not
|
||||
# relevant, since one side must be a majority.
|
||||
if n_invalid_votes >= math.ceil(n_voters / 2):
|
||||
return Bigchain.BLOCK_INVALID
|
||||
elif n_valid_votes > math.floor(n_voters / 2):
|
||||
# The block could be valid, but we still need to check if votes
|
||||
# agree on the previous block.
|
||||
#
|
||||
# First, only consider blocks with legitimate votes
|
||||
prev_block_list = list(compress(prev_block, vote_validity))
|
||||
# Next, only consider the blocks with 'yes' votes
|
||||
prev_block_valid_list = list(compress(prev_block_list, vote_list))
|
||||
counts = collections.Counter(prev_block_valid_list)
|
||||
# Make sure the majority vote agrees on previous node.
|
||||
# The majority vote must be the most common, by definition.
|
||||
# If it's not, there is no majority agreement on the previous
|
||||
# block.
|
||||
if counts.most_common()[0][1] > math.floor(n_voters / 2):
|
||||
return Bigchain.BLOCK_VALID
|
||||
else:
|
||||
return Bigchain.BLOCK_INVALID
|
||||
else:
|
||||
return Bigchain.BLOCK_UNDECIDED
|
||||
def block_election_status(self, block):
|
||||
"""Tally the votes on a block, and return the status:
|
||||
valid, invalid, or undecided."""
|
||||
return self.block_election(block)['status']
|
||||
|
@ -1,2 +1,10 @@
|
||||
class BigchainDBError(Exception):
|
||||
"""Base class for BigchainDB exceptions."""
|
||||
|
||||
|
||||
class CriticalDoubleSpend(BigchainDBError):
|
||||
"""Data integrity error that requires attention"""
|
||||
|
||||
|
||||
class CriticalDoubleInclusion(BigchainDBError):
|
||||
"""Data integrity error that requires attention"""
|
||||
|
@ -1,9 +1,10 @@
|
||||
from bigchaindb.common.crypto import hash_data, PublicKey, PrivateKey
|
||||
from bigchaindb.common.exceptions import (InvalidHash, InvalidSignature,
|
||||
OperationError, DoubleSpend,
|
||||
TransactionDoesNotExist,
|
||||
DoubleSpend, InputDoesNotExist,
|
||||
TransactionNotInValidBlock,
|
||||
AssetIdMismatch, AmountError)
|
||||
AssetIdMismatch, AmountError,
|
||||
SybilError,
|
||||
DuplicateTransaction)
|
||||
from bigchaindb.common.transaction import Transaction
|
||||
from bigchaindb.common.utils import gen_timestamp, serialize
|
||||
from bigchaindb.common.schema import validate_transaction_schema
|
||||
@ -11,7 +12,7 @@ from bigchaindb.common.schema import validate_transaction_schema
|
||||
|
||||
class Transaction(Transaction):
|
||||
def validate(self, bigchain):
|
||||
"""Validate a transaction.
|
||||
"""Validate transaction spend
|
||||
|
||||
Args:
|
||||
bigchain (Bigchain): an instantiated bigchaindb.Bigchain object.
|
||||
@ -22,45 +23,11 @@ class Transaction(Transaction):
|
||||
invalid.
|
||||
|
||||
Raises:
|
||||
OperationError: if the transaction operation is not supported
|
||||
TransactionDoesNotExist: if the input of the transaction is not
|
||||
found
|
||||
TransactionNotInValidBlock: if the input of the transaction is not
|
||||
in a valid block
|
||||
TransactionOwnerError: if the new transaction is using an input it
|
||||
doesn't own
|
||||
DoubleSpend: if the transaction is a double spend
|
||||
InvalidHash: if the hash of the transaction is wrong
|
||||
InvalidSignature: if the signature of the transaction is wrong
|
||||
ValidationError: If the transaction is invalid
|
||||
"""
|
||||
if len(self.inputs) == 0:
|
||||
raise ValueError('Transaction contains no inputs')
|
||||
|
||||
input_conditions = []
|
||||
inputs_defined = all([input_.fulfills for input_ in self.inputs])
|
||||
|
||||
# validate amounts
|
||||
if any(output.amount < 1 for output in self.outputs):
|
||||
raise AmountError('`amount` needs to be greater than zero')
|
||||
|
||||
if self.operation in (Transaction.CREATE, Transaction.GENESIS):
|
||||
# validate asset
|
||||
if self.asset['data'] is not None and not isinstance(self.asset['data'], dict):
|
||||
raise TypeError(('`asset.data` must be a dict instance or '
|
||||
'None for `CREATE` transactions'))
|
||||
# validate inputs
|
||||
if inputs_defined:
|
||||
raise ValueError('A CREATE operation has no inputs')
|
||||
elif self.operation == Transaction.TRANSFER:
|
||||
# validate asset
|
||||
if not isinstance(self.asset['id'], str):
|
||||
raise ValueError(('`asset.id` must be a string for '
|
||||
'`TRANSFER` transactions'))
|
||||
# check inputs
|
||||
if not inputs_defined:
|
||||
raise ValueError('Only `CREATE` transactions can have null '
|
||||
'inputs')
|
||||
|
||||
if self.operation == Transaction.TRANSFER:
|
||||
# store the inputs so that we can check if the asset ids match
|
||||
input_txs = []
|
||||
for input_ in self.inputs:
|
||||
@ -69,8 +36,8 @@ class Transaction(Transaction):
|
||||
get_transaction(input_txid, include_status=True)
|
||||
|
||||
if input_tx is None:
|
||||
raise TransactionDoesNotExist("input `{}` doesn't exist"
|
||||
.format(input_txid))
|
||||
raise InputDoesNotExist("input `{}` doesn't exist"
|
||||
.format(input_txid))
|
||||
|
||||
if status != bigchain.TX_VALID:
|
||||
raise TransactionNotInValidBlock(
|
||||
@ -85,8 +52,6 @@ class Transaction(Transaction):
|
||||
output = input_tx.outputs[input_.fulfills.output]
|
||||
input_conditions.append(output)
|
||||
input_txs.append(input_tx)
|
||||
if output.amount < 1:
|
||||
raise AmountError('`amount` needs to be greater than zero')
|
||||
|
||||
# Validate that all inputs are distinct
|
||||
links = [i.fulfills.to_uri() for i in self.inputs]
|
||||
@ -100,11 +65,6 @@ class Transaction(Transaction):
|
||||
' match the asset id of the'
|
||||
' transaction'))
|
||||
|
||||
# validate the amounts
|
||||
for output in self.outputs:
|
||||
if output.amount < 1:
|
||||
raise AmountError('`amount` needs to be greater than zero')
|
||||
|
||||
input_amount = sum([input_condition.amount for input_condition in input_conditions])
|
||||
output_amount = sum([output_condition.amount for output_condition in self.outputs])
|
||||
|
||||
@ -114,11 +74,6 @@ class Transaction(Transaction):
|
||||
' in the outputs `{}`')
|
||||
.format(input_amount, output_amount))
|
||||
|
||||
else:
|
||||
allowed_operations = ', '.join(Transaction.ALLOWED_OPERATIONS)
|
||||
raise TypeError('`operation`: `{}` must be either {}.'
|
||||
.format(self.operation, allowed_operations))
|
||||
|
||||
if not self.inputs_valid(input_conditions):
|
||||
raise InvalidSignature('Transaction signature is invalid.')
|
||||
|
||||
@ -205,18 +160,8 @@ class Block(object):
|
||||
raised.
|
||||
|
||||
Raises:
|
||||
OperationError: If a non-federation node signed the Block.
|
||||
InvalidSignature: If a Block's signature is invalid or if the
|
||||
block contains a transaction with an invalid signature.
|
||||
OperationError: if the transaction operation is not supported
|
||||
TransactionDoesNotExist: if the input of the transaction is not
|
||||
found
|
||||
TransactionNotInValidBlock: if the input of the transaction is not
|
||||
in a valid block
|
||||
TransactionOwnerError: if the new transaction is using an input it
|
||||
doesn't own
|
||||
DoubleSpend: if the transaction is a double spend
|
||||
InvalidHash: if the hash of the transaction is wrong
|
||||
ValidationError: If the block or any transaction in the block does
|
||||
not validate
|
||||
"""
|
||||
|
||||
self._validate_block(bigchain)
|
||||
@ -232,13 +177,11 @@ class Block(object):
|
||||
object.
|
||||
|
||||
Raises:
|
||||
OperationError: If a non-federation node signed the Block.
|
||||
InvalidSignature: If a Block's signature is invalid.
|
||||
ValidationError: If there is a problem with the block
|
||||
"""
|
||||
# Check if the block was created by a federation node
|
||||
possible_voters = (bigchain.nodes_except_me + [bigchain.me])
|
||||
if self.node_pubkey not in possible_voters:
|
||||
raise OperationError('Only federation nodes can create blocks')
|
||||
if self.node_pubkey not in bigchain.federation:
|
||||
raise SybilError('Only federation nodes can create blocks')
|
||||
|
||||
# Check that the signature is valid
|
||||
if not self.is_signature_valid():
|
||||
@ -251,17 +194,12 @@ class Block(object):
|
||||
bigchain (Bigchain): an instantiated bigchaindb.Bigchain object.
|
||||
|
||||
Raises:
|
||||
OperationError: if the transaction operation is not supported
|
||||
TransactionDoesNotExist: if the input of the transaction is not
|
||||
found
|
||||
TransactionNotInValidBlock: if the input of the transaction is not
|
||||
in a valid block
|
||||
TransactionOwnerError: if the new transaction is using an input it
|
||||
doesn't own
|
||||
DoubleSpend: if the transaction is a double spend
|
||||
InvalidHash: if the hash of the transaction is wrong
|
||||
InvalidSignature: if the signature of the transaction is wrong
|
||||
ValidationError: If an invalid transaction is found
|
||||
"""
|
||||
txids = [tx.id for tx in self.transactions]
|
||||
if len(txids) != len(set(txids)):
|
||||
raise DuplicateTransaction('Block has duplicate transaction')
|
||||
|
||||
for tx in self.transactions:
|
||||
# If a transaction is not valid, `validate_transactions` will
|
||||
# throw an exception and block validation will be canceled.
|
||||
@ -341,10 +279,10 @@ class Block(object):
|
||||
dict: The Block as a dict.
|
||||
|
||||
Raises:
|
||||
OperationError: If the Block doesn't contain any transactions.
|
||||
ValueError: If the Block doesn't contain any transactions.
|
||||
"""
|
||||
if len(self.transactions) == 0:
|
||||
raise OperationError('Empty block creation is not allowed')
|
||||
raise ValueError('Empty block creation is not allowed')
|
||||
|
||||
block = {
|
||||
'timestamp': self.timestamp,
|
||||
|
@ -13,8 +13,7 @@ import bigchaindb
|
||||
from bigchaindb import backend
|
||||
from bigchaindb.backend.changefeed import ChangeFeed
|
||||
from bigchaindb.models import Transaction
|
||||
from bigchaindb.common.exceptions import (SchemaValidationError, InvalidHash,
|
||||
InvalidSignature, AmountError)
|
||||
from bigchaindb.common.exceptions import ValidationError
|
||||
from bigchaindb import Bigchain
|
||||
|
||||
|
||||
@ -31,7 +30,7 @@ class BlockPipeline:
|
||||
def __init__(self):
|
||||
"""Initialize the BlockPipeline creator"""
|
||||
self.bigchain = Bigchain()
|
||||
self.txs = []
|
||||
self.txs = tx_collector()
|
||||
|
||||
def filter_tx(self, tx):
|
||||
"""Filter a transaction.
|
||||
@ -63,8 +62,7 @@ class BlockPipeline:
|
||||
"""
|
||||
try:
|
||||
tx = Transaction.from_dict(tx)
|
||||
except (SchemaValidationError, InvalidHash, InvalidSignature,
|
||||
AmountError):
|
||||
except ValidationError:
|
||||
return None
|
||||
|
||||
# If transaction is in any VALID or UNDECIDED block we
|
||||
@ -74,12 +72,14 @@ class BlockPipeline:
|
||||
return None
|
||||
|
||||
# If transaction is not valid it should not be included
|
||||
if not self.bigchain.is_valid_transaction(tx):
|
||||
try:
|
||||
tx.validate(self.bigchain)
|
||||
return tx
|
||||
except ValidationError as e:
|
||||
logger.warning('Invalid tx: %s', e)
|
||||
self.bigchain.delete_transaction(tx.id)
|
||||
return None
|
||||
|
||||
return tx
|
||||
|
||||
def create(self, tx, timeout=False):
|
||||
"""Create a block.
|
||||
|
||||
@ -98,11 +98,10 @@ class BlockPipeline:
|
||||
:class:`~bigchaindb.models.Block`: The block,
|
||||
if a block is ready, or ``None``.
|
||||
"""
|
||||
if tx:
|
||||
self.txs.append(tx)
|
||||
if len(self.txs) == 1000 or (timeout and self.txs):
|
||||
block = self.bigchain.create_block(self.txs)
|
||||
self.txs = []
|
||||
txs = self.txs.send(tx)
|
||||
if len(txs) == 1000 or (timeout and txs):
|
||||
block = self.bigchain.create_block(txs)
|
||||
self.txs = tx_collector()
|
||||
return block
|
||||
|
||||
def write(self, block):
|
||||
@ -134,6 +133,27 @@ class BlockPipeline:
|
||||
return block
|
||||
|
||||
|
||||
def tx_collector():
    """ A helper to deduplicate transactions """

    def snowflake():
        txids = set()
        txs = []
        while True:
            tx = yield txs
            if tx:
                if tx.id not in txids:
                    txids.add(tx.id)
                    txs.append(tx)
                else:
                    logger.info('Refusing to add tx to block twice: ' +
                                tx.id)

    s = snowflake()
    s.send(None)
    return s

def create_pipeline():
|
||||
"""Create and return the pipeline of operations to be distributed
|
||||
on different processes."""
|
||||
|
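A rough usage sketch of the tx_collector helper introduced above (FakeTx is a hypothetical stand-in for bigchaindb.models.Transaction, and the import path assumes this hunk belongs to the block pipeline module, as the surrounding BlockPipeline code suggests):

from collections import namedtuple

from bigchaindb.pipelines.block import tx_collector

FakeTx = namedtuple('FakeTx', 'id')  # stand-in with just the .id attribute used above

collector = tx_collector()
collector.send(FakeTx('abc'))          # first occurrence is collected
txs = collector.send(FakeTx('abc'))    # duplicate id is refused and logged
assert [t.id for t in txs] == ['abc']  # the running list still holds one tx
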
@ -16,6 +16,7 @@ from bigchaindb import Bigchain
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger_results = logging.getLogger('pipeline.election.results')
|
||||
|
||||
|
||||
class Election:
|
||||
@ -32,14 +33,29 @@ class Election:
|
||||
next_vote: The next vote.
|
||||
|
||||
"""
|
||||
next_block = self.bigchain.get_block(
|
||||
next_vote['vote']['voting_for_block'])
|
||||
try:
|
||||
block_id = next_vote['vote']['voting_for_block']
|
||||
node = next_vote['node_pubkey']
|
||||
except KeyError:
|
||||
return
|
||||
|
||||
block_status = self.bigchain.block_election_status(next_block['id'],
|
||||
next_block['block']['voters'])
|
||||
if block_status == self.bigchain.BLOCK_INVALID:
|
||||
next_block = self.bigchain.get_block(block_id)
|
||||
|
||||
result = self.bigchain.block_election(next_block)
|
||||
if result['status'] == self.bigchain.BLOCK_INVALID:
|
||||
return Block.from_dict(next_block)
|
||||
|
||||
# Log the result
|
||||
if result['status'] != self.bigchain.BLOCK_UNDECIDED:
|
||||
msg = 'node:%s block:%s status:%s' % \
|
||||
(node, block_id, result['status'])
|
||||
# Extra data can be accessed via the log formatter.
|
||||
# See logging.dictConfig.
|
||||
logger_results.debug(msg, extra={
|
||||
'current_vote': next_vote,
|
||||
'election_result': result,
|
||||
})
|
||||
|
||||
def requeue_transactions(self, invalid_block):
|
||||
"""
|
||||
Liquidates transactions from invalid blocks so they can be processed again
|
||||
|
@ -48,8 +48,7 @@ class Vote:
|
||||
[([self.bigchain.me], 1)])
|
||||
|
||||
def validate_block(self, block):
|
||||
if not self.bigchain.has_previous_vote(block['id'],
|
||||
block['block']['voters']):
|
||||
if not self.bigchain.has_previous_vote(block['id']):
|
||||
try:
|
||||
block = Block.from_dict(block)
|
||||
except (exceptions.InvalidHash):
|
||||
@ -61,7 +60,7 @@ class Vote:
|
||||
return block['id'], [self.invalid_dummy_tx]
|
||||
try:
|
||||
block._validate_block(self.bigchain)
|
||||
except (exceptions.OperationError, exceptions.InvalidSignature):
|
||||
except exceptions.ValidationError:
|
||||
# XXX: if a block is invalid we should skip the `validate_tx`
|
||||
# step, but since we are in a pipeline we cannot just jump to
|
||||
# another function. Hackish solution: generate an invalid
|
||||
@ -105,7 +104,13 @@ class Vote:
|
||||
if not new:
|
||||
return False, block_id, num_tx
|
||||
|
||||
valid = bool(self.bigchain.is_valid_transaction(tx))
|
||||
try:
|
||||
tx.validate(self.bigchain)
|
||||
valid = True
|
||||
except exceptions.ValidationError as e:
|
||||
logger.warning('Invalid tx: %s', e)
|
||||
valid = False
|
||||
|
||||
return valid, block_id, num_tx
|
||||
|
||||
def vote(self, tx_validity, block_id, num_tx):
|
||||
|
@ -3,9 +3,6 @@ import threading
|
||||
import queue
|
||||
import multiprocessing as mp
|
||||
|
||||
from bigchaindb.common import crypto
|
||||
from bigchaindb.common.utils import serialize
|
||||
|
||||
|
||||
class ProcessGroup(object):
|
||||
|
||||
@ -116,30 +113,6 @@ def condition_details_has_owner(condition_details, owner):
|
||||
return False
|
||||
|
||||
|
||||
def verify_vote_signature(voters, signed_vote):
|
||||
"""Verify the signature of a vote
|
||||
|
||||
A valid vote should have been signed by a voter's private key.
|
||||
|
||||
Args:
|
||||
voters (list): voters of the block that is under election
|
||||
signed_vote (dict): a vote with the `signature` included.
|
||||
|
||||
Returns:
|
||||
bool: True if the signature is correct, False otherwise.
|
||||
"""
|
||||
|
||||
signature = signed_vote['signature']
|
||||
pk_base58 = signed_vote['node_pubkey']
|
||||
|
||||
# immediately return False if the voter is not in the block voter list
|
||||
if pk_base58 not in voters:
|
||||
return False
|
||||
|
||||
public_key = crypto.PublicKey(pk_base58)
|
||||
return public_key.verify(serialize(signed_vote['vote']).encode(), signature)
|
||||
|
||||
|
||||
def is_genesis_block(block):
|
||||
"""Check if the block is the genesis block.
|
||||
|
||||
|
@ -12,14 +12,14 @@ UNDECIDED = 'undecided'

class Voting:
    """
    Everything to do with creating and checking votes.
    Everything to do with verifying and counting votes for block election.

    All functions in this class should be referentially transparent, that is,
    they always give the same output for a given input. This makes it easier
    to test. This also means no logging!

    Assumptions regarding data:
        * Vote is a dictionary, but it is not assumed that any properties are.
        * Vote is a dictionary, but no assumptions are made on its properties.
        * Everything else is assumed to be structurally correct, otherwise errors
          may be thrown.
    """
@ -30,10 +30,11 @@ class Voting:
        Calculate the election status of a block.
        """
        eligible_voters = set(block['block']['voters']) & set(keyring)
        n_voters = len(eligible_voters)
        eligible_votes, ineligible_votes = \
            cls.partition_eligible_votes(votes, eligible_voters)
        n_voters = len(eligible_voters)
        results = cls.count_votes(eligible_votes)
        results['block_id'] = block['id']
        results['status'] = cls.decide_votes(n_voters, **results['counts'])
        results['ineligible'] = ineligible_votes
        return results
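
A hedged sketch of how the election summary assembled above might be consumed (block, votes and keyring are placeholders with the same shapes used by Bigchain.block_election earlier in this diff; only keys assigned in this hunk are read):

from bigchaindb.voting import UNDECIDED, Voting

# block, votes and keyring obtained as in Bigchain.block_election above.
result = Voting.block_election(block, votes, keyring)
if result['status'] != UNDECIDED:
    print('Block', result['block_id'], 'decided as', result['status'])
print('Ineligible votes ignored:', len(result['ineligible']))
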
@ -9,20 +9,7 @@ import logging
|
||||
from flask import current_app, request
|
||||
from flask_restful import Resource, reqparse
|
||||
|
||||
|
||||
from bigchaindb.common.exceptions import (
|
||||
AmountError,
|
||||
DoubleSpend,
|
||||
InvalidHash,
|
||||
InvalidSignature,
|
||||
SchemaValidationError,
|
||||
OperationError,
|
||||
TransactionDoesNotExist,
|
||||
TransactionOwnerError,
|
||||
TransactionNotInValidBlock,
|
||||
ValidationError,
|
||||
)
|
||||
|
||||
from bigchaindb.common.exceptions import SchemaValidationError, ValidationError
|
||||
from bigchaindb.models import Transaction
|
||||
from bigchaindb.web.views.base import make_error
|
||||
from bigchaindb.web.views import parameters
|
||||
@ -84,7 +71,7 @@ class TransactionListApi(Resource):
|
||||
message='Invalid transaction schema: {}'.format(
|
||||
e.__cause__.message)
|
||||
)
|
||||
except (ValidationError, InvalidSignature) as e:
|
||||
except ValidationError as e:
|
||||
return make_error(
|
||||
400,
|
||||
'Invalid transaction ({}): {}'.format(type(e).__name__, e)
|
||||
@ -93,15 +80,7 @@ class TransactionListApi(Resource):
|
||||
with pool() as bigchain:
|
||||
try:
|
||||
bigchain.validate_transaction(tx_obj)
|
||||
except (ValueError,
|
||||
OperationError,
|
||||
TransactionDoesNotExist,
|
||||
TransactionOwnerError,
|
||||
DoubleSpend,
|
||||
InvalidHash,
|
||||
InvalidSignature,
|
||||
TransactionNotInValidBlock,
|
||||
AmountError) as e:
|
||||
except ValidationError as e:
|
||||
return make_error(
|
||||
400,
|
||||
'Invalid transaction ({}): {}'.format(type(e).__name__, e)
|
||||
|
@ -39,7 +39,6 @@ fi
|
||||
|
||||
echo "NUM_NODES = "$NUM_NODES
|
||||
echo "BRANCH = "$BRANCH
|
||||
echo "WHAT_TO_DEPLOY = "$WHAT_TO_DEPLOY
|
||||
echo "SSH_KEY_NAME" = $SSH_KEY_NAME
|
||||
echo "USE_KEYPAIRS_FILE = "$USE_KEYPAIRS_FILE
|
||||
echo "IMAGE_ID = "$IMAGE_ID
|
||||
@ -85,7 +84,7 @@ if [[ $CONFILES_COUNT != $NUM_NODES ]]; then
|
||||
fi
|
||||
|
||||
# Auto-generate the tag to apply to all nodes in the cluster
|
||||
TAG="BDB-"$WHAT_TO_DEPLOY"-"`date +%m-%d@%H:%M`
|
||||
TAG="BDB-Server-"`date +%m-%d@%H:%M`
|
||||
echo "TAG = "$TAG
|
||||
|
||||
# Change the file permissions on the SSH private key file
|
||||
@ -121,25 +120,24 @@ fab install_base_software
|
||||
fab get_pip3
|
||||
fab upgrade_setuptools
|
||||
|
||||
if [ "$WHAT_TO_DEPLOY" == "servers" ]; then
|
||||
# (Re)create the RethinkDB configuration file conf/rethinkdb.conf
|
||||
if [ "$ENABLE_WEB_ADMIN" == "True" ]; then
|
||||
if [ "$BIND_HTTP_TO_LOCALHOST" == "True" ]; then
|
||||
python create_rethinkdb_conf.py --enable-web-admin --bind-http-to-localhost
|
||||
else
|
||||
python create_rethinkdb_conf.py --enable-web-admin
|
||||
fi
|
||||
# (Re)create the RethinkDB configuration file conf/rethinkdb.conf
|
||||
if [ "$ENABLE_WEB_ADMIN" == "True" ]; then
|
||||
if [ "$BIND_HTTP_TO_LOCALHOST" == "True" ]; then
|
||||
python create_rethinkdb_conf.py --enable-web-admin --bind-http-to-localhost
|
||||
else
|
||||
python create_rethinkdb_conf.py
|
||||
python create_rethinkdb_conf.py --enable-web-admin
|
||||
fi
|
||||
# Rollout RethinkDB and start it
|
||||
fab prep_rethinkdb_storage:$USING_EBS
|
||||
fab install_rethinkdb
|
||||
fab configure_rethinkdb
|
||||
fab delete_rethinkdb_data
|
||||
fab start_rethinkdb
|
||||
else
|
||||
python create_rethinkdb_conf.py
|
||||
fi
|
||||
|
||||
# Rollout RethinkDB and start it
|
||||
fab prep_rethinkdb_storage:$USING_EBS
|
||||
fab install_rethinkdb
|
||||
fab configure_rethinkdb
|
||||
fab delete_rethinkdb_data
|
||||
fab start_rethinkdb
|
||||
|
||||
# Rollout BigchainDB (but don't start it yet)
|
||||
if [ "$BRANCH" == "pypi" ]; then
|
||||
fab install_bigchaindb_from_pypi
|
||||
@ -156,48 +154,40 @@ fi
|
||||
|
||||
# Configure BigchainDB on all nodes
|
||||
|
||||
if [ "$WHAT_TO_DEPLOY" == "servers" ]; then
|
||||
# The idea is to send a bunch of locally-created configuration
|
||||
# files out to each of the instances / nodes.
|
||||
# The idea is to send a bunch of locally-created configuration
|
||||
# files out to each of the instances / nodes.
|
||||
|
||||
# Assume a set of $NUM_NODES BigchainDB config files
|
||||
# already exists in the confiles directory.
|
||||
# One can create a set using a command like
|
||||
# ./make_confiles.sh confiles $NUM_NODES
|
||||
# (We can't do that here now because this virtual environment
|
||||
# is a Python 2 environment that may not even have
|
||||
# bigchaindb installed, so bigchaindb configure can't be called)
|
||||
# Assume a set of $NUM_NODES BigchainDB config files
|
||||
# already exists in the confiles directory.
|
||||
# One can create a set using a command like
|
||||
# ./make_confiles.sh confiles $NUM_NODES
|
||||
# (We can't do that here now because this virtual environment
|
||||
# is a Python 2 environment that may not even have
|
||||
# bigchaindb installed, so bigchaindb configure can't be called)
|
||||
|
||||
# Transform the config files in the confiles directory
|
||||
# to have proper keyrings etc.
|
||||
if [ "$USE_KEYPAIRS_FILE" == "True" ]; then
|
||||
python clusterize_confiles.py -k confiles $NUM_NODES
|
||||
else
|
||||
python clusterize_confiles.py confiles $NUM_NODES
|
||||
fi
|
||||
|
||||
# Send one of the config files to each instance
|
||||
for (( HOST=0 ; HOST<$NUM_NODES ; HOST++ )); do
|
||||
CONFILE="bcdb_conf"$HOST
|
||||
echo "Sending "$CONFILE
|
||||
fab set_host:$HOST send_confile:$CONFILE
|
||||
done
|
||||
|
||||
# Initialize BigchainDB (i.e. Create the RethinkDB database,
|
||||
# the tables, the indexes, and genesis block). Note that
|
||||
# this will only be sent to one of the nodes, see the
|
||||
# definition of init_bigchaindb() in fabfile.py to see why.
|
||||
fab init_bigchaindb
|
||||
fab set_shards:$NUM_NODES
|
||||
echo "To set the replication factor to 3, do: fab set_replicas:3"
|
||||
echo "To start BigchainDB on all the nodes, do: fab start_bigchaindb"
|
||||
# Transform the config files in the confiles directory
|
||||
# to have proper keyrings etc.
|
||||
if [ "$USE_KEYPAIRS_FILE" == "True" ]; then
|
||||
python clusterize_confiles.py -k confiles $NUM_NODES
|
||||
else
|
||||
# Deploying clients
|
||||
fab send_client_confile:client_confile
|
||||
|
||||
# Start sending load from the clients to the servers
|
||||
fab start_bigchaindb_load
|
||||
python clusterize_confiles.py confiles $NUM_NODES
|
||||
fi
|
||||
|
||||
# Send one of the config files to each instance
|
||||
for (( HOST=0 ; HOST<$NUM_NODES ; HOST++ )); do
|
||||
CONFILE="bcdb_conf"$HOST
|
||||
echo "Sending "$CONFILE
|
||||
fab set_host:$HOST send_confile:$CONFILE
|
||||
done
|
||||
|
||||
# Initialize BigchainDB (i.e. Create the RethinkDB database,
|
||||
# the tables, the indexes, and genesis block). Note that
|
||||
# this will only be sent to one of the nodes, see the
|
||||
# definition of init_bigchaindb() in fabfile.py to see why.
|
||||
fab init_bigchaindb
|
||||
fab set_shards:$NUM_NODES
|
||||
echo "To set the replication factor to 3, do: fab set_replicas:3"
|
||||
echo "To start BigchainDB on all the nodes, do: fab start_bigchaindb"
|
||||
|
||||
# cleanup
|
||||
rm add2known_hosts.sh
|
||||
|
@ -23,10 +23,6 @@ NUM_NODES=3
|
||||
# It's where to get the BigchainDB code to be deployed on the nodes
|
||||
BRANCH="master"
|
||||
|
||||
# WHAT_TO_DEPLOY is either "servers" or "clients"
|
||||
# What do you want to deploy?
|
||||
WHAT_TO_DEPLOY="servers"
|
||||
|
||||
# SSH_KEY_NAME is the name of the SSH private key file
|
||||
# in $HOME/.ssh/
|
||||
# It is used for SSH communications with AWS instances.
|
||||
|
deploy-cluster-aws/fabfile.py (vendored) | 15 lines changed
@ -237,15 +237,6 @@ def send_confile(confile):
|
||||
run('bigchaindb show-config')
|
||||
|
||||
|
||||
@task
|
||||
@parallel
|
||||
def send_client_confile(confile):
|
||||
put(confile, 'tempfile')
|
||||
run('mv tempfile ~/.bigchaindb')
|
||||
print('For this node, bigchaindb show-config says:')
|
||||
run('bigchaindb show-config')
|
||||
|
||||
|
||||
# Initialize BigchainDB
|
||||
# i.e. create the database, the tables,
|
||||
# the indexes, and the genesis block.
|
||||
@ -278,12 +269,6 @@ def start_bigchaindb():
|
||||
sudo('screen -d -m bigchaindb -y start &', pty=False)
|
||||
|
||||
|
||||
@task
|
||||
@parallel
|
||||
def start_bigchaindb_load():
|
||||
sudo('screen -d -m bigchaindb load &', pty=False)
|
||||
|
||||
|
||||
# Install and run New Relic
|
||||
@task
|
||||
@parallel
|
||||
|
@ -26,7 +26,7 @@ import boto3
|
||||
from awscommon import get_naeips
|
||||
|
||||
|
||||
SETTINGS = ['NUM_NODES', 'BRANCH', 'WHAT_TO_DEPLOY', 'SSH_KEY_NAME',
|
||||
SETTINGS = ['NUM_NODES', 'BRANCH', 'SSH_KEY_NAME',
|
||||
'USE_KEYPAIRS_FILE', 'IMAGE_ID', 'INSTANCE_TYPE', 'SECURITY_GROUP',
|
||||
'USING_EBS', 'EBS_VOLUME_SIZE', 'EBS_OPTIMIZED',
|
||||
'ENABLE_WEB_ADMIN', 'BIND_HTTP_TO_LOCALHOST']
|
||||
@ -77,9 +77,6 @@ if not isinstance(NUM_NODES, int):
|
||||
if not isinstance(BRANCH, str):
|
||||
raise SettingsTypeError('BRANCH should be a string')
|
||||
|
||||
if not isinstance(WHAT_TO_DEPLOY, str):
|
||||
raise SettingsTypeError('WHAT_TO_DEPLOY should be a string')
|
||||
|
||||
if not isinstance(SSH_KEY_NAME, str):
|
||||
raise SettingsTypeError('SSH_KEY_NAME should be a string')
|
||||
|
||||
@ -117,11 +114,6 @@ if NUM_NODES > 64:
|
||||
'The AWS deployment configuration file sets it to {}'.
|
||||
format(NUM_NODES))
|
||||
|
||||
if WHAT_TO_DEPLOY not in ['servers', 'clients']:
|
||||
raise ValueError('WHAT_TO_DEPLOY should be either "servers" or "clients". '
|
||||
'The AWS deployment configuration file sets it to {}'.
|
||||
format(WHAT_TO_DEPLOY))
|
||||
|
||||
if SSH_KEY_NAME in ['not-set-yet', '', None]:
|
||||
raise ValueError('SSH_KEY_NAME should be set. '
|
||||
'The AWS deployment configuration file sets it to {}'.
|
||||
|
@ -5,7 +5,7 @@ There is some specialized terminology associated with BigchainDB. To get started
|
||||
|
||||
## Node
|
||||
|
||||
A **BigchainDB node** is a machine or set of closely-linked machines running RethinkDB Server, BigchainDB Server, and related software. (A "machine" might be a bare-metal server, a virtual machine or a container.) Each node is controlled by one person or organization.
|
||||
A **BigchainDB node** is a machine or set of closely-linked machines running RethinkDB/MongoDB Server, BigchainDB Server, and related software. (A "machine" might be a bare-metal server, a virtual machine or a container.) Each node is controlled by one person or organization.
|
||||
|
||||
|
||||
## Cluster
|
||||
|
@ -0,0 +1,163 @@
|
||||
Kubernetes Template: Add a BigchainDB Node to an Existing BigchainDB Cluster
|
||||
============================================================================
|
||||
|
||||
This page describes how to deploy a BigchainDB node using Kubernetes,
|
||||
and how to add that node to an existing BigchainDB cluster.
|
||||
It assumes you already have a running Kubernetes cluster
|
||||
where you can deploy the new BigchainDB node.
|
||||
|
||||
If you want to deploy the first BigchainDB node in a BigchainDB cluster,
|
||||
or a stand-alone BigchainDB node,
|
||||
then see :doc:`the page about that <node-on-kubernetes>`.
|
||||
|
||||
|
||||
Terminology Used
|
||||
----------------
|
||||
|
||||
``existing cluster`` will refer to one of the existing Kubernetes clusters
|
||||
hosting one of the existing BigchainDB nodes.
|
||||
|
||||
``ctx-1`` will refer to the kubectl context of the existing cluster.
|
||||
|
||||
``new cluster`` will refer to the new Kubernetes cluster that will run a new
|
||||
BigchainDB node (including a BigchainDB instance and a MongoDB instance).
|
||||
|
||||
``ctx-2`` will refer to the kubectl context of the new cluster.
|
||||
|
||||
``new MongoDB instance`` will refer to the MongoDB instance in the new cluster.
|
||||
|
||||
``existing MongoDB instance`` will refer to the MongoDB instance in the
|
||||
existing cluster.
|
||||
|
||||
``new BigchainDB instance`` will refer to the BigchainDB instance in the new
|
||||
cluster.
|
||||
|
||||
``existing BigchainDB instance`` will refer to the BigchainDB instance in the
|
||||
existing cluster.
|
||||
|
||||
|
||||
Step 1: Prerequisites
|
||||
---------------------
|
||||
|
||||
* A public/private key pair for the new BigchainDB instance.
|
||||
|
||||
* The public key should be shared offline with the other existing BigchainDB
|
||||
nodes in the existing BigchainDB cluster.
|
||||
|
||||
* You will need the public keys of all the existing BigchainDB nodes.
|
||||
|
||||
* A new Kubernetes cluster setup with kubectl configured to access it.
|
||||
|
||||
* Some familiarity with deploying a BigchainDB node on Kubernetes.
|
||||
See our :doc:`other docs about that <node-on-kubernetes>`.
|
||||
|
||||
Note: If you are managing multiple Kubernetes clusters, from your local
|
||||
system, you can run ``kubectl config view`` to list all the contexts that
|
||||
are available for the local kubectl.
|
||||
To target a specific cluster, add a ``--context`` flag to the kubectl CLI. For
|
||||
example:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context ctx-1 apply -f example.yaml
|
||||
$ kubectl --context ctx-2 apply -f example.yaml
|
||||
$ kubectl --context ctx-1 proxy --port 8001
|
||||
$ kubectl --context ctx-2 proxy --port 8002
|
||||
|
||||
|
||||
Step 2: Prepare the New Kubernetes Cluster
|
||||
------------------------------------------
|
||||
|
||||
Follow the steps in the sections to set up Storage Classes and Persistent Volume
Claims, and to run MongoDB in the new cluster (a condensed command sketch follows this list):
|
||||
|
||||
1. :ref:`Add Storage Classes <Step 3: Create Storage Classes>`
|
||||
2. :ref:`Add Persistent Volume Claims <Step 4: Create Persistent Volume Claims>`
|
||||
3. :ref:`Create the Config Map <Step 5: Create the Config Map - Optional>`
|
||||
4. :ref:`Run MongoDB instance <Step 6: Run MongoDB as a StatefulSet>`
|
||||
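A condensed command sketch of those steps, assuming the file names used on that
page and that ``ctx-2`` is the kubectl context of the new cluster:

.. code:: bash

   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-sc.yaml
   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-pvc.yaml
   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-cm.yaml
   $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-ss.yaml

   # Edit parameters.location, spec.resources.requests.storage and data.fqdn
   # as described in the linked steps, then:
   $ kubectl --context ctx-2 apply -f mongo-sc.yaml
   $ kubectl --context ctx-2 apply -f mongo-pvc.yaml
   $ kubectl --context ctx-2 apply -f mongo-cm.yaml
   $ kubectl --context ctx-2 apply -f mongo-ss.yaml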
|
||||
|
||||
Step 3: Add the New MongoDB Instance to the Existing Replica Set
|
||||
----------------------------------------------------------------
|
||||
|
||||
Note that by ``replica set``, we are referring to the MongoDB replica set,
|
||||
not a Kubernetes' ``ReplicaSet``.
|
||||
|
||||
If you are not the administrator of an existing BigchainDB node, you
|
||||
will have to coordinate offline with an existing administrator so that they can
|
||||
add the new MongoDB instance to the replica set.
|
||||
|
||||
Add the new MongoDB instance to the replica set from an existing instance by
accessing the ``mongo`` shell.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context ctx-1 exec -it mdb-0 -c mongodb -- /bin/bash
|
||||
root@mdb-0# mongo --port 27017
|
||||
|
||||
One can only add members to a replica set from the ``PRIMARY`` instance.
|
||||
The ``mongo`` shell prompt should state that this is the primary member in the
|
||||
replica set.
|
||||
If not, then you can use the ``rs.status()`` command to find out who the
|
||||
primary is and log in to the ``mongo`` shell on the primary.
|
||||
|
||||
Run the ``rs.add()`` command with the FQDN and port number of the other instances:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
PRIMARY> rs.add("<fqdn>:<port>")
|
||||
|
||||
|
||||
Step 4: Verify the Replica Set Membership
|
||||
-----------------------------------------
|
||||
|
||||
You can use the ``rs.conf()`` and the ``rs.status()`` commands available in the
|
||||
mongo shell to verify the replica set membership.
|
||||
|
||||
The new MongoDB instance should be listed in the membership information
|
||||
displayed.
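For example, from the ``mongo`` shell on the primary (a sketch; it prints each
member's name and state on its own line):

.. code:: bash

   bigchain-rs:PRIMARY> rs.status().members.forEach(function(m) { print(m.name + " " + m.stateStr) })

The FQDN and port of the new MongoDB instance should appear in that list,
typically with the state ``SECONDARY`` once the initial sync has finished.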
|
||||
|
||||
|
||||
Step 5: Start the New BigchainDB Instance
|
||||
-----------------------------------------
|
||||
|
||||
Get the file ``bigchaindb-dep.yaml`` from GitHub using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/bigchaindb/bigchaindb-dep.yaml
|
||||
|
||||
Note that we set the ``BIGCHAINDB_DATABASE_HOST`` to ``mdb`` which is the name
|
||||
of the MongoDB service defined earlier.
|
||||
|
||||
Edit the ``BIGCHAINDB_KEYPAIR_PUBLIC`` with the public key of this instance,
|
||||
the ``BIGCHAINDB_KEYPAIR_PRIVATE`` with the private key of this instance and
|
||||
the ``BIGCHAINDB_KEYRING`` with a ``:`` delimited list of all the public keys
|
||||
in the BigchainDB cluster.
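For illustration only (placeholder values, not real keys, shown as plain
assignments rather than YAML ``env`` entries for readability), the three
variables in ``bigchaindb-dep.yaml`` follow this shape:

.. code:: bash

   BIGCHAINDB_KEYPAIR_PUBLIC=<public key of the new instance>
   BIGCHAINDB_KEYPAIR_PRIVATE=<private key of the new instance>
   BIGCHAINDB_KEYRING=<public key 1>:<public key 2>:<public key 3>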
|
||||
|
||||
Create the required Deployment using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context ctx-2 apply -f bigchaindb-dep.yaml
|
||||
|
||||
You can check its status using the command ``kubectl get deploy -w``
|
||||
|
||||
|
||||
Step 6: Restart the Existing BigchainDB Instance(s)
|
||||
---------------------------------------------------
|
||||
|
||||
Add the public key of the new BigchainDB instance to the keyring of all the
|
||||
existing BigchainDB instances and update the BigchainDB instances using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl --context ctx-1 replace -f bigchaindb-dep.yaml
|
||||
|
||||
This will create a "rolling deployment" in Kubernetes where a new instance of
|
||||
BigchainDB will be created, and if the health check on the new instance is
|
||||
successful, the earlier one will be terminated. This ensures that there is
|
||||
zero downtime during updates.
|
||||
|
||||
You can SSH to an existing BigchainDB instance and run the ``bigchaindb
|
||||
show-config`` command to check that the keyring is updated.
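If you prefer not to SSH in, a sketch of an equivalent check using kubectl
(``bdb`` is the Deployment name from ``bigchaindb-dep.yaml``):

.. code:: bash

   $ kubectl --context ctx-1 get deploy bdb -o yaml | grep -A 1 BIGCHAINDB_KEYRING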
|
@ -15,4 +15,4 @@ If you find the cloud deployment templates for nodes helpful, then you may also
|
||||
azure-quickstart-template
|
||||
template-kubernetes-azure
|
||||
node-on-kubernetes
|
||||
|
||||
add-node-on-kubernetes
|
||||
|
@ -1,9 +1,13 @@
|
||||
Run a BigchainDB Node in a Kubernetes Cluster
|
||||
=============================================
|
||||
Kubernetes Template: Deploy a Single BigchainDB Node
|
||||
====================================================
|
||||
|
||||
Assuming you already have a `Kubernetes <https://kubernetes.io/>`_
|
||||
cluster up and running, this page describes how to run a
|
||||
BigchainDB node in it.
|
||||
This page describes how to deploy the first BigchainDB node
|
||||
in a BigchainDB cluster, or a stand-alone BigchainDB node,
|
||||
using `Kubernetes <https://kubernetes.io/>`_.
|
||||
It assumes you already have a running Kubernetes cluster.
|
||||
|
||||
If you want to add a new BigchainDB node to an existing BigchainDB cluster,
|
||||
refer to :doc:`the page about that <add-node-on-kubernetes>`.
|
||||
|
||||
|
||||
Step 1: Install kubectl
|
||||
@ -31,19 +35,37 @@ then you can get the ``~/.kube/config`` file using:
|
||||
--resource-group <name of resource group containing the cluster> \
|
||||
--name <ACS cluster name>
|
||||
|
||||
If it asks for a password (to unlock the SSH key)
|
||||
and you enter the correct password,
|
||||
but you get an error message,
|
||||
then try adding ``--ssh-key-file ~/.ssh/<name>``
|
||||
to the above command (i.e. the path to the private key).
|
||||
|
||||
Step 3: Create a StorageClass
|
||||
-----------------------------
|
||||
|
||||
Step 3: Create Storage Classes
|
||||
------------------------------
|
||||
|
||||
MongoDB needs somewhere to store its data persistently,
|
||||
outside the container where MongoDB is running.
|
||||
Our MongoDB Docker container
|
||||
(based on the official MongoDB Docker container)
|
||||
exports two volume mounts with correct
|
||||
permissions from inside the container:
|
||||
|
||||
* The directory where the mongod instance stores its data: ``/data/db``.
|
||||
There's more explanation in the MongoDB docs about `storage.dbpath <https://docs.mongodb.com/manual/reference/configuration-options/#storage.dbPath>`_.
|
||||
|
||||
* The directory where the mongodb instance stores the metadata for a sharded
|
||||
cluster: ``/data/configdb/``.
|
||||
There's more explanation in the MongoDB docs about `sharding.configDB <https://docs.mongodb.com/manual/reference/configuration-options/#sharding.configDB>`_.
|
||||
|
||||
Explaining how Kubernetes handles persistent volumes,
|
||||
and the associated terminology,
|
||||
is beyond the scope of this documentation;
|
||||
see `the Kubernetes docs about persistent volumes
|
||||
<https://kubernetes.io/docs/user-guide/persistent-volumes>`_.
|
||||
|
||||
The first thing to do is create a Kubernetes StorageClass.
|
||||
The first thing to do is create the Kubernetes storage classes.
|
||||
|
||||
**Azure.** First, you need an Azure storage account.
|
||||
If you deployed your Kubernetes cluster on Azure
|
||||
@ -57,7 +79,6 @@ Standard storage is lower-cost and lower-performance.
|
||||
It uses hard disk drives (HDD).
|
||||
LRS means locally-redundant storage: three replicas
|
||||
in the same data center.
|
||||
|
||||
Premium storage is higher-cost and higher-performance.
|
||||
It uses solid state drives (SSD).
|
||||
At the time of writing,
|
||||
@ -67,29 +88,26 @@ the PersistentVolumeClaim would get stuck in a "Pending" state.
|
||||
For future reference, the command to create a storage account is
|
||||
`az storage account create <https://docs.microsoft.com/en-us/cli/azure/storage/account#create>`_.
|
||||
|
||||
Create a Kubernetes Storage Class named ``slow``
|
||||
by writing a file named ``azureStorageClass.yml`` containing:
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
kind: StorageClass
|
||||
apiVersion: storage.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: slow
|
||||
provisioner: kubernetes.io/azure-disk
|
||||
parameters:
|
||||
skuName: Standard_LRS
|
||||
location: <region where your cluster is located>
|
||||
|
||||
and then:
|
||||
Get the file ``mongo-sc.yaml`` from GitHub using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl apply -f azureStorageClass.yml
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-sc.yaml
|
||||
|
||||
You may have to update the ``parameters.location`` field in both the files to
|
||||
specify the location you are using in Azure.
|
||||
|
||||
Create the required storage classes using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl apply -f mongo-sc.yaml
|
||||
|
||||
|
||||
You can check if it worked using ``kubectl get storageclasses``.
|
||||
|
||||
Note that there is no line of the form
|
||||
**Azure.** Note that there is no line of the form
|
||||
``storageAccount: <azure storage account name>``
|
||||
under ``parameters:``. When we included one
|
||||
and then created a PersistentVolumeClaim based on it,
|
||||
@ -99,27 +117,16 @@ Kubernetes just looks for a storageAccount
|
||||
with the specified skuName and location.
|
||||
|
||||
|
||||
Step 4: Create a PersistentVolumeClaim
|
||||
--------------------------------------
|
||||
Step 4: Create Persistent Volume Claims
|
||||
---------------------------------------
|
||||
|
||||
Next, you'll create a PersistentVolumeClaim named ``mongoclaim``.
|
||||
Create a file named ``mongoclaim.yml``
|
||||
with the following contents:
|
||||
Next, you will create two PersistentVolumeClaim objects ``mongo-db-claim`` and
|
||||
``mongo-configdb-claim``.
|
||||
Get the file ``mongo-pvc.yaml`` from GitHub using:
|
||||
|
||||
.. code:: yaml
|
||||
.. code:: bash
|
||||
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: mongoclaim
|
||||
annotations:
|
||||
volume.beta.kubernetes.io/storage-class: slow
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-pvc.yaml
|
||||
|
||||
Note how there's no explicit mention of Azure, AWS or whatever.
|
||||
``ReadWriteOnce`` (RWO) means the volume can be mounted as
|
||||
@ -128,67 +135,280 @@ read-write by a single Kubernetes node.
|
||||
by AzureDisk.)
|
||||
``storage: 20Gi`` means the volume has a size of 20
|
||||
`gibibytes <https://en.wikipedia.org/wiki/Gibibyte>`_.
|
||||
(You can change that if you like.)
|
||||
|
||||
Create ``mongoclaim`` in your Kubernetes cluster:
|
||||
You may want to update the ``spec.resources.requests.storage`` field in both
|
||||
the files to specify a different disk size.
|
||||
|
||||
Create the required Persistent Volume Claims using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl apply -f mongoclaim.yml
|
||||
$ kubectl apply -f mongo-pvc.yaml
|
||||
|
||||
You can check its status using:
|
||||
|
||||
.. code:: bash
|
||||
You can check its status using: ``kubectl get pvc -w``
|
||||
|
||||
$ kubectl get pvc
|
||||
|
||||
Initially, the status of ``mongoclaim`` might be "Pending"
|
||||
Initially, the status of persistent volume claims might be "Pending"
|
||||
but it should become "Bound" fairly quickly.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl describe pvc
|
||||
Name: mongoclaim
|
||||
Namespace: default
|
||||
StorageClass: slow
|
||||
Status: Bound
|
||||
Volume: pvc-ebed81f1-fdca-11e6-abf0-000d3a27ab21
|
||||
Labels: <none>
|
||||
Capacity: 20Gi
|
||||
Access Modes: RWO
|
||||
No events.
|
||||
Step 5: Create the Config Map - Optional
|
||||
----------------------------------------
|
||||
|
||||
This step is required only if you are planning to set up multiple
|
||||
`BigchainDB nodes
|
||||
<https://docs.bigchaindb.com/en/latest/terminology.html#node>`_.
|
||||
|
||||
Step 5: Deploy MongoDB & BigchainDB
|
||||
-----------------------------------
|
||||
MongoDB reads the local ``/etc/hosts`` file while bootstrapping a replica set
|
||||
to resolve the hostname provided to the ``rs.initiate()`` command. It needs to
|
||||
ensure that the replica set is being initialized in the same instance where
|
||||
the MongoDB instance is running.
|
||||
|
||||
Now you can deploy MongoDB and BigchainDB to your Kubernetes cluster.
|
||||
Currently, the way we do that is we create a StatefulSet with two
|
||||
containers: BigchainDB and MongoDB. (In the future, we'll put them
|
||||
in separate pods, and we'll ensure those pods are in different nodes.)
|
||||
We expose BigchainDB's port 9984 (the HTTP API port)
|
||||
and MongoDB's port 27017 using a Kubernetes Service.
|
||||
To achieve this, you will create a ConfigMap with the FQDN of the MongoDB instance
|
||||
and populate the ``/etc/hosts`` file with this value so that a replica set can
|
||||
be created seamlessly.
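Conceptually, the result is one extra line in the MongoDB container's
``/etc/hosts``, mapping the container's own IP address to the FQDN from the
ConfigMap. A hypothetical example of such an entry (illustrative values only):

.. code:: bash

   # appended to /etc/hosts inside the MongoDB container
   10.0.0.4 mdb-instance-0.westeurope.cloudapp.azure.com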
|
||||
|
||||
Get the file ``node-mdb-ss.yaml`` from GitHub using:
|
||||
Get the file ``mongo-cm.yaml`` from GitHub using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/node-mdb-ss.yaml
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-cm.yaml
|
||||
|
||||
Take a look inside that file to see how it defines the Service
|
||||
and the StatefulSet.
|
||||
Note how the MongoDB container uses the ``mongoclaim`` PersistentVolumeClaim
|
||||
for its ``/data`` directory (mount path).
|
||||
You may want to update the ``data.fqdn`` field in the file before creating the
|
||||
ConfigMap. ``data.fqdn`` field will be the DNS name of your MongoDB instance.
|
||||
This will be used by other MongoDB instances when forming a MongoDB
|
||||
replica set. It should resolve to the MongoDB instance in your cluster when
|
||||
you are done with the setup. This will help when you are adding more MongoDB
|
||||
instances to the replica set in the future.
|
||||
|
||||
Create the StatefulSet and Service in your cluster using:
|
||||
|
||||
**Azure.**
|
||||
In Kubernetes on ACS, the name you populate in the ``data.fqdn`` field
|
||||
will be used to configure a DNS name for the public IP assigned to the
|
||||
Kubernetes Service that is the frontend for the MongoDB instance.
|
||||
We suggest using a name that will already be available in Azure.
|
||||
We use ``mdb-instance-0``, ``mdb-instance-1`` and so on in this document,
|
||||
which gives us ``mdb-instance-0.<azure location>.cloudapp.azure.com``,
|
||||
``mdb-instance-1.<azure location>.cloudapp.azure.com``, etc. as the FQDNs.
|
||||
The ``<azure location>`` is the Azure datacenter location you are using,
|
||||
which can also be obtained using the ``az account list-locations`` command.
|
||||
You can also try to assign a name to a Public IP in Azure before starting
the process, or use ``nslookup`` with the name you have in mind to check
if it's available for use.
In the rare chance that the name in the ``data.fqdn`` field is not available,
|
||||
you must create a ConfigMap with a unique name and restart the
|
||||
MongoDB instance.
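A quick way to check whether a name is still free, assuming ``nslookup`` is
available locally and ``westeurope`` is your Azure location:

.. code:: bash

   $ nslookup mdb-instance-0.westeurope.cloudapp.azure.com

If the lookup fails (e.g. NXDOMAIN), the name is most likely still available;
if it resolves, pick another name.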
|
||||
|
||||
**Kubernetes on bare-metal or other cloud providers.**
|
||||
You need to provide the name resolution function
|
||||
by other means (using DNS providers like GoDaddy, CloudFlare or your own
|
||||
private DNS server). The DNS set up for other environments is currently
|
||||
beyond the scope of this document.
|
||||
|
||||
|
||||
Create the required ConfigMap using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl apply -f node-mdb-ss.yaml
|
||||
$ kubectl apply -f mongo-cm.yaml
|
||||
|
||||
You can check that they're working using:
|
||||
|
||||
You can check its status using: ``kubectl get cm``
|
||||
|
||||
Now you are ready to run MongoDB and BigchainDB on your Kubernetes cluster.
|
||||
|
||||
|
||||
Step 6: Run MongoDB as a StatefulSet
|
||||
------------------------------------
|
||||
|
||||
Get the file ``mongo-ss.yaml`` from GitHub using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl get services
|
||||
$ kubectl get statefulsets
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/mongodb/mongo-ss.yaml
|
||||
|
||||
|
||||
Note how the MongoDB container uses the ``mongo-db-claim`` and the
|
||||
``mongo-configdb-claim`` PersistentVolumeClaims for its ``/data/db`` and
|
||||
``/data/configdb`` directories (mount path). Note also that we use the pod's
``securityContext.capabilities.add`` specification to add the ``FOWNER``
capability to the container.
That is because the MongoDB container has the user ``mongodb``, with uid ``999``
|
||||
and group ``mongodb``, with gid ``999``.
|
||||
When this container runs on a host with a mounted disk, the writes fail when
|
||||
there is no user with uid ``999``.
|
||||
To avoid this, we use the Docker feature of ``--cap-add=FOWNER``.
|
||||
This bypasses the uid and gid permission checks during writes and allows data
|
||||
to be persisted to disk.
|
||||
Refer to the
|
||||
`Docker docs <https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities>`_
|
||||
for details.
|
||||
|
||||
As we gain more experience running MongoDB in testing and production, we will
|
||||
tweak the ``resources.limits.cpu`` and ``resources.limits.memory``.
|
||||
We will also stop exposing port ``27017`` globally and/or allow only certain
|
||||
hosts to connect to the MongoDB instance in the future.
|
||||
|
||||
Create the required StatefulSet using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl apply -f mongo-ss.yaml
|
||||
|
||||
You can check its status using the commands ``kubectl get statefulsets -w``
|
||||
and ``kubectl get svc -w``
|
||||
|
||||
You may have to wait for up to 10 minutes for the disk to be created
|
||||
and attached on the first run. The pod can fail several times with the message
|
||||
saying that the timeout for mounting the disk was exceeded.
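If the pod keeps failing, the pod's events usually show why; a sketch of
commands you might use to inspect it (``mdb-0`` is the pod name used elsewhere
on this page):

.. code:: bash

   $ kubectl get pods -w
   $ kubectl describe pod mdb-0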
|
||||
|
||||
|
||||
Step 7: Initialize a MongoDB Replica Set - Optional
|
||||
---------------------------------------------------
|
||||
|
||||
This step is required only if you are planning to set up multiple
|
||||
`BigchainDB nodes
|
||||
<https://docs.bigchaindb.com/en/latest/terminology.html#node>`_.
|
||||
|
||||
|
||||
Login to the running MongoDB instance and access the mongo shell using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl exec -it mdb-0 -c mongodb -- /bin/bash
|
||||
root@mdb-0:/# mongo --port 27017
|
||||
|
||||
You will initiate the replica set by using the ``rs.initiate()`` command from the
|
||||
mongo shell. Its syntax is:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
rs.initiate({
|
||||
_id : "<replica-set-name",
|
||||
members: [ {
|
||||
_id : 0,
|
||||
host : "<fqdn of this instance>:<port number>"
|
||||
} ]
|
||||
})
|
||||
|
||||
An example command might look like:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
> rs.initiate({ _id : "bigchain-rs", members: [ { _id : 0, host :"mdb-instance-0.westeurope.cloudapp.azure.com:27017" } ] })
|
||||
|
||||
|
||||
where ``mdb-instance-0.westeurope.cloudapp.azure.com`` is the value stored in
|
||||
the ``data.fqdn`` field in the ConfigMap created using ``mongo-cm.yaml``.
|
||||
|
||||
|
||||
You should see changes in the mongo shell prompt from ``>``
|
||||
to ``bigchain-rs:OTHER>`` to ``bigchain-rs:SECONDARY>`` and finally
|
||||
to ``bigchain-rs:PRIMARY>``.
|
||||
|
||||
You can use the ``rs.conf()`` and the ``rs.status()`` commands to check the
|
||||
detailed replica set configuration now.
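For example, without opening an interactive shell, a sketch using ``--eval``:

.. code:: bash

   $ kubectl exec mdb-0 -c mongodb -- mongo --port 27017 --eval 'rs.status()'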
|
||||
|
||||
|
||||
Step 8: Create a DNS record - Optional
|
||||
--------------------------------------
|
||||
|
||||
This step is required only if you are planning to set up multiple
|
||||
`BigchainDB nodes
|
||||
<https://docs.bigchaindb.com/en/latest/terminology.html#node>`_.
|
||||
|
||||
**Azure.** Select the current Azure resource group and look for the ``Public IP``
|
||||
resource. You should see at least 2 entries there - one for the Kubernetes
|
||||
master and the other for the MongoDB instance. You may have to ``Refresh`` the
|
||||
Azure web page listing the resources in a resource group for the latest
|
||||
changes to be reflected.
|
||||
Select the ``Public IP`` resource that is attached to your service (it should
|
||||
have the Kubernetes cluster name along with a random string),
|
||||
select ``Configuration``, add the DNS name that was added in the
|
||||
ConfigMap earlier, click ``Save``, and wait for the changes to be applied.
|
||||
|
||||
To verify the DNS setting is operational, you can run ``nslookup <dns
|
||||
name added in ConfigMap>`` from your local Linux shell.
|
||||
|
||||
This will ensure that when you scale the replica set later, other MongoDB
|
||||
members in the replica set can reach this instance.
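For example, ``dig`` (an alternative to ``nslookup``) with the FQDN used
earlier on this page should return the public IP of the MongoDB service:

.. code:: bash

   $ dig +short mdb-instance-0.westeurope.cloudapp.azure.com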
|
||||
|
||||
|
||||
Step 9: Run BigchainDB as a Deployment
|
||||
--------------------------------------
|
||||
|
||||
Get the file ``bigchaindb-dep.yaml`` from GitHub using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/bigchaindb/bigchaindb-dep.yaml
|
||||
|
||||
Note that we set the ``BIGCHAINDB_DATABASE_HOST`` to ``mdb`` which is the name
|
||||
of the MongoDB service defined earlier.
|
||||
|
||||
We also hardcode the ``BIGCHAINDB_KEYPAIR_PUBLIC``,
|
||||
``BIGCHAINDB_KEYPAIR_PRIVATE`` and ``BIGCHAINDB_KEYRING`` for now.
|
||||
|
||||
As we gain more experience running BigchainDB in testing and production, we
|
||||
will tweak the ``resources.limits`` values for CPU and memory, and as richer
|
||||
monitoring and probing becomes available in BigchainDB, we will tweak the
|
||||
``livenessProbe`` and ``readinessProbe`` parameters.
|
||||
|
||||
We also plan to specify scheduling policies for the BigchainDB deployment so
|
||||
that we ensure that BigchainDB and MongoDB are running in separate nodes, and
|
||||
build security around the globally exposed port ``9984``.
|
||||
|
||||
Create the required Deployment using:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl apply -f bigchaindb-dep.yaml
|
||||
|
||||
You can check its status using the command ``kubectl get deploy -w``
|
||||
|
||||
|
||||
Step 10: Verify the BigchainDB Node Setup
|
||||
-----------------------------------------
|
||||
|
||||
Step 10.1: Testing Externally
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Try to access the ``<dns/ip of your exposed bigchaindb service endpoint>:9984``
in your browser. You should receive a JSON response that shows the BigchainDB
server version, among other things.

Try to access the ``<dns/ip of your exposed mongodb service endpoint>:27017``
in your browser. You should receive a message from MongoDB stating that it
doesn't allow HTTP connections to the port anymore.
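The same checks can be scripted with ``curl`` instead of a browser (a sketch;
substitute your own endpoints):

.. code:: bash

   $ curl http://<dns/ip of your exposed bigchaindb service endpoint>:9984
   $ curl http://<dns/ip of your exposed mongodb service endpoint>:27017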
|
||||
|
||||
|
||||
Step 10.2: Testing Internally
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Run a container that provides utilities like ``nslookup``, ``curl`` and ``dig``
|
||||
on the cluster and query the internal DNS and IP endpoints.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl run -it toolbox --image <docker image to run> --restart=Never --rm
|
||||
|
||||
It will drop you to the shell prompt.
|
||||
Now you can query for the ``mdb`` and ``bdb`` service details.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ nslookup mdb
|
||||
$ dig +noall +answer _mdb-port._tcp.mdb.default.svc.cluster.local SRV
|
||||
$ curl -X GET http://mdb:27017
|
||||
$ curl -X GET http://bdb:9984
|
||||
|
||||
There is a generic image based on alpine:3.5 with the required utilities
|
||||
hosted at Docker Hub under ``bigchaindb/toolbox``.
|
||||
The corresponding Dockerfile is `here
|
||||
<https://github.com/bigchaindb/bigchaindb/k8s/toolbox/Dockerfile>`_.
|
||||
You can use it as below to get started immediately:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ kubectl run -it toolbox --image bigchaindb/toolbox --restart=Never --rm
|
||||
|
||||
|
@ -94,7 +94,9 @@ Finally, you can deploy an ACS using something like:
|
||||
|
||||
$ az acs create --name <a made-up cluster name> \
|
||||
--resource-group <name of resource group created earlier> \
|
||||
--master-count 3 \
|
||||
--agent-count 3 \
|
||||
--admin-username ubuntu \
|
||||
--agent-vm-size Standard_D2_v2 \
|
||||
--dns-prefix <make up a name> \
|
||||
--ssh-key-value ~/.ssh/<name>.pub \
|
||||
@ -113,9 +115,6 @@ go to **Resource groups** (with the blue cube icon)
|
||||
and click on the one you created
|
||||
to see all the resources in it.
|
||||
|
||||
Next, you can :doc:`run a BigchainDB node on your new
|
||||
Kubernetes cluster <node-on-kubernetes>`.
|
||||
|
||||
|
||||
Optional: SSH to Your New Kubernetes Cluster Nodes
|
||||
--------------------------------------------------
|
||||
@ -125,21 +124,78 @@ You can SSH to one of the just-deployed Kubernetes "master" nodes
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ ssh -i ~/.ssh/<name>.pub azureuser@<master-ip-address-or-hostname>
|
||||
$ ssh -i ~/.ssh/<name> ubuntu@<master-ip-address-or-hostname>
|
||||
|
||||
where you can get the IP address or hostname
|
||||
of a master node from the Azure Portal.
|
||||
Note how the default username is ``azureuser``.
|
||||
of a master node from the Azure Portal. For example:
|
||||
|
||||
The "agent" nodes don't get public IP addresses or hostnames,
|
||||
.. code:: bash
|
||||
|
||||
$ ssh -i ~/.ssh/mykey123 ubuntu@mydnsprefix.westeurope.cloudapp.azure.com
|
||||
|
||||
.. note::
|
||||
|
||||
All the master nodes should have the *same* IP address and hostname
|
||||
(also called the Master FQDN).
|
||||
|
||||
The "agent" nodes shouldn't get public IP addresses or hostnames,
|
||||
so you can't SSH to them *directly*,
|
||||
but you can first SSH to the master
|
||||
and then SSH to an agent from there
|
||||
(using the *private* IP address or hostname of the agent node).
|
||||
To do that, you either need to copy your SSH key pair to
|
||||
the master (a bad idea),
|
||||
or use something like
|
||||
`SSH agent forwarding <https://yakking.branchable.com/posts/ssh-A/>`_ (better).
|
||||
and then SSH to an agent from there.
|
||||
To do that, you could
|
||||
copy your SSH key pair to the master (a bad idea),
|
||||
or use SSH agent forwarding (better).
|
||||
To do the latter, do the following on the machine you used
|
||||
to SSH to the master:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ echo -e "Host <FQDN of the cluster from Azure Portal>\n ForwardAgent yes" >> ~/.ssh/config
|
||||
|
||||
To verify that SSH agent forwarding works properly,
|
||||
SSH to the one of the master nodes and do:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ echo "$SSH_AUTH_SOCK"
|
||||
|
||||
If you get an empty response,
|
||||
then SSH agent forwarding hasn't been set up correctly.
|
||||
If you get a non-empty response,
|
||||
then SSH agent forwarding should work fine
|
||||
and you can SSH to one of the agent nodes (from a master)
|
||||
using something like:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ ssh ubuntu@k8s-agent-4AC80E97-0
|
||||
|
||||
where ``k8s-agent-4AC80E97-0`` is the name
|
||||
of a Kubernetes agent node in your Kubernetes cluster.
|
||||
You will have to replace it by the name
|
||||
of an agent node in your cluster.
|
||||
|
||||
|
||||
Optional: Delete the Kubernetes Cluster
|
||||
---------------------------------------
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ az acs delete \
|
||||
--name <ACS cluster name> \
|
||||
--resource-group <name of resource group containing the cluster>
|
||||
|
||||
|
||||
Optional: Delete the Resource Group
|
||||
-----------------------------------
|
||||
|
||||
CAUTION: You might end up deleting resources other than the ACS cluster.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ az group delete \
|
||||
--name <name of resource group containing the cluster>
|
||||
|
||||
|
||||
Next, you can :doc:`run a BigchainDB node on your new
|
||||
Kubernetes cluster <node-on-kubernetes>`.
|
||||
|
@ -86,7 +86,6 @@ Step 2 is to make an AWS deployment configuration file, if necessary. There's an
|
||||
```text
|
||||
NUM_NODES=3
|
||||
BRANCH="master"
|
||||
WHAT_TO_DEPLOY="servers"
|
||||
SSH_KEY_NAME="not-set-yet"
|
||||
USE_KEYPAIRS_FILE=False
|
||||
IMAGE_ID="ami-8504fdea"
|
||||
|
@ -13,7 +13,6 @@ BigchainDB Server Documentation
|
||||
server-reference/index
|
||||
drivers-clients/index
|
||||
clusters-feds/index
|
||||
topic-guides/index
|
||||
data-models/index
|
||||
schema/transaction
|
||||
schema/vote
|
||||
|
@ -69,16 +69,6 @@ e.g. `bigchaindb --dev-start-rethinkdb start`. Note that this will also shutdown
|
||||
The option `--dev-allow-temp-keypair` will generate a keypair on the fly if no keypair is found, this is useful when you want to run a temporary instance of BigchainDB in a Docker container, for example.
|
||||
|
||||
|
||||
## bigchaindb load
|
||||
|
||||
Write transactions to the backlog (for benchmarking tests). You can learn more about it using:
|
||||
```text
|
||||
$ bigchaindb load -h
|
||||
```
|
||||
|
||||
Note: This command uses the Python Server API to write transactions to the database. It _doesn't_ use the HTTP API or a driver that wraps the HTTP API.
|
||||
|
||||
|
||||
## bigchaindb set-shards
|
||||
|
||||
This command is specific to RethinkDB so it will only run if BigchainDB is
|
||||
|
@ -1,12 +0,0 @@
|
||||
Topic Guides
|
||||
============
|
||||
|
||||
.. note::
|
||||
|
||||
Most of the Topic Guides have been moved over to `the root BigchainDB project docs <https://docs.bigchaindb.com/en/latest/index.html>`_.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
models
|
@ -1,6 +0,0 @@
|
||||
# The Transaction, Block and Vote Models
|
||||
|
||||
This page about transaction concepts and data models was getting too big, so it was split into smaller pages. It will be deleted eventually, so update your links. Here's where you can find the new pages:
|
||||
|
||||
* [Transaction Concepts](https://docs.bigchaindb.com/en/latest/transaction-concepts.html)
|
||||
* [Data Models (all of them)](../data-models/index.html)
|
83
k8s/bigchaindb/bigchaindb-dep.yaml
Normal file
83
k8s/bigchaindb/bigchaindb-dep.yaml
Normal file
@ -0,0 +1,83 @@
|
||||
###############################################################
|
||||
# This config file runs bigchaindb:master as a k8s Deployment #
|
||||
# and it connects to the mongodb backend on a separate pod #
|
||||
###############################################################
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: bdb
|
||||
namespace: default
|
||||
labels:
|
||||
name: bdb
|
||||
spec:
|
||||
selector:
|
||||
app: bdb
|
||||
ports:
|
||||
- port: 9984
|
||||
targetPort: 9984
|
||||
name: bdb-port
|
||||
type: LoadBalancer
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: bdb
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: bdb
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: bigchaindb
|
||||
image: bigchaindb/bigchaindb:master
|
||||
args:
|
||||
- start
|
||||
env:
|
||||
- name: BIGCHAINDB_DATABASE_HOST
|
||||
value: mdb
|
||||
- name: BIGCHAINDB_DATABASE_PORT
|
||||
# TODO(Krish): remove hardcoded port
|
||||
value: "27017"
|
||||
- name: BIGCHAINDB_DATABASE_REPLICASET
|
||||
value: bigchain-rs
|
||||
- name: BIGCHAINDB_DATABASE_BACKEND
|
||||
value: mongodb
|
||||
- name: BIGCHAINDB_DATABASE_NAME
|
||||
value: bigchain
|
||||
- name: BIGCHAINDB_SERVER_BIND
|
||||
value: 0.0.0.0:9984
|
||||
- name: BIGCHAINDB_KEYPAIR_PUBLIC
|
||||
value: EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ
|
||||
- name: BIGCHAINDB_KEYPAIR_PRIVATE
|
||||
value: 3CjmRhu718gT1Wkba3LfdqX5pfYuBdaMPLd7ENUga5dm
|
||||
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
|
||||
value: "120"
|
||||
- name: BIGCHAINDB_KEYRING
|
||||
value: ""
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9984
|
||||
hostPort: 9984
|
||||
name: bdb-port
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
restartPolicy: Always
|
89
k8s/deprecated.to.del/bdb-mdb-dep.yaml
Normal file
89
k8s/deprecated.to.del/bdb-mdb-dep.yaml
Normal file
@ -0,0 +1,89 @@
|
||||
###############################################################
|
||||
# This config file runs bigchaindb:latest and connects to the #
|
||||
# mongodb backend as a service #
|
||||
###############################################################
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: bdb-mdb-service
|
||||
namespace: default
|
||||
labels:
|
||||
name: bdb-mdb-service
|
||||
spec:
|
||||
selector:
|
||||
app: bdb-mdb
|
||||
ports:
|
||||
- port: 9984
|
||||
targetPort: 9984
|
||||
name: bdb-api
|
||||
type: LoadBalancer
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: bdb-mdb
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: bdb-mdb
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: bdb-mdb
|
||||
image: bigchaindb/bigchaindb:latest
|
||||
args:
|
||||
- start
|
||||
env:
|
||||
- name: BIGCHAINDB_DATABASE_HOST
|
||||
value: mdb-service
|
||||
- name: BIGCHAINDB_DATABASE_PORT
|
||||
value: "27017"
|
||||
- name: BIGCHAINDB_DATABASE_REPLICASET
|
||||
value: bigchain-rs
|
||||
- name: BIGCHAINDB_DATABASE_BACKEND
|
||||
value: mongodb
|
||||
- name: BIGCHAINDB_DATABASE_NAME
|
||||
value: bigchain
|
||||
- name: BIGCHAINDB_SERVER_BIND
|
||||
value: 0.0.0.0:9984
|
||||
- name: BIGCHAINDB_KEYPAIR_PUBLIC
|
||||
value: EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ
|
||||
- name: BIGCHAINDB_KEYPAIR_PRIVATE
|
||||
value: 3CjmRhu718gT1Wkba3LfdqX5pfYuBdaMPLd7ENUga5dm
|
||||
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
|
||||
value: "120"
|
||||
- name: BIGCHAINDB_KEYRING
|
||||
value: ""
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9984
|
||||
hostPort: 9984
|
||||
name: bdb-port
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: bigchaindb-data
|
||||
mountPath: /data
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: bigchaindb-data
|
||||
hostPath:
|
||||
path: /disk/bigchaindb-data
|
87
k8s/deprecated.to.del/bdb-rdb-dep.yaml
Normal file
87
k8s/deprecated.to.del/bdb-rdb-dep.yaml
Normal file
@ -0,0 +1,87 @@
|
||||
###############################################################
|
||||
# This config file runs bigchaindb:latest and connects to the #
|
||||
# rethinkdb backend as a service #
|
||||
###############################################################
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: bdb-rdb-service
|
||||
namespace: default
|
||||
labels:
|
||||
name: bdb-rdb-service
|
||||
spec:
|
||||
selector:
|
||||
app: bdb-rdb
|
||||
ports:
|
||||
- port: 9984
|
||||
targetPort: 9984
|
||||
name: bdb-rdb-api
|
||||
type: LoadBalancer
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: bdb-rdb
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: bdb-rdb
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: bdb-rdb
|
||||
image: bigchaindb/bigchaindb:latest
|
||||
args:
|
||||
- start
|
||||
env:
|
||||
- name: BIGCHAINDB_DATABASE_HOST
|
||||
value: rdb-service
|
||||
- name: BIGCHAINDB_DATABASE_PORT
|
||||
value: "28015"
|
||||
- name: BIGCHAINDB_DATABASE_BACKEND
|
||||
value: rethinkdb
|
||||
- name: BIGCHAINDB_DATABASE_NAME
|
||||
value: bigchain
|
||||
- name: BIGCHAINDB_SERVER_BIND
|
||||
value: 0.0.0.0:9984
|
||||
- name: BIGCHAINDB_KEYPAIR_PUBLIC
|
||||
value: EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ
|
||||
- name: BIGCHAINDB_KEYPAIR_PRIVATE
|
||||
value: 3CjmRhu718gT1Wkba3LfdqX5pfYuBdaMPLd7ENUga5dm
|
||||
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
|
||||
value: "120"
|
||||
- name: BIGCHAINDB_KEYRING
|
||||
value: ""
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9984
|
||||
hostPort: 9984
|
||||
name: bdb-port
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: bigchaindb-data
|
||||
mountPath: /data
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: bigchaindb-data
|
||||
hostPath:
|
||||
path: /disk/bigchaindb-data
|
57
k8s/deprecated.to.del/mongo-statefulset.yaml
Normal file
57
k8s/deprecated.to.del/mongo-statefulset.yaml
Normal file
@ -0,0 +1,57 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: mongodb
|
||||
labels:
|
||||
name: mongodb
|
||||
spec:
|
||||
ports:
|
||||
- port: 27017
|
||||
targetPort: 27017
|
||||
clusterIP: None
|
||||
selector:
|
||||
role: mongodb
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: mongodb
|
||||
spec:
|
||||
serviceName: mongodb
|
||||
replicas: 3
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
role: mongodb
|
||||
environment: staging
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: mongo
|
||||
image: mongo:3.4.1
|
||||
command:
|
||||
- mongod
|
||||
- "--replSet"
|
||||
- bigchain-rs
|
||||
#- "--smallfiles"
|
||||
#- "--noprealloc"
|
||||
ports:
|
||||
- containerPort: 27017
|
||||
volumeMounts:
|
||||
- name: mongo-persistent-storage
|
||||
mountPath: /data/db
|
||||
- name: mongo-sidecar
|
||||
image: cvallance/mongo-k8s-sidecar
|
||||
env:
|
||||
- name: MONGO_SIDECAR_POD_LABELS
|
||||
value: "role=mongo,environment=staging"
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: mongo-persistent-storage
|
||||
annotations:
|
||||
volume.beta.kubernetes.io/storage-class: "fast"
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
@ -42,8 +42,8 @@ spec:
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: bdb-server
|
||||
image: bigchaindb/bigchaindb:latest
|
||||
- name: bigchaindb
|
||||
image: bigchaindb/bigchaindb:master
|
||||
args:
|
||||
- start
|
||||
env:
|
89
k8s/deprecated.to.del/node-ss.yaml
Normal file
89
k8s/deprecated.to.del/node-ss.yaml
Normal file
@ -0,0 +1,89 @@
|
||||
#####################################################
|
||||
# This config file uses bdb v0.9.1 with bundled rdb #
|
||||
#####################################################
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: bdb-service
|
||||
namespace: default
|
||||
labels:
|
||||
name: bdb-service
|
||||
spec:
|
||||
selector:
|
||||
app: bdb
|
||||
ports:
|
||||
- port: 9984
|
||||
targetPort: 9984
|
||||
name: bdb-http-api
|
||||
- port: 8080
|
||||
targetPort: 8080
|
||||
name: bdb-rethinkdb-api
|
||||
type: LoadBalancer
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: bdb
|
||||
namespace: default
|
||||
spec:
|
||||
serviceName: bdb
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
name: bdb
|
||||
labels:
|
||||
app: bdb
|
||||
annotations:
|
||||
pod.beta.kubernetes.io/init-containers: '[
|
||||
{
|
||||
"name": "bdb091-configure",
|
||||
"image": "bigchaindb/bigchaindb:0.9.1",
|
||||
"command": ["bigchaindb", "-y", "configure", "rethinkdb"],
|
||||
"volumeMounts": [
|
||||
{
|
||||
"name": "bigchaindb-data",
|
||||
"mountPath": "/data"
|
||||
}
|
||||
]
|
||||
}
|
||||
]'
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: bdb091-server
|
||||
image: bigchaindb/bigchaindb:0.9.1
|
||||
args:
|
||||
- -c
|
||||
- /data/.bigchaindb
|
||||
- start
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9984
|
||||
hostPort: 9984
|
||||
name: bdb-port
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: bigchaindb-data
|
||||
mountPath: /data
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 9984
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: bigchaindb-data
|
||||
hostPath:
|
||||
path: /disk/bigchaindb-data
|
75
k8s/deprecated.to.del/rethinkdb-ss.yaml
Normal file
75
k8s/deprecated.to.del/rethinkdb-ss.yaml
Normal file
@ -0,0 +1,75 @@
|
||||
####################################################
|
||||
# This config file runs rethinkdb:2.3 as a service #
|
||||
####################################################
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: rdb-service
|
||||
namespace: default
|
||||
labels:
|
||||
name: rdb-service
|
||||
spec:
|
||||
selector:
|
||||
app: rdb
|
||||
ports:
|
||||
- port: 8080
|
||||
targetPort: 8080
|
||||
name: rethinkdb-http-port
|
||||
- port: 28015
|
||||
targetPort: 28015
|
||||
name: rethinkdb-driver-port
|
||||
type: LoadBalancer
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: rdb
|
||||
namespace: default
|
||||
spec:
|
||||
serviceName: rdb
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
name: rdb
|
||||
labels:
|
||||
app: rdb
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: rethinkdb
|
||||
image: rethinkdb:2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
hostPort: 8080
|
||||
name: rdb-http-port
|
||||
protocol: TCP
|
||||
- containerPort: 28015
|
||||
hostPort: 28015
|
||||
name: rdb-client-port
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: rdb-data
|
||||
mountPath: /data
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 768Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 8080
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 8080
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 10
|
||||
restartPolicy: Always
|
||||
volumes:
|
||||
- name: rdb-data
|
||||
hostPath:
|
||||
path: /disk/rdb-data
|
12
k8s/mongodb/container/Dockerfile
Normal file
12
k8s/mongodb/container/Dockerfile
Normal file
@ -0,0 +1,12 @@
|
||||
FROM mongo:3.4.2
|
||||
LABEL maintainer "dev@bigchaindb.com"
|
||||
WORKDIR /
|
||||
RUN apt-get update \
|
||||
&& apt-get -y upgrade \
|
||||
&& apt-get autoremove \
|
||||
&& apt-get clean
|
||||
COPY mongod.conf.template /etc/mongod.conf.template
|
||||
COPY mongod_entrypoint/mongod_entrypoint /
|
||||
VOLUME /data/db /data/configdb
|
||||
EXPOSE 27017
|
||||
ENTRYPOINT ["/mongod_entrypoint"]
|
51
k8s/mongodb/container/Makefile
Normal file
51
k8s/mongodb/container/Makefile
Normal file
@ -0,0 +1,51 @@
|
||||
# Targets:
|
||||
# all: Cleans, formats src files, builds the code, builds the docker image
|
||||
# clean: Removes the binary and docker image
|
||||
# format: Formats the src files
|
||||
# build: Builds the code
|
||||
# docker: Builds the code and docker image
|
||||
# push: Push the docker image to Docker hub
|
||||
|
||||
GOCMD=go
|
||||
GOVET=$(GOCMD) tool vet
|
||||
GOINSTALL=$(GOCMD) install
|
||||
GOFMT=gofmt -s -w
|
||||
|
||||
DOCKER_IMAGE_NAME?=bigchaindb/mongodb
|
||||
DOCKER_IMAGE_TAG?=latest
|
||||
|
||||
PWD=$(shell pwd)
|
||||
BINARY_PATH=$(PWD)/mongod_entrypoint/
|
||||
BINARY_NAME=mongod_entrypoint
|
||||
MAIN_FILE = $(BINARY_PATH)/mongod_entrypoint.go
|
||||
SRC_FILES = $(BINARY_PATH)/mongod_entrypoint.go
|
||||
|
||||
.PHONY: all
|
||||
|
||||
all: clean build docker
|
||||
|
||||
clean:
|
||||
@echo "removing any pre-built binary";
|
||||
-@rm $(BINARY_PATH)/$(BINARY_NAME);
|
||||
@echo "remove any pre-built docker image";
|
||||
-@docker rmi $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG);
|
||||
|
||||
format:
|
||||
$(GOFMT) $(SRC_FILES)
|
||||
|
||||
build: format
|
||||
$(shell cd $(BINARY_PATH) && \
|
||||
export GOPATH="$(BINARY_PATH)" && \
|
||||
export GOBIN="$(BINARY_PATH)" && \
|
||||
CGO_ENABLED=0 GOOS=linux $(GOINSTALL) -ldflags "-s" -a -installsuffix cgo $(MAIN_FILE))
|
||||
|
||||
docker: build
|
||||
docker build \
|
||||
-t $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG) .;
|
||||
|
||||
vet:
|
||||
$(GOVET) .
|
||||
|
||||
push:
|
||||
docker push \
|
||||
$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG);
|
88
k8s/mongodb/container/README.md
Normal file
88
k8s/mongodb/container/README.md
Normal file
@ -0,0 +1,88 @@
|
||||
## Custom MongoDB container for BigchainDB Backend
|
||||
|
||||
### Need
|
||||
|
||||
* MongoDB needs the hostname provided in the rs.initiate() command to be
|
||||
resolvable through the hosts file locally.
|
||||
* In the future, with the introduction of TLS for inter-cluster MongoDB
|
||||
communications, we will need a way to specify detailed configuration.
|
||||
* We also need a way to overwrite certain parameters to suit our use case.
|
||||
|
||||
|
||||
### Step 1: Build the Latest Container
|
||||
|
||||
`make` from the root of this project.
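The image name and tag default to `bigchaindb/mongodb:latest` (see the
`Makefile` in this directory); since both are declared with `?=`, they can be
overridden on the command line, for example:

```
make DOCKER_IMAGE_NAME=myrepo/mongodb DOCKER_IMAGE_TAG=3.4.2
```

Here `myrepo` is just a placeholder for your own Docker Hub namespace.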
|
||||
|
||||
|
||||
### Step 2: Run the Container
|
||||
|
||||
```
|
||||
docker run \
|
||||
--name=mdb1 \
|
||||
--publish=17017:17017 \
|
||||
--rm=true \
|
||||
bigchaindb/mongodb \
|
||||
--replica-set-name <replica set name> \
|
||||
--fqdn <fully qualified domain name of this instance> \
|
||||
--port <mongod port number for external connections>
|
||||
```
|
||||
|
||||
#### Step 3: Initialize the Replica Set
|
||||
|
||||
Login to one of the MongoDB containers, say mdb1:
|
||||
|
||||
`docker exec -it mdb1 bash`
|
||||
|
||||
Start the `mongo` shell:
|
||||
|
||||
`mongo --port 27017`
|
||||
|
||||
|
||||
Run the rs.initiate() command:
|
||||
```
|
||||
rs.initiate({
|
||||
_id : "<replica-set-name", members: [
|
||||
{
|
||||
_id : 0,
|
||||
host : "<fqdn of this instance>:<port number>"
|
||||
} ]
|
||||
})
|
||||
```
|
||||
|
||||
For example:
|
||||
|
||||
```
|
||||
rs.initiate({ _id : "test-repl-set", members: [ { _id : 0, host :
|
||||
"mdb-instance-0.westeurope.cloudapp.azure.com:27017" } ] })
|
||||
```
|
||||
|
||||
You should also see changes in the mongo shell prompt from `>` to
|
||||
`test-repl-set:OTHER>` to `test-repl-set:SECONDARY>` to finally
|
||||
`test-repl-set:PRIMARY>`.
|
||||
If this instance is not the primary, you can use the `rs.status()` command to
|
||||
find out who is the primary.
|
||||
|
||||
|
||||
#### Step 4: Add members to the Replica Set
|
||||
|
||||
We can only add members to a replica set from the PRIMARY instance.
|
||||
Login to the PRIMARY and open a `mongo` shell.
|
||||
|
||||
Run the rs.add() command with the FQDN and port number of the other
containers/instances:
|
||||
```
|
||||
rs.add("<fqdn>:<port>")
|
||||
```
|
||||
|
||||
For example:
|
||||
|
||||
Add mdb2 to replica set from mdb1:
|
||||
```
|
||||
rs.add("bdb-cluster-1.northeurope.cloudapp.azure.com:27017")
|
||||
```
|
||||
|
||||
Add mdb3 to replica set from mdb1:
|
||||
```
|
||||
rs.add("bdb-cluster-2.northeurope.cloudapp.azure.com:27017")
|
||||
```
|
||||
|
89
k8s/mongodb/container/mongod.conf.template
Normal file
89
k8s/mongodb/container/mongod.conf.template
Normal file
@ -0,0 +1,89 @@
|
||||
# mongod.conf
|
||||
|
||||
# for documentation of all options, see:
|
||||
# http://docs.mongodb.org/manual/reference/configuration-options/
|
||||
|
||||
# where to write logging data.
|
||||
systemLog:
|
||||
verbosity: 0
|
||||
#TODO traceAllExceptions: true
|
||||
timeStampFormat: iso8601-utc
|
||||
component:
|
||||
accessControl:
|
||||
verbosity: 0
|
||||
command:
|
||||
verbosity: 0
|
||||
control:
|
||||
verbosity: 0
|
||||
ftdc:
|
||||
verbosity: 0
|
||||
geo:
|
||||
verbosity: 0
|
||||
index:
|
||||
verbosity: 0
|
||||
network:
|
||||
verbosity: 0
|
||||
query:
|
||||
verbosity: 0
|
||||
replication:
|
||||
verbosity: 0
|
||||
sharding:
|
||||
verbosity: 0
|
||||
storage:
|
||||
verbosity: 0
|
||||
journal:
|
||||
verbosity: 0
|
||||
write:
|
||||
verbosity: 0
|
||||
|
||||
processManagement:
|
||||
fork: false
|
||||
pidFilePath: /tmp/mongod.pid
|
||||
|
||||
net:
|
||||
port: PORT
|
||||
bindIp: 0.0.0.0
|
||||
maxIncomingConnections: 8192
|
||||
wireObjectCheck: false
|
||||
unixDomainSocket:
|
||||
enabled: false
|
||||
pathPrefix: /tmp
|
||||
filePermissions: 0700
|
||||
http:
|
||||
enabled: false
|
||||
compression:
|
||||
compressors: snappy
|
||||
#ssl: TODO
|
||||
|
||||
#security: TODO
|
||||
|
||||
#setParameter:
|
||||
#notablescan: 1 TODO
|
||||
#logUserIds: 1 TODO
|
||||
|
||||
storage:
|
||||
dbPath: /data/db
|
||||
indexBuildRetry: true
|
||||
journal:
|
||||
enabled: true
|
||||
commitIntervalMs: 100
|
||||
directoryPerDB: true
|
||||
engine: wiredTiger
|
||||
wiredTiger:
|
||||
engineConfig:
|
||||
journalCompressor: snappy
|
||||
collectionConfig:
|
||||
blockCompressor: snappy
|
||||
indexConfig:
|
||||
prefixCompression: true # TODO false may affect performance?
|
||||
|
||||
operationProfiling:
|
||||
mode: slowOp
|
||||
slowOpThresholdMs: 100
|
||||
|
||||
replication:
|
||||
replSetName: REPLICA_SET_NAME
|
||||
enableMajorityReadConcern: true
|
||||
|
||||
#sharding:
|
||||
|
154
k8s/mongodb/container/mongod_entrypoint/mongod_entrypoint.go
Normal file
@ -0,0 +1,154 @@
package main

import (
    "bytes"
    "errors"
    "flag"
    "fmt"
    "io/ioutil"
    "log"
    "net"
    "os"
    "regexp"
    "syscall"
)

const (
    mongoConfFilePath         string = "/etc/mongod.conf"
    mongoConfTemplateFilePath string = "/etc/mongod.conf.template"
    hostsFilePath             string = "/etc/hosts"
)

var (
    // Use the same entrypoint as the mongo:3.4.2 image; just supply it with
    // the mongod conf file with custom params
    mongoStartCmd []string = []string{"/entrypoint.sh", "mongod", "--config",
        mongoConfFilePath}
)

// context struct stores the user input and the constraints for the specified
// input. It also stores the keyword that needs to be replaced in the template
// files.
type context struct {
    cliInput        string
    templateKeyword string
    regex           string
}

// sanity function takes the pre-defined constraints and the user inputs as
// arguments and validates user input based on regex matching
func sanity(input map[string]*context, fqdn, ip string) error {
    var format *regexp.Regexp
    for _, ctx := range input {
        format = regexp.MustCompile(ctx.regex)
        if format.MatchString(ctx.cliInput) == false {
            return errors.New(fmt.Sprintf(
                "Invalid value: '%s' for '%s'. Can be '%s'",
                ctx.cliInput,
                ctx.templateKeyword,
                ctx.regex))
        }
    }

    format = regexp.MustCompile(`[a-z0-9-.]+`)
    if format.MatchString(fqdn) == false {
        return errors.New(fmt.Sprintf(
            "Invalid value: '%s' for FQDN. Can be '%s'",
            fqdn,
            format))
    }

    if net.ParseIP(ip) == nil {
        return errors.New(fmt.Sprintf(
            "Invalid value: '%s' for IPv4. Can be a.b.c.d",
            ip))
    }

    return nil
}

// createFile function takes the pre-defined keywords, user inputs, the
// template file path and the new file path location as parameters, and
// creates a new file at file path with all the keywords replaced by inputs.
func createFile(input map[string]*context,
    template string, conf string) error {
    // read the template
    contents, err := ioutil.ReadFile(template)
    if err != nil {
        return err
    }
    // replace
    for _, ctx := range input {
        contents = bytes.Replace(contents, []byte(ctx.templateKeyword),
            []byte(ctx.cliInput), -1)
    }
    // write
    err = ioutil.WriteFile(conf, contents, 0644)
    if err != nil {
        return err
    }
    return nil
}

// updateHostsFile takes the FQDN supplied as input to the container and adds
// an entry to /etc/hosts
func updateHostsFile(ip, fqdn string) error {
    fileHandle, err := os.OpenFile(hostsFilePath, os.O_APPEND|os.O_WRONLY,
        os.ModeAppend)
    if err != nil {
        return err
    }
    defer fileHandle.Close()
    // append
    _, err = fileHandle.WriteString(fmt.Sprintf("\n%s %s\n", ip, fqdn))
    if err != nil {
        return err
    }
    return nil
}

func main() {
    var fqdn, ip string
    input := make(map[string]*context)

    input["replica-set-name"] = &context{}
    input["replica-set-name"].regex = `[a-z]+`
    input["replica-set-name"].templateKeyword = "REPLICA_SET_NAME"
    flag.StringVar(&input["replica-set-name"].cliInput,
        "replica-set-name",
        "",
        "replica set name")

    input["port"] = &context{}
    input["port"].regex = `[0-9]{4,5}`
    input["port"].templateKeyword = "PORT"
    flag.StringVar(&input["port"].cliInput,
        "port",
        "",
        "mongodb port number")

    flag.StringVar(&fqdn, "fqdn", "", "FQDN of the MongoDB instance")
    flag.StringVar(&ip, "ip", "", "IPv4 address of the container")

    flag.Parse()
    err := sanity(input, fqdn, ip)
    if err != nil {
        log.Fatal(err)
    }

    err = createFile(input, mongoConfTemplateFilePath, mongoConfFilePath)
    if err != nil {
        log.Fatal(err)
    }

    err = updateHostsFile(ip, fqdn)
    if err != nil {
        log.Fatal(err)
    }

    fmt.Printf("Starting Mongod....")
    err = syscall.Exec(mongoStartCmd[0], mongoStartCmd[0:], os.Environ())
    if err != nil {
        panic(err)
    }
}
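For orientation, here is a minimal sketch (Python, illustrative only; the committed entrypoint added above is the Go program) of what the container does at startup: validate the flags, render `/etc/mongod.conf` from the template by substituting the `REPLICA_SET_NAME` and `PORT` placeholders, append the FQDN to `/etc/hosts`, and then hand over to mongod. Paths and keywords mirror the Go constants above.

```python
# Illustrative sketch only -- the committed implementation is the Go
# entrypoint above. Paths and placeholder keywords mirror the Go constants.
import re

MONGO_CONF = '/etc/mongod.conf'
MONGO_CONF_TEMPLATE = '/etc/mongod.conf.template'
HOSTS_FILE = '/etc/hosts'


def render_conf(replica_set_name, port):
    """Replace the REPLICA_SET_NAME and PORT keywords in the template."""
    if not re.fullmatch(r'[a-z]+', replica_set_name):
        raise ValueError('invalid replica set name')
    if not re.fullmatch(r'[0-9]{4,5}', port):
        raise ValueError('invalid port')
    with open(MONGO_CONF_TEMPLATE) as f:
        contents = f.read()
    contents = contents.replace('REPLICA_SET_NAME', replica_set_name)
    contents = contents.replace('PORT', port)
    with open(MONGO_CONF, 'w') as f:
        f.write(contents)


def add_hosts_entry(ip, fqdn):
    """Append '<ip> <fqdn>' to /etc/hosts, like updateHostsFile above."""
    with open(HOSTS_FILE, 'a') as f:
        f.write('\n{} {}\n'.format(ip, fqdn))

# After these steps the Go program exec()s:
#     /entrypoint.sh mongod --config /etc/mongod.conf
```

The StatefulSet further down supplies the corresponding flags (`--replica-set-name`, `--fqdn`, `--port`, `--ip`) from the ConfigMap value and the pod IP.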
13
k8s/mongodb/mongo-cm.yaml
Normal file
@ -0,0 +1,13 @@
#####################################################################
# This YAML file describes a ConfigMap with the FQDN of the mongo  #
# instance to be started. MongoDB instance uses the value from this #
# ConfigMap to bootstrap itself during startup.                     #
#####################################################################

apiVersion: v1
kind: ConfigMap
metadata:
  name: mdb-fqdn
  namespace: default
data:
  fqdn: mdb-instance-0.westeurope.cloudapp.azure.com
35
k8s/mongodb/mongo-pvc.yaml
Normal file
@ -0,0 +1,35 @@
###########################################################
# This YAML section describes a k8s pvc for mongodb dbPath #
###########################################################
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mongo-db-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: slow-db
spec:
  accessModes:
    - ReadWriteOnce
  # FIXME(Uncomment when ACS supports this!)
  # persistentVolumeReclaimPolicy: Retain
  resources:
    requests:
      storage: 20Gi
---
#############################################################
# This YAML section describes a k8s pvc for mongodb configDB #
#############################################################
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mongo-configdb-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: slow-configdb
spec:
  accessModes:
    - ReadWriteOnce
  # FIXME(Uncomment when ACS supports this!)
  # persistentVolumeReclaimPolicy: Retain
  resources:
    requests:
      storage: 1Gi
23
k8s/mongodb/mongo-sc.yaml
Normal file
@ -0,0 +1,23 @@
####################################################################
# This YAML section describes a StorageClass for the mongodb dbPath #
####################################################################
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: slow-db
provisioner: kubernetes.io/azure-disk
parameters:
  skuName: Standard_LRS
  location: westeurope
---
######################################################################
# This YAML section describes a StorageClass for the mongodb configDB #
######################################################################
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: slow-configdb
provisioner: kubernetes.io/azure-disk
parameters:
  skuName: Standard_LRS
  location: westeurope
92
k8s/mongodb/mongo-ss.yaml
Normal file
@ -0,0 +1,92 @@
########################################################################
# This YAML file describes a StatefulSet with a service for running and #
# exposing a MongoDB service.                                           #
# It depends on the configdb and db k8s pvc.                            #
########################################################################

apiVersion: v1
kind: Service
metadata:
  name: mdb
  namespace: default
  labels:
    name: mdb
spec:
  selector:
    app: mdb
  ports:
  - port: 27017
    targetPort: 27017
    name: mdb-port
  type: LoadBalancer
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: mdb
  namespace: default
spec:
  serviceName: mdb
  replicas: 1
  template:
    metadata:
      name: mdb
      labels:
        app: mdb
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: mongodb
        # TODO(FIXME): Do not use latest in production as it is harder to track
        # versions during updates and rollbacks. Also, once fixed, change the
        # imagePullPolicy to IfNotPresent for faster bootup
        image: bigchaindb/mongodb:latest
        env:
        - name: MONGODB_FQDN
          valueFrom:
            configMapKeyRef:
              name: mdb-fqdn
              key: fqdn
        - name: MONGODB_POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        args:
        - --replica-set-name=bigchain-rs
        - --fqdn=$(MONGODB_FQDN)
        - --port=27017
        - --ip=$(MONGODB_POD_IP)
        securityContext:
          capabilities:
            add:
            - FOWNER
        imagePullPolicy: Always
        ports:
        - containerPort: 27017
          hostPort: 27017
          name: mdb-port
          protocol: TCP
        volumeMounts:
        - name: mdb-db
          mountPath: /data/db
        - name: mdb-configdb
          mountPath: /data/configdb
        resources:
          limits:
            cpu: 200m
            memory: 768Mi
        livenessProbe:
          tcpSocket:
            port: mdb-port
          successThreshold: 1
          failureThreshold: 3
          periodSeconds: 15
          timeoutSeconds: 1
      restartPolicy: Always
      volumes:
      - name: mdb-db
        persistentVolumeClaim:
          claimName: mongo-db-claim
      - name: mdb-configdb
        persistentVolumeClaim:
          claimName: mongo-configdb-claim
12
k8s/toolbox/Dockerfile
Normal file
@ -0,0 +1,12 @@
# Toolbox container for debugging
# Run as:
# docker run -it --rm --entrypoint sh bigchaindb/toolbox
# kubectl run -it toolbox --image bigchaindb/toolbox --restart=Never --rm

FROM alpine:3.5
MAINTAINER github.com/krish7919
WORKDIR /

RUN apk add --no-cache curl bind-tools

ENTRYPOINT ["/bin/sh"]
12
k8s/toolbox/README.md
Normal file
@ -0,0 +1,12 @@
## Docker container with debugging tools

* curl
* bind-tools - provides nslookup, dig

## Build

`docker build -t bigchaindb/toolbox .`

## Push

`docker push bigchaindb/toolbox`
@ -1,3 +1,4 @@
from bigchaindb.common.exceptions import ValidationError
import pytest
import random

@ -26,8 +27,8 @@ def test_validate_bad_asset_creation(b, user_pk):
    tx.asset['data'] = 'a'
    tx_signed = tx.sign([b.me_private])

    with pytest.raises(TypeError):
        b.validate_transaction(tx_signed)
    with pytest.raises(ValidationError):
        Transaction.from_dict(tx_signed.to_dict())


@pytest.mark.bdb
@ -92,15 +93,15 @@ def test_asset_id_mismatch(b, user_pk):


def test_create_invalid_divisible_asset(b, user_pk, user_sk):
    from bigchaindb.models import Transaction
    from bigchaindb.common.exceptions import AmountError
    from bigchaindb.common.exceptions import ValidationError

    # Asset amount must be more than 0
    tx = Transaction.create([user_pk], [([user_pk], 1)])
    tx.outputs[0].amount = 0
    tx.sign([user_sk])

    with pytest.raises(AmountError):
        b.validate_transaction(tx)
    with pytest.raises(ValidationError):
        Transaction.from_dict(tx.to_dict())


def test_create_valid_divisible_asset(b, user_pk, user_sk):
@ -108,4 +109,4 @@ def test_create_valid_divisible_asset(b, user_pk, user_sk):

    tx = Transaction.create([user_pk], [([user_pk], 2)])
    tx_signed = tx.sign([user_sk])
    assert b.is_valid_transaction(tx_signed)
    tx_signed.validate(b)

@ -638,6 +638,7 @@ def test_divide(b, user_pk, user_sk):


# Check that negative inputs are caught when creating a TRANSFER transaction
@pytest.mark.skip(reason='part of tx structural tests')
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_non_positive_amounts_on_transfer(b, user_pk):
@ -662,6 +663,7 @@ def test_non_positive_amounts_on_transfer(b, user_pk):


# Check that negative inputs are caught when validating a TRANSFER transaction
@pytest.mark.skip(reason='part of tx structural tests')
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_non_positive_amounts_on_transfer_validate(b, user_pk, user_sk):
@ -704,6 +706,7 @@ def test_non_positive_amounts_on_create(b, user_pk):


# Check that negative inputs are caught when validating a CREATE transaction
@pytest.mark.skip(reason='part of tx structural tests')
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_non_positive_amounts_on_create_validate(b, user_pk):

@ -32,15 +32,15 @@ def mongodb_connection():
        port=bigchaindb.config['database']['port'])


def test_get_connection_returns_the_correct_instance():
def test_get_connection_returns_the_correct_instance(db_host, db_port):
    from bigchaindb.backend import connect
    from bigchaindb.backend.connection import Connection
    from bigchaindb.backend.mongodb.connection import MongoDBConnection

    config = {
        'backend': 'mongodb',
        'host': 'localhost',
        'port': 27017,
        'host': db_host,
        'port': db_port,
        'name': 'test',
        'replicaset': 'bigchain-rs'
    }

@ -159,6 +159,43 @@ def test_get_spent(signed_create_tx, signed_transfer_tx):
    assert spents[0] == signed_transfer_tx.to_dict()


def test_get_spent_for_tx_with_multiple_inputs(carol):
    from bigchaindb.backend import connect, query
    from bigchaindb.models import Block, Transaction
    conn = connect()
    tx_0 = Transaction.create(
        [carol.public_key],
        [([carol.public_key], 1),
         ([carol.public_key], 1),
         ([carol.public_key], 2)],
    ).sign([carol.private_key])
    block = Block(transactions=[tx_0])
    conn.db.bigchain.insert_one(block.to_dict())
    spents = list(query.get_spent(conn, tx_0.id, 0))
    assert not spents

    tx_1 = Transaction.transfer(
        tx_0.to_inputs()[2:3],
        [([carol.public_key], 1),
         ([carol.public_key], 1)],
        asset_id=tx_0.id,
    ).sign([carol.private_key])
    block = Block(transactions=[tx_1])
    conn.db.bigchain.insert_one(block.to_dict())
    spents = list(query.get_spent(conn, tx_0.id, 0))
    assert not spents

    tx_2 = Transaction.transfer(
        tx_0.to_inputs()[0:1] + tx_1.to_inputs()[1:2],
        [([carol.public_key], 2)],
        asset_id=tx_0.id,
    ).sign([carol.private_key])
    block = Block(transactions=[tx_2])
    conn.db.bigchain.insert_one(block.to_dict())
    spents = list(query.get_spent(conn, tx_0.id, 1))
    assert not spents


def test_get_owned_ids(signed_create_tx, user_pk):
    from bigchaindb.backend import connect, query
    from bigchaindb.models import Block

@ -21,7 +21,6 @@ def test_make_sure_we_dont_remove_any_command():
    assert parser.parse_args(['start']).command
    assert parser.parse_args(['set-shards', '1']).command
    assert parser.parse_args(['set-replicas', '1']).command
    assert parser.parse_args(['load']).command
    assert parser.parse_args(['add-replicas', 'localhost:27017']).command
    assert parser.parse_args(['remove-replicas', 'localhost:27017']).command

@ -382,27 +381,6 @@ def test_calling_main(start_mock, base_parser_mock, parse_args_mock,
                                          help='Number of replicas (i.e. '
                                               'the replication factor)')

    subparsers.add_parser.assert_any_call('load',
                                          help='Write transactions to the '
                                               'backlog')

    subsubparsers.add_argument.assert_any_call('-m', '--multiprocess',
                                               nargs='?', type=int,
                                               default=False,
                                               help='Spawn multiple processes '
                                               'to run the command, if no '
                                               'value is provided, the number '
                                               'of processes is equal to the '
                                               'number of cores of the host '
                                               'machine')
    subsubparsers.add_argument.assert_any_call('-c', '--count',
                                               default=0,
                                               type=int,
                                               help='Number of transactions '
                                               'to push. If the parameter -m '
                                               'is set, the count is '
                                               'distributed equally to all '
                                               'the processes')
    assert start_mock.called is True


@ -1,5 +1,5 @@
from bigchaindb.common.schema import (
    TX_SCHEMA, VOTE_SCHEMA, drop_schema_descriptions)
    TX_SCHEMA_COMMON, VOTE_SCHEMA, drop_schema_descriptions)


def _test_additionalproperties(node, path=''):
@ -19,7 +19,7 @@ def _test_additionalproperties(node, path=''):


def test_transaction_schema_additionalproperties():
    _test_additionalproperties(TX_SCHEMA)
    _test_additionalproperties(TX_SCHEMA_COMMON)


def test_vote_schema_additionalproperties():

@ -29,3 +29,32 @@ def test_validate_fails_metadata_empty_dict(create_tx):
    create_tx.metadata = {}
    with raises(SchemaValidationError):
        validate_transaction_schema(create_tx.to_dict())


def test_transfer_asset_schema(signed_transfer_tx):
    tx = signed_transfer_tx.to_dict()
    validate_transaction_schema(tx)
    tx['asset']['data'] = {}
    with raises(SchemaValidationError):
        validate_transaction_schema(tx)
    del tx['asset']['data']
    tx['asset']['id'] = 'b' * 63
    with raises(SchemaValidationError):
        validate_transaction_schema(tx)


def test_create_single_input(create_tx):
    tx = create_tx.to_dict()
    tx['inputs'] += tx['inputs']
    with raises(SchemaValidationError):
        validate_transaction_schema(tx)
    tx['inputs'] = []
    with raises(SchemaValidationError):
        validate_transaction_schema(tx)


def test_create_tx_no_fulfills(create_tx):
    tx = create_tx.to_dict()
    tx['inputs'][0]['fulfills'] = {'tx': 'a' * 64, 'output': 0}
    with raises(SchemaValidationError):
        validate_transaction_schema(tx)

@ -352,6 +352,17 @@ def test_tx_serialization_with_incorrect_hash(utx):
    utx_dict.pop('id')


def test_tx_serialization_hash_function(tx):
    import sha3
    import json
    tx_dict = tx.to_dict()
    tx_dict['inputs'][0]['fulfillment'] = None
    del tx_dict['id']
    payload = json.dumps(tx_dict, skipkeys=False, sort_keys=True,
                         separators=(',', ':'))
    assert sha3.sha3_256(payload.encode()).hexdigest() == tx.id


def test_invalid_input_initialization(user_input, user_pub):
    from bigchaindb.common.transaction import Input

@ -445,12 +456,15 @@ def test_transaction_link_eq():

def test_add_input_to_tx(user_input, asset_definition):
    from bigchaindb.common.transaction import Transaction
    from .utils import validate_transaction_model

    tx = Transaction(Transaction.CREATE, asset_definition, [], [])
    tx.add_input(user_input)

    assert len(tx.inputs) == 1

    validate_transaction_model(tx)


def test_add_input_to_tx_with_invalid_parameters(asset_definition):
    from bigchaindb.common.transaction import Transaction
@ -460,11 +474,11 @@ def test_add_input_to_tx_with_invalid_parameters(asset_definition):
        tx.add_input('somewronginput')


def test_add_output_to_tx(user_output, asset_definition):
def test_add_output_to_tx(user_output, user_input, asset_definition):
    from bigchaindb.common.transaction import Transaction
    from .utils import validate_transaction_model

    tx = Transaction(Transaction.CREATE, asset_definition)
    tx = Transaction(Transaction.CREATE, asset_definition, [user_input])
    tx.add_output(user_output)

    assert len(tx.outputs) == 1
@ -546,40 +560,6 @@ def test_validate_input_with_invalid_parameters(utx):
    assert not valid


def test_validate_multiple_inputs(user_input, user_output, user_priv,
                                  asset_definition):
    from copy import deepcopy

    from bigchaindb.common.crypto import PrivateKey
    from bigchaindb.common.transaction import Transaction
    from .utils import validate_transaction_model

    tx = Transaction(Transaction.CREATE, asset_definition,
                     [user_input, deepcopy(user_input)],
                     [user_output, deepcopy(user_output)])

    expected_first = deepcopy(tx)
    expected_second = deepcopy(tx)
    expected_first.inputs = [expected_first.inputs[0]]
    expected_second.inputs = [expected_second.inputs[1]]

    expected_first_bytes = str(expected_first).encode()
    expected_first.inputs[0].fulfillment.sign(expected_first_bytes,
                                              PrivateKey(user_priv))
    expected_second_bytes = str(expected_second).encode()
    expected_second.inputs[0].fulfillment.sign(expected_second_bytes,
                                               PrivateKey(user_priv))
    tx.sign([user_priv])

    assert tx.inputs[0].to_dict()['fulfillment'] == \
        expected_first.inputs[0].fulfillment.serialize_uri()
    assert tx.inputs[1].to_dict()['fulfillment'] == \
        expected_second.inputs[0].fulfillment.serialize_uri()
    assert tx.inputs_valid() is True

    validate_transaction_model(tx)


def test_validate_tx_threshold_create_signature(user_user2_threshold_input,
                                                user_user2_threshold_output,
                                                user_pub,
@ -610,6 +590,42 @@ def test_validate_tx_threshold_create_signature(user_user2_threshold_input,
    validate_transaction_model(tx)


def test_validate_tx_threshold_duplicated_pk(user_pub, user_priv,
                                             asset_definition):
    from copy import deepcopy
    from cryptoconditions import Ed25519Fulfillment, ThresholdSha256Fulfillment
    from bigchaindb.common.transaction import Input, Output, Transaction
    from bigchaindb.common.crypto import PrivateKey

    threshold = ThresholdSha256Fulfillment(threshold=2)
    threshold.add_subfulfillment(Ed25519Fulfillment(public_key=user_pub))
    threshold.add_subfulfillment(Ed25519Fulfillment(public_key=user_pub))

    threshold_input = Input(threshold, [user_pub, user_pub])
    threshold_output = Output(threshold, [user_pub, user_pub])

    tx = Transaction(Transaction.CREATE, asset_definition,
                     [threshold_input], [threshold_output])
    expected = deepcopy(threshold_input)
    expected.fulfillment.subconditions[0]['body'].sign(str(tx).encode(),
                                                       PrivateKey(user_priv))
    expected.fulfillment.subconditions[1]['body'].sign(str(tx).encode(),
                                                       PrivateKey(user_priv))

    tx.sign([user_priv, user_priv])

    subconditions = tx.inputs[0].fulfillment.subconditions
    expected_subconditions = expected.fulfillment.subconditions
    assert subconditions[0]['body'].to_dict()['signature'] == \
        expected_subconditions[0]['body'].to_dict()['signature']
    assert subconditions[1]['body'].to_dict()['signature'] == \
        expected_subconditions[1]['body'].to_dict()['signature']

    assert tx.inputs[0].to_dict()['fulfillment'] == \
        expected.fulfillment.serialize_uri()
    assert tx.inputs_valid() is True


def test_multiple_input_validation_of_transfer_tx(user_input, user_output,
                                                  user_priv, user2_pub,
                                                  user2_priv, user3_pub,
@ -621,8 +637,7 @@ def test_multiple_input_validation_of_transfer_tx(user_input, user_output,
    from cryptoconditions import Ed25519Fulfillment
    from .utils import validate_transaction_model

    tx = Transaction(Transaction.CREATE, asset_definition,
                     [user_input, deepcopy(user_input)],
    tx = Transaction(Transaction.CREATE, asset_definition, [user_input],
                     [user_output, deepcopy(user_output)])
    tx.sign([user_priv])

@ -985,3 +1000,20 @@ def test_validate_version(utx):
    utx.version = '1.0.0'
    with raises(SchemaValidationError):
        validate_transaction_model(utx)


def test_create_tx_no_asset_id(b, utx):
    from bigchaindb.common.exceptions import SchemaValidationError
    from .utils import validate_transaction_model
    utx.asset['id'] = 'b' * 64
    with raises(SchemaValidationError):
        validate_transaction_model(utx)


def test_transfer_tx_asset_schema(transfer_utx):
    from bigchaindb.common.exceptions import SchemaValidationError
    from .utils import validate_transaction_model
    tx = transfer_utx
    tx.asset['data'] = {}
    with raises(SchemaValidationError):
        validate_transaction_model(tx)
@ -223,6 +223,54 @@ def user2_pk():
    return USER2_PK


@pytest.fixture
def alice():
    from bigchaindb.common.crypto import generate_key_pair
    return generate_key_pair()


@pytest.fixture
def alice_privkey(alice):
    return alice.private_key


@pytest.fixture
def alice_pubkey(alice):
    return alice.public_key


@pytest.fixture
def bob():
    from bigchaindb.common.crypto import generate_key_pair
    return generate_key_pair()


@pytest.fixture
def bob_privkey(bob):
    return bob.private_key


@pytest.fixture
def bob_pubkey(bob):
    return bob.public_key


@pytest.fixture
def carol():
    from bigchaindb.common.crypto import generate_key_pair
    return generate_key_pair()


@pytest.fixture
def carol_privkey(carol):
    return carol.private_key


@pytest.fixture
def carol_pubkey(carol):
    return carol.public_key


@pytest.fixture
def b():
    from bigchaindb import Bigchain
@ -82,16 +82,16 @@ class TestBigchainApi(object):
        block = b.create_block([tx])
        b.write_block(block)

        assert b.has_previous_vote(block.id, block.voters) is False
        assert b.has_previous_vote(block.id) is False

        vote = b.vote(block.id, b.get_last_voted_block().id, True)
        b.write_vote(vote)

        assert b.has_previous_vote(block.id, block.voters) is True
        assert b.has_previous_vote(block.id) is True

    @pytest.mark.genesis
    def test_get_spent_with_double_inclusion_detected(self, b, monkeypatch):
        from bigchaindb.backend.exceptions import BigchainDBCritical
        from bigchaindb.exceptions import CriticalDoubleInclusion
        from bigchaindb.models import Transaction

        tx = Transaction.create([b.me], [([b.me], 1)])
@ -121,12 +121,47 @@ class TestBigchainApi(object):
        vote = b.vote(block3.id, b.get_last_voted_block().id, True)
        b.write_vote(vote)

        with pytest.raises(BigchainDBCritical):
        with pytest.raises(CriticalDoubleInclusion):
            b.get_spent(tx.id, 0)

    @pytest.mark.genesis
    def test_get_spent_with_double_spend_detected(self, b, monkeypatch):
        from bigchaindb.exceptions import CriticalDoubleSpend
        from bigchaindb.models import Transaction

        tx = Transaction.create([b.me], [([b.me], 1)])
        tx = tx.sign([b.me_private])

        monkeypatch.setattr('time.time', lambda: 1000000000)
        block1 = b.create_block([tx])
        b.write_block(block1)

        monkeypatch.setattr('time.time', lambda: 1000000020)
        transfer_tx = Transaction.transfer(tx.to_inputs(), [([b.me], 1)],
                                           asset_id=tx.id)
        transfer_tx = transfer_tx.sign([b.me_private])
        block2 = b.create_block([transfer_tx])
        b.write_block(block2)

        monkeypatch.setattr('time.time', lambda: 1000000030)
        transfer_tx2 = Transaction.transfer(tx.to_inputs(), [([b.me], 2)],
                                            asset_id=tx.id)
        transfer_tx2 = transfer_tx2.sign([b.me_private])
        block3 = b.create_block([transfer_tx2])
        b.write_block(block3)

        # Vote both block2 and block3 valid
        vote = b.vote(block2.id, b.get_last_voted_block().id, True)
        b.write_vote(vote)
        vote = b.vote(block3.id, b.get_last_voted_block().id, True)
        b.write_vote(vote)

        with pytest.raises(CriticalDoubleSpend):
            b.get_spent(tx.id, 0)

    @pytest.mark.genesis
    def test_get_block_status_for_tx_with_double_inclusion(self, b, monkeypatch):
        from bigchaindb.backend.exceptions import BigchainDBCritical
        from bigchaindb.exceptions import CriticalDoubleInclusion
        from bigchaindb.models import Transaction

        tx = Transaction.create([b.me], [([b.me], 1)])
@ -146,7 +181,7 @@ class TestBigchainApi(object):
        vote = b.vote(block2.id, b.get_last_voted_block().id, True)
        b.write_vote(vote)

        with pytest.raises(BigchainDBCritical):
        with pytest.raises(CriticalDoubleInclusion):
            b.get_blocks_status_containing_tx(tx.id)

    @pytest.mark.genesis
@ -428,58 +463,6 @@ class TestBigchainApi(object):

        assert retrieved_block_1 == retrieved_block_2

    @pytest.mark.genesis
    def test_more_votes_than_voters(self, b):
        from bigchaindb.common.exceptions import MultipleVotesError

        block_1 = dummy_block()
        b.write_block(block_1)
        # insert duplicate votes
        vote_1 = b.vote(block_1.id, b.get_last_voted_block().id, True)
        vote_2 = b.vote(block_1.id, b.get_last_voted_block().id, True)
        vote_2['node_pubkey'] = 'aaaaaaa'
        b.write_vote(vote_1)
        b.write_vote(vote_2)

        with pytest.raises(MultipleVotesError) as excinfo:
            b.block_election_status(block_1.id, block_1.voters)
        assert excinfo.value.args[0] == 'Block {block_id} has {n_votes} votes cast, but only {n_voters} voters'\
            .format(block_id=block_1.id, n_votes=str(2), n_voters=str(1))

    def test_multiple_votes_single_node(self, b, genesis_block):
        from bigchaindb.common.exceptions import MultipleVotesError

        block_1 = dummy_block()
        b.write_block(block_1)
        # insert duplicate votes
        for i in range(2):
            b.write_vote(b.vote(block_1.id, genesis_block.id, True))

        with pytest.raises(MultipleVotesError) as excinfo:
            b.block_election_status(block_1.id, block_1.voters)
        assert excinfo.value.args[0] == 'Block {block_id} has multiple votes ({n_votes}) from voting node {node_id}'\
            .format(block_id=block_1.id, n_votes=str(2), node_id=b.me)

        with pytest.raises(MultipleVotesError) as excinfo:
            b.has_previous_vote(block_1.id, block_1.voters)
        assert excinfo.value.args[0] == 'Block {block_id} has {n_votes} votes from public key {me}'\
            .format(block_id=block_1.id, n_votes=str(2), me=b.me)

    @pytest.mark.genesis
    def test_improper_vote_error(selfs, b):
        from bigchaindb.common.exceptions import ImproperVoteError

        block_1 = dummy_block()
        b.write_block(block_1)
        vote_1 = b.vote(block_1.id, b.get_last_voted_block().id, True)
        # mangle the signature
        vote_1['signature'] = 'a' * 87
        b.write_vote(vote_1)
        with pytest.raises(ImproperVoteError) as excinfo:
            b.has_previous_vote(block_1.id, block_1.id)
        assert excinfo.value.args[0] == 'Block {block_id} already has an incorrectly signed ' \
            'vote from public key {me}'.format(block_id=block_1.id, me=b.me)

    @pytest.mark.usefixtures('inputs')
    def test_assign_transaction_one_node(self, b, user_pk, user_sk):
        from bigchaindb.backend import query
@ -530,7 +513,7 @@ class TestBigchainApi(object):
    @pytest.mark.usefixtures('inputs')
    def test_non_create_input_not_found(self, b, user_pk):
        from cryptoconditions import Ed25519Fulfillment
        from bigchaindb.common.exceptions import TransactionDoesNotExist
        from bigchaindb.common.exceptions import InputDoesNotExist
        from bigchaindb.common.transaction import Input, TransactionLink
        from bigchaindb.models import Transaction
        from bigchaindb import Bigchain
@ -542,7 +525,7 @@ class TestBigchainApi(object):
        tx = Transaction.transfer([input], [([user_pk], 1)],
                                  asset_id='mock_asset_link')

        with pytest.raises(TransactionDoesNotExist):
        with pytest.raises(InputDoesNotExist):
            tx.validate(Bigchain())

    def test_count_backlog(self, b, user_pk):
@ -559,30 +542,12 @@ class TestBigchainApi(object):


class TestTransactionValidation(object):
    def test_create_operation_with_inputs(self, b, user_pk, create_tx):
        from bigchaindb.common.transaction import TransactionLink

        # Manipulate input so that it has a `fulfills` defined even
        # though it shouldn't have one
        create_tx.inputs[0].fulfills = TransactionLink('abc', 0)
        with pytest.raises(ValueError) as excinfo:
            b.validate_transaction(create_tx)
        assert excinfo.value.args[0] == 'A CREATE operation has no inputs'

    def test_transfer_operation_no_inputs(self, b, user_pk,
                                          signed_transfer_tx):
        signed_transfer_tx.inputs[0].fulfills = None
        with pytest.raises(ValueError) as excinfo:
            b.validate_transaction(signed_transfer_tx)

        assert excinfo.value.args[0] == 'Only `CREATE` transactions can have null inputs'

    def test_non_create_input_not_found(self, b, user_pk, signed_transfer_tx):
        from bigchaindb.common.exceptions import TransactionDoesNotExist
        from bigchaindb.common.exceptions import InputDoesNotExist
        from bigchaindb.common.transaction import TransactionLink

        signed_transfer_tx.inputs[0].fulfills = TransactionLink('c', 0)
        with pytest.raises(TransactionDoesNotExist):
        with pytest.raises(InputDoesNotExist):
            b.validate_transaction(signed_transfer_tx)

    @pytest.mark.usefixtures('inputs')
@ -741,7 +706,7 @@ class TestBlockValidation(object):
        b.validate_block(block)

    def test_invalid_node_pubkey(self, b):
        from bigchaindb.common.exceptions import OperationError
        from bigchaindb.common.exceptions import SybilError
        from bigchaindb.common import crypto

        # blocks can only be created by a federation node
@ -758,8 +723,8 @@ class TestBlockValidation(object):
        # from a non federation node
        block = block.sign(tmp_sk)

        # check that validate_block raises an OperationError
        with pytest.raises(OperationError):
        # check that validate_block raises an SybilError
        with pytest.raises(SybilError):
            b.validate_block(block)


@ -778,7 +743,7 @@ class TestMultipleInputs(object):
        tx = tx.sign([user_sk])

        # validate transaction
        assert b.is_valid_transaction(tx) == tx
        tx.validate(b)
        assert len(tx.inputs) == 1
        assert len(tx.outputs) == 1

@ -800,7 +765,7 @@ class TestMultipleInputs(object):
                                  asset_id=input_tx.id)
        tx = tx.sign([user_sk])

        assert b.is_valid_transaction(tx) == tx
        tx.validate(b)
        assert len(tx.inputs) == 1
        assert len(tx.outputs) == 1

@ -832,7 +797,7 @@ class TestMultipleInputs(object):
        transfer_tx = transfer_tx.sign([user_sk, user2_sk])

        # validate transaction
        assert b.is_valid_transaction(transfer_tx) == transfer_tx
        transfer_tx.validate(b)
        assert len(transfer_tx.inputs) == 1
        assert len(transfer_tx.outputs) == 1

@ -865,7 +830,7 @@ class TestMultipleInputs(object):
                                  asset_id=tx_input.id)
        tx = tx.sign([user_sk, user2_sk])

        assert b.is_valid_transaction(tx) == tx
        tx.validate(b)
        assert len(tx.inputs) == 1
        assert len(tx.outputs) == 1

@ -1219,7 +1184,6 @@ def test_cant_spend_same_input_twice_in_tx(b, genesis_block):
    tx_transfer = Transaction.transfer(dup_inputs, [([b.me], 200)],
                                       asset_id=tx_create.id)
    tx_transfer_signed = tx_transfer.sign([b.me_private])
    assert b.is_valid_transaction(tx_transfer_signed) is False
    with pytest.raises(DoubleSpend):
        tx_transfer_signed.validate(b)


@ -5,27 +5,6 @@ import pytest
pytestmark = [pytest.mark.bdb, pytest.mark.usefixtures('processes')]


def test_fast_double_create(b, user_pk):
    from bigchaindb.models import Transaction
    from bigchaindb.backend.query import count_blocks
    tx = Transaction.create([b.me], [([user_pk], 1)],
                            metadata={'test': 'test'}).sign([b.me_private])

    # write everything fast
    b.write_transaction(tx)
    b.write_transaction(tx)

    time.sleep(2)
    tx_returned = b.get_transaction(tx.id)

    # test that the tx can be queried
    assert tx_returned == tx
    # test the transaction appears only once
    last_voted_block = b.get_last_voted_block()
    assert len(last_voted_block.transactions) == 1
    assert count_blocks(b.connection) == 2


def test_double_create(b, user_pk):
    from bigchaindb.models import Transaction
    from bigchaindb.backend.query import count_blocks

@ -72,6 +72,7 @@ class MultipipesStepper:
            r = f(**kwargs)
            if r is not None:
                self._enqueue(next_name, r)
            return r

        self.tasks[name] = functools.wraps(f)(inner)
        self.input_tasks.add(name)
@ -90,6 +91,7 @@ class MultipipesStepper:
            out = f(*args, **kwargs)
            if out is not None and next:
                self._enqueue(next_name, out)
            return out

        task = functools.wraps(f)(inner)
        self.tasks[name] = task
@ -111,12 +113,12 @@ class MultipipesStepper:
        logging.debug('Stepping %s', name)
        task = self.tasks[name]
        if name in self.input_tasks:
            task(**kwargs)
            return task(**kwargs)
        else:
            queue = self.queues.get(name, [])
            if not queue:
                raise Empty(name)
            task(*queue.pop(0), **kwargs)
            return task(*queue.pop(0), **kwargs)
        logging.debug('Stepped %s', name)

    @property
@ -46,28 +46,19 @@ def test_validate_transaction_handles_exceptions(b, signed_create_tx):
    """
    from bigchaindb.pipelines.block import BlockPipeline
    block_maker = BlockPipeline()
    from bigchaindb.common.exceptions import ValidationError

    # Test SchemaValidationError
    tx_dict = signed_create_tx.to_dict()
    tx_dict['invalid_key'] = 'schema validation gonna getcha!'
    assert block_maker.validate_tx(tx_dict) is None

    # Test InvalidHash
    tx_dict = signed_create_tx.to_dict()
    tx_dict['id'] = 'a' * 64
    assert block_maker.validate_tx(tx_dict) is None
    with patch('bigchaindb.models.Transaction.validate') as validate:
        # Assert that validationerror gets caught
        validate.side_effect = ValidationError()
        assert block_maker.validate_tx(tx_dict) is None

    # Test InvalidSignature when we pass a bad fulfillment
    tx_dict = signed_create_tx.to_dict()
    tx_dict['inputs'][0]['fulfillment'] = 'cf:0:aaaaaaaaaaaaaaaaaaaaaaaaa'
    assert block_maker.validate_tx(tx_dict) is None

    # Test AmountError
    signed_create_tx.outputs[0].amount = 0
    tx_dict = signed_create_tx.to_dict()
    # set the correct value back so that we can continue using it
    signed_create_tx.outputs[0].amount = 1
    assert block_maker.validate_tx(tx_dict) is None
        # Assert that another error doesnt
        validate.side_effect = IOError()
        with pytest.raises(IOError):
            block_maker.validate_tx(tx_dict)


def test_create_block(b, user_pk):
@ -226,3 +217,12 @@ def test_full_pipeline(b, user_pk):
    block_len = len(block_doc.transactions)
    assert chained_block == block_doc
    assert number_assigned_to_others == 100 - block_len


def test_block_snowflake(create_tx, signed_transfer_tx):
    from bigchaindb.pipelines.block import tx_collector
    snowflake = tx_collector()
    assert snowflake.send(create_tx) == [create_tx]
    snowflake.send(signed_transfer_tx)
    snowflake.send(create_tx)
    assert snowflake.send(None) == [create_tx, signed_transfer_tx]
@ -83,12 +83,6 @@ def test_check_for_quorum_invalid_prev_node(b, user_pk):
def test_check_for_quorum_valid(b, user_pk):
    from bigchaindb.models import Transaction

    e = election.Election()

    # create blocks with transactions
    tx1 = Transaction.create([b.me], [([user_pk], 1)])
    test_block = b.create_block([tx1])

    # simulate a federation with four voters
    key_pairs = [crypto.generate_key_pair() for _ in range(4)]
    test_federation = [
@ -96,8 +90,13 @@ def test_check_for_quorum_valid(b, user_pk):
        for key_pair in key_pairs
    ]

    b.nodes_except_me = [key_pair[1] for key_pair in key_pairs]

    # create blocks with transactions
    tx1 = Transaction.create([b.me], [([user_pk], 1)])
    test_block = b.create_block([tx1])

    # add voters to block and write
    test_block.voters = [key_pair[1] for key_pair in key_pairs]
    test_block = test_block.sign(b.me_private)
    b.write_block(test_block)

@ -108,10 +107,20 @@ def test_check_for_quorum_valid(b, user_pk):
    for vote in votes:
        b.write_vote(vote)

    e = election.Election()
    e.bigchain = b

    # since this block is valid, should go nowhere
    assert e.check_for_quorum(votes[-1]) is None


@patch('bigchaindb.core.Bigchain.get_block')
def test_invalid_vote(get_block, b):
    e = election.Election()
    assert e.check_for_quorum({}) is None
    get_block.assert_not_called()


@pytest.mark.bdb
def test_check_requeue_transaction(b, user_pk):
    from bigchaindb.models import Transaction

@ -36,7 +36,11 @@ def test_reassign_transactions(b, user_pk):

    stm = stale.StaleTransactionMonitor(timeout=0.001,
                                        backlog_reassign_delay=0.001)
    stm.reassign_transactions(tx.to_dict())
    # This worked previously because transaction['assignee'] was only used if
    # bigchain.nodes_except_me was not empty.
    tx_dict = tx.to_dict()
    tx_dict['assignee'] = b.me
    stm.reassign_transactions(tx_dict)

    # test with federation
    tx = Transaction.create([b.me], [([user_pk], 1)])
@ -58,7 +62,7 @@ def test_reassign_transactions(b, user_pk):
    tx = tx.sign([b.me_private])
    stm.bigchain.nodes_except_me = ['lol']
    b.write_transaction(tx)
    stm.bigchain.nodes_except_me = None
    stm.bigchain.nodes_except_me = []

    tx = list(query.get_stale_transactions(b.connection, 0))[0]
    stm.reassign_transactions(tx)

@ -20,9 +20,26 @@ def test_stepping_changefeed_produces_update(b, steps):
                                 [tx.id, tx.id])


@pytest.mark.bdb
@pytest.mark.genesis
def test_dupe_tx_in_block(b, steps):
    tx = input_single_create(b)
    for i in range(2):
        steps.stale_check_transactions()
        steps.stale_reassign_transactions()
        steps.block_changefeed()
        steps.block_filter_tx()
        steps.block_validate_tx()
        steps.block_validate_tx()
    assert steps.counts == {'block_create': 2}
    steps.block_create(timeout=False)
    block = steps.block_create(timeout=True)
    assert block.transactions == [tx]


def input_single_create(b):
    from bigchaindb.common.transaction import Transaction
    metadata = {'r': random.random()}
    tx = Transaction.create([b.me], [([b.me], 1)], metadata)
    tx = Transaction.create([b.me], [([b.me], 1)], metadata).sign([b.me_private])
    b.write_transaction(tx)
    return tx

@ -128,17 +128,23 @@ def test_validate_block_with_invalid_signature(b):
@pytest.mark.genesis
def test_vote_validate_transaction(b):
    from bigchaindb.pipelines import vote
    from bigchaindb.models import Transaction
    from bigchaindb.common.exceptions import ValidationError

    tx = dummy_tx(b)
    vote_obj = vote.Vote()
    validation = vote_obj.validate_tx(tx, 123, 1)
    assert validation == (True, 123, 1)

    # NOTE: Submit unsigned transaction to `validate_tx` yields `False`.
    tx = Transaction.create([b.me], [([b.me], 1)])
    validation = vote_obj.validate_tx(tx, 456, 10)
    assert validation == (False, 456, 10)
    with patch('bigchaindb.models.Transaction.validate') as validate:
        # Assert that validationerror gets caught
        validate.side_effect = ValidationError()
        validation = vote_obj.validate_tx(tx, 456, 10)
        assert validation == (False, 456, 10)

        # Assert that another error doesnt
        validate.side_effect = IOError()
        with pytest.raises(IOError):
            validation = vote_obj.validate_tx(tx, 456, 10)


@pytest.mark.genesis

@ -1,40 +0,0 @@

def test_verify_vote_passes(b, structurally_valid_vote):
    from bigchaindb.consensus import BaseConsensusRules
    from bigchaindb.common import crypto
    from bigchaindb.common.utils import serialize
    vote_body = structurally_valid_vote['vote']
    vote_data = serialize(vote_body)
    signature = crypto.PrivateKey(b.me_private).sign(vote_data.encode())
    vote_signed = {
        'node_pubkey': b.me,
        'signature': signature.decode(),
        'vote': vote_body
    }
    assert BaseConsensusRules.verify_vote([b.me], vote_signed)


def test_verify_vote_fails_signature(b, structurally_valid_vote):
    from bigchaindb.consensus import BaseConsensusRules
    vote_body = structurally_valid_vote['vote']
    vote_signed = {
        'node_pubkey': b.me,
        'signature': 'a' * 86,
        'vote': vote_body
    }
    assert not BaseConsensusRules.verify_vote([b.me], vote_signed)


def test_verify_vote_fails_schema(b):
    from bigchaindb.consensus import BaseConsensusRules
    from bigchaindb.common import crypto
    from bigchaindb.common.utils import serialize
    vote_body = {}
    vote_data = serialize(vote_body)
    signature = crypto.PrivateKey(b.me_private).sign(vote_data.encode())
    vote_signed = {
        'node_pubkey': b.me,
        'signature': signature.decode(),
        'vote': vote_body
    }
    assert not BaseConsensusRules.verify_vote([b.me], vote_signed)
@ -82,11 +82,44 @@ def test_get_blocks_status_containing_tx(monkeypatch):
        bigchain.get_blocks_status_containing_tx('txid')


def test_has_previous_vote(monkeypatch):
    from bigchaindb.core import Bigchain
    monkeypatch.setattr(
        'bigchaindb.utils.verify_vote_signature', lambda voters, vote: False)
    bigchain = Bigchain(public_key='pubkey', private_key='privkey')
    block = {'votes': ({'node_pubkey': 'pubkey'},)}
    with pytest.raises(Exception):
        bigchain.has_previous_vote(block)
@pytest.mark.genesis
def test_get_spent_issue_1271(b, alice, bob, carol):
    from bigchaindb.models import Transaction

    tx_1 = Transaction.create(
        [carol.public_key],
        [([carol.public_key], 8)],
    ).sign([carol.private_key])

    tx_2 = Transaction.transfer(
        tx_1.to_inputs(),
        [([bob.public_key], 2),
         ([alice.public_key], 2),
         ([carol.public_key], 4)],
        asset_id=tx_1.id,
    ).sign([carol.private_key])

    tx_3 = Transaction.transfer(
        tx_2.to_inputs()[2:3],
        [([alice.public_key], 1),
         ([carol.public_key], 3)],
        asset_id=tx_1.id,
    ).sign([carol.private_key])

    tx_4 = Transaction.transfer(
        tx_2.to_inputs()[1:2] + tx_3.to_inputs()[0:1],
        [([bob.public_key], 3)],
        asset_id=tx_1.id,
    ).sign([alice.private_key])

    tx_5 = Transaction.transfer(
        tx_2.to_inputs()[0:1],
        [([alice.public_key], 2)],
        asset_id=tx_1.id,
    ).sign([bob.private_key])
    block_5 = b.create_block([tx_1, tx_2, tx_3, tx_4, tx_5])
    b.write_block(block_5)
    assert b.get_spent(tx_2.id, 0) == tx_5
    assert not b.get_spent(tx_5.id, 0)
    assert b.get_outputs_filtered(alice.public_key)
    assert b.get_outputs_filtered(alice.public_key, include_spent=False)

@ -1,22 +1,6 @@
from pytest import raises


class TestTransactionModel(object):
    def test_validating_an_invalid_transaction(self, b):
        from bigchaindb.models import Transaction

        tx = Transaction.create([b.me], [([b.me], 1)])
        tx.operation = 'something invalid'

        with raises(TypeError):
            tx.validate(b)

        tx.operation = 'CREATE'
        tx.inputs = []
        with raises(ValueError):
            tx.validate(b)


class TestBlockModel(object):
    def test_block_initialization(self, monkeypatch):
        from bigchaindb.models import Block
@ -61,11 +45,10 @@ class TestBlockModel(object):
        assert block.to_dict() == expected

    def test_block_invalid_serializaton(self):
        from bigchaindb.common.exceptions import OperationError
        from bigchaindb.models import Block

        block = Block([])
        with raises(OperationError):
        with raises(ValueError):
            block.to_dict()

    def test_block_deserialization(self, b):
@ -115,13 +98,12 @@ class TestBlockModel(object):

        transactions = [Transaction.create([b.me], [([b.me], 1)])]
        timestamp = gen_timestamp()
        voters = ['Qaaa', 'Qbbb']

        block = {
            'timestamp': timestamp,
            'transactions': [tx.to_dict() for tx in transactions],
            'node_pubkey': b.me,
            'voters': voters,
            'voters': list(b.federation),
        }

        block_body = {
@ -163,3 +145,11 @@ class TestBlockModel(object):

        public_key = PublicKey(b.me)
        assert public_key.verify(expected_block_serialized, block.signature)

    def test_block_dupe_tx(self, b):
        from bigchaindb.models import Transaction
        from bigchaindb.common.exceptions import DuplicateTransaction
        tx = Transaction.create([b.me], [([b.me], 1)])
        block = b.create_block([tx, tx])
        with raises(DuplicateTransaction):
            block._validate_block_transactions(b)

@ -201,3 +201,37 @@ def test_verify_vote_schema(b):
    assert not Voting.verify_vote_schema(vote)
    vote = b.vote('b', 'a' * 64, True)
    assert not Voting.verify_vote_schema(vote)


################################################################################
# block_election tests


def test_block_election(b):

    class TestVoting(Voting):
        @classmethod
        def verify_vote_signature(cls, vote):
            return True

        @classmethod
        def verify_vote_schema(cls, vote):
            return True

    keyring = 'abc'
    block = {'id': 'xyz', 'block': {'voters': 'ab'}}
    votes = [{
        'node_pubkey': c,
        'vote': {'is_block_valid': True, 'previous_block': 'a'}
    } for c in 'abc']

    assert TestVoting.block_election(block, votes, keyring) == {
        'status': VALID,
        'block_id': 'xyz',
        'counts': {'n_valid': 2, 'n_invalid': 0},
        'ineligible': [votes[-1]],
        'cheat': [],
        'malformed': [],
        'previous_block': 'a',
        'other_previous_block': {},
    }

@ -100,7 +100,7 @@ def test_get_divisble_transactions_returns_500(b, client):

    asset_id = create_tx.id

    url = TX_ENDPOINT + "?asset_id=" + asset_id
    url = TX_ENDPOINT + '?asset_id=' + asset_id
    assert client.get(url).status_code == 200
    assert len(client.get(url).json) == 3

@ -30,7 +30,7 @@ def test_get_block_status_endpoint_undecided(b, client):
    block = b.create_block([tx])
    b.write_block(block)

    status = b.block_election_status(block.id, block.voters)
    status = b.block_election_status(block)

    res = client.get(STATUSES_ENDPOINT + '?block_id=' + block.id)
    assert status == res.json['status']
@ -51,7 +51,7 @@ def test_get_block_status_endpoint_valid(b, client):
    vote = b.vote(block.id, b.get_last_voted_block().id, True)
    b.write_vote(vote)

    status = b.block_election_status(block.id, block.voters)
    status = b.block_election_status(block)

    res = client.get(STATUSES_ENDPOINT + '?block_id=' + block.id)
    assert status == res.json['status']
@ -72,7 +72,7 @@ def test_get_block_status_endpoint_invalid(b, client):
    vote = b.vote(block.id, b.get_last_voted_block().id, False)
    b.write_vote(vote)

    status = b.block_election_status(block.id, block.voters)
    status = b.block_election_status(block)

    res = client.get(STATUSES_ENDPOINT + '?block_id=' + block.id)
    assert status == res.json['status']

@ -1,4 +1,3 @@
import builtins
import json
from unittest.mock import patch

@ -113,18 +112,15 @@ def test_post_create_transaction_with_invalid_schema(client, caplog):
    ('DoubleSpend', 'Nope! It is gone now!'),
    ('InvalidHash', 'Do not smoke that!'),
    ('InvalidSignature', 'Falsche Unterschrift!'),
    ('OperationError', 'Create and transfer!'),
    ('TransactionDoesNotExist', 'Hallucinations?'),
    ('ValidationError', 'Create and transfer!'),
    ('InputDoesNotExist', 'Hallucinations?'),
    ('TransactionOwnerError', 'Not yours!'),
    ('TransactionNotInValidBlock', 'Wait, maybe?'),
    ('ValueError', '?'),
    ('ValidationError', '?'),
))
def test_post_invalid_transaction(client, exc, msg, monkeypatch, caplog):
    from bigchaindb.common import exceptions
    try:
        exc_cls = getattr(exceptions, exc)
    except AttributeError:
        exc_cls = getattr(builtins, 'ValueError')
    exc_cls = getattr(exceptions, exc)

    def mock_validation(self_, tx):
        raise exc_cls(msg)