Merge branch 'master' into unify-mongodb-and-rethinkdb-connection

This commit is contained in: commit c5b496b006
@@ -15,6 +15,14 @@ For reference, the possible headings are:
* **External Contributors** to list contributors outside of BigchainDB GmbH.
* **Notes**

+## [0.9.2] - 2017-03-02
+Tag name: v0.9.2
+
+### Fixed
+Pin the `python-rapidjson` library in `setup.py` to prevent `bigchaindb`'s
+installation from failing due to
+https://github.com/python-rapidjson/python-rapidjson/issues/62.
+
## [0.9.1] - 2017-02-06
Tag name: v0.9.1
@@ -2,6 +2,8 @@

There are many ways you can contribute to the BigchainDB project, some very easy and others more involved. We want to be friendly and welcoming to all potential contributors, so we ask that everyone involved abide by some simple guidelines outlined in our [Code of Conduct](./CODE_OF_CONDUCT.md).

+Or, are you interested in contributing full-time? BigchainDB is hiring. See [here](https://github.com/bigchaindb/org/blob/master/engjob.md).
+
## Easy Ways to Contribute

The BigchainDB community has a Google Group and a Gitter chatroom. Our [Community page](https://www.bigchaindb.com/community) has more information about those.
@@ -26,6 +26,8 @@ WORKDIR /usr/src/app

RUN pip3 install --no-cache-dir -e .

VOLUME ["/data"]

WORKDIR /data

ENV BIGCHAINDB_CONFIG_PATH /data/.bigchaindb
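Taken together, these directives mean a container looks for its configuration at `/data/.bigchaindb` on the mounted volume. The essential pairing, as a minimal sketch (the full invocation with all flags appears in the docs changes further below; the host directory is just an example):

```text
docker run --volume "$HOME/bigchaindb_docker:/data" bigchaindb/bigchaindb start
```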
@@ -15,3 +15,7 @@ class OperationError(BackendError):

class DuplicateKeyError(OperationError):
    """Exception raised when an insert fails because the key is not unique"""
+
+
+class BigchainDBCritical(Exception):
+    """Unhandleable error that requires attention"""
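One way a backend driver error might be translated into the new `DuplicateKeyError`, keeping callers driver-agnostic — a minimal sketch assuming a pymongo collection; the `insert_unique` helper is hypothetical, not part of the codebase:

```python
from pymongo import errors

from bigchaindb.backend.exceptions import DuplicateKeyError


def insert_unique(collection, document):
    # Hypothetical helper: convert the pymongo-specific error into the
    # backend-agnostic DuplicateKeyError defined above.
    try:
        return collection.insert_one(document)
    except errors.DuplicateKeyError as exc:
        raise DuplicateKeyError(exc.args) from exc
```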
@@ -212,13 +212,6 @@ def get_block(conn, block_id):
        projection={'_id': False}))


-@register_query(MongoDBConnection)
-def has_transaction(conn, transaction_id):
-    return bool(conn.run(
-        conn.collection('bigchain')
-        .find_one({'block.transactions.id': transaction_id})))
-
-
@register_query(MongoDBConnection)
def count_blocks(conn):
    return conn.run(
@@ -211,20 +211,6 @@ def get_block(connection, block_id):
    raise NotImplementedError


-@singledispatch
-def has_transaction(connection, transaction_id):
-    """Check if a transaction exists in the bigchain table.
-
-    Args:
-        transaction_id (str): the id of the transaction to check.
-
-    Returns:
-        ``True`` if the transaction exists, ``False`` otherwise.
-    """
-
-    raise NotImplementedError
-
-
@singledispatch
def count_blocks(connection):
    """Count the number of blocks in the bigchain table.
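The generic interface above is built on `functools.singledispatch`: each function raises `NotImplementedError` until a backend registers an implementation keyed on its connection class. A self-contained sketch of that dispatch pattern (`DummyConnection` is hypothetical, not part of the codebase):

```python
from functools import singledispatch


@singledispatch
def count_blocks(connection):
    """Generic stub: raised when no backend is registered for this type."""
    raise NotImplementedError


class DummyConnection:
    """Hypothetical stand-in for a backend connection class."""


@count_blocks.register(DummyConnection)
def _count_blocks_dummy(connection):
    # The registered implementation wins over the generic stub.
    return 0


assert count_blocks(DummyConnection()) == 0  # dispatches on the argument's type
```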
@@ -158,13 +158,6 @@ def get_block(connection, block_id):
    return connection.run(r.table('bigchain').get(block_id))


-@register_query(RethinkDBConnection)
-def has_transaction(connection, transaction_id):
-    return bool(connection.run(
-        r.table('bigchain', read_mode=READ_MODE)
-        .get_all(transaction_id, index='transaction_id').count()))
-
-
@register_query(RethinkDBConnection)
def count_blocks(connection):
    return connection.run(
@@ -16,11 +16,15 @@ import copy
import json
import logging
import collections
+from functools import lru_cache

+from pkg_resources import iter_entry_points, ResolutionError

from bigchaindb.common import exceptions

import bigchaindb

+from bigchaindb.consensus import BaseConsensusRules

# TODO: move this to a proper configuration file for logging
logging.getLogger('requests').setLevel(logging.WARNING)
@@ -240,3 +244,40 @@ def autoconfigure(filename=None, config=None, force=False):
    newconfig = update(newconfig, config)

    set_config(newconfig)  # sets bigchaindb.config
+
+
+@lru_cache()
+def load_consensus_plugin(name=None):
+    """Find and load the chosen consensus plugin.
+
+    Args:
+        name (string): the name of the entry_point, as advertised in the
+            setup.py of the providing package.
+
+    Returns:
+        an uninstantiated subclass of ``bigchaindb.consensus.AbstractConsensusRules``
+    """
+    if not name:
+        return BaseConsensusRules
+
+    # TODO: This will return the first plugin with group `bigchaindb.consensus`
+    # and name `name` in the active WorkingSet.
+    # We should probably support Requirements specs in the config, e.g.
+    # consensus_plugin: 'my-plugin-package==0.0.1;default'
+    plugin = None
+    for entry_point in iter_entry_points('bigchaindb.consensus', name):
+        plugin = entry_point.load()
+
+    # No matching entry_point found
+    if not plugin:
+        raise ResolutionError(
+            'No plugin found in group `bigchaindb.consensus` with name `{}`'.
+            format(name))
+
+    # Is this strictness desirable?
+    # It will probably reduce developer headaches in the wild.
+    if not issubclass(plugin, (BaseConsensusRules,)):
+        raise TypeError('object of type "{}" does not implement `bigchaindb.'
+                        'consensus.BaseConsensusRules`'.format(type(plugin)))
+
+    return plugin
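For context, `iter_entry_points` discovers classes that other packages advertise via setuptools entry points. A sketch of what a provider package might declare (all names here are hypothetical):

```python
# setup.py of a hypothetical plugin package. The group name
# 'bigchaindb.consensus' is the group that load_consensus_plugin() scans;
# 'my_rules' is the name a user would put in the consensus_plugin setting.
from setuptools import setup

setup(
    name='bigchaindb-my-rules',
    version='0.0.1',
    py_modules=['my_rules'],
    entry_points={
        'bigchaindb.consensus': [
            'my_rules = my_rules:MyRules',
        ],
    },
)
```

Here `my_rules.py` would define `class MyRules(BaseConsensusRules): ...`, which satisfies the `issubclass` check above.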
@@ -10,6 +10,11 @@ logger = logging.getLogger(__name__)

class BaseConsensusRules():
    """Base consensus rules for Bigchain.
+
+    A consensus plugin must expose a class inheriting from this one via an entry_point.
+
+    All methods listed below must be implemented.
+
    """

    @staticmethod
@@ -11,6 +11,7 @@ from bigchaindb.common.transaction import TransactionLink
import bigchaindb

from bigchaindb import backend, config_utils, utils
+from bigchaindb.backend import exceptions as backend_exceptions
from bigchaindb.consensus import BaseConsensusRules
from bigchaindb.models import Block, Transaction
@@ -55,10 +56,18 @@ class Bigchain(object):
        self.me = public_key or bigchaindb.config['keypair']['public']
        self.me_private = private_key or bigchaindb.config['keypair']['private']
        self.nodes_except_me = keyring or bigchaindb.config['keyring']

        if backlog_reassign_delay is None:
            backlog_reassign_delay = bigchaindb.config['backlog_reassign_delay']
        self.backlog_reassign_delay = backlog_reassign_delay
-        self.consensus = BaseConsensusRules
+
+        consensusPlugin = bigchaindb.config.get('consensus_plugin')
+
+        if consensusPlugin:
+            self.consensus = config_utils.load_consensus_plugin(consensusPlugin)
+        else:
+            self.consensus = BaseConsensusRules
+
        self.connection = connection if connection else backend.connect(**bigchaindb.config['database'])
        if not self.me or not self.me_private:
            raise exceptions.KeypairNotFoundException()
@@ -178,6 +187,22 @@ class Bigchain(object):
                exceptions.TransactionNotInValidBlock, exceptions.AmountError):
            return False

+    def is_new_transaction(self, txid, exclude_block_id=None):
+        """
+        Return True if the transaction does not exist in any
+        VALID or UNDECIDED block. Return False otherwise.
+
+        Args:
+            txid (str): Transaction ID
+            exclude_block_id (str): Exclude block from search
+        """
+        block_statuses = self.get_blocks_status_containing_tx(txid)
+        block_statuses.pop(exclude_block_id, None)
+        for status in block_statuses.values():
+            if status != self.BLOCK_INVALID:
+                return False
+        return True
+
    def get_block(self, block_id, include_status=False):
        """Get the block with the specified `block_id` (and optionally its status)
@@ -308,11 +333,10 @@ class Bigchain(object):
        if list(validity.values()).count(Bigchain.BLOCK_VALID) > 1:
            block_ids = str([block for block in validity
                             if validity[block] == Bigchain.BLOCK_VALID])
-            raise exceptions.DoubleSpend('Transaction {tx} is present in '
-                                         'multiple valid blocks: '
-                                         '{block_ids}'
-                                         .format(tx=txid,
-                                                 block_ids=block_ids))
+            raise backend_exceptions.BigchainDBCritical(
+                'Transaction {tx} is present in '
+                'multiple valid blocks: {block_ids}'
+                .format(tx=txid, block_ids=block_ids))

        return validity
@@ -526,9 +550,6 @@ class Bigchain(object):

        return backend.query.write_block(self.connection, block)

-    def transaction_exists(self, transaction_id):
-        return backend.query.has_transaction(self.connection, transaction_id)
-
    def prepare_genesis_block(self):
        """Prepare a genesis block."""
@@ -67,28 +67,19 @@ class BlockPipeline:
                AmountError):
            return None

-        if self.bigchain.transaction_exists(tx.id):
-            # if the transaction already exists, we must check whether
-            # it's in a valid or undecided block
-            tx, status = self.bigchain.get_transaction(tx.id,
-                                                       include_status=True)
-            if status == self.bigchain.TX_VALID \
-               or status == self.bigchain.TX_UNDECIDED:
-                # if the tx is already in a valid or undecided block,
-                # then it no longer should be in the backlog, or added
-                # to a new block. We can delete and drop it.
-                self.bigchain.delete_transaction(tx.id)
-                return None
-
-        tx_validated = self.bigchain.is_valid_transaction(tx)
-        if tx_validated:
-            return tx
-        else:
-            # if the transaction is not valid, remove it from the
-            # backlog
-            self.bigchain.delete_transaction(tx.id)
-            return None
+        # If transaction is in any VALID or UNDECIDED block we
+        # should not include it again
+        if not self.bigchain.is_new_transaction(tx.id):
+            self.bigchain.delete_transaction(tx.id)
+            return None
+
+        # If transaction is not valid it should not be included
+        if not self.bigchain.is_valid_transaction(tx):
+            self.bigchain.delete_transaction(tx.id)
+            return None
+
+        return tx

    def create(self, tx, timeout=False):
        """Create a block.
@@ -14,7 +14,6 @@ import bigchaindb
from bigchaindb import Bigchain
from bigchaindb import backend
from bigchaindb.backend.changefeed import ChangeFeed
-from bigchaindb.consensus import BaseConsensusRules
from bigchaindb.models import Transaction, Block
from bigchaindb.common import exceptions
@@ -35,10 +34,10 @@ class Vote:
        # Since we cannot share a connection to RethinkDB using multiprocessing,
        # we need to create a temporary instance of BigchainDB that we use
        # only to query RethinkDB
-        self.consensus = BaseConsensusRules

        # This is the Bigchain instance that will be "shared" (aka: copied)
        # by all the subprocesses

        self.bigchain = Bigchain()
        self.last_voted_id = Bigchain().get_last_voted_block().id
@@ -90,7 +89,8 @@ class Vote:
            yield tx, block_id, num_tx

    def validate_tx(self, tx, block_id, num_tx):
-        """Validate a transaction.
+        """Validate a transaction. Transaction must also not be in any VALID
+        block.

        Args:
            tx (dict): the transaction to validate
@@ -101,7 +101,12 @@ class Vote:
            Three values are returned: the validity of the transaction,
            ``block_id``, and ``num_tx``.
        """
-        return bool(self.bigchain.is_valid_transaction(tx)), block_id, num_tx
+        new = self.bigchain.is_new_transaction(tx.id, exclude_block_id=block_id)
+        if not new:
+            return False, block_id, num_tx
+
+        valid = bool(self.bigchain.is_valid_transaction(tx))
+        return valid, block_id, num_tx

    def vote(self, tx_validity, block_id, num_tx):
        """Collect the validity of transactions and cast a vote when ready.
@@ -42,23 +42,10 @@ This writes two files: `~/.aws/credentials` and `~/.aws/config`. AWS tools and p

Eventually, you'll have one or more instances (virtual machines) running on AWS and you'll want to SSH to them. To do that, you need a public/private key pair. The public key will be sent to AWS, and you can tell AWS to put it in any instances you provision there. You'll keep the private key on your local workstation.

-First you need to make up a key name. Some ideas:
+See the [page about how to generate a key pair for SSH](generate-key-pair-for-ssh.html).

-* `bcdb-troy-1`
-* `bigchaindb-7`
-* `bcdb-jupiter`
-
-If you already have key pairs on AWS (Amazon EC2), you have to pick a name that's not already being used.
-Below, replace every instance of `<key-name>` with your actual key name.
-To generate a public/private RSA key pair with that name:
-```text
-ssh-keygen -t rsa -C "<key-name>" -f ~/.ssh/<key-name>
-```
-
-It will ask you for a passphrase. You can use whatever passphrase you like, but don't lose it. Two keys (files) will be created in `~/.ssh/`:
-
-1. `~/.ssh/<key-name>.pub` is the public key
-2. `~/.ssh/<key-name>` is the private key
## Send the Public Key to AWS

To send the public key to AWS, use the AWS Command-Line Interface:
```text
docs/server/source/appendices/generate-key-pair-for-ssh.md (new file, 34 lines)

@@ -0,0 +1,34 @@
# Generate a Key Pair for SSH

This page describes how to use `ssh-keygen`
to generate a public/private RSA key pair
that can be used with SSH.
(Note: `ssh-keygen` is found on most Linux and Unix-like
operating systems; if you're using Windows,
then you'll have to use another tool,
such as PuTTYgen.)

By convention, SSH key pairs get stored in the `~/.ssh/` directory.
Check what keys you already have there:
```text
ls -1 ~/.ssh/
```

Next, make up a new key pair name (called `<name>` below).
Here are some ideas:

* `aws-bdb-2`
* `tim-bdb-azure`
* `chris-bcdb-key`

Next, generate a public/private RSA key pair with that name:
```text
ssh-keygen -t rsa -C "<name>" -f ~/.ssh/<name>
```

It will ask you for a passphrase.
You can use whatever passphrase you like, but don't lose it.
Two keys (files) will be created in `~/.ssh/`:

1. `~/.ssh/<name>.pub` is the public key
2. `~/.ssh/<name>` is the private key
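If you later need to paste the public key somewhere (for example, into a cloud provider's web console), you can print it with:
```text
cat ~/.ssh/<name>.pub
```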
@@ -17,6 +17,7 @@ Appendices
   pipelines
   backend
   aws-setup
+   generate-key-pair-for-ssh
   firewall-notes
   ntp-notes
   example-rethinkdb-storage-setups
@@ -75,6 +75,8 @@ docker run \
  --name=rethinkdb \
  --publish=172.17.0.1:28015:28015 \
  --publish=172.17.0.1:58080:8080 \
  --restart=always \
  --volume "$HOME/bigchaindb_docker:/data" \
  rethinkdb:2.3
```
@@ -85,11 +87,25 @@ You can also access the RethinkDB dashboard at

#### For MongoDB

Note: MongoDB runs as user `mongodb`, which has UID `999` and GID `999`
inside the container. For the volume to be mounted properly, your host
should also have a `mongodb` user with UID and GID `999`.
If another user on the host already has UID `999`, the mapped files will
be owned by that user on the host.
If there is no owner with UID `999`, you can create the corresponding user and
group:

`groupadd -r --gid 999 mongodb && useradd -r --uid 999 -g mongodb mongodb`
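To verify the result, `id mongodb` should then report UID `999` and GID `999`.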

```text
docker run \
  --detach \
  --name=mongodb \
  --publish=172.17.0.1:27017:27017 \
  --restart=always \
  --volume=/tmp/mongodb_docker/db:/data/db \
  --volume=/tmp/mongodb_docker/configdb:/data/configdb \
  mongo:3.4.1 --replSet=bigchain-rs
```
@@ -100,6 +116,7 @@ docker run \
  --detach \
  --name=bigchaindb \
  --publish=59984:9984 \
  --restart=always \
  --volume=$HOME/bigchaindb_docker:/data \
  bigchaindb/bigchaindb \
  start
@@ -21,7 +21,7 @@ Step 2: Configure kubectl

The default location of the kubectl configuration file is ``~/.kube/config``.
If you don't have that file, then you need to get it.

-If you deployed your Kubernetes cluster on Azure
+**Azure.** If you deployed your Kubernetes cluster on Azure
using the Azure CLI 2.0 (as per :doc:`our template <template-kubernetes-azure>`),
then you can get the ``~/.kube/config`` file using:
@@ -32,15 +32,128 @@ then you can get the ``~/.kube/config`` file using:
   --name <ACS cluster name>


-Step 3: Run a MongoDB Container
--------------------------------
+Step 3: Create a StorageClass
+-----------------------------

-To start a MongoDB Docker container in a pod on one of the cluster nodes:
+MongoDB needs somewhere to store its data persistently,
+outside the container where MongoDB is running.
+Explaining how Kubernetes handles persistent volumes,
+and the associated terminology,
+is beyond the scope of this documentation;
+see `the Kubernetes docs about persistent volumes
+<https://kubernetes.io/docs/user-guide/persistent-volumes>`_.
+
+The first thing to do is create a Kubernetes StorageClass.
+
+**Azure.** First, you need an Azure storage account.
+If you deployed your Kubernetes cluster on Azure
+using the Azure CLI 2.0
+(as per :doc:`our template <template-kubernetes-azure>`),
+then the ``az acs create`` command already created two
+storage accounts in the same location and resource group
+as your Kubernetes cluster.
+Both should have the same "storage account SKU": ``Standard_LRS``.
+Standard storage is lower-cost and lower-performance.
+It uses hard disk drives (HDD).
+LRS means locally-redundant storage: three replicas
+in the same data center.
+
+Premium storage is higher-cost and higher-performance.
+It uses solid state drives (SSD).
+At the time of writing,
+when we created a storage account with SKU ``Premium_LRS``
+and tried to use that,
+the PersistentVolumeClaim would get stuck in a "Pending" state.
+For future reference, the command to create a storage account is
+`az storage account create <https://docs.microsoft.com/en-us/cli/azure/storage/account#create>`_.
+
+Create a Kubernetes StorageClass named ``slow``
+by writing a file named ``azureStorageClass.yml`` containing:
+
+.. code:: yaml
+
+   kind: StorageClass
+   apiVersion: storage.k8s.io/v1beta1
+   metadata:
+     name: slow
+   provisioner: kubernetes.io/azure-disk
+   parameters:
+     skuName: Standard_LRS
+     location: <region where your cluster is located>
+
+and then:

.. code:: bash

-   $ kubectl ?????
+   $ kubectl apply -f azureStorageClass.yml
+
+You can check if it worked using ``kubectl get storageclasses``.
+
+Note that there is no line of the form
+``storageAccount: <azure storage account name>``
+under ``parameters:``. When we included one
+and then created a PersistentVolumeClaim based on it,
+the PersistentVolumeClaim would get stuck
+in a "Pending" state.
+Kubernetes just looks for a storageAccount
+with the specified skuName and location.


-Note: The BigchainDB Dashboard can be deployed
-as a Docker container, like everything else.
+Step 4: Create a PersistentVolumeClaim
+--------------------------------------
+
+Next, you'll create a PersistentVolumeClaim named ``mongoclaim``.
+Create a file named ``mongoclaim.yml``
+with the following contents:
+
+.. code:: yaml
+
+   kind: PersistentVolumeClaim
+   apiVersion: v1
+   metadata:
+     name: mongoclaim
+     annotations:
+       volume.beta.kubernetes.io/storage-class: slow
+   spec:
+     accessModes:
+       - ReadWriteOnce
+     resources:
+       requests:
+         storage: 20Gi
+
+Note how there's no explicit mention of Azure, AWS or whatever.
+``ReadWriteOnce`` (RWO) means the volume can be mounted as
+read-write by a single Kubernetes node.
+(``ReadWriteOnce`` is the *only* access mode supported
+by AzureDisk.)
+``storage: 20Gi`` means the volume has a size of 20
+`gibibytes <https://en.wikipedia.org/wiki/Gibibyte>`_.
+(You can change that if you like.)
+
+Create ``mongoclaim`` in your Kubernetes cluster:
+
+.. code:: bash
+
+   $ kubectl apply -f mongoclaim.yml
+
+You can check its status using:
+
+.. code:: bash
+
+   $ kubectl get pvc
+
+Initially, the status of ``mongoclaim`` might be "Pending"
+but it should become "Bound" fairly quickly.
+
+.. code:: bash
+
+   $ kubectl describe pvc
+   Name:          mongoclaim
+   Namespace:     default
+   StorageClass:  slow
+   Status:        Bound
+   Volume:        pvc-ebed81f1-fdca-11e6-abf0-000d3a27ab21
+   Labels:        <none>
+   Capacity:      2Gi
+   Access Modes:  RWO
+   No events.
@@ -18,7 +18,20 @@ You may find that you have to sign up for a Free Trial subscription first.
That's okay: you can have many subscriptions.


-Step 2: Deploy an Azure Container Service (ACS)
+Step 2: Create an SSH Key Pair
+------------------------------
+
+You'll want an SSH key pair so you'll be able to SSH
+to the virtual machines that you'll deploy in the next step.
+(If you already have an SSH key pair, you *could* reuse it,
+but it's probably a good idea to make a new SSH key pair
+for your Kubernetes VMs and nothing else.)
+
+See the
+:ref:`page about how to generate a key pair for SSH <Generate a Key Pair for SSH>`.
+
+
+Step 3: Deploy an Azure Container Service (ACS)
-----------------------------------------------

It's *possible* to deploy an Azure Container Service (ACS)
@@ -26,16 +39,18 @@ from the `Azure Portal <https://portal.azure.com>`_
(i.e. online in your web browser)
but it's actually easier to do it using the Azure
Command-Line Interface (CLI).
(The Azure Portal will ask you for a public SSH key
and a "service principal," and you'll have to create those
first if they don't exist. The CLI will create them
for you if necessary.)

Microsoft has `instructions to install the Azure CLI 2.0
on most common operating systems
<https://docs.microsoft.com/en-us/cli/azure/install-az-cli2>`_.
Do that.

First, update the Azure CLI to the latest version:

.. code:: bash

   $ az component update

Next, login to your account using:

.. code:: bash
@@ -82,8 +97,7 @@ Finally, you can deploy an ACS using something like:
       --agent-count 3 \
       --agent-vm-size Standard_D2_v2 \
       --dns-prefix <make up a name> \
-      --generate-ssh-keys \
       --location <same location as the resource group> \
+      --ssh-key-value ~/.ssh/<name>.pub \
       --orchestrator-type kubernetes

There are more options. For help understanding all the options, use the built-in help:
@@ -100,4 +114,32 @@ and click on the one you created
to see all the resources in it.

Next, you can :doc:`run a BigchainDB node on your new
Kubernetes cluster <node-on-kubernetes>`.
+
+
+Optional: SSH to Your New Kubernetes Cluster Nodes
+--------------------------------------------------
+
+You can SSH to one of the just-deployed Kubernetes "master" nodes
+(virtual machines) using:
+
+.. code:: bash
+
+   $ ssh -i ~/.ssh/<name> azureuser@<master-ip-address-or-hostname>
+
+where you can get the IP address or hostname
+of a master node from the Azure Portal.
+Note how the default username is ``azureuser``.
+
+The "agent" nodes don't get public IP addresses or hostnames,
+so you can't SSH to them *directly*,
+but you can first SSH to the master
+and then SSH to an agent from there
+(using the *private* IP address or hostname of the agent node).
+To do that, you either need to copy your SSH key pair to
+the master (a bad idea),
+or use something like
+`SSH agent forwarding <https://yakking.branchable.com/posts/ssh-A/>`_ (better).
+
+Next, you can :doc:`run a BigchainDB node on your new
+Kubernetes cluster <node-on-kubernetes>`.
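A sketch of the agent-forwarding flow mentioned above (the agent's private IP address is a placeholder you'd look up from the master):

.. code:: bash

   $ ssh-add ~/.ssh/<name>
   $ ssh -A azureuser@<master-ip-address-or-hostname>
   $ ssh azureuser@<agent-private-ip-address>

The ``-A`` flag forwards your local SSH agent to the master, so the second ``ssh`` (run on the master) can authenticate to the agent node without any private key ever being copied there.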
@@ -21,6 +21,7 @@ For convenience, here's a list of all the relevant environment variables (docume
`BIGCHAINDB_SERVER_THREADS`<br>
`BIGCHAINDB_CONFIG_PATH`<br>
`BIGCHAINDB_BACKLOG_REASSIGN_DELAY`<br>
+`BIGCHAINDB_CONSENSUS_PLUGIN`<br>

The local config file is `$HOME/.bigchaindb` by default (a file which might not even exist), but you can tell BigchainDB to use a different file by using the `-c` command-line option, e.g. `bigchaindb -c path/to/config_file.json start`
or using the `BIGCHAINDB_CONFIG_PATH` environment variable, e.g. `BIGCHAINDB_CONFIG_PATH=.my_bigchaindb_config bigchaindb start`.
@@ -54,7 +55,7 @@ Internally (i.e. in the Python code), both keys have a default value of `None`,

## keyring

A list of the public keys of all the nodes in the cluster, excluding the public key of this node.

**Example using an environment variable**
```text
@@ -65,7 +66,7 @@ Note how the keys in the list are separated by colons.

**Example config file snippet**
```js
"keyring": ["BnCsre9MPBeQK8QZBFznU2dJJ2GwtvnSMdemCmod2XPB",
            "4cYQHoQrvPiut3Sjs8fVR1BMZZpJjMTC4bsMTt9V71aQ"]
```
@@ -152,9 +153,23 @@ Specifies how long, in seconds, transactions can remain in the backlog before be

**Example using environment variables**
```text
export BIGCHAINDB_BACKLOG_REASSIGN_DELAY=30
```

**Default value (from a config file)**
```js
"backlog_reassign_delay": 120
```

+## consensus_plugin
+
+The [consensus plugin](../appendices/consensus.html) to use.
+
+**Example using an environment variable**
+```text
+export BIGCHAINDB_CONSENSUS_PLUGIN=default
+```
+
+**Example config file snippet: the default**
+```js
+"consensus_plugin": "default"
+```
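A quick way to see what the configured value resolves to, using the loader added in `config_utils.py` above (a sketch; with no name given, the loader falls back to the base rules):

```python
from bigchaindb import config_utils

# No name given: load_consensus_plugin() returns BaseConsensusRules itself.
rules = config_utils.load_consensus_plugin()
assert rules.__name__ == 'BaseConsensusRules'
```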
setup.py (2 lines changed)

@@ -65,7 +65,7 @@ install_requires = [
    'pymongo~=3.4',
    'pysha3~=1.0.2',
    'cryptoconditions>=0.5.0',
-    'python-rapidjson>=0.0.8',
+    'python-rapidjson==0.0.8',
    'logstats>=0.2.1',
    'flask>=0.10.1',
    'flask-restful~=0.3.0',
@@ -248,19 +248,6 @@ def test_get_block(signed_create_tx):
    assert block_db == block.to_dict()


-def test_has_transaction(signed_create_tx):
-    from bigchaindb.backend import connect, query
-    from bigchaindb.models import Block
-    conn = connect()
-
-    # create and insert block
-    block = Block(transactions=[signed_create_tx])
-    conn.db.bigchain.insert_one(block.to_dict())
-
-    assert query.has_transaction(conn, signed_create_tx.id)
-    assert query.has_transaction(conn, 'aaa') is False
-
-
def test_count_blocks(signed_create_tx):
    from bigchaindb.backend import connect, query
    from bigchaindb.models import Block
@@ -29,7 +29,6 @@ def test_schema(schema_func_name, args_qty):
    ('get_votes_by_block_id', 1),
    ('write_block', 1),
    ('get_block', 1),
-    ('has_transaction', 1),
    ('write_vote', 1),
    ('get_last_voted_block', 1),
    ('get_unvoted_blocks', 1),
@@ -90,8 +90,8 @@ class TestBigchainApi(object):
        assert b.has_previous_vote(block.id, block.voters) is True

    @pytest.mark.genesis
-    def test_get_spent_with_double_spend(self, b, monkeypatch):
-        from bigchaindb.common.exceptions import DoubleSpend
+    def test_get_spent_with_double_inclusion_detected(self, b, monkeypatch):
+        from bigchaindb.backend.exceptions import BigchainDBCritical
        from bigchaindb.models import Transaction

        tx = Transaction.create([b.me], [([b.me], 1)])
@@ -115,18 +115,18 @@ class TestBigchainApi(object):
        block3 = b.create_block([transfer_tx2])
        b.write_block(block3)

-        # Vote both block2 and block3 valid to provoke a double spend
+        # Vote both block2 and block3 valid
        vote = b.vote(block2.id, b.get_last_voted_block().id, True)
        b.write_vote(vote)
        vote = b.vote(block3.id, b.get_last_voted_block().id, True)
        b.write_vote(vote)

-        with pytest.raises(DoubleSpend):
+        with pytest.raises(BigchainDBCritical):
            b.get_spent(tx.id, 0)

    @pytest.mark.genesis
-    def test_get_block_status_for_tx_with_double_spend(self, b, monkeypatch):
-        from bigchaindb.common.exceptions import DoubleSpend
+    def test_get_block_status_for_tx_with_double_inclusion(self, b, monkeypatch):
+        from bigchaindb.backend.exceptions import BigchainDBCritical
        from bigchaindb.models import Transaction

        tx = Transaction.create([b.me], [([b.me], 1)])
@@ -146,7 +146,7 @@ class TestBigchainApi(object):
        vote = b.vote(block2.id, b.get_last_voted_block().id, True)
        b.write_vote(vote)

-        with pytest.raises(DoubleSpend):
+        with pytest.raises(BigchainDBCritical):
            b.get_blocks_status_containing_tx(tx.id)

    @pytest.mark.genesis
@@ -1240,3 +1240,40 @@ def test_transaction_unicode(b):
    assert b.get_block(block.id) == block.to_dict()
    assert block.validate(b) == block
    assert beer_json in serialize(block.to_dict())
+
+
+@pytest.mark.bdb
+def test_is_new_transaction(b, genesis_block):
+    from bigchaindb.models import Transaction
+
+    def write_tx(n):
+        tx = Transaction.create([b.me], [([b.me], n)])
+        tx = tx.sign([b.me_private])
+        # Tx is new because it's not in any block
+        assert b.is_new_transaction(tx.id)
+
+        block = b.create_block([tx])
+        b.write_block(block)
+        return tx, block
+
+    # test VALID case
+    tx, block = write_tx(1)
+    # Tx is now in an undecided block
+    assert not b.is_new_transaction(tx.id)
+    assert b.is_new_transaction(tx.id, exclude_block_id=block.id)
+    # After voting valid, it should not be new
+    vote = b.vote(block.id, genesis_block.id, True)
+    b.write_vote(vote)
+    assert not b.is_new_transaction(tx.id)
+    assert b.is_new_transaction(tx.id, exclude_block_id=block.id)
+
+    # test INVALID case
+    tx, block = write_tx(2)
+    # Tx is now in an undecided block
+    assert not b.is_new_transaction(tx.id)
+    assert b.is_new_transaction(tx.id, exclude_block_id=block.id)
+    vote = b.vote(block.id, genesis_block.id, False)
+    b.write_vote(vote)
+    # Tx is new because it's only found in an invalid block
+    assert b.is_new_transaction(tx.id)
+    assert b.is_new_transaction(tx.id, exclude_block_id=block.id)
@@ -629,3 +629,17 @@ def test_start(mock_start, b):
    from bigchaindb.pipelines import vote
    vote.start()
    mock_start.assert_called_with()
+
+
+@pytest.mark.genesis
+def test_vote_no_double_inclusion(b):
+    from bigchaindb.pipelines import vote
+
+    tx = dummy_tx(b)
+    block = b.create_block([tx])
+    r = vote.Vote().validate_tx(tx, block.id, 1)
+    assert r == (True, block.id, 1)
+
+    b.write_block(block)
+    r = vote.Vote().validate_tx(tx, 'other_block_id', 1)
+    assert r == (False, 'other_block_id', 1)
@@ -48,6 +48,36 @@ def test_bigchain_instance_raises_when_not_configured(request, monkeypatch):
    bigchaindb.Bigchain()


+def test_load_consensus_plugin_loads_default_rules_without_name():
+    from bigchaindb import config_utils
+    from bigchaindb.consensus import BaseConsensusRules
+
+    assert config_utils.load_consensus_plugin() == BaseConsensusRules
+
+
+def test_load_consensus_plugin_raises_with_unknown_name():
+    from pkg_resources import ResolutionError
+    from bigchaindb import config_utils
+
+    with pytest.raises(ResolutionError):
+        config_utils.load_consensus_plugin('bogus')
+
+
+def test_load_consensus_plugin_raises_with_invalid_subclass(monkeypatch):
+    # Monkeypatch entry_point.load to return something other than a
+    # ConsensusRules instance
+    from bigchaindb import config_utils
+    import time
+    monkeypatch.setattr(config_utils,
+                        'iter_entry_points',
+                        lambda *args: [type('entry_point', (object,), {'load': lambda: object})])
+
+    with pytest.raises(TypeError):
+        # Since the function is decorated with `lru_cache`, we need to
+        # "miss" the cache using a name that has not been used previously
+        config_utils.load_consensus_plugin(str(time.time()))
+
+
def test_map_leafs_iterator():
    from bigchaindb import config_utils
@@ -90,12 +90,3 @@ def test_has_previous_vote(monkeypatch):
    block = {'votes': ({'node_pubkey': 'pubkey'},)}
    with pytest.raises(Exception):
        bigchain.has_previous_vote(block)
-
-
-@pytest.mark.parametrize('exists', (True, False))
-def test_transaction_exists(monkeypatch, exists):
-    from bigchaindb.core import Bigchain
-    monkeypatch.setattr(
-        'bigchaindb.backend.query.has_transaction', lambda x, y: exists)
-    bigchain = Bigchain(public_key='pubkey', private_key='privkey')
-    assert bigchain.transaction_exists('txid') is exists