Merge branch 'alpha-3'

This commit is contained in:
Ahmed Muawia Khan 2018-05-02 16:35:49 +02:00
commit 82e3f21c9a
57 changed files with 768 additions and 773 deletions

View File

@ -30,10 +30,6 @@ matrix:
- python: 3.5
env: TOXENV=docsserver
include:
- python: 3.5
env:
- BIGCHAINDB_DATABASE_BACKEND=localmongodb
- BIGCHAINDB_DATABASE_SSL=
- python: 3.6
env:
- BIGCHAINDB_DATABASE_BACKEND=localmongodb

View File

@ -5,6 +5,8 @@ COPY . /usr/src/app/
WORKDIR /usr/src/app
RUN apt-get -qq update \
&& apt-get -y upgrade \
&& apt-get install -y jq \
&& pip install --no-cache-dir --process-dependency-links . \
&& pip install --no-cache-dir . \
&& apt-get autoremove \
&& apt-get clean

View File

@ -32,6 +32,6 @@ ENV BIGCHAINDB_CI_ABCI ${abci_status}
RUN mkdir -p /usr/src/app
COPY . /usr/src/app/
WORKDIR /usr/src/app
RUN pip install --no-cache-dir -e .[dev]
RUN pip install --no-cache-dir --process-dependency-links -e .[dev]
RUN bigchaindb -y configure "$backend"

View File

@ -21,7 +21,7 @@ from bigchaindb.commands.utils import (
configure_bigchaindb, start_logging_process, input_on_stderr)
from bigchaindb.backend.query import VALIDATOR_UPDATE_ID, PRE_COMMIT_ID
from bigchaindb.tendermint.lib import BigchainDB
from bigchaindb.tendermint.utils import public_key_from_base64
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
@ -100,8 +100,9 @@ def run_upsert_validator(args):
"""Store validators which should be synced with Tendermint"""
b = bigchaindb.Bigchain()
public_key = public_key_from_base64(args.public_key)
validator = {'pub_key': {'type': 'ed25519',
'data': args.public_key},
'data': public_key},
'power': args.power}
validator_update = {'validator': validator,
'update_id': VALIDATOR_UPDATE_ID}

View File

@ -2,11 +2,13 @@
with Tendermint."""
import logging
from abci import BaseApplication, Result
from abci.application import BaseApplication, Result
from abci.types_pb2 import ResponseEndBlock, ResponseInfo, Validator
from bigchaindb.tendermint import BigchainDB
from bigchaindb.tendermint.utils import decode_transaction, calculate_hash
from bigchaindb.tendermint.utils import (decode_transaction,
calculate_hash,
amino_encoded_public_key)
from bigchaindb.tendermint.lib import Block, PreCommitState
from bigchaindb.backend.query import PRE_COMMIT_ID
@ -86,7 +88,6 @@ class App(BaseApplication):
return Result.error(log='Invalid transaction')
else:
logger.debug('storing tx')
# self.bigchaindb.store_transaction(transaction)
self.block_txn_ids.append(transaction.id)
self.block_transactions.append(transaction)
return Result.ok()
@ -118,13 +119,14 @@ class App(BaseApplication):
pre_commit_state = PreCommitState(commit_id=PRE_COMMIT_ID,
height=self.new_height,
transactions=self.block_txn_ids)
logger.debug('Updating PreCommitState: %s', self.new_height)
self.bigchaindb.store_pre_commit_state(pre_commit_state._asdict())
# NOTE: interface for `ResponseEndBlock` has be changed in the latest
# version of py-abci i.e. the validator updates should be returned
# as follows:
# ResponseEndBlock(validator_updates=validator_updates)
return ResponseEndBlock(diffs=validator_updates)
return ResponseEndBlock(validator_updates=validator_updates)
def commit(self):
"""Store the new height and along with block hash."""
@ -141,13 +143,15 @@ class App(BaseApplication):
# this effects crash recovery. Refer BEP#8 for details
self.bigchaindb.store_block(block._asdict())
return Result.ok(data=data)
logger.debug('Commit-ing new block with hash: apphash=%s ,'
'height=%s, txn ids=%s', data, self.new_height,
self.block_txn_ids)
return data
def encode_validator(v):
pub_key = v['pub_key']['data']
# NOTE: tendermint expects public to be encoded in go-wire format
# so `01` has to be appended
pubKey = bytes.fromhex('01{}'.format(pub_key))
return Validator(pubKey=pubKey,
ed25519_public_key = v['pub_key']['data']
# NOTE: tendermint expects public to be encoded in go-amino format
pub_key = amino_encoded_public_key(ed25519_public_key)
return Validator(pub_key=pub_key,
power=v['power'])

View File

@ -42,8 +42,8 @@ def process_event(event_queue, event, stream_id):
event_stream_id = stream_id + '#event'
event = json.loads(event)
if (event['id'] == event_stream_id and event['result']['name'] == 'NewBlock'):
block = event['result']['data']['data']['block']
if (event['id'] == event_stream_id and event['result']['query'] == 'tm.event=\'NewBlock\''):
block = event['result']['data']['value']['block']
block_id = block['header']['height']
block_txs = block['data']['txs']
@ -60,29 +60,26 @@ def subscribe_events(ws, stream_id):
payload = {
'method': 'subscribe',
'jsonrpc': '2.0',
'params': ['NewBlock'],
'params': ['tm.event=\'NewBlock\''],
'id': stream_id
}
yield from ws.send_str(json.dumps(payload))
@asyncio.coroutine
def try_connect_and_recv(event_queue, max_tries):
def try_connect_and_recv(event_queue):
try:
yield from connect_and_recv(event_queue)
except Exception as e:
if max_tries:
logger.warning('WebSocket connection failed with exception %s', e)
time.sleep(3)
yield from try_connect_and_recv(event_queue, max_tries-1)
else:
logger.exception('WebSocket connection failed with exception %s', e)
logger.warning('WebSocket connection failed with exception %s', e)
time.sleep(3)
yield from try_connect_and_recv(event_queue)
def start(event_queue):
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(try_connect_and_recv(event_queue, 10))
loop.run_until_complete(try_connect_and_recv(event_queue))
except (KeyboardInterrupt, SystemExit):
logger.info('Shutting down Tendermint event stream connection')

View File

@ -65,22 +65,23 @@ class BigchainDB(Bigchain):
if response.get('error') is not None:
return (500, 'Internal error')
result = response['result']
if mode == MODE_LIST[2]:
return self._process_commit_mode_response(result)
else:
status_code = result['code']
return self._process_status_code(status_code,
'Error while processing transaction')
return (202, '')
# result = response['result']
# if mode == MODE_LIST[2]:
# return self._process_commit_mode_response(result)
# else:
# status_code = result['code']
# return self._process_status_code(status_code,
# 'Error while processing transaction')
def _process_commit_mode_response(self, result):
check_tx_status_code = result['check_tx']['code']
if check_tx_status_code == 0:
deliver_tx_status_code = result['deliver_tx']['code']
return self._process_status_code(deliver_tx_status_code,
'Error while commiting the transaction')
else:
return (500, 'Error while validating the transaction')
# def _process_commit_mode_response(self, result):
# check_tx_status_code = result['check_tx']['code']
# if check_tx_status_code == 0:
# deliver_tx_status_code = result['deliver_tx']['code']
# return self._process_status_code(deliver_tx_status_code,
# 'Error while commiting the transaction')
# else:
# return (500, 'Error while validating the transaction')
def _process_status_code(self, status_code, failure_msg):
return (202, '') if status_code == 0 else (500, failure_msg)

View File

@ -1,4 +1,5 @@
import base64
import hashlib
import json
from binascii import hexlify
@ -65,3 +66,23 @@ def merkleroot(hashes):
for i in range(0, len(hashes)-1, 2)
]
return merkleroot(parent_hashes)
def public_key64_to_address(base64_public_key):
    """Derive a Tendermint validator address from a base64-encoded public key.

    Note: this is only compatible with Tendermint 0.19.0.

    The address is computed as the uppercase hex RIPEMD-160 digest of the
    amino-encoded ed25519 public key.

    Args:
        base64_public_key (str): public key encoded in base64.

    Returns:
        str: uppercase hexadecimal address string.
    """
    ed25519_public_key = public_key_from_base64(base64_public_key)
    encoded_public_key = amino_encoded_public_key(ed25519_public_key)
    # NOTE(review): 'ripemd160' relies on the underlying OpenSSL build
    # providing that digest — confirm on the deployment platform.
    return hashlib.new('ripemd160', encoded_public_key).hexdigest().upper()
def public_key_from_base64(base64_public_key):
    """Decode a base64 public key into an uppercase hexadecimal string.

    Args:
        base64_public_key (str): public key encoded in base64.

    Returns:
        str: the raw key bytes rendered as uppercase hex.
    """
    raw_key = base64.b64decode(base64_public_key)
    hex_key = raw_key.hex()
    return hex_key.upper()
def public_key_to_base64(ed25519_public_key):
    """Encode a hex ed25519 public key as a base64 string.

    Args:
        ed25519_public_key (str): public key as a hexadecimal string.

    Returns:
        str: base64 representation of the key bytes.
    """
    key_bytes = bytes.fromhex(ed25519_public_key)
    encoded = base64.b64encode(key_bytes)
    return encoded.decode('utf-8')
def amino_encoded_public_key(ed25519_public_key):
    """Return the go-amino encoded form of a hex ed25519 public key.

    Args:
        ed25519_public_key (str): public key as a hexadecimal string.

    Returns:
        bytes: the key bytes with the amino header prepended.
    """
    # NOTE(review): '1624DE6220' looks like the amino type prefix for
    # ed25519 keys plus the 32-byte length marker — confirm against the
    # Tendermint/go-amino specification.
    prefixed_hex = '1624DE6220' + ed25519_public_key
    return bytes.fromhex(prefixed_hex)

View File

@ -46,14 +46,14 @@ services:
retries: 3
command: '.ci/entrypoint.sh'
tendermint:
image: tendermint/tendermint:0.12
volumes:
- ./tmdata/config.toml:/tendermint/config.toml
image: tendermint/tendermint:0.19.2
# volumes:
# - ./tmdata:/tendermint
entrypoint: ''
ports:
- "46656"
- "46657"
command: bash -c "tendermint init && tendermint node"
command: sh -c "tendermint init && tendermint node --proxy_app=tcp://bigchaindb:46658"
bdb:
image: busybox
depends_on:

View File

@ -27,7 +27,7 @@ to form a network.
Below, we refer to multiple files by their directory and filename,
such as ``tendermint/tendermint-ext-conn-svc.yaml``. Those files are located in the
such as ``bigchaindb/bigchaindb-ext-conn-svc.yaml``. Those files are located in the
`bigchaindb/bigchaindb repository on GitHub
<https://github.com/bigchaindb/bigchaindb/>`_ in the ``k8s/`` directory.
Make sure you're getting those files from the appropriate Git branch on
@ -93,12 +93,6 @@ Lets assume we are deploying a 4 node cluster, your naming conventions could loo
"mdb-mon-instance-2",
"mdb-mon-instance-3",
"mdb-mon-instance-4"
],
"Tendermint": [
"tendermint-instance-1",
"tendermint-instance-2",
"tendermint-instance-3",
"tendermint-instance-4"
]
}
@ -355,17 +349,13 @@ The above example is meant to be repeated for all the Kubernetes components of a
* ``mongodb/mongodb-node-X-ss.yaml``
* ``tendermint/tendermint-node-X-svc.yaml``
* ``tendermint/tendermint-node-X-sc.yaml``
* ``tendermint/tendermint-node-X-pvc.yaml``
* ``tendermint/tendermint-node-X-ss.yaml``
* ``bigchaindb/bigchaindb-node-X-svc.yaml``
* ``bigchaindb/bigchaindb-node-X-dep.yaml``
* ``bigchaindb/bigchaindb-node-X-sc.yaml``
* ``bigchaindb/bigchaindb-node-X-pvc.yaml``
* ``bigchaindb/bigchaindb-node-X-ss.yaml``
* ``nginx-openresty/nginx-openresty-node-X-svc.yaml``
@ -405,33 +395,31 @@ described :ref:`above <pre-reqs-bdb-network>`:
* :ref:`Start the OpenResty Kubernetes Service <start-the-openresty-kubernetes-service>`.
* :ref:`Start the Tendermint Kubernetes Service <start-the-tendermint-kubernetes-service>`.
Only for multi site deployments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We need to make sure that clusters are able
to talk to each other i.e. specifically the communication between the
Tendermint peers. Set up networking between the clusters using
BigchainDB peers. Set up networking between the clusters using
`Kubernetes Services <https://kubernetes.io/docs/concepts/services-networking/service/>`_.
Assuming we have a Tendermint instance ``tendermint-instance-1`` residing in Azure data center location ``westeurope`` and we
want to connect to ``tendermint-instance-2``, ``tendermint-instance-3``, and ``tendermint-instance-4`` located in Azure data centers
Assuming we have a BigchainDB instance ``bigchaindb-instance-1`` residing in Azure data center location ``westeurope`` and we
want to connect to ``bigchaindb-instance-2``, ``bigchaindb-instance-3``, and ``bigchaindb-instance-4`` located in Azure data centers
``eastus``, ``centralus`` and ``westus``, respectively. Unless you already have explicitly set up networking for
``tendermint-instance-1`` to communicate with ``tendermint-instance-2/3/4`` and
``bigchaindb-instance-1`` to communicate with ``bigchaindb-instance-2/3/4`` and
vice versa, we will have to add a Kubernetes Service in each cluster to accomplish this goal in order to set up a
Tendermint P2P network.
BigchainDB P2P network.
It is similar to ensuring that there is a ``CNAME`` record in the DNS
infrastructure to resolve ``tendermint-instance-X`` to the host where it is actually available.
infrastructure to resolve ``bigchaindb-instance-X`` to the host where it is actually available.
We can do this in Kubernetes using a Kubernetes Service of ``type``
``ExternalName``.
* This configuration is located in the file ``tendermint/tendermint-ext-conn-svc.yaml``.
* This configuration is located in the file ``bigchaindb/bigchaindb-ext-conn-svc.yaml``.
* Set the name of the ``metadata.name`` to the host name of the Tendermint instance you are trying to connect to.
For instance if you are configuring this service on cluster with ``tendermint-instance-1`` then the ``metadata.name`` will
be ``tendermint-instance-2`` and vice versa.
* Set the name of the ``metadata.name`` to the host name of the BigchainDB instance you are trying to connect to.
For instance if you are configuring this service on cluster with ``bigchaindb-instance-1`` then the ``metadata.name`` will
be ``bigchaindb-instance-2`` and vice versa.
* Set ``spec.ports.port[0]`` to the ``tm-p2p-port`` from the ConfigMap for the other cluster.
@ -447,7 +435,7 @@ We can do this in Kubernetes using a Kubernetes Service of ``type``
If you are not the system administrator of the cluster, you have to get in
touch with the system administrator/s of the other ``n-1`` clusters and
share with them your instance name (``tendermint-instance-name`` in the ConfigMap)
share with them your instance name (``bigchaindb-instance-name`` in the ConfigMap)
and the FQDN of the NGINX instance acting as Gateway(set in: :ref:`Assign DNS name to NGINX
Public IP <assign-dns-name-to-nginx-public-ip>`).
@ -461,18 +449,18 @@ naming convention described :ref:`above <pre-reqs-bdb-network>` and referring to
* :ref:`Start the NGINX Kubernetes Deployment <start-the-nginx-deployment>`.
Deploy Kubernetes StorageClasses for MongoDB and Tendermint
-----------------------------------------------------------
Deploy Kubernetes StorageClasses for MongoDB and BigchainDB
------------------------------------------------------------
Deploy the following StorageClasses for each node by following the naming convention
described :ref:`above <pre-reqs-bdb-network>`:
* :ref:`Create Kubernetes Storage Classes for MongoDB <create-kubernetes-storage-class-mdb>`.
* :ref:`Create Kubernetes Storage Classes for Tendermint <create-kubernetes-storage-class>`.
* :ref:`Create Kubernetes Storage Classes for BigchainDB <create-kubernetes-storage-class>`.
Deploy Kubernetes PersistentVolumeClaims for MongoDB and Tendermint
Deploy Kubernetes PersistentVolumeClaims for MongoDB and BigchainDB
--------------------------------------------------------------------
Deploy the following services for each node by following the naming convention
@ -480,7 +468,7 @@ described :ref:`above <pre-reqs-bdb-network>`:
* :ref:`Create Kubernetes Persistent Volume Claims for MongoDB <create-kubernetes-persistent-volume-claim-mdb>`.
* :ref:`Create Kubernetes Persistent Volume Claims for Tendermint <create-kubernetes-persistent-volume-claim>`
* :ref:`Create Kubernetes Persistent Volume Claims for BigchainDB <create-kubernetes-persistent-volume-claim>`
Deploy MongoDB Kubernetes StatefulSet
@ -501,13 +489,13 @@ in the network by referring to the following section:
* :ref:`Configure Users and Access Control for MongoDB <configure-users-and-access-control-mongodb>`.
Deploy Tendermint Kubernetes StatefulSet
----------------------------------------
Start Kubernetes StatefulSet for BigchainDB
-------------------------------------------
Deploy the Tendermint Stateful for each node by following the
Start the BigchainDB Kubernetes StatefulSet for each node by following the
naming convention described :ref:`above <pre-reqs-bdb-network>` and referring to the following instructions:
* :ref:`create-kubernetes-stateful-set`.
* :ref:`Start a Kubernetes Deployment for BigchainDB <start-kubernetes-stateful-set-bdb>`.
Start Kubernetes Deployment for MongoDB Monitoring Agent
@ -516,16 +504,7 @@ Start Kubernetes Deployment for MongoDB Monitoring Agent
Start the MongoDB monitoring agent Kubernetes deployment for each node by following the
naming convention described :ref:`above <pre-reqs-bdb-network>` and referring to the following instructions:
* :ref:`Start a Kubernetes StatefulSet for Tendermint <start-kubernetes-deployment-for-mdb-mon-agent>`.
Start Kubernetes Deployment for BigchainDB
------------------------------------------
Start the BigchainDB Kubernetes deployment for each node by following the
naming convention described :ref:`above <pre-reqs-bdb-network>` and referring to the following instructions:
* :ref:`Start a Kubernetes Deployment for BigchainDB <start-kubernetes-deployment-bdb>`.
* :ref:`Start a Kubernetes Deployment for MongoDB Monitoring Agent <start-kubernetes-deployment-for-mdb-mon-agent>`.
Start Kubernetes Deployment for OpenResty

View File

@ -56,15 +56,8 @@ MongoDB admin user credentials, username and password.
This user is created on the *admin* database with the authorization to create other users.
vars.TM_INSTANCE_NAME
~~~~~~~~~~~~~~~~~~~~~~
Name of tendermint instance that is part of your BigchainDB node.
This name should be unique across the cluster, for more information please refer to
:ref:`generate-the-blockchain-id-and-genesis-time`.
vars.TM_SEEDS, TM_VALIDATORS, TM_VALIDATORS_POWERS, TM_GENESIS_TIME and TM_CHAIN_ID
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
vars.BDB_PERSISTENT_PEERS, BDB_VALIDATORS, BDB_VALIDATORS_POWERS, BDB_GENESIS_TIME and BDB_CHAIN_ID
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These parameters are shared across the cluster. More information about the generation
of these parameters can be found at :ref:`generate-the-blockchain-id-and-genesis-time`.

View File

@ -137,6 +137,10 @@ Step 4: Start the NGINX Service
$ kubectl apply -f nginx-https/nginx-https-svc.yaml
OR
$ kubectl apply -f nginx-http/nginx-http-svc.yaml
.. _assign-dns-name-to-nginx-public-ip:
@ -217,30 +221,9 @@ Step 8(Optional): Start the OpenResty Kubernetes Service
$ kubectl apply -f nginx-openresty/nginx-openresty-svc.yaml
.. _start-the-tendermint-kubernetes-service:
Step 9: Start the Tendermint Kubernetes Service
-----------------------------------------------
* This configuration is located in the file ``tendermint/tendermint-svc.yaml``.
* Set the ``metadata.name`` and ``metadata.labels.name`` to the value
set in ``tm-instance-name`` in the ConfigMap above.
* Set the ``spec.selector.app`` to the value set in ``tm-instance-name`` in
the ConfigMap followed by ``-ss``. For example, if the value set in the
``tm-instance-name`` is ``tm-instance-0``, set the
``spec.selector.app`` to ``tm-instance-0-ss``.
* Start the Kubernetes Service:
.. code:: bash
$ kubectl apply -f tendermint/tendermint-svc.yaml
.. _start-the-nginx-deployment:
Step 10: Start the NGINX Kubernetes Deployment
Step 9: Start the NGINX Kubernetes Deployment
----------------------------------------------
* NGINX is used as a proxy to the BigchainDB, Tendermint and MongoDB instances in
@ -249,12 +232,8 @@ Step 10: Start the NGINX Kubernetes Deployment
on ``mongodb-frontend-port``, ``tm-p2p-port`` and ``tm-pub-key-access``
to MongoDB and Tendermint respectively.
Step 10.2: NGINX with HTTPS
^^^^^^^^^^^^^^^^^^^^^^^^^^^
* This configuration is located in the file
``nginx-https/nginx-https-dep.yaml``.
``nginx-https/nginx-https-dep.yaml`` or ``nginx-http/nginx-http-dep.yaml``.
* Start the Kubernetes Deployment:
@ -262,10 +241,14 @@ Step 10.2: NGINX with HTTPS
$ kubectl apply -f nginx-https/nginx-https-dep.yaml
OR
$ kubectl apply -f nginx-http/nginx-http-dep.yaml
.. _create-kubernetes-storage-class-mdb:
Step 11: Create Kubernetes Storage Classes for MongoDB
Step 10: Create Kubernetes Storage Classes for MongoDB
------------------------------------------------------
MongoDB needs somewhere to store its data persistently,
@ -338,7 +321,7 @@ You can check if it worked using ``kubectl get storageclasses``.
.. _create-kubernetes-persistent-volume-claim-mdb:
Step 12: Create Kubernetes Persistent Volume Claims for MongoDB
Step 11: Create Kubernetes Persistent Volume Claims for MongoDB
---------------------------------------------------------------
Next, you will create two PersistentVolumeClaim objects ``mongo-db-claim`` and
@ -393,7 +376,7 @@ but it should become "Bound" fairly quickly.
.. _start-kubernetes-stateful-set-mongodb:
Step 13: Start a Kubernetes StatefulSet for MongoDB
Step 12: Start a Kubernetes StatefulSet for MongoDB
---------------------------------------------------
* Create the MongoDB StatefulSet using:
@ -416,7 +399,7 @@ Step 13: Start a Kubernetes StatefulSet for MongoDB
.. _configure-users-and-access-control-mongodb:
Step 14: Configure Users and Access Control for MongoDB
Step 13: Configure Users and Access Control for MongoDB
-------------------------------------------------------
* In this step, you will create a user on MongoDB with authorization
@ -430,14 +413,14 @@ Step 14: Configure Users and Access Control for MongoDB
.. _create-kubernetes-storage-class:
Step 15: Create Kubernetes Storage Classes for Tendermint
Step 14: Create Kubernetes Storage Classes for BigchainDB
----------------------------------------------------------
Tendermint needs somewhere to store its data persistently, it uses
BigchainDB needs somewhere to store Tendermint data persistently, Tendermint uses
LevelDB as the persistent storage layer.
The Kubernetes template for configuration of Storage Class is located in the
file ``tendermint/tendermint-sc.yaml``.
file ``bigchaindb/bigchaindb-sc.yaml``.
Details about how to create a Azure Storage account and how Kubernetes Storage Class works
are already covered in this document: :ref:`create-kubernetes-storage-class-mdb`.
@ -446,20 +429,20 @@ Create the required storage classes using:
.. code:: bash
$ kubectl apply -f tendermint/tendermint-sc.yaml
$ kubectl apply -f bigchaindb/bigchaindb-sc.yaml
You can check if it worked using ``kubectl get storageclasses``.
.. _create-kubernetes-persistent-volume-claim:
Step 16: Create Kubernetes Persistent Volume Claims for Tendermint
Step 15: Create Kubernetes Persistent Volume Claims for BigchainDB
------------------------------------------------------------------
Next, you will create two PersistentVolumeClaim objects ``tendermint-db-claim`` and
``tendermint-config-db-claim``.
This configuration is located in the file ``tendermint/tendermint-pvc.yaml``.
This configuration is located in the file ``bigchaindb/bigchaindb-pvc.yaml``.
Details about Kubernetes Persistent Volumes, Persistent Volume Claims
and how they work with Azure are already covered in this
@ -469,7 +452,7 @@ Create the required Persistent Volume Claims using:
.. code:: bash
$ kubectl apply -f tendermint/tendermint-pvc.yaml
$ kubectl apply -f bigchaindb/bigchaindb-pvc.yaml
You can check its status using:
@ -478,56 +461,43 @@ You can check its status using:
kubectl get pvc -w
.. _create-kubernetes-stateful-set:
.. _start-kubernetes-stateful-set-bdb:
Step 17: Start a Kubernetes StatefulSet for Tendermint
Step 16: Start a Kubernetes StatefulSet for BigchainDB
------------------------------------------------------
* This configuration is located in the file ``tendermint/tendermint-ss.yaml``.
* This configuration is located in the file ``bigchaindb/bigchaindb-ss.yaml``.
* Set the ``spec.serviceName`` to the value set in ``tm-instance-name`` in
* Set the ``spec.serviceName`` to the value set in ``bdb-instance-name`` in
the ConfigMap.
For example, if the value set in the ``tm-instance-name``
is ``tm-instance-0``, set the field to ``tm-instance-0``.
For example, if the value set in the ``bdb-instance-name``
is ``bdb-instance-0``, set the field to ``bdb-instance-0``.
* Set ``metadata.name``, ``spec.template.metadata.name`` and
``spec.template.metadata.labels.app`` to the value set in
``tm-instance-name`` in the ConfigMap, followed by
``bdb-instance-name`` in the ConfigMap, followed by
``-ss``.
For example, if the value set in the
``tm-instance-name`` is ``tm-instance-0``, set the fields to the value
``tm-insance-0-ss``.
``bdb-instance-name`` is ``bdb-instance-0``, set the fields to the value
``bdb-instance-0-ss``.
* As we gain more experience running Tendermint in testing and production, we
will tweak the ``resources.limits.cpu`` and ``resources.limits.memory``.
* Create the Tendermint StatefulSet using:
* Create the BigchainDB StatefulSet using:
.. code:: bash
$ kubectl apply -f tendermint/tendermint-ss.yaml
$ kubectl apply -f bigchaindb/bigchaindb-ss.yaml
.. code:: bash
$ kubectl get pods -w
.. _start-kubernetes-deployment-bdb:
Step 18: Start a Kubernetes Deployment for BigchainDB
-----------------------------------------------------
* Create the BigchainDB Deployment using:
.. code:: bash
$ kubectl apply -f bigchaindb/bigchaindb-dep.yaml
* You can check its status using the command ``kubectl get deployments -w``
.. _start-kubernetes-deployment-for-mdb-mon-agent:
Step 19(Optional): Start a Kubernetes Deployment for MongoDB Monitoring Agent
Step 17(Optional): Start a Kubernetes Deployment for MongoDB Monitoring Agent
------------------------------------------------------------------------------
* This configuration is located in the file
@ -556,7 +526,7 @@ Step 19(Optional): Start a Kubernetes Deployment for MongoDB Monitoring Agent
.. _start-kubernetes-deployment-openresty:
Step 20(Optional): Start a Kubernetes Deployment for OpenResty
Step 18(Optional): Start a Kubernetes Deployment for OpenResty
--------------------------------------------------------------
* This configuration is located in the file
@ -595,7 +565,7 @@ Step 20(Optional): Start a Kubernetes Deployment for OpenResty
* You can check its status using the command ``kubectl get deployments -w``
Step 21(Optional): Configure the MongoDB Cloud Manager
Step 19(Optional): Configure the MongoDB Cloud Manager
------------------------------------------------------
Refer to the
@ -604,7 +574,7 @@ for details on how to configure the MongoDB Cloud Manager to enable
monitoring and backup.
Step 22(Optional): Only for multi site deployments(Geographically dispersed)
Step 20(Optional): Only for multi site deployments(Geographically dispersed)
----------------------------------------------------------------------------
We need to make sure that clusters are able
@ -612,22 +582,22 @@ to talk to each other i.e. specifically the communication between the
Tendermint peers. Set up networking between the clusters using
`Kubernetes Services <https://kubernetes.io/docs/concepts/services-networking/service/>`_.
Assuming we have a Tendermint instance ``tendermint-instance-1`` residing in Azure data center location ``westeurope`` and we
want to connect to ``tendermint-instance-2``, ``tendermint-instance-3``, and ``tendermint-instance-4`` located in Azure data centers
Assuming we have a BigchainDB instance ``bdb-instance-1`` residing in Azure data center location ``westeurope`` and we
want to connect to ``bdb-instance-2``, ``bdb-instance-3``, and ``bdb-instance-4`` located in Azure data centers
``eastus``, ``centralus`` and ``westus``, respectively. Unless you already have explicitly set up networking for
``tendermint-instance-1`` to communicate with ``tendermint-instance-2/3/4`` and
``bdb-instance-1`` to communicate with ``bdb-instance-2/3/4`` and
vice versa, we will have to add a Kubernetes Service in each cluster to accomplish this goal in order to set up a
Tendermint P2P network.
It is similar to ensuring that there is a ``CNAME`` record in the DNS
infrastructure to resolve ``tendermint-instance-X`` to the host where it is actually available.
infrastructure to resolve ``bdb-instance-X`` to the host where it is actually available.
We can do this in Kubernetes using a Kubernetes Service of ``type``
``ExternalName``.
* This configuration is located in the file ``tendermint/tendermint-ext-conn-svc.yaml``.
* This configuration is located in the file ``bigchaindb/bigchaindb-ext-conn-svc.yaml``.
* Set the name of the ``metadata.name`` to the host name of the Tendermint instance you are trying to connect to.
For instance if you are configuring this service on cluster with ``tendermint-instance-1`` then the ``metadata.name`` will
be ``tendermint-instance-2`` and vice versa.
* Set the name of the ``metadata.name`` to the host name of the BigchainDB instance you are trying to connect to.
For instance if you are configuring this service on cluster with ``bdb-instance-1`` then the ``metadata.name`` will
be ``bdb-instance-2`` and vice versa.
* Set ``spec.ports.port[0]`` to the ``tm-p2p-port`` from the ConfigMap for the other cluster.
@ -650,10 +620,10 @@ We can do this in Kubernetes using a Kubernetes Service of ``type``
.. _verify-and-test-bdb:
Step 23: Verify the BigchainDB Node Setup
Step 21: Verify the BigchainDB Node Setup
-----------------------------------------
Step 23.1: Testing Internally
Step 21.1: Testing Internally
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To test the setup of your BigchainDB node, you could use a Docker container
@ -702,20 +672,12 @@ To test the BigchainDB instance:
$ curl -X GET http://bdb-instance-0:9984
$ curl -X GET http://bdb-instance-0:9986/pub_key.json
$ curl -X GET http://bdb-instance-0:46657/abci_info
$ wsc -er ws://bdb-instance-0:9985/api/v1/streams/valid_transactions
To test the Tendermint instance:
.. code:: bash
$ nslookup tm-instance-0
$ dig +noall +answer _bdb-api-port._tcp.tm-instance-0.default.svc.cluster.local SRV
$ dig +noall +answer _bdb-ws-port._tcp.tm-instance-0.default.svc.cluster.local SRV
$ curl -X GET http://tm-instance-0:9986/pub_key.json
To test the OpenResty instance:
@ -769,7 +731,7 @@ The above curl command should result in the response
``It looks like you are trying to access MongoDB over HTTP on the native driver port.``
Step 23.2: Testing Externally
Step 21.2: Testing Externally
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Check the MongoDB monitoring agent on the MongoDB Cloud Manager

View File

@ -10,10 +10,10 @@ You can modify them to suit your needs.
.. _generate-the-blockchain-id-and-genesis-time:
Generate All Shared Tendermint Setup Parameters
Generate All Shared BigchainDB Setup Parameters
-----------------------------------------------
There are some shared Tendermint setup paramters that every node operator
There are some shared BigchainDB setup parameters that every node operator
in the consortium shares
because they are properties of the Tendermint cluster.
They look like this:
@ -21,30 +21,30 @@ They look like this:
.. code::
# Tendermint data
TM_SEEDS='tm-instance-1,tm-instance-2,tm-instance-3,tm-instance-4'
TM_VALIDATORS='tm-instance-1,tm-instance-2,tm-instance-3,tm-instance-4'
TM_VALIDATOR_POWERS='10,10,10,10'
TM_GENESIS_TIME='0001-01-01T00:00:00Z'
TM_CHAIN_ID='test-chain-rwcPML'
BDB_PERSISTENT_PEERS='bdb-instance-1,bdb-instance-2,bdb-instance-3,bdb-instance-4'
BDB_VALIDATORS='bdb-instance-1,bdb-instance-2,bdb-instance-3,bdb-instance-4'
BDB_VALIDATOR_POWERS='10,10,10,10'
BDB_GENESIS_TIME='0001-01-01T00:00:00Z'
BDB_CHAIN_ID='test-chain-rwcPML'
Those parameters only have to be generated once, by one member of the consortium.
That person will then share the results (Tendermint setup parameters)
with all the node operators.
The above example parameters are for a cluster of 4 initial (seed) nodes.
Note how ``TM_SEEDS``, ``TM_VALIDATORS`` and ``TM_VALIDATOR_POWERS`` are lists
Note how ``BDB_PERSISTENT_PEERS``, ``BDB_VALIDATORS`` and ``BDB_VALIDATOR_POWERS`` are lists
with 4 items each.
**If your consortium has a different number of initial nodes,
then those lists should have that number of items.**
Use ``10`` for all the power values.
To generate a ``TM_GENESIS_TIME`` and a ``TM_CHAIN_ID``,
To generate a ``BDB_GENESIS_TIME`` and a ``BDB_CHAIN_ID``,
you can do this:
.. code::
$ mkdir $(pwd)/tmdata
$ docker run --rm -v $(pwd)/tmdata:/tendermint tendermint/tendermint:0.13 init
$ docker run --rm -v $(pwd)/tmdata:/tendermint/config tendermint/tendermint:0.19.2 init
$ cat $(pwd)/tmdata/genesis.json
You should see something that looks like:
@ -63,10 +63,10 @@ You should see something that looks like:
"app_hash": ""
}
The value with ``"genesis_time"`` is ``TM_GENESIS_TIME`` and
the value with ``"chain_id"`` is ``TM_CHAIN_ID``.
The value with ``"genesis_time"`` is ``BDB_GENESIS_TIME`` and
the value with ``"chain_id"`` is ``BDB_CHAIN_ID``.
Now you have all the Tendermint setup parameters and can share them
Now you have all the BigchainDB setup parameters and can share them
with all of the node operators. (They will put them in their ``vars`` file.
We'll say more about that file below.)
@ -143,4 +143,4 @@ and then you will deploy all the stuff that you need to have a BigchainDB node.
<br>
<br>
<br>
<br>
<br>

View File

@ -1,165 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: bdb-instance-0-dep
spec:
replicas: 1
template:
metadata:
labels:
app: bdb-instance-0-dep
spec:
terminationGracePeriodSeconds: 10
containers:
- name: bigchaindb
image: bigchaindb/bigchaindb:2.0.0-alpha2
imagePullPolicy: Always
args:
- start
env:
- name: BIGCHAINDB_DATABASE_HOST
valueFrom:
configMapKeyRef:
name: vars
key: mdb-instance-name
- name: BIGCHAINDB_DATABASE_PORT
valueFrom:
configMapKeyRef:
name: vars
key: mongodb-backend-port
- name: BIGCHAINDB_DATABASE_BACKEND
value: "localmongodb"
- name: BIGCHAINDB_DATABASE_NAME
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-database-name
- name: BIGCHAINDB_SERVER_BIND
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-server-bind
- name: BIGCHAINDB_WSSERVER_HOST
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-ws-interface
- name: BIGCHAINDB_WSSERVER_ADVERTISED_HOST
valueFrom:
configMapKeyRef:
name: vars
key: node-fqdn
- name: BIGCHAINDB_WSSERVER_PORT
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-ws-port
- name: BIGCHAINDB_WSSERVER_ADVERTISED_PORT
valueFrom:
configMapKeyRef:
name: vars
key: node-frontend-port
- name: BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-wsserver-advertised-scheme
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
valueFrom:
configMapKeyRef:
name: bdb-config
key: bigchaindb-backlog-reassign-delay
- name: BIGCHAINDB_DATABASE_MAXTRIES
valueFrom:
configMapKeyRef:
name: bdb-config
key: bigchaindb-database-maxtries
- name: BIGCHAINDB_DATABASE_CONNECTION_TIMEOUT
valueFrom:
configMapKeyRef:
name: bdb-config
key: bigchaindb-database-connection-timeout
- name: BIGCHAINDB_LOG_LEVEL_CONSOLE
valueFrom:
configMapKeyRef:
name: bdb-config
key: bigchaindb-log-level
- name: BIGCHAINDB_SERVER_WORKERS
value: "1"
- name: BIGCHAINDB_SERVER_THREADS
value: "1"
- name: BIGCHAINDB_DATABASE_SSL
value: "true"
- name: BIGCHAINDB_DATABASE_CA_CERT
value: /etc/bigchaindb/ca/ca.pem
- name: BIGCHAINDB_DATABASE_CRLFILE
value: /etc/bigchaindb/ca/crl.pem
- name: BIGCHAINDB_DATABASE_CERTFILE
value: /etc/bigchaindb/ssl/bdb-instance.pem
- name: BIGCHAINDB_DATABASE_KEYFILE
value: /etc/bigchaindb/ssl/bdb-instance.key
- name: BIGCHAINDB_DATABASE_LOGIN
valueFrom:
configMapKeyRef:
name: bdb-config
key: bdb-user
- name: BIGCHAINDB_TENDERMINT_HOST
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-instance-name
- name: TENDERMINT_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-rpc-port
command:
- bash
- "-c"
- |
bigchaindb -l DEBUG start
ports:
- containerPort: 9984
protocol: TCP
name: bdb-port
- containerPort: 9985
protocol: TCP
name: bdb-ws-port
- containerPort: 46658
protocol: TCP
name: tm-abci-port
volumeMounts:
- name: bdb-certs
mountPath: /etc/bigchaindb/ssl/
readOnly: true
- name: ca-auth
mountPath: /etc/bigchaindb/ca/
readOnly: true
resources:
limits:
cpu: 200m
memory: 768Mi
livenessProbe:
httpGet:
path: /
port: bdb-port
initialDelaySeconds: 15
periodSeconds: 15
failureThreshold: 3
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /
port: bdb-port
initialDelaySeconds: 15
timeoutSeconds: 10
restartPolicy: Always
volumes:
- name: bdb-certs
secret:
secretName: bdb-certs
defaultMode: 0400
- name: ca-auth
secret:
secretName: ca-auth
defaultMode: 0400

View File

@ -1,9 +1,9 @@
apiVersion: v1
kind: Service
metadata:
# Name of tendermint instance you are trying to connect to
# name: "tm-instance-0"
name: "<remote-tendermint-host>"
# Name of BigchainDB instance you are trying to connect to
# name: "bdb-instance-0"
name: "<remote-bdb-host>"
namespace: default
spec:
ports:
@ -13,8 +13,10 @@ spec:
name: p2p
- port: 46657
name: pubkey
- port: 9986
name: nginx
type: ExternalName
# FQDN of remote cluster/NGINX instance
#externalName: "nginx-instance-for-tm-instance-0.westeurope.cloudapp.azure.com"
#externalName: "nginx-instance-for-bdb-instance-0.westeurope.cloudapp.azure.com"
externalName: "<dns-name-remote-nginx>"

View File

@ -29,4 +29,3 @@ spec:
resources:
requests:
storage: 1Gi

View File

@ -8,7 +8,7 @@ metadata:
provisioner: kubernetes.io/azure-disk
parameters:
skuName: Premium_LRS #[Premium_LRS, Standard_LRS]
location: westeurope
location: <Storage account location>
# If you have created a different storage account e.g. for Premium Storage
storageAccount: <Storage account name>
# Use Managed Disk(s) with VMs using Managed Disks(Only used for Tectonic deployment)
@ -24,7 +24,7 @@ metadata:
provisioner: kubernetes.io/azure-disk
parameters:
skuName: Premium_LRS #[Premium_LRS, Standard_LRS]
location: westeurope
location: <Storage account location>
# If you have created a different storage account e.g. for Premium Storage
storageAccount: <Storage account name>
# Use Managed Disk(s) with VMs using Managed Disks(Only used for Tectonic deployment)

View File

@ -0,0 +1,294 @@
#################################################################################
# This YAML file desribes a StatefulSet with a service for running and exposing #
# a Tendermint instance. It depends on the tendermint-config-db-claim #
# and tendermint-db-claim k8s pvc. #
#################################################################################
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: bdb-instance-0-ss
namespace: default
spec:
serviceName: bdb-instance-0
replicas: 1
template:
metadata:
name: bdb-instance-0-ss
labels:
app: bdb-instance-0-ss
spec:
restartPolicy: Always
volumes:
- name: bdb-data
persistentVolumeClaim:
claimName: tendermint-db-claim
- name: bdb-config-data
persistentVolumeClaim:
claimName: tendermint-config-db-claim
- name: bdb-certs
secret:
secretName: bdb-certs
defaultMode: 0400
- name: ca-auth
secret:
secretName: ca-auth
defaultMode: 0400
containers:
# Treating bigchaindb+ nginx + tendermint as a POD because they should not
# exist without each other
# Nginx container for hosting public key of this ndoe
- name: nginx
imagePullPolicy: Always
image: bigchaindb/nginx_pub_key_access:2.0.0-alpha3
env:
- name: TM_PUB_KEY_ACCESS_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: bdb-pub-key-access
ports:
- containerPort: 9986
name: bdb-pk-access
volumeMounts:
- name: bdb-config-data
mountPath: /usr/share/nginx
readOnly: true
#Tendermint container
- name: tendermint
imagePullPolicy: Always
image: bigchaindb/tendermint:2.0.0-alpha3
env:
- name: TM_PERSISTENT_PEERS
valueFrom:
configMapKeyRef:
name: tendermint-config
key: bdb-persistent-peers
- name: TM_VALIDATOR_POWER
valueFrom:
configMapKeyRef:
name: tendermint-config
key: bdb-validator-power
- name: TM_VALIDATORS
valueFrom:
configMapKeyRef:
name: tendermint-config
key: bdb-validators
- name: TM_PUB_KEY_ACCESS_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: bdb-pub-key-access
- name: TM_GENESIS_TIME
valueFrom:
configMapKeyRef:
name: tendermint-config
key: bdb-genesis-time
- name: TM_CHAIN_ID
valueFrom:
configMapKeyRef:
name: tendermint-config
key: bdb-chain-id
- name: TM_P2P_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: bdb-p2p-port
- name: TM_INSTANCE_NAME
valueFrom:
configMapKeyRef:
name: vars
key: bdb-instance-name
- name: TMHOME
value: /tendermint
- name: TM_PROXY_APP
valueFrom:
configMapKeyRef:
name: vars
key: bdb-instance-name
- name: TM_ABCI_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: bdb-abci-port
- name: TM_RPC_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: bdb-rpc-port
resources:
limits:
cpu: 1
memory: 5G
volumeMounts:
- name: bdb-data
mountPath: /tendermint
- name: bdb-config-data
mountPath: /tendermint_node_data
ports:
- containerPort: 46656
name: p2p
- containerPort: 46657
name: rpc
livenessProbe:
exec:
command:
- /bin/bash
- "-c"
- |
curl -s --fail --max-time 10 "http://${TM_INSTANCE_NAME}:${TM_RPC_PORT}/abci_info" > /dev/null
ERR=$?
if [ "$ERR" == 28 ]; then
exit 1
elif [[ $(curl --max-time 10 "http://${TM_INSTANCE_NAME}:${TM_RPC_PORT}/abci_info" | jq -r ".error.code") == -32603 ]]; then
exit 1
elif [ "$ERR" != 0 ]; then
exit 1
else
exit 0
fi
initialDelaySeconds: 60
periodSeconds: 15
failureThreshold: 3
timeoutSeconds: 15
# BigchainDB container
- name: bigchaindb
image: bigchaindb/bigchaindb:2.0.0-alpha3
imagePullPolicy: Always
args:
- start
env:
- name: BIGCHAINDB_DATABASE_HOST
valueFrom:
configMapKeyRef:
name: vars
key: mdb-instance-name
- name: BIGCHAINDB_DATABASE_PORT
valueFrom:
configMapKeyRef:
name: vars
key: mongodb-backend-port
- name: BIGCHAINDB_DATABASE_BACKEND
value: "localmongodb"
- name: BIGCHAINDB_DATABASE_NAME
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-database-name
- name: BIGCHAINDB_SERVER_BIND
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-server-bind
- name: BIGCHAINDB_WSSERVER_HOST
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-ws-interface
- name: BIGCHAINDB_WSSERVER_ADVERTISED_HOST
valueFrom:
configMapKeyRef:
name: vars
key: node-fqdn
- name: BIGCHAINDB_WSSERVER_PORT
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-ws-port
- name: BIGCHAINDB_WSSERVER_ADVERTISED_PORT
valueFrom:
configMapKeyRef:
name: vars
key: node-frontend-port
- name: BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME
valueFrom:
configMapKeyRef:
name: vars
key: bigchaindb-wsserver-advertised-scheme
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
valueFrom:
configMapKeyRef:
name: bdb-config
key: bigchaindb-backlog-reassign-delay
- name: BIGCHAINDB_DATABASE_MAXTRIES
valueFrom:
configMapKeyRef:
name: bdb-config
key: bigchaindb-database-maxtries
- name: BIGCHAINDB_DATABASE_CONNECTION_TIMEOUT
valueFrom:
configMapKeyRef:
name: bdb-config
key: bigchaindb-database-connection-timeout
- name: BIGCHAINDB_LOG_LEVEL_CONSOLE
valueFrom:
configMapKeyRef:
name: bdb-config
key: bigchaindb-log-level
- name: BIGCHAINDB_DATABASE_SSL
value: "true"
- name: BIGCHAINDB_DATABASE_CA_CERT
value: /etc/bigchaindb/ca/ca.pem
- name: BIGCHAINDB_DATABASE_CRLFILE
value: /etc/bigchaindb/ca/crl.pem
- name: BIGCHAINDB_DATABASE_CERTFILE
value: /etc/bigchaindb/ssl/bdb-instance.pem
- name: BIGCHAINDB_DATABASE_KEYFILE
value: /etc/bigchaindb/ssl/bdb-instance.key
- name: BIGCHAINDB_DATABASE_LOGIN
valueFrom:
configMapKeyRef:
name: bdb-config
key: bdb-user
- name: BIGCHAINDB_TENDERMINT_HOST
valueFrom:
configMapKeyRef:
name: vars
key: bdb-instance-name
- name: BIGCHAINDB_TENDERMINT_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: bdb-rpc-port
command:
- bash
- "-c"
- |
curl -s --fail "http://${BIGCHAINDB_TENDERMINT_HOST}:9986/pub_key.json" > /dev/null
ERR=$?
while [ "$ERR" != 0 ]; do
sleep 30
curl -s --fail "http://${BIGCHAINDB_TENDERMINT_HOST}:9986/pub_key.json" > /dev/null
ERR=$?
echo "Waiting for Tendermint instance."
done
bigchaindb -l DEBUG start
ports:
- containerPort: 9984
protocol: TCP
name: bdb-port
- containerPort: 9985
protocol: TCP
name: bdb-ws-port
- containerPort: 46658
protocol: TCP
name: bdb-abci-port
volumeMounts:
- name: bdb-certs
mountPath: /etc/bigchaindb/ssl/
readOnly: true
- name: ca-auth
mountPath: /etc/bigchaindb/ca/
readOnly: true
resources:
limits:
cpu: 200m
memory: 2G
livenessProbe:
httpGet:
path: /
port: bdb-port
initialDelaySeconds: 60
periodSeconds: 15
failureThreshold: 3
timeoutSeconds: 15

View File

@ -7,7 +7,7 @@ metadata:
name: bdb-instance-0
spec:
selector:
app: bdb-instance-0-dep
app: bdb-instance-0-ss
ports:
- port: 9984
targetPort: 9984
@ -21,5 +21,17 @@ spec:
targetPort: 46658
name: tm-abci-port
protocol: TCP
- port: 46656
targetPort: 46656
name: tm-p2p-port
protocol: TCP
- port: 46657
targetPort: 46657
name: tm-rpc-port
protocol: TCP
- port: 9986
targetPort: 9986
name: pub-key-access
protocol: TCP
type: ClusterIP
clusterIP: None

View File

@ -0,0 +1,5 @@
#!/bin/bash
docker build -t bigchaindb/nginx_pub_key_access:2.0.0-alpha3 .
docker push bigchaindb/nginx_pub_key_access:2.0.0-alpha3

View File

@ -1,6 +1,8 @@
FROM tendermint/tendermint:0.12
FROM tendermint/tendermint:0.19.2
LABEL maintainer "dev@bigchaindb.com"
WORKDIR /
USER root
RUN apk --update add bash
COPY genesis.json.template /etc/tendermint/genesis.json
COPY tendermint_entrypoint.bash /
VOLUME /tendermint /tendermint_node_data

View File

@ -13,7 +13,7 @@ reflect any changes made to the container.
docker run \
--name=tendermint \
--env TM_PUB_KEY_ACCESS_PORT=<port to access public keys hosted by nginx> \
--env TM_SEEDS=<commad separated list of all nodes IP addresses/Hostnames> \
--env TM_PERSISTENT_PEERS=<commad separated list of all peers IP addresses/Hostnames> \
--env TM_VALIDATOR_POWER=<voting power of node> \
--env TM_VALIDATORS=<list of all validators> \
--env TM_GENESIS_TIME=<genesis time> \

View File

@ -0,0 +1,5 @@
#!/bin/bash
docker build -t bigchaindb/tendermint:2.0.0-alpha3 .
docker push bigchaindb/tendermint:2.0.0-alpha3

View File

@ -2,7 +2,7 @@
set -euo pipefail
# Cluster vars
tm_seeds=`printenv TM_SEEDS`
tm_persistent_peers=`printenv TM_PERSISTENT_PEERS`
tm_validators=`printenv TM_VALIDATORS`
tm_validator_power=`printenv TM_VALIDATOR_POWER`
tm_pub_key_access_port=`printenv TM_PUB_KEY_ACCESS_PORT`
@ -23,7 +23,7 @@ CANNOT_INITIATLIZE_INSTANCE='Cannot start instance, if initial validator(s) are
# sanity check
if [[ -z "${tm_seeds:?TM_SEEDS not specified. Exiting!}" || \
if [[ -z "${tm_persistent_peers:?TM_PERSISTENT_PEERS not specified. Exiting!}" || \
-z "${tm_validators:?TM_VALIDATORS not specified. Exiting!}" || \
-z "${tm_validator_power:?TM_VALIDATOR_POWER not specified. Exiting!}" || \
-z "${tm_pub_key_access_port:?TM_PUB_KEY_ACCESS_PORT not specified. Exiting!}" || \
@ -36,7 +36,7 @@ if [[ -z "${tm_seeds:?TM_SEEDS not specified. Exiting!}" || \
echo "Missing required enviroment variables."
exit 1
else
echo tm_seeds="$TM_SEEDS"
echo tm_persistent_peers="$TM_PERSISTENT_PEERS"
echo tm_validators="$TM_VALIDATORS"
echo tm_validator_power="$TM_VALIDATOR_POWER"
echo tm_pub_key_access_port="$TM_PUB_KEY_ACCESS_PORT"
@ -49,20 +49,25 @@ else
fi
# copy template
cp /etc/tendermint/genesis.json /tendermint/genesis.json
mkdir -p /tendermint/config
cp /etc/tendermint/genesis.json /tendermint/config/genesis.json
TM_GENESIS_FILE=/tendermint/genesis.json
TM_GENESIS_FILE=/tendermint/config/genesis.json
TM_PUB_KEY_DIR=/tendermint_node_data
# configure the nginx.conf file with env variables
sed -i "s|TM_GENESIS_TIME|\"${tm_genesis_time}\"|g" ${TM_GENESIS_FILE}
sed -i "s|TM_CHAIN_ID|\"${tm_chain_id}\"|g" ${TM_GENESIS_FILE}
if [ ! -f /tendermint/priv_validator.json ]; then
tendermint gen_validator > /tendermint/priv_validator.json
if [ ! -f /tendermint/config/priv_validator.json ]; then
tendermint gen_validator > /tendermint/config/priv_validator.json
# pub_key.json will be served by the nginx container
cat /tendermint/priv_validator.json
cat /tendermint/priv_validator.json | jq ".pub_key" > "$TM_PUB_KEY_DIR"/pub_key.json
cat /tendermint/config/priv_validator.json
cat /tendermint/config/priv_validator.json | jq ".pub_key" > "$TM_PUB_KEY_DIR"/pub_key.json
fi
if [ ! -f /tendermint/config/node_key.json ]; then
tendermint gen_node_key > "$TM_PUB_KEY_DIR"/address
fi
# fill genesis file with validators
@ -90,23 +95,39 @@ for i in "${!VALS_ARR[@]}"; do
sleep 30
curl -s --fail "http://${VALS_ARR[$i]}:$tm_pub_key_access_port/pub_key.json" > /dev/null
ERR=$?
echo "Cannot connect to Tendermint instance: ${VALS_ARR[$i]}"
echo "Cannot get public key for Tendermint instance: ${VALS_ARR[$i]}"
done
set -e
# add validator to genesis file along with its pub_key
curl -s "http://${VALS_ARR[$i]}:$tm_pub_key_access_port/pub_key.json" | jq ". as \$k | {pub_key: \$k, power: ${VAL_POWERS_ARR[$i]}, name: \"${VALS_ARR[$i]}\"}" > pub_validator.json
cat /tendermint/genesis.json | jq ".validators |= .+ [$(cat pub_validator.json)]" > tmpgenesis && mv tmpgenesis /tendermint/genesis.json
cat /tendermint/config/genesis.json | jq ".validators |= .+ [$(cat pub_validator.json)]" > tmpgenesis && mv tmpgenesis /tendermint/config/genesis.json
rm pub_validator.json
done
# construct seeds
IFS=',' read -ra SEEDS_ARR <<< "$tm_seeds"
seeds=()
for s in "${SEEDS_ARR[@]}"; do
seeds+=("$s:$tm_p2p_port")
done
seeds=$(IFS=','; echo "${seeds[*]}")
# construct persistent peers
IFS=',' read -ra PEERS_ARR <<< "$tm_persistent_peers"
peers=()
for s in "${PEERS_ARR[@]}"; do
echo "http://$s:$tm_pub_key_access_port/address"
curl -s --fail "http://$s:$tm_pub_key_access_port/address" > /dev/null
ERR=$?
while [ "$ERR" != 0 ]; do
RETRIES=$((RETRIES+1))
if [ $RETRIES -eq 10 ]; then
echo "${CANNOT_INITIATLIZE_INSTANCE}"
exit 1
fi
# 300(30 * 10(retries)) second timeout before container dies if it cannot find initial peers
sleep 30
curl -s --fail "http://$s:$tm_pub_key_access_port/address" > /dev/null
ERR=$?
echo "Cannot get address for Tendermint instance: ${s}"
done
peer_addr=$(curl -s "http://$s:$tm_pub_key_access_port/address")
peers+=("$peer_addr@$s:$tm_p2p_port")
done
peers=$(IFS=','; echo "${peers[*]}")
# start nginx
echo "INFO: starting tendermint..."
exec tendermint node --p2p.seeds="$seeds" --moniker="$tm_instance_name" --proxy_app="tcp://$tm_proxy_app:$tm_abci_port" --consensus.create_empty_blocks=false
exec tendermint node --p2p.persistent_peers="$peers" --moniker="$tm_instance_name" --proxy_app="tcp://$tm_proxy_app:$tm_abci_port" --consensus.create_empty_blocks=false --p2p.pex=false

View File

@ -130,46 +130,39 @@ metadata:
name: tendermint-config
namespace: default
data:
# tm-seeds is the list of all the peers in the network.
tm-seeds: "<',' separated list of all tendermint nodes in the network>"
# bdb-persistent-peers is the list of all the peers in the network.
bdb-persistent-peers: "<',' separated list of all tendermint peers in the network>"
# tm-validators is the list of all validators in the network.
tm-validators: "<',' separated list of all validators in the network>"
# bdb-validators is the list of all validators in the network.
bdb-validators: "<',' separated list of all validators in the network>"
# tm-validator-power is the validators voting power, make sure the order and
# bdb-validator-power is the validators voting power, make sure the order and
# the number of nodes in tm-validator-power and tm-validators is the same.
tm-validator-power: "<',' separated list of validator power of each node in the network>"
bdb-validator-power: "<',' separated list of validator power of each node in the network>"
# tm-genesis-time is the official time of blockchain start.
# bdb-genesis-time is the official time of blockchain start.
# example: 0001-01-01T00:00:00Z
tm-genesis-time: "<timestamp of blockchain start>"
bdb-genesis-time: "<timestamp of blockchain start>"
# tm-chain-id is the ID of the blockchain. Must be unique for every blockchain.
# bdb-chain-id is the ID of the blockchain. Must be unique for every blockchain.
# example: test-chain-KPI1Ud
tm-chain-id: "<ID of the blockchain>"
bdb-chain-id: "<ID of the blockchain>"
# tendermint-instance-name is the name of the Tendermint instance
# in the cluster
tm-instance-name: "<name of tendermint instance>"
# ngx-tm-instance-name is the FQDN of the tendermint instance in this cluster
ngx-tm-instance-name: "<name of tendermint instance>.default.svc.cluster.local"
# tm-abci-port is used by Tendermint Core for ABCI traffic. BigchainDB nodes
# bdb-abci-port is used by Tendermint Core for ABCI traffic. BigchainDB nodes
# use that internally.
tm-abci-port: "46658"
bdb-abci-port: "46658"
# tm-p2p-port is used by Tendermint Core to communicate with
# bdb-p2p-port is used by Tendermint Core to communicate with
# other peers in the network. This port is accessible publicly.
tm-p2p-port: "46656"
bdb-p2p-port: "46656"
# tm-rpc-port is used by Tendermint Core to rpc. BigchainDB nodes
# bdb-rpc-port is used by Tendermint Core to rpc. BigchainDB nodes
# use this port internally.
tm-rpc-port: "46657"
bbd-rpc-port: "46657"
# tm-pub-key-access is the port number used to host/publish the
# bdb-pub-key-access is the port number used to host/publish the
# public key of the tendemrint node in this cluster.
tm-pub-key-access: "9986"
bdb-pub-key-access: "9986"
---
apiVersion: v1

View File

@ -1,4 +1,4 @@
#!/bin/bash
docker build -t bigchaindb/localmongodb:2.0.0-alpha .
docker push bigchaindb/localmongodb:2.0.0-alpha
docker build -t bigchaindb/localmongodb:2.0.0-alpha3 .
docker push bigchaindb/localmongodb:2.0.0-alpha3

View File

@ -8,7 +8,7 @@ metadata:
provisioner: kubernetes.io/azure-disk
parameters:
skuName: Premium_LRS #[Premium_LRS, Standard_LRS]
location: westeurope
location: <Storage account location>
# If you have created a different storage account e.g. for Premium Storage
storageAccount: <Storage account name>
# Use Managed Disk(s) with VMs using Managed Disks(Only used for Tectonic deployment)
@ -24,7 +24,7 @@ metadata:
provisioner: kubernetes.io/azure-disk
parameters:
skuName: Premium_LRS #[Premium_LRS, Standard_LRS]
location: westeurope
location: <Storage account location>
# If you have created a different storage account e.g. for Premium Storage
storageAccount: <Storage account name>
# Use Managed Disk(s) with VMs using Managed Disks(Only used for Tectonic deployment)

View File

@ -1,5 +1,5 @@
#!/bin/bash
docker build -t bigchaindb/nginx_http:2.0.0-alpha .
docker build -t bigchaindb/nginx_http:2.0.0-alpha3 .
docker push bigchaindb/nginx_http:2.0.0-alpha
docker push bigchaindb/nginx_http:2.0.0-alpha3

View File

@ -146,17 +146,17 @@ stream {
# DNS resolver to use for all the backend names specified in this configuration.
resolver DNS_SERVER valid=30s ipv6=off;
# The following map block enables lazy-binding to the backend at runtime,
# The following map blocks enable lazy-binding to the backend at runtime,
# rather than binding as soon as NGINX starts.
map $remote_addr $tm_backend {
default TM_BACKEND_HOST;
map $remote_addr $bdb_backend {
default BIGCHAINDB_BACKEND_HOST;
}
# Server to forward connection to nginx instance hosting
# tendermint node public key.
server {
listen TM_PUB_KEY_ACCESS_PORT;
proxy_pass $tm_backend:TM_PUB_KEY_ACCESS_PORT;
proxy_pass $bdb_backend:TM_PUB_KEY_ACCESS_PORT;
}
# Server to forward p2p connections to Tendermint instance.
@ -164,7 +164,7 @@ stream {
listen TM_P2P_PORT so_keepalive=3m:1m:5;
preread_timeout 60s;
tcp_nodelay on;
proxy_pass $tm_backend:TM_P2P_PORT;
proxy_pass $bdb_backend:TM_P2P_PORT;
}
}

View File

@ -21,6 +21,10 @@ bdb_backend_host=`printenv BIGCHAINDB_BACKEND_HOST`
bdb_api_port=`printenv BIGCHAINDB_API_PORT`
bdb_ws_port=`printenv BIGCHAINDB_WS_PORT`
# Tendermint vars
tm_pub_key_access_port=`printenv TM_PUB_KEY_ACCESS_PORT`
tm_p2p_port=`printenv TM_P2P_PORT`
# sanity check
if [[ -z "${node_frontend_port:?NODE_FRONTEND_PORT not specified. Exiting!}" || \
@ -33,7 +37,6 @@ if [[ -z "${node_frontend_port:?NODE_FRONTEND_PORT not specified. Exiting!}" ||
-z "${dns_server:?DNS_SERVER not specified. Exiting!}" || \
-z "${health_check_port:?HEALTH_CHECK_PORT not specified.}" || \
-z "${tm_pub_key_access_port:?TM_PUB_KEY_ACCESS_PORT not specified. Exiting!}" || \
-z "${tm_backend_host:?TM_BACKEND_HOST not specified. Exiting!}" || \
-z "${tm_p2p_port:?TM_P2P_PORT not specified. Exiting!}" ]]; then
exit 1
else
@ -47,7 +50,6 @@ else
echo BIGCHAINDB_API_PORT="$bdb_api_port"
echo BIGCHAINDB_WS_PORT="$bdb_ws_port"
echo TM_PUB_KEY_ACCESS_PORT="$tm_pub_key_access_port"
echo TM_BACKEND_HOST="$tm_backend_host"
echo TM_P2P_PORT="$tm_p2p_port"
fi
@ -64,7 +66,6 @@ sed -i "s|BIGCHAINDB_WS_PORT|${bdb_ws_port}|g" ${NGINX_CONF_FILE}
sed -i "s|DNS_SERVER|${dns_server}|g" ${NGINX_CONF_FILE}
sed -i "s|HEALTH_CHECK_PORT|${health_check_port}|g" ${NGINX_CONF_FILE}
sed -i "s|TM_PUB_KEY_ACCESS_PORT|${tm_pub_key_access_port}|g" ${NGINX_CONF_FILE}
sed -i "s|TM_BACKEND_HOST|${tm_backend_host}|g" ${NGINX_CONF_FILE}
sed -i "s|TM_P2P_PORT|${tm_p2p_port}|g" ${NGINX_CONF_FILE}
# start nginx

View File

@ -59,29 +59,24 @@ spec:
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-pub-key-access
- name: TM_BACKEND_HOST
valueFrom:
configMapKeyRef:
name: tendermint-config
key: ngx-tm-instance-name
key: bdb-pub-key-access
- name: TM_P2P_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-p2p-port
key: bdb-p2p-port
ports:
- containerPort: "<node-health-check-port from ConfigMap>"
- containerPort: 8888
protocol: TCP
name: ngx-health
- containerPort: "<node-frontend-port from ConfigMap>"
- containerPort: 80
protocol: TCP
- containerPort: "<tm-pub-key-access from ConfigMap>"
- containerPort: 9986
protocol: TCP
name: tm-pub-key
- containerPort: "<tm-p2p-port from ConfigMap>"
name: bdb-pub-key
- containerPort: 46656
protocol: TCP
name: tm-p2p-port
name: bdb-p2p-port
livenessProbe:
httpGet:
path: /health

View File

@ -1,5 +1,5 @@
#!/bin/bash
docker build -t bigchaindb/nginx_https:2.0.0-alpha .
docker build -t bigchaindb/nginx_https:2.0.0-alpha3 .
docker push bigchaindb/nginx_https:2.0.0-alpha
docker push bigchaindb/nginx_https:2.0.0-alpha3

View File

@ -177,17 +177,17 @@ stream {
# DNS resolver to use for all the backend names specified in this configuration.
resolver DNS_SERVER valid=30s ipv6=off;
# The following map block enables lazy-binding to the backend at runtime,
# The following map blocks enable lazy-binding to the backend at runtime,
# rather than binding as soon as NGINX starts.
map $remote_addr $tm_backend {
default TM_BACKEND_HOST;
map $remote_addr $bdb_backend {
default BIGCHAINDB_BACKEND_HOST;
}
# Server to forward connection to nginx instance hosting
# tendermint node public key.
server {
listen TM_PUB_KEY_ACCESS_PORT;
proxy_pass $tm_backend:TM_PUB_KEY_ACCESS_PORT;
proxy_pass $bdb_backend:TM_PUB_KEY_ACCESS_PORT;
}
# Server to forward p2p connections to Tendermint instance.
@ -195,7 +195,7 @@ stream {
listen TM_P2P_PORT so_keepalive=3m:1m:5;
preread_timeout 60s;
tcp_nodelay on;
proxy_pass $tm_backend:TM_P2P_PORT;
proxy_pass $bdb_backend:TM_P2P_PORT;
}
}

View File

@ -174,17 +174,17 @@ stream {
# DNS resolver to use for all the backend names specified in this configuration.
resolver DNS_SERVER valid=30s ipv6=off;
# The following map block enables lazy-binding to the backend at runtime,
# The following map blocks enable lazy-binding to the backend at runtime,
# rather than binding as soon as NGINX starts.
map $remote_addr $tm_backend {
default TM_BACKEND_HOST;
map $remote_addr $bdb_backend {
default BIGCHAINDB_BACKEND_HOST;
}
# Server to forward connection to nginx instance hosting
# tendermint node public key.
server {
listen TM_PUB_KEY_ACCESS_PORT;
proxy_pass $tm_backend:TM_PUB_KEY_ACCESS_PORT;
proxy_pass $bdb_backend:TM_PUB_KEY_ACCESS_PORT;
}
# Server to forward p2p connections to Tendermint instance.
@ -192,7 +192,7 @@ stream {
listen TM_P2P_PORT so_keepalive=3m:1m:5;
preread_timeout 60s;
tcp_nodelay on;
proxy_pass $tm_backend:TM_P2P_PORT;
proxy_pass $bdb_backend:TM_P2P_PORT;
}
}

View File

@ -31,7 +31,6 @@ bdb_ws_port=`printenv BIGCHAINDB_WS_PORT`
# Tendermint vars
tm_pub_key_access_port=`printenv TM_PUB_KEY_ACCESS_PORT`
tm_backend_host=`printenv TM_BACKEND_HOST`
tm_p2p_port=`printenv TM_P2P_PORT`
@ -48,7 +47,6 @@ if [[ -z "${node_frontend_port:?NODE_FRONTEND_PORT not specified. Exiting!}" ||
-z "${health_check_port:?HEALTH_CHECK_PORT not specified. Exiting!}" || \
-z "${node_fqdn:?NODE_FQDN not specified. Exiting!}" || \
-z "${tm_pub_key_access_port:?TM_PUB_KEY_ACCESS_PORT not specified. Exiting!}" || \
-z "${tm_backend_host:?TM_BACKEND_HOST not specified. Exiting!}" || \
-z "${tm_p2p_port:?TM_P2P_PORT not specified. Exiting!}" ]]; then
echo "Missing required environment variables. Exiting!"
exit 1
@ -65,7 +63,6 @@ else
echo BIGCHAINDB_API_PORT="$bdb_api_port"
echo BIGCHAINDB_WS_PORT="$bdb_ws_port"
echo TM_PUB_KEY_ACCESS_PORT="$tm_pub_key_access_port"
echo TM_BACKEND_HOST="$tm_backend_host"
echo TM_P2P_PORT="$tm_p2p_port"
fi
@ -93,7 +90,6 @@ sed -i "s|BIGCHAINDB_WS_PORT|${bdb_ws_port}|g" ${NGINX_CONF_FILE}
sed -i "s|DNS_SERVER|${dns_server}|g" ${NGINX_CONF_FILE}
sed -i "s|HEALTH_CHECK_PORT|${health_check_port}|g" ${NGINX_CONF_FILE}
sed -i "s|TM_PUB_KEY_ACCESS_PORT|${tm_pub_key_access_port}|g" ${NGINX_CONF_FILE}
sed -i "s|TM_BACKEND_HOST|${tm_backend_host}|g" ${NGINX_CONF_FILE}
sed -i "s|TM_P2P_PORT|${tm_p2p_port}|g" ${NGINX_CONF_FILE}
# start nginx

View File

@ -12,7 +12,7 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: bigchaindb/nginx_https:2.0.0-alpha
image: bigchaindb/nginx_https:2.0.0-alpha3
imagePullPolicy: Always
env:
- name: NODE_FRONTEND_PORT
@ -74,17 +74,12 @@ spec:
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-pub-key-access
- name: TM_BACKEND_HOST
valueFrom:
configMapKeyRef:
name: tendermint-config
key: ngx-tm-instance-name
key: bdb-pub-key-access
- name: TM_P2P_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-p2p-port
key: bdb-p2p-port
- name: AUTHORIZATION_MODE
valueFrom:
configMapKeyRef:
@ -107,10 +102,10 @@ spec:
name: ngx-port
- containerPort: 9986
protocol: TCP
name: tm-pub-key
name: bdb-pub-key
- containerPort: 46656
protocol: TCP
name: tm-p2p-port
name: bdb-p2p-port
livenessProbe:
httpGet:
path: /health

View File

@ -59,21 +59,21 @@ function configure_client_cert_gen(){
echo "set_var EASYRSA_SSL_CONF \"$1/openssl-1.0.cnf\"" >> $1/vars
echo "set_var EASYRSA_PKI \"$1/pki\"" >> $1/vars
$1/easyrsa init-pki
$1/easyrsa gen-req "$BDB_CN"-"$INDEX" nopass
$1/easyrsa gen-req "$BDB_CN" nopass
$1/easyrsa gen-req "$MDB_MON_CN"-"$INDEX" nopass
}
function import_requests(){
# $1:- Base directory for Root CA
$1/easyrsa import-req $BASE_MEMBER_CERT_DIR/$BASE_EASY_RSA_PATH/pki/reqs/"$MDB_CN"-"$INDEX".req "$MDB_CN"-"$INDEX"
$1/easyrsa import-req $BASE_CLIENT_CERT_DIR/$BASE_EASY_RSA_PATH/pki/reqs/"$BDB_CN"-"$INDEX".req "$BDB_CN"-"$INDEX"
$1/easyrsa import-req $BASE_CLIENT_CERT_DIR/$BASE_EASY_RSA_PATH/pki/reqs/"$BDB_CN".req "$BDB_CN"
$1/easyrsa import-req $BASE_CLIENT_CERT_DIR/$BASE_EASY_RSA_PATH/pki/reqs/"$MDB_MON_CN"-"$INDEX".req "$MDB_MON_CN"-"$INDEX"
}
function sign_requests(){
# $1:- Base directory for Root CA
$1/easyrsa --subject-alt-name=DNS:localhost,DNS:"$MDB_CN"-"$INDEX" sign-req server "$MDB_CN"-"$INDEX"
$1/easyrsa sign-req client "$BDB_CN"-"$INDEX"
$1/easyrsa sign-req client "$BDB_CN"
$1/easyrsa sign-req client "$MDB_MON_CN"-"$INDEX"
}
@ -82,7 +82,7 @@ function make_pem_files(){
# $2:- Base directory for kubernetes related config for secret.yaml
mkdir $2
cat $1/pki/issued/"$MDB_CN"-"$INDEX".crt $BASE_MEMBER_CERT_DIR/$BASE_EASY_RSA_PATH/pki/private/"$MDB_CN"-"$INDEX".key > $2/"$MDB_CN"-"$INDEX".pem
cat $1/pki/issued/"$BDB_CN"-"$INDEX".crt $BASE_CLIENT_CERT_DIR/$BASE_EASY_RSA_PATH/pki/private/"$BDB_CN"-"$INDEX".key > $2/"$BDB_CN"-"$INDEX".pem
cat $1/pki/issued/"$BDB_CN".crt $BASE_CLIENT_CERT_DIR/$BASE_EASY_RSA_PATH/pki/private/"$BDB_CN".key > $2/"$BDB_CN".pem
cat $1/pki/issued/"$MDB_MON_CN"-"$INDEX".crt $BASE_CLIENT_CERT_DIR/$BASE_EASY_RSA_PATH/pki/private/"$MDB_MON_CN"-"$INDEX".key > $2/"$MDB_MON_CN"-"$INDEX".pem
}
@ -91,17 +91,17 @@ function convert_b64(){
# $2:- Base directory for Root CA
# $3:- Base directory for client requests/keys
cat $1/"$MDB_CN"-"$INDEX".pem | base64 -w 0 > $1/"$MDB_CN"-"$INDEX".pem.b64
cat $1/"$BDB_CN"-"$INDEX".pem | base64 -w 0 > $1/"$BDB_CN"-"$INDEX".pem.b64
cat $1/"$BDB_CN".pem | base64 -w 0 > $1/"$BDB_CN".pem.b64
cat $1/"$MDB_MON_CN"-"$INDEX".pem | base64 -w 0 > $1/"$MDB_MON_CN"-"$INDEX".pem.b64
cat $3/pki/private/"$BDB_CN"-"$INDEX".key | base64 -w 0 > $1/"$BDB_CN"-"$INDEX".key.b64
cat $3/pki/private/"$BDB_CN".key | base64 -w 0 > $1/"$BDB_CN".key.b64
cat $2/pki/ca.crt | base64 -w 0 > $1/ca.crt.b64
cat $2/pki/crl.pem | base64 -w 0 > $1/crl.pem.b64
}
function configure_common(){
sudo apt-get update -y
sudo apt-get install openssl -y
apt-get update -y
apt-get install openssl -y
wget https://github.com/OpenVPN/easy-rsa/archive/3.0.1.tar.gz -P $1
tar xzvf $1/3.0.1.tar.gz -C $1/
rm $1/3.0.1.tar.gz
@ -113,8 +113,8 @@ function get_users(){
openssl x509 -in $BASE_CA_DIR/$BASE_EASY_RSA_PATH/pki/issued/"$MDB_CN"-"$INDEX".crt -inform PEM -subject \
-nameopt RFC2253 | head -n 1 | sed -r 's/^subject= //' > $1/"$MDB_CN"-"$INDEX".user
openssl x509 -in $BASE_CA_DIR/$BASE_EASY_RSA_PATH/pki/issued/"$BDB_CN"-"$INDEX".crt -inform PEM -subject \
-nameopt RFC2253 | head -n 1 | sed -r 's/^subject= //' > $1/"$BDB_CN"-"$INDEX".user
openssl x509 -in $BASE_CA_DIR/$BASE_EASY_RSA_PATH/pki/issued/"$BDB_CN".crt -inform PEM -subject \
-nameopt RFC2253 | head -n 1 | sed -r 's/^subject= //' > $1/"$BDB_CN".user
openssl x509 -in $BASE_CA_DIR/$BASE_EASY_RSA_PATH/pki/issued/"$MDB_MON_CN"-"$INDEX".crt -inform PEM -subject \
-nameopt RFC2253 | head -n 1 | sed -r 's/^subject= //' > $1/"$MDB_MON_CN"-"$INDEX".user
@ -128,14 +128,22 @@ function generate_secretes_no_threescale(){
mdb_instance_pem=`cat $1/"$MDB_CN"-"$INDEX".pem.b64`
bdb_instance_pem=`cat $1/"$BDB_CN"-"$INDEX".pem.b64`
bdb_instance_key=`cat $1/"$BDB_CN"-"$INDEX".key.b64`
bdb_instance_pem=`cat $1/"$BDB_CN".pem.b64`
bdb_instance_key=`cat $1/"$BDB_CN".key.b64`
root_ca_pem=`cat $1/ca.crt.b64`
root_crl_pem=`cat $1/crl.pem.b64`
secrete_token=`echo $2 | base64 -w 0`
https_cert_key=`cat $3 | base64 -w 0`
https_cert_chain_pem=`cat $4 | base64 -w 0`
if [ -f $3 ]; then
https_cert_key=`cat $3 | base64 -w 0`
else
https_cert_key=""
fi
if [ -f $4 ]; then
https_cert_chain_pem=`cat $4 | base64 -w 0`
else
https_cert_chain_pem=""
fi
mdb_admin_password=`echo $5 | base64 -w 0`
@ -215,18 +223,17 @@ EOF
function generate_config_map(){
mdb_instance_name="$MDB_CN-$INDEX"
bdb_instance_name="$BDB_CN-$INDEX"
ngx_instance_name="ngx-instance-$INDEX"
bdb_user=`cat "${1}"/"$BDB_CN"-"${INDEX}".user`
bdb_user=`cat "${1}"/"$BDB_CN".user`
mdb_admin_username="${2}"
node_fqdn="${3}"
tm_seeds="${4}"
tm_validators="${5}"
tm_validators_power="${6}"
tm_genesis_time="${7}"
tm_chain_id="${8}"
tm_instance_name="${9}"
bdb_persistent_peers="${4}"
bdb_validators="${5}"
bdb_validators_power="${6}"
bdb_genesis_time="${7}"
bdb_chain_id="${8}"
bdb_instance_name="${9}"
dns_resolver_k8s="${10}"
auth_mode="${11}"
@ -356,46 +363,39 @@ metadata:
name: tendermint-config
namespace: default
data:
# tm-seeds is the list of all the peers in the network.
tm-seeds: "${tm_seeds}"
# bdb-persistent-peers is the list of all the peers in the network.
bdb-persistent-peers: "${bdb_persistent_peers}"
# tm-validators is the list of all validators in the network.
tm-validators: "${tm_validators}"
# bdb-validators is the list of all validators in the network.
bdb-validators: "${bdb_validators}"
# tm-validator-power is the validators voting power, make sure the order and
# the number of nodes in tm-validator-power and tm-validators is the same.
tm-validator-power: "${tm_validators_power}"
# bdb-validator-power is the validators voting power, make sure the order and
# the number of nodes in bdb-validator-power and bdb-validators is the same.
bdb-validator-power: "${bdb_validators_power}"
# tm-genesis-time is the official time of blockchain start.
# bdb-genesis-time is the official time of blockchain start.
# example: 0001-01-01T00:00:00Z
tm-genesis-time: "${tm_genesis_time}"
bdb-genesis-time: "${bdb_genesis_time}"
# tm-chain-id is the ID of the blockchain. Must be unique for every blockchain.
# bdb-chain-id is the ID of the blockchain. Must be unique for every blockchain.
# example: test-chain-KPI1Ud
tm-chain-id: "${tm_chain_id}"
bdb-chain-id: "${bdb_chain_id}"
# tendermint-instance-name is the name of the Tendermint instance
# in the cluster
tm-instance-name: "${tm_instance_name}"
# ngx-tm-instance-name is the FQDN of the tendermint instance in this cluster
ngx-tm-instance-name: "${tm_instance_name}.default.svc.cluster.local"
# tm-abci-port is used by Tendermint Core for ABCI traffic. BigchainDB nodes
# bdb-abci-port is used by Tendermint Core for ABCI traffic. BigchainDB nodes
# use that internally.
tm-abci-port: "46658"
bdb-abci-port: "46658"
# tm-p2p-port is used by Tendermint Core to communicate with
# bdb-p2p-port is used by Tendermint Core to communicate with
# other peers in the network. This port is accessible publicly.
tm-p2p-port: "46656"
bdb-p2p-port: "46656"
# tm-rpc-port is used by Tendermint Core to rpc. BigchainDB nodes
# bdb-rpc-port is used by Tendermint Core to rpc. BigchainDB nodes
# use this port internally.
tm-rpc-port: "46657"
bdb-rpc-port: "46657"
# tm-pub-key-access is the port number used to host/publish the
# bdb-pub-key-access is the port number used to host/publish the
# public key of the tendemrint node in this cluster.
tm-pub-key-access: "9986"
bdb-pub-key-access: "9986"
---
apiVersion: v1

View File

@ -9,7 +9,7 @@ CERT_DIR="certificates"
# base variables with default values
MDB_CN="mdb-instance"
BDB_CN="bdb-instance"
BDB_CN="$BDB_INSTANCE_NAME"
MDB_MON_CN="mdb-mon-instance"
INDEX='0'
CONFIGURE_CA='true'
@ -99,5 +99,5 @@ convert_b64 $BASE_K8S_DIR $BASE_CA_DIR/$BASE_EASY_RSA_PATH $BASE_CLIENT_CERT_DIR
get_users $BASE_USERS_DIR $BASE_CA_DIR/$BASE_EASY_RSA_PATH
generate_secretes_no_threescale $BASE_K8S_DIR $SECRET_TOKEN $HTTPS_CERT_KEY_FILE_NAME $HTTPS_CERT_CHAIN_FILE_NAME $MDB_ADMIN_PASSWORD
generate_config_map $BASE_USERS_DIR $MDB_ADMIN_USER $NODE_FQDN $TM_SEEDS $TM_VALIDATORS $TM_VALIDATOR_POWERS $TM_GENESIS_TIME \
$TM_CHAIN_ID $TM_INSTANCE_NAME $NODE_DNS_SERVER $AUTH_MODE
generate_config_map $BASE_USERS_DIR $MDB_ADMIN_USER $NODE_FQDN $BDB_PERSISTENT_PEERS $BDB_VALIDATORS $BDB_VALIDATOR_POWERS $BDB_GENESIS_TIME \
$BDB_CHAIN_ID $BDB_INSTANCE_NAME $NODE_DNS_SERVER $AUTH_MODE

View File

@ -1,5 +1,5 @@
# DNS name of the bigchaindb node
NODE_FQDN="test-node.bigchaindb.com"
NODE_FQDN="test.bigchaindb.com"
# Authorization mode: [secret-token, threescale]
AUTH_MODE="secret-token"
@ -10,39 +10,38 @@ AUTH_MODE="secret-token"
SECRET_TOKEN="test-secret"
# Absolute path for the SSL certificate key
HTTPS_CERT_KEY_FILE_NAME="</path/to/https.key>"
HTTPS_CERT_KEY_FILE_NAME="/path/to/https.key"
# Absolute path for the SSL certificate chain
HTTPS_CERT_CHAIN_FILE_NAME="</path/to/https.pem>"
HTTPS_CERT_CHAIN_FILE_NAME="/path/to/https.crt"
# MongoDB Admin user credentials
MDB_ADMIN_USER='adminUser'
MDB_ADMIN_PASSWORD='superstrongpassword'
# Tendermint instance name of the bigchaindb
# node. This name should be unique
TM_INSTANCE_NAME='tm-instance-0'
# BigchainDB instance name. This name should be unique
BDB_INSTANCE_NAME='bdb-instance-0'
# Comma separated list of initial peers in the
# network.
TM_SEEDS='tm-instance-0,tm-instance-1,tm-instance-2'
BDB_PERSISTENT_PEERS='bdb-instance-0,bdb-instance-1,bdb-instance-2,bdb-instance-3'
# Comma separated list of validators in the
# network
TM_VALIDATORS='tm-instance-0,tm-instance-1,tm-instance-2'
BDB_VALIDATORS='bdb-instance-0,bdb-instance-1,bdb-instance-2,bdb-instance-3'
# Comma separated list of voting
# power of all validators. Make sure
# order and number of powers corresponds
# to TM_VALIDATORS
TM_VALIDATOR_POWERS='10,10,10'
# to BDB_VALIDATORS
BDB_VALIDATOR_POWERS='10,10,10,10'
# Offical time of blockchain start
TM_GENESIS_TIME='0001-01-01T00:00:00Z'
BDB_GENESIS_TIME='0001-01-01T00:00:00Z'
# Blockchain ID must be unique for
# every blockchain
TM_CHAIN_ID='test-chain-rwcPML'
BDB_CHAIN_ID='test-chain-rwcPML'
# IP Address of the resolver(DNS server).
# i.e. IP of `kube-dns`, can be retrieved using:

View File

@ -1,5 +0,0 @@
#!/bin/bash
docker build -t bigchaindb/nginx_pub_key_access:2.0.0-alpha .
docker push bigchaindb/nginx_pub_key_access:2.0.0-alpha

View File

@ -1,120 +0,0 @@
#################################################################################
# This YAML file desribes a StatefulSet with a service for running and exposing #
# a Tendermint instance. It depends on the tendermint-config-db-claim #
# and tendermint-db-claim k8s pvc. #
#################################################################################
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: tm-instance-0-ss
namespace: default
spec:
serviceName: tm-instance-0
replicas: 1
template:
metadata:
name: tm-instance-0-ss
labels:
app: tm-instance-0-ss
spec:
restartPolicy: Always
volumes:
- name: tm-data
persistentVolumeClaim:
claimName: tendermint-db-claim
- name: tm-config-data
persistentVolumeClaim:
claimName: tendermint-config-db-claim
containers:
# Treating nginx + tendermint as a POD because they should not
# exist without each other
# Nginx container for hosting public key of this ndoe
- name: nginx
imagePullPolicy: Always
image: bigchaindb/nginx_pub_key_access:2.0.0-alpha
env:
- name: TM_PUB_KEY_ACCESS_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-pub-key-access
ports:
- containerPort: 9986
name: tm-pk-access
volumeMounts:
- name: tm-config-data
mountPath: /usr/share/nginx
readOnly: true
#Tendermint container
- name: tendermint
imagePullPolicy: Always
image: bigchaindb/tendermint:2.0.0-alpha
env:
- name: TM_SEEDS
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-seeds
- name: TM_VALIDATOR_POWER
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-validator-power
- name: TM_VALIDATORS
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-validators
- name: TM_PUB_KEY_ACCESS_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-pub-key-access
- name: TM_GENESIS_TIME
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-genesis-time
- name: TM_CHAIN_ID
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-chain-id
- name: TM_P2P_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-p2p-port
- name: TM_INSTANCE_NAME
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-instance-name
- name: TMHOME
value: /tendermint
- name: TM_PROXY_APP
valueFrom:
configMapKeyRef:
name: vars
key: bdb-instance-name
- name: TM_ABCI_PORT
valueFrom:
configMapKeyRef:
name: tendermint-config
key: tm-abci-port
# Resource constraint on the pod, can be changed
resources:
limits:
cpu: 200m
memory: 5G
volumeMounts:
- name: tm-data
mountPath: /tendermint
- name: tm-config-data
mountPath: /tendermint_node_data
ports:
- containerPort: 46656
name: p2p
- containerPort: 46657
name: rpc

View File

@ -1,24 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: tm-instance-1
namespace: default
labels:
name: tm-instance-1
spec:
selector:
app: tm-instance-1-ss
ports:
- port: 46656
targetPort: 46656
name: p2p
protocol: TCP
- port: 46657
targetPort: 46657
name: rpc
protocol: TCP
- port: 9986
targetPort: 9986
name: pub-key-access
protocol: TCP
clusterIP: None

View File

@ -1,5 +0,0 @@
#!/bin/bash
docker build -t bigchaindb/tendermint:2.0.0-alpha .
docker push bigchaindb/tendermint:2.0.0-alpha

View File

@ -84,7 +84,7 @@ install_requires = [
'pyyaml~=3.12',
'aiohttp~=2.3',
'python-rapidjson-schema==0.1.1',
'abci~=0.3.0',
'abci==0.4.4',
'setproctitle~=1.1.0',
]
@ -131,6 +131,7 @@ setup(
],
},
install_requires=install_requires,
dependency_links=['git+https://github.com/kansi/py-abci.git@master#egg=abci-0.4.4'],
setup_requires=['pytest-runner'],
tests_require=tests_require,
extras_require={

View File

@ -348,7 +348,8 @@ class MockResponse():
def test_upsert_validator(mock_autoconfigure, mock_store_validator_update):
from bigchaindb.commands.bigchaindb import run_upsert_validator
args = Namespace(public_key='BOB_PUBLIC_KEY', power='10', config={})
args = Namespace(public_key='CJxdItf4lz2PwEf4SmYNAu/c/VpmX39JEgC5YpH7fxg=',
power='10', config={})
run_upsert_validator(args)
assert mock_store_validator_update.called

View File

@ -162,7 +162,7 @@ def test_end_block_return_validator_updates(b):
query.store_validator_update(b.connection, validator_update)
resp = app.end_block(99)
assert resp.diffs[0] == encode_validator(validator)
assert resp.validator_updates[0] == encode_validator(validator)
updates = b.get_validator_update()
assert updates == []

View File

@ -10,26 +10,32 @@ import pytest
def test_process_event_new_block():
from bigchaindb.tendermint.event_stream import process_event
event = '{"id": "test_stream_id#event", "jsonrpc": "2.0", "result":'\
' {"data": {"data": {"block": {"data": {"txs": ["eyJpbnB1dHMiOiBb'\
'eyJvd25lcnNfYmVmb3JlIjogWyJCWnZLQmNSUmgyd0tOOGZuTENlZUczSGhFaWF4'\
'TWdyWmlib0gyeUZvYzVwTCJdLCAiZnVsZmlsbHMiOiBudWxsLCAiZnVsZmlsbG1l'\
'bnQiOiAicEdTQUlKMER2S2JBeXkyQ2hqT212ZWVCc0FxWktTS0k3VDNWZGhtUkI2'\
'V2dhdzdoZ1VDUHluUnFuQW9RWDh2UlNXeXNwYk5uYWVBaVpOU19lQ3V6ejhDZWtJ'\
'OHBIejJnekExeDJkOF93NTUzWFVOUGJFbnpBUzhncURqeDFkaE1JeDM1ZnpVTCJ9'\
'XSwgIm91dHB1dHMiOiBbeyJwdWJsaWNfa2V5cyI6IFsiQlp2S0JjUlJoMndLTjhm'\
'bkxDZWVHM0hoRWlheE1nclppYm9IMnlGb2M1cEwiXSwgImNvbmRpdGlvbiI6IHsi'\
'ZGV0YWlscyI6IHsidHlwZSI6ICJlZDI1NTE5LXNoYS0yNTYiLCAicHVibGljX2tl'\
'eSI6ICJCWnZLQmNSUmgyd0tOOGZuTENlZUczSGhFaWF4TWdyWmlib0gyeUZvYzVw'\
'TCJ9LCAidXJpIjogIm5pOi8vL3NoYS0yNTY7eHVFX1ZPNjd6aHc0LTRjN0k1YUtm'\
'WGtzX1Q1MjUwMnBuOC1mcVJQQkloRT9mcHQ9ZWQyNTUxOS1zaGEtMjU2JmNvc3Q9'\
'MTMxMDcyIn0sICJhbW91bnQiOiAiMSJ9XSwgIm9wZXJhdGlvbiI6ICJDUkVBVEUi'\
'LCAibWV0YWRhdGEiOiB7InNob3J0IjogImxpdHRsZSJ9LCAiYXNzZXQiOiB7ImRh'\
'dGEiOiB7ImJpY3ljbGUiOiB7InNlcmlhbF9udW1iZXIiOiAiYWJjZDEyMzQiLCAi'\
'bWFudWZhY3R1cmVyIjogImJrZmFiIn19fSwgInZlcnNpb24iOiAiMS4wIiwgImlk'\
'IjogIjE4NzM3Yzc0OWQxZGE2Yzc5YjFmYWZiZjkwOTkwNzEwMDA1ZWM4MTYxNGQ5'\
'YWFiNDkyZTgwYTkzNWRkYThjMzAifQ=="]}, "header": {"height": 1}}},'\
' "type": "new_block"}, "name": "NewBlock"}}'
event = '{"jsonrpc": "2.0", "id": "test_stream_id#event", "result": {'\
'"query": "tm.event=\'NewBlock\'", "data": { "type": "CF18EA939D3240",'\
'"value": { "block": { "header": { "chain_id": "test-chain-ipQIAa",'\
'"height": 1, "time": "2018-04-23T14:49:30.509920098Z", "num_txs": 1,'\
'"last_block_id": { "hash": "", "parts": { "total": 0, "hash": "" }},'\
'"total_txs": 1, "last_commit_hash": "", "data_hash": "38792142CE6D7F6F46F71777CB53F94CD9497B23",'\
'"validators_hash": "BF0D0EC2E13C76E69FA572516B6D93E64F3C58EF",'\
'"consensus_hash": "F66EF1DF8BA6DAC7A1ECCE40CC84E54A1CEBC6A5", "app_hash": "",'\
'"last_results_hash": "", "evidence_hash": "" }, "data": {"txs": ['\
'"eyJpbnB1dHMiOiBbeyJvd25lcnNfYmVmb3JlIjogWyJFb2Z0Z0FNd2hKQXM0cW81b'\
'0dhOU1GWXF5dFp5WEdaNmVmZFVYc1dXTDdmZSJdLCAiZnVsZmlsbHMiOiBudWxsLCA'\
'iZnVsZmlsbG1lbnQiOiAicEdTQUlNMGNueFFGeTZrSE1PcGxBbzh1ZncwNDlsZ2VxN'\
'HBOeDFNdksya0pjRjBCZ1VETjN2RTlsWmhaT21jMWZHbFpLUFZmZDdCTi1RVTdBa0N'\
'TZ1NKWVRPYzB3YVlmQ1RXc1FQS1VmOE5fODFKd21YOUJxcnlLejYyTmVubHg0dGszN'\
'GtVRCJ9XSwgIm91dHB1dHMiOiBbeyJwdWJsaWNfa2V5cyI6IFsiRW9mdGdBTXdoSkF'\
'zNHFvNW9HYTlNRllxeXRaeVhHWjZlZmRVWHNXV0w3ZmUiXSwgImNvbmRpdGlvbiI6I'\
'HsiZGV0YWlscyI6IHsidHlwZSI6ICJlZDI1NTE5LXNoYS0yNTYiLCAicHVibGljX2t'\
'leSI6ICJFb2Z0Z0FNd2hKQXM0cW81b0dhOU1GWXF5dFp5WEdaNmVmZFVYc1dXTDdmZ'\
'SJ9LCAidXJpIjogIm5pOi8vL3NoYS0yNTY7cFJZWTJQQUE0S3dHd0dUNVQtUXRCQUY'\
'0VWY1WG5JcVkxWmExVER0N0hMQT9mcHQ9ZWQyNTUxOS1zaGEtMjU2JmNvc3Q9MTMxM'\
'DcyIn0sICJhbW91bnQiOiAiMSJ9XSwgIm9wZXJhdGlvbiI6ICJDUkVBVEUiLCAibWV'\
'0YWRhdGEiOiBudWxsLCAiYXNzZXQiOiB7ImRhdGEiOiBudWxsfSwgInZlcnNpb24iO'\
'iAiMi4wIiwgImlkIjogImUwMmM0ZWM3MmExYzUzMmJkNjUyNWZkNGMxODU3ZDhmN2E'\
'wYWVkYTgyNGVjY2NhZGY4NTlmNzc0Zjc3ZTgwZGUifQ=="]}, "evidence": {'\
'"evidence": null}, "last_commit": { "blockID": { "hash": "", "parts":'\
'{"total": 0, "hash": ""} }, "precommits": null } } } } } }'
event_queue = Queue()
process_event(event_queue, event, 'test_stream_id')
@ -42,15 +48,17 @@ def test_process_event_new_block():
def test_process_event_empty_block():
from bigchaindb.tendermint.event_stream import process_event
event = '{"jsonrpc": "2.0", "id": "test_stream_id#event",'\
'"result": {"name": "NewBlock", "data": {"type": "new_block",'\
' "data": {"block": {"header": {"chain_id": "test-chain-cbVRwC",'\
' "height": 1, "time": "2017-12-04T22:42:54.33+05:30", "num_txs": 0,'\
' "last_block_id": {"hash": "", "parts": {"total": 0, "hash": ""}},'\
' "last_commit_hash": "", "data_hash": "",'\
' "validators_hash": "ACF23A690EB72D051931E878E8F3D6E01A17A81C",'\
' "app_hash": ""}, "data": {"txs": []}, "last_commit": {"blockID": '\
' {"hash": "", "parts": {"total": 0, "hash": ""}}, "precommits": []}}}}}}'
event = '{"jsonrpc": "2.0", "id": "bigchaindb_stream_1524555674#event",'\
'"result": {"query": "tm.event=\'NewBlock\'", "data": {"type": '\
'"CF18EA939D3240", "value": {"block": {"header": {"chain_id": '\
'"test-chain-ipQIAa", "height": 1, "time": "2018-04-24T07:41:16.838038877Z",'\
'"num_txs": 0, "last_block_id": {"hash": "", "parts": {"total": 0, "hash": ""}},'\
'"total_txs": 0, "last_commit_hash": "", "data_hash": "", "validators_hash":'\
'"BF0D0EC2E13C76E69FA572516B6D93E64F3C58EF", "consensus_hash": '\
'"F66EF1DF8BA6DAC7A1ECCE40CC84E54A1CEBC6A5", "app_hash": "", '\
'"last_results_hash": "", "evidence_hash": ""}, "data": {"txs": null},'\
'"evidence": {"evidence": null}, "last_commit": {"blockID": {"hash": "", '\
'"parts": {"total": 0, "hash": ""}}, "precommits": null}}}}}}'
event_queue = Queue()
process_event(event_queue, event, 'test_stream_id')
@ -62,7 +70,7 @@ def test_process_unknown_event():
from bigchaindb.tendermint.event_stream import process_event
event = '{"jsonrpc": "2.0", "id": "test_stream_id#event",'\
' "result": {"name": "UnknownEvent"}}'
' "result": { "query": "tm.event=\'UnknownEvent\'" }}'
event_queue = Queue()
process_event(event_queue, event, 'test_stream_id')
@ -96,7 +104,7 @@ async def test_subscribe_events(tendermint_ws_url, b):
b.post_transaction(tx, 'broadcast_tx_async')
msg = await ws.receive()
msg_data_dict = json.loads(msg.data)
raw_txn = msg_data_dict['result']['data']['data']['block']['data']['txs'][0]
raw_txn = msg_data_dict['result']['data']['value']['block']['data']['txs'][0]
transaction = json.loads(base64.b64decode(raw_txn).decode('utf8'))
assert transaction == tx.to_dict()

View File

@ -5,7 +5,7 @@ import pytest
from abci.server import ProtocolHandler
from io import BytesIO
import abci.types_pb2 as types
from abci.wire import read_message
from abci.encoding import read_message
from abci.messages import to_request_deliver_tx, to_request_check_tx
@ -70,8 +70,6 @@ def test_app(tb):
data = p.process('commit', None)
res, err = read_message(BytesIO(data), types.Response)
assert res
assert res.commit.code == 0
assert res.commit.data == new_block_hash.encode('utf-8')
assert b.get_transaction(tx.id).id == tx.id
@ -90,6 +88,7 @@ def test_app(tb):
p.process('end_block', r)
data = p.process('commit', None)
res, err = read_message(BytesIO(data), types.Response)
assert res.commit.data == new_block_hash.encode('utf-8')
block0 = b.get_latest_block()
@ -105,11 +104,14 @@ def test_upsert_validator(b, alice):
from bigchaindb.backend.query import VALIDATOR_UPDATE_ID
from bigchaindb.backend import query, connect
from bigchaindb.models import Transaction
from bigchaindb.tendermint.utils import public_key_to_base64
import time
conn = connect()
public_key = '1718D2DBFF00158A0852A17A01C78F4DCF3BA8E4FB7B8586807FAC182A535034'
power = 1
validator = {'pub_key': {'type': 'ed25519',
validator = {'pub_key': {'type': 'AC26791624DE60',
'data': public_key},
'power': power}
validator_update = {'validator': validator,
@ -124,8 +126,44 @@ def test_upsert_validator(b, alice):
code, message = b.write_transaction(tx, 'broadcast_tx_commit')
assert code == 202
time.sleep(5)
validators = b.get_validators()
validators = [(v['pub_key']['data'], v['voting_power']) for v in validators]
validators = [(v['pub_key']['value'], v['voting_power']) for v in validators]
assert ((public_key, power) in validators)
public_key64 = public_key_to_base64(public_key)
assert ((public_key64, power) in validators)
@pytest.mark.abci
def test_post_transaction_responses(tendermint_ws_url, b):
from bigchaindb.common.crypto import generate_key_pair
from bigchaindb.models import Transaction
alice = generate_key_pair()
bob = generate_key_pair()
tx = Transaction.create([alice.public_key],
[([alice.public_key], 1)],
asset=None)\
.sign([alice.private_key])
code, message = b.write_transaction(tx, 'broadcast_tx_commit')
assert code == 202
tx_transfer = Transaction.transfer(tx.to_inputs(),
[([bob.public_key], 1)],
asset_id=tx.id)\
.sign([alice.private_key])
code, message = b.write_transaction(tx_transfer, 'broadcast_tx_commit')
assert code == 202
# NOTE: DOESN'T WORK (double spend)
# Tendermint crashes with error: Unexpected result type
# carly = generate_key_pair()
# double_spend = Transaction.transfer(tx.to_inputs(),
# [([carly.public_key], 1)],
# asset_id=tx.id)\
# .sign([alice.private_key])
# code, message = b.write_transaction(double_spend, 'broadcast_tx_commit')
# assert code == 500

View File

@ -350,38 +350,3 @@ def test_get_utxoset_merkle_root(b, utxoset):
'86d311c03115bf4d287f8449ca5828505432d69b82762d47077b1c00fe426eac')
merkle_root = b.get_utxoset_merkle_root()
assert merkle_root == expected_merkle_root
@pytest.mark.abci
def test_post_transaction_responses(tendermint_ws_url, b):
from bigchaindb.common.crypto import generate_key_pair
from bigchaindb.models import Transaction
alice = generate_key_pair()
bob = generate_key_pair()
tx = Transaction.create([alice.public_key],
[([alice.public_key], 1)],
asset=None)\
.sign([alice.private_key])
code, message = b.write_transaction(tx, 'broadcast_tx_commit')
assert code == 202
tx_transfer = Transaction.transfer(tx.to_inputs(),
[([bob.public_key], 1)],
asset_id=tx.id)\
.sign([alice.private_key])
code, message = b.write_transaction(tx_transfer, 'broadcast_tx_commit')
assert code == 202
# NOTE: DOESN'T WORK (double spend)
# Tendermint crashes with error: Unexpected result type
# carly = generate_key_pair()
# double_spend = Transaction.transfer(tx.to_inputs(),
# [([carly.public_key], 1)],
# asset_id=tx.id)\
# .sign([alice.private_key])
# code, message = b.write_transaction(double_spend, 'broadcast_tx_commit')
# assert code == 500

View File

@ -42,3 +42,29 @@ def test_merkleroot():
hashes = [sha3_256(i.encode()).digest() for i in 'abc']
assert merkleroot(hashes) == (
'78c7c394d3158c218916b7ae0ebdea502e0f4e85c08e3b371e3dfd824d389fa3')
SAMPLE_PUBLIC_KEY = {
"address": "53DC09497A6ED73B342C78AB1E916076A03A8B95",
"pub_key": {
"type": "AC26791624DE60",
"value": "7S+T/do70jvneAq0M1so2X3M1iWTSuwtuSAr3nVpfEw="
}
}
def test_convert_base64_public_key_to_address():
from bigchaindb.tendermint.utils import public_key64_to_address
address = public_key64_to_address(SAMPLE_PUBLIC_KEY['pub_key']['value'])
assert address == SAMPLE_PUBLIC_KEY['address']
def test_public_key_encoding_decoding():
from bigchaindb.tendermint.utils import (public_key_from_base64,
public_key_to_base64)
public_key = public_key_from_base64(SAMPLE_PUBLIC_KEY['pub_key']['value'])
base64_public_key = public_key_to_base64(public_key)
assert base64_public_key == SAMPLE_PUBLIC_KEY['pub_key']['value']

View File

@ -13,7 +13,7 @@ setenv =
rethinkdb: BIGCHAINDB_DATABASE_BACKEND=rethinkdb
mongodb: BIGCHAINDB_DATABASE_BACKEND=mongodb
deps = {[base]deps}
install_command = pip install {opts} {packages}
install_command = pip install --process-dependency-links {opts} {packages}
extras = test
commands = pytest -v -n auto --cov=bigchaindb --basetemp={envtmpdir}