From 704d55988baefc1d3133b952c3e83482dc53782f Mon Sep 17 00:00:00 2001 From: Jody Winston Date: Fri, 30 Dec 2016 12:53:53 -0600 Subject: [PATCH 01/34] Fix use of locales for python et al --- Dockerfile | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index c4c30789..c181625a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,16 @@ FROM rethinkdb:2.3 -RUN apt-get update +# From http://stackoverflow.com/a/38553499 + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y locales + +RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ + echo 'LANG="en_US.UTF-8"'>/etc/default/locale && \ + dpkg-reconfigure --frontend=noninteractive locales && \ + update-locale LANG=en_US.UTF-8 + +ENV LANG en_US.UTF-8 + RUN apt-get -y install python3 python3-pip libffi-dev RUN pip3 install --upgrade pip RUN pip3 install --upgrade setuptools From 96432ce0be2e59ceead82ec09dcacb341213c7bb Mon Sep 17 00:00:00 2001 From: utarl Date: Wed, 4 Jan 2017 10:01:45 +0800 Subject: [PATCH 02/34] add multiple current owners --- .../source/data-models/inputs-outputs.rst | 25 +++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/docs/server/source/data-models/inputs-outputs.rst b/docs/server/source/data-models/inputs-outputs.rst index 46bcf6d0..d067fd1f 100644 --- a/docs/server/source/data-models/inputs-outputs.rst +++ b/docs/server/source/data-models/inputs-outputs.rst @@ -7,7 +7,7 @@ Amounts of an asset are encoded in the outputs of a transaction, and each output .. note:: - This document (and various places in the BigchainDB documentation and code) talks about control of an asset in terms of *owners* and *ownership*. The language is chosen to represent the most common use cases, but in some more complex scenarios, it may not be accurate to say that the output is owned by the controllers of those public keys–it would only be correct to say that those public keys are associated with the ability to fulfill the output. Also, depending on the use case, the entity controlling an output via a private key may not be the legal owner of the asset in the corresponding legal domain. However, since we aim to use language that is simple to understand and covers the majority of use cases, we talk in terms of *owners* of an output that have the ability to *spend* that output. + This document (and various places in the BigchainDB documentation and code) talks about control of an asset in terms of *owners* and *ownership*. The language is chosen to represent the most common use cases, but in some more complex scenarios, it may not be accurate to say that the output is owned by the controllers of those public keys鈥搃t would only be correct to say that those public keys are associated with the ability to fulfill the output. Also, depending on the use case, the entity controlling an output via a private key may not be the legal owner of the asset in the corresponding legal domain. However, since we aim to use language that is simple to understand and covers the majority of use cases, we talk in terms of *owners* of an output that have the ability to *spend* that output. In the most basic case, an output may define a **simple signature condition**, which gives control of the output to the entity controlling a corresponding private key. @@ -24,7 +24,7 @@ The (single) output of a threshold condition can be used as one of the inputs of When one creates a condition, one can calculate its fulfillment length (e.g. 96). 
The more complex the condition, the larger its fulfillment length will be. A BigchainDB federation can put an upper limit on the allowed fulfillment length, as a way of capping the complexity of conditions (and the computing time required to validate them). -If someone tries to make a condition where the output of a threshold condition feeds into the input of another “earlier” threshold condition (i.e. in a closed logical circuit), then their computer will take forever to calculate the (infinite) “condition URI”, at least in theory. In practice, their computer will run out of memory or their client software will timeout after a while. +If someone tries to make a condition where the output of a threshold condition feeds into the input of another 鈥渆arlier鈥� threshold condition (i.e. in a closed logical circuit), then their computer will take forever to calculate the (infinite) 鈥渃ondition URI鈥�, at least in theory. In practice, their computer will run out of memory or their client software will timeout after a while. Outputs ------- @@ -127,3 +127,24 @@ If there is only one *current owner*, the fulfillment will be a simple signature See the reference on :ref:`inputs ` for descriptions of the meaning of each field. + +Multiple Current Owners +````````````````` + +If there are Multiple _current owners_, the fulfillment will be a little different from One Current Owner (Suppose it has two current owners). + +.. code-block:: json + { + "owners_before": ["",""], + "fulfillment": "cf:2:AQIBAgEBYwAEYEv6O5HjHGl7OWo2Tu5mWcWQcL_OGrFuUjyej-dK3LM99TbZsRd8c9luQhU30xCH5AdNaupxg-pLHuk8DoSaDA1MHQGXUZ80a_cV-4UaaaCpdey8K0CEcJxre0X96hTHCwABAWMABGBnsuHExhuSj5Mdm-q0KoPgX4nAt0s00k1WTMCzuUpQIp6aStLoTSMlsvS4fmDtOSv9gubekKLuHTMAk-LQFSKF1JdzwaVWAA2UOv0v_OS2gY3A-r0kRq8HtzjYdcmVswUA", + "input": { + "cid": 0, + "txid": "e4805f1bfc999d6409b38e3a4c3b2fafad7c1280eb0d441da7083e945dd89eb8" + } + } + +- ``owners_before``: A list of public keys of the owners before the transaction; in this case it has two owners, hence two public keys. +- ``fulfillment``: A crypto-conditions URI that encodes the cryptographic fulfillments like signatures and others;'cf' indicates this is a fulfillment, '2' indicates the condition type is THRESHOLD-SHA-256 (while '4' in One Current Owner indicates its condition type is ED25519). +- ``input``: Pointer to the asset and condition of a previous transaction + - ``cid``: Condition index - the index of the condition in the array of conditions in the previous transaction + - ``txid``: Transaction id \ No newline at end of file From 4de0bb4c8c26552d949818ce1ed657d16ca55b77 Mon Sep 17 00:00:00 2001 From: utarl Date: Wed, 4 Jan 2017 10:11:00 +0800 Subject: [PATCH 03/34] change messy code --- docs/server/source/data-models/inputs-outputs.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/server/source/data-models/inputs-outputs.rst b/docs/server/source/data-models/inputs-outputs.rst index d067fd1f..9fce5672 100644 --- a/docs/server/source/data-models/inputs-outputs.rst +++ b/docs/server/source/data-models/inputs-outputs.rst @@ -7,7 +7,7 @@ Amounts of an asset are encoded in the outputs of a transaction, and each output .. note:: - This document (and various places in the BigchainDB documentation and code) talks about control of an asset in terms of *owners* and *ownership*. 
The language is chosen to represent the most common use cases, but in some more complex scenarios, it may not be accurate to say that the output is owned by the controllers of those public keys鈥搃t would only be correct to say that those public keys are associated with the ability to fulfill the output. Also, depending on the use case, the entity controlling an output via a private key may not be the legal owner of the asset in the corresponding legal domain. However, since we aim to use language that is simple to understand and covers the majority of use cases, we talk in terms of *owners* of an output that have the ability to *spend* that output. + This document (and various places in the BigchainDB documentation and code) talks about control of an asset in terms of *owners* and *ownership*. The language is chosen to represent the most common use cases, but in some more complex scenarios, it may not be accurate to say that the output is owned by the controllers of those public keys-it would only be correct to say that those public keys are associated with the ability to fulfill the output. Also, depending on the use case, the entity controlling an output via a private key may not be the legal owner of the asset in the corresponding legal domain. However, since we aim to use language that is simple to understand and covers the majority of use cases, we talk in terms of *owners* of an output that have the ability to *spend* that output. In the most basic case, an output may define a **simple signature condition**, which gives control of the output to the entity controlling a corresponding private key. @@ -24,7 +24,7 @@ The (single) output of a threshold condition can be used as one of the inputs of When one creates a condition, one can calculate its fulfillment length (e.g. 96). The more complex the condition, the larger its fulfillment length will be. A BigchainDB federation can put an upper limit on the allowed fulfillment length, as a way of capping the complexity of conditions (and the computing time required to validate them). -If someone tries to make a condition where the output of a threshold condition feeds into the input of another 鈥渆arlier鈥� threshold condition (i.e. in a closed logical circuit), then their computer will take forever to calculate the (infinite) 鈥渃ondition URI鈥�, at least in theory. In practice, their computer will run out of memory or their client software will timeout after a while. +If someone tries to make a condition where the output of a threshold condition feeds into the input of another "earlier" threshold condition (i.e. in a closed logical circuit), then their computer will take forever to calculate the (infinite) "condition URI", at least in theory. In practice, their computer will run out of memory or their client software will timeout after a while. Outputs ------- From c0d812f0d2a27aa3bc3d7558c6a905cec6fae135 Mon Sep 17 00:00:00 2001 From: utarl Date: Wed, 4 Jan 2017 10:18:06 +0800 Subject: [PATCH 04/34] change messy code --- docs/server/source/data-models/inputs-outputs.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/server/source/data-models/inputs-outputs.rst b/docs/server/source/data-models/inputs-outputs.rst index 9fce5672..bd2cfee3 100644 --- a/docs/server/source/data-models/inputs-outputs.rst +++ b/docs/server/source/data-models/inputs-outputs.rst @@ -7,7 +7,7 @@ Amounts of an asset are encoded in the outputs of a transaction, and each output .. 
note:: - This document (and various places in the BigchainDB documentation and code) talks about control of an asset in terms of *owners* and *ownership*. The language is chosen to represent the most common use cases, but in some more complex scenarios, it may not be accurate to say that the output is owned by the controllers of those public keys-it would only be correct to say that those public keys are associated with the ability to fulfill the output. Also, depending on the use case, the entity controlling an output via a private key may not be the legal owner of the asset in the corresponding legal domain. However, since we aim to use language that is simple to understand and covers the majority of use cases, we talk in terms of *owners* of an output that have the ability to *spend* that output. + This document (and various places in the BigchainDB documentation and code) talks about control of an asset in terms of *owners* and *ownership*. The language is chosen to represent the most common use cases, but in some more complex scenarios, it may not be accurate to say that the output is owned by the controllers of those public keysCit would only be correct to say that those public keys are associated with the ability to fulfill the output. Also, depending on the use case, the entity controlling an output via a private key may not be the legal owner of the asset in the corresponding legal domain. However, since we aim to use language that is simple to understand and covers the majority of use cases, we talk in terms of *owners* of an output that have the ability to *spend* that output. In the most basic case, an output may define a **simple signature condition**, which gives control of the output to the entity controlling a corresponding private key. @@ -24,7 +24,7 @@ The (single) output of a threshold condition can be used as one of the inputs of When one creates a condition, one can calculate its fulfillment length (e.g. 96). The more complex the condition, the larger its fulfillment length will be. A BigchainDB federation can put an upper limit on the allowed fulfillment length, as a way of capping the complexity of conditions (and the computing time required to validate them). -If someone tries to make a condition where the output of a threshold condition feeds into the input of another "earlier" threshold condition (i.e. in a closed logical circuit), then their computer will take forever to calculate the (infinite) "condition URI", at least in theory. In practice, their computer will run out of memory or their client software will timeout after a while. +If someone tries to make a condition where the output of a threshold condition feeds into the input of another earlier threshold condition (i.e. in a closed logical circuit), then their computer will take forever to calculate the (infinite) condition URI, at least in theory. In practice, their computer will run out of memory or their client software will timeout after a while. 
Outputs ------- From b8ae2132aae82549821adc140446ca3db7326dee Mon Sep 17 00:00:00 2001 From: utarl Date: Wed, 4 Jan 2017 10:21:43 +0800 Subject: [PATCH 05/34] change messy code --- docs/server/source/data-models/inputs-outputs.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/server/source/data-models/inputs-outputs.rst b/docs/server/source/data-models/inputs-outputs.rst index bd2cfee3..3c2958cf 100644 --- a/docs/server/source/data-models/inputs-outputs.rst +++ b/docs/server/source/data-models/inputs-outputs.rst @@ -7,7 +7,7 @@ Amounts of an asset are encoded in the outputs of a transaction, and each output .. note:: - This document (and various places in the BigchainDB documentation and code) talks about control of an asset in terms of *owners* and *ownership*. The language is chosen to represent the most common use cases, but in some more complex scenarios, it may not be accurate to say that the output is owned by the controllers of those public keysCit would only be correct to say that those public keys are associated with the ability to fulfill the output. Also, depending on the use case, the entity controlling an output via a private key may not be the legal owner of the asset in the corresponding legal domain. However, since we aim to use language that is simple to understand and covers the majority of use cases, we talk in terms of *owners* of an output that have the ability to *spend* that output. + This document (and various places in the BigchainDB documentation and code) talks about control of an asset in terms of *owners* and *ownership*. The language is chosen to represent the most common use cases, but in some more complex scenarios, it may not be accurate to say that the output is owned by the controllers of those public keys–it would only be correct to say that those public keys are associated with the ability to fulfill the output. Also, depending on the use case, the entity controlling an output via a private key may not be the legal owner of the asset in the corresponding legal domain. However, since we aim to use language that is simple to understand and covers the majority of use cases, we talk in terms of *owners* of an output that have the ability to *spend* that output. In the most basic case, an output may define a **simple signature condition**, which gives control of the output to the entity controlling a corresponding private key. @@ -24,7 +24,7 @@ The (single) output of a threshold condition can be used as one of the inputs of When one creates a condition, one can calculate its fulfillment length (e.g. 96). The more complex the condition, the larger its fulfillment length will be. A BigchainDB federation can put an upper limit on the allowed fulfillment length, as a way of capping the complexity of conditions (and the computing time required to validate them). -If someone tries to make a condition where the output of a threshold condition feeds into the input of another earlier threshold condition (i.e. in a closed logical circuit), then their computer will take forever to calculate the (infinite) condition URI, at least in theory. In practice, their computer will run out of memory or their client software will timeout after a while. +If someone tries to make a condition where the output of a threshold condition feeds into the input of another “earlier” threshold condition (i.e. in a closed logical circuit), then their computer will take forever to calculate the (infinite) “condition URI”, at least in theory. 
In practice, their computer will run out of memory or their client software will timeout after a while. Outputs ------- From 0c53a4940527b2911ae91167e7a8b37bdd150402 Mon Sep 17 00:00:00 2001 From: utarl Date: Wed, 4 Jan 2017 10:36:10 +0800 Subject: [PATCH 06/34] change messy code --- docs/server/source/data-models/inputs-outputs.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/server/source/data-models/inputs-outputs.rst b/docs/server/source/data-models/inputs-outputs.rst index 3c2958cf..a429cf71 100644 --- a/docs/server/source/data-models/inputs-outputs.rst +++ b/docs/server/source/data-models/inputs-outputs.rst @@ -129,7 +129,7 @@ If there is only one *current owner*, the fulfillment will be a simple signature See the reference on :ref:`inputs ` for descriptions of the meaning of each field. Multiple Current Owners -````````````````` +``````````````````````` If there are Multiple _current owners_, the fulfillment will be a little different from One Current Owner (Suppose it has two current owners). From 4266499263a88d7ea2e4bcef383e81c252988e75 Mon Sep 17 00:00:00 2001 From: utarl Date: Wed, 4 Jan 2017 10:44:30 +0800 Subject: [PATCH 07/34] change messy code --- .../source/data-models/inputs-outputs.rst | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/docs/server/source/data-models/inputs-outputs.rst b/docs/server/source/data-models/inputs-outputs.rst index a429cf71..55babf9b 100644 --- a/docs/server/source/data-models/inputs-outputs.rst +++ b/docs/server/source/data-models/inputs-outputs.rst @@ -134,14 +134,16 @@ Multiple Current Owners If there are Multiple _current owners_, the fulfillment will be a little different from One Current Owner (Suppose it has two current owners). .. code-block:: json - { - "owners_before": ["",""], - "fulfillment": "cf:2:AQIBAgEBYwAEYEv6O5HjHGl7OWo2Tu5mWcWQcL_OGrFuUjyej-dK3LM99TbZsRd8c9luQhU30xCH5AdNaupxg-pLHuk8DoSaDA1MHQGXUZ80a_cV-4UaaaCpdey8K0CEcJxre0X96hTHCwABAWMABGBnsuHExhuSj5Mdm-q0KoPgX4nAt0s00k1WTMCzuUpQIp6aStLoTSMlsvS4fmDtOSv9gubekKLuHTMAk-LQFSKF1JdzwaVWAA2UOv0v_OS2gY3A-r0kRq8HtzjYdcmVswUA", - "input": { - "cid": 0, - "txid": "e4805f1bfc999d6409b38e3a4c3b2fafad7c1280eb0d441da7083e945dd89eb8" - } - } + + { + "owners_before": ["",""], + "fulfillment": "cf:2:AQIBAgEBYwAEYEv6O5HjHGl7OWo2Tu5mWcWQcL_OGrFuUjyej-dK3LM99TbZsRd8c9luQhU30xCH5AdNaupxg-pLHuk8DoSaDA1MHQGXUZ80a_cV-4UaaaCpdey8K0CEcJxre0X96hTHCwABAWMABGBnsuHExhuSj5Mdm-q0KoPgX4nAt0s00k1WTMCzuUpQIp6aStLoTSMlsvS4fmDtOSv9gubekKLuHTMAk-LQFSKF1JdzwaVWAA2UOv0v_OS2gY3A-r0kRq8HtzjYdcmVswUA", + "input": { + "cid": 0, + "txid": "e4805f1bfc999d6409b38e3a4c3b2fafad7c1280eb0d441da7083e945dd89eb8" + } + } + - ``owners_before``: A list of public keys of the owners before the transaction; in this case it has two owners, hence two public keys. - ``fulfillment``: A crypto-conditions URI that encodes the cryptographic fulfillments like signatures and others;'cf' indicates this is a fulfillment, '2' indicates the condition type is THRESHOLD-SHA-256 (while '4' in One Current Owner indicates its condition type is ED25519). 
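
To make the "cf:2:..." fulfillment shown in the patched "Multiple Current Owners" docs above more concrete: a THRESHOLD-SHA-256 condition over two ED25519 subconditions is satisfied only when at least the threshold number of the listed owners_before have produced valid signatures over the transaction payload. The sketch below illustrates only that validation rule, not the actual crypto-conditions URI encoding or any BigchainDB API; it uses the third-party `cryptography` package, and every variable, key, and helper name here is hypothetical, chosen for the example.

    """Illustration of the 2-of-2 threshold rule (not the real crypto-conditions encoding)."""
    from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
    from cryptography.exceptions import InvalidSignature


    def count_valid_signatures(message, signatures, public_keys):
        """Count how many (signature, public_key) pairs verify over ``message``."""
        valid = 0
        for sig, pub in zip(signatures, public_keys):
            try:
                pub.verify(sig, message)
                valid += 1
            except InvalidSignature:
                pass
        return valid


    # two hypothetical current owners, each with an Ed25519 keypair
    owner1, owner2 = Ed25519PrivateKey.generate(), Ed25519PrivateKey.generate()
    payload = b'serialized transaction body'

    signatures = [owner1.sign(payload), owner2.sign(payload)]
    public_keys = [owner1.public_key(), owner2.public_key()]

    threshold = 2  # both owners must sign, as in the two-owner example in the docs
    assert count_valid_signatures(payload, signatures, public_keys) >= threshold

In the real format, the two ED25519 subconditions and the threshold node are what get serialized into the condition ("cc:2:...") and fulfillment ("cf:2:...") URIs quoted in the documentation above.
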
From ab8579148c2f77917f7537efab551c35e13c982e Mon Sep 17 00:00:00 2001 From: utarl Date: Wed, 4 Jan 2017 10:49:07 +0800 Subject: [PATCH 08/34] add multiple current owners --- docs/server/source/data-models/inputs-outputs.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/server/source/data-models/inputs-outputs.rst b/docs/server/source/data-models/inputs-outputs.rst index 55babf9b..a4256fa6 100644 --- a/docs/server/source/data-models/inputs-outputs.rst +++ b/docs/server/source/data-models/inputs-outputs.rst @@ -131,7 +131,7 @@ See the reference on :ref:`inputs ` for descriptions of the meaning of ea Multiple Current Owners ``````````````````````` -If there are Multiple _current owners_, the fulfillment will be a little different from One Current Owner (Suppose it has two current owners). +If there are Multiple *current owners*, the fulfillment will be a little different from One Current Owner (Suppose it has two current owners). .. code-block:: json From cadc9add3998cef086cf8bf3711043e48b2ea9a9 Mon Sep 17 00:00:00 2001 From: Scott Sadler Date: Wed, 4 Jan 2017 15:39:10 +0100 Subject: [PATCH 09/34] fixups for 'multiple current owners' section in inputs/outputs docs --- docs/server/source/data-models/inputs-outputs.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/server/source/data-models/inputs-outputs.rst b/docs/server/source/data-models/inputs-outputs.rst index a4256fa6..b195a767 100644 --- a/docs/server/source/data-models/inputs-outputs.rst +++ b/docs/server/source/data-models/inputs-outputs.rst @@ -131,7 +131,7 @@ See the reference on :ref:`inputs ` for descriptions of the meaning of ea Multiple Current Owners ``````````````````````` -If there are Multiple *current owners*, the fulfillment will be a little different from One Current Owner (Suppose it has two current owners). +If there are multiple *current owners*, the fulfillment will be a little different from `One Current Owner`_ (suppose it has two current owners). .. code-block:: json @@ -139,14 +139,14 @@ If there are Multiple *current owners*, the fulfillment will be a little differe "owners_before": ["",""], "fulfillment": "cf:2:AQIBAgEBYwAEYEv6O5HjHGl7OWo2Tu5mWcWQcL_OGrFuUjyej-dK3LM99TbZsRd8c9luQhU30xCH5AdNaupxg-pLHuk8DoSaDA1MHQGXUZ80a_cV-4UaaaCpdey8K0CEcJxre0X96hTHCwABAWMABGBnsuHExhuSj5Mdm-q0KoPgX4nAt0s00k1WTMCzuUpQIp6aStLoTSMlsvS4fmDtOSv9gubekKLuHTMAk-LQFSKF1JdzwaVWAA2UOv0v_OS2gY3A-r0kRq8HtzjYdcmVswUA", "input": { - "cid": 0, + "output": 0, "txid": "e4805f1bfc999d6409b38e3a4c3b2fafad7c1280eb0d441da7083e945dd89eb8" } } - ``owners_before``: A list of public keys of the owners before the transaction; in this case it has two owners, hence two public keys. -- ``fulfillment``: A crypto-conditions URI that encodes the cryptographic fulfillments like signatures and others;'cf' indicates this is a fulfillment, '2' indicates the condition type is THRESHOLD-SHA-256 (while '4' in One Current Owner indicates its condition type is ED25519). -- ``input``: Pointer to the asset and condition of a previous transaction - - ``cid``: Condition index - the index of the condition in the array of conditions in the previous transaction - - ``txid``: Transaction id \ No newline at end of file +- ``fulfillment``: A crypto-conditions URI that encodes the cryptographic fulfillments like signatures and others;'cf' indicates this is a fulfillment, '2' indicates the condition type is THRESHOLD-SHA-256 (while '4' in `One Current Owner`_ indicates its condition type is ED25519). 
+- ``fulfills``: Pointer to an output from a previous transaction that is being spent + - ``output``: The index of the output in a previous transaction + - ``txid``: ID of the transaction From d714b133aa0210b25360017086b5e873c2191c4e Mon Sep 17 00:00:00 2001 From: Scott Sadler Date: Thu, 5 Jan 2017 10:15:35 +0100 Subject: [PATCH 10/34] documentation fixes to inputs-outputs.rst --- docs/server/source/data-models/inputs-outputs.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/server/source/data-models/inputs-outputs.rst b/docs/server/source/data-models/inputs-outputs.rst index b195a767..9f1b5d56 100644 --- a/docs/server/source/data-models/inputs-outputs.rst +++ b/docs/server/source/data-models/inputs-outputs.rst @@ -131,14 +131,14 @@ See the reference on :ref:`inputs ` for descriptions of the meaning of ea Multiple Current Owners ``````````````````````` -If there are multiple *current owners*, the fulfillment will be a little different from `One Current Owner`_ (suppose it has two current owners). +If there are multiple *current owners*, the fulfillment will be a little different from `One Current Owner`_. Suppose it has two current owners. .. code-block:: json { "owners_before": ["",""], "fulfillment": "cf:2:AQIBAgEBYwAEYEv6O5HjHGl7OWo2Tu5mWcWQcL_OGrFuUjyej-dK3LM99TbZsRd8c9luQhU30xCH5AdNaupxg-pLHuk8DoSaDA1MHQGXUZ80a_cV-4UaaaCpdey8K0CEcJxre0X96hTHCwABAWMABGBnsuHExhuSj5Mdm-q0KoPgX4nAt0s00k1WTMCzuUpQIp6aStLoTSMlsvS4fmDtOSv9gubekKLuHTMAk-LQFSKF1JdzwaVWAA2UOv0v_OS2gY3A-r0kRq8HtzjYdcmVswUA", - "input": { + "fulfills": { "output": 0, "txid": "e4805f1bfc999d6409b38e3a4c3b2fafad7c1280eb0d441da7083e945dd89eb8" } From af39e204c97dae443234dfe59d9b20fdb06ecfb8 Mon Sep 17 00:00:00 2001 From: Trent McConaghy Date: Sat, 7 Jan 2017 11:31:51 +0100 Subject: [PATCH 11/34] "ascribe" -> "BigchainDB" --- HOW_TO_HANDLE_PULL_REQUESTS.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/HOW_TO_HANDLE_PULL_REQUESTS.md b/HOW_TO_HANDLE_PULL_REQUESTS.md index 8d501068..4dfbec15 100644 --- a/HOW_TO_HANDLE_PULL_REQUESTS.md +++ b/HOW_TO_HANDLE_PULL_REQUESTS.md @@ -2,9 +2,9 @@ This document is for whoever has the ability to merge pull requests in the Git repositories associated with BigchainDB. -If the pull request is from an employee of ascribe GmbH, then you can ignore this document. +If the pull request is from an employee of BigchainDB GmbH, then you can ignore this document. -If the pull request is from someone who is _not_ an employee of ascribe, then: +If the pull request is from someone who is _not_ an employee of BigchainDB, then: * Have they agreed to the Individual Contributor Agreement in the past? (Troy, Greg, and others have a list.) If yes, then you can merge the PR and ignore the rest of this document. * Do they belong to a company or organization which agreed to the Entity Contributor Agreement in the past, and will they be contributing on behalf of that company or organization? (Troy, Greg, and others have a list.) If yes, then you can merge the PR and ignore the rest of this document. @@ -34,7 +34,7 @@ We will email you (or your employer) with further instructions. (END OF COMMENT) -Once they click SEND, we (ascribe) will get an email with the information in the form. (Troy gets those emails for sure, I'm not sure who else.) 
The next step is to send an email to the email address submitted in the form, saying something like (where the stuff in [square brackets] should be replaced): +Once they click SEND, we (BigchainDB) will get an email with the information in the form. (Troy gets those emails for sure, I'm not sure who else.) The next step is to send an email to the email address submitted in the form, saying something like (where the stuff in [square brackets] should be replaced): Hi [NAME], From 595f7dc701535ad7561f68a517f870f04c418f82 Mon Sep 17 00:00:00 2001 From: Rodolphe Marques Date: Fri, 6 Jan 2017 11:23:21 +0100 Subject: [PATCH 12/34] several fixes to mongodb queries --- bigchaindb/backend/mongodb/query.py | 108 +++++++++++++++++++++++----- tests/pipelines/test_vote.py | 30 ++++---- tests/test_config_utils.py | 5 +- 3 files changed, 108 insertions(+), 35 deletions(-) diff --git a/bigchaindb/backend/mongodb/query.py b/bigchaindb/backend/mongodb/query.py index 8765d110..658eb3ca 100644 --- a/bigchaindb/backend/mongodb/query.py +++ b/bigchaindb/backend/mongodb/query.py @@ -1,6 +1,7 @@ """Query implementation for MongoDB""" from time import time +from itertools import chain from pymongo import ReturnDocument @@ -36,7 +37,8 @@ def delete_transaction(conn, *transaction_id): @register_query(MongoDBConnection) def get_stale_transactions(conn, reassign_delay): return conn.db['backlog']\ - .find({'assignment_timestamp': {'$lt': time() - reassign_delay}}) + .find({'assignment_timestamp': {'$lt': time() - reassign_delay}}, + projection={'_id': False}) @register_query(MongoDBConnection) @@ -58,7 +60,10 @@ def get_transaction_from_block(conn, transaction_id, block_id): @register_query(MongoDBConnection) def get_transaction_from_backlog(conn, transaction_id): - return conn.db['backlog'].find_one({'id': transaction_id}) + return conn.db['backlog']\ + .find_one({'id': transaction_id}, + projection={'_id': False, 'assignee': False, + 'assignment_timestamp': False}) @register_query(MongoDBConnection) @@ -70,33 +75,83 @@ def get_blocks_status_from_transaction(conn, transaction_id): @register_query(MongoDBConnection) def get_txids_by_asset_id(conn, asset_id): - return conn.db['bigchain']\ - .find({'block.transactions.asset.id': asset_id}, - projection=['id']) + # get the txid of the create transaction for asset_id + cursor = conn.db['bigchain'].aggregate([ + {'$match': { + 'block.transactions.id': asset_id, + 'block.transactions.operation': 'CREATE' + }}, + {'$unwind': '$block.transactions'}, + {'$match': { + 'block.transactions.id': asset_id, + 'block.transactions.operation': 'CREATE' + }}, + {'$project': {'block.transactions.id': True}} + ]) + create_tx_txids = (elem['block']['transactions']['id'] for elem in cursor) + + # get txids of transfer transaction with asset_id + cursor = conn.db['bigchain'].aggregate([ + {'$match': { + 'block.transactions.asset.id': asset_id + }}, + {'$unwind': '$block.transactions'}, + {'$match': { + 'block.transactions.asset.id': asset_id + }}, + {'$project': {'block.transactions.id': True}} + ]) + transfer_tx_ids = (elem['block']['transactions']['id'] for elem in cursor) + + return chain(create_tx_txids, transfer_tx_ids) @register_query(MongoDBConnection) def get_asset_by_id(conn, asset_id): - return conn.db['bigchain']\ - .find_one({'block.transactions.asset.id': asset_id, - 'block.transactions.asset.operation': 'CREATE'}, - projection=['block.transactions.asset']) + cursor = conn.db['bigchain'].aggregate([ + {'$match': { + 'block.transactions.id': asset_id, + 
'block.transactions.operation': 'CREATE' + }}, + {'$unwind': '$block.transactions'}, + {'$match': { + 'block.transactions.id': asset_id, + 'block.transactions.operation': 'CREATE' + }}, + {'$project': {'block.transactions.asset': True}} + ]) + # we need to access some nested fields before returning so lets use a + # generator to avoid having to read all records on the cursor at this point + return (elem['block']['transactions'] for elem in cursor) @register_query(MongoDBConnection) def get_spent(conn, transaction_id, condition_id): - return conn.db['bigchain']\ - .find_one({'block.transactions.fulfillments.input.txid': - transaction_id, - 'block.transactions.fulfillments.input.cid': - condition_id}) + cursor = conn.db['bigchain'].aggregate([ + {'$unwind': '$block.transactions'}, + {'$match': { + 'block.transactions.fulfillments.input.txid': transaction_id, + 'block.transactions.fulfillments.input.cid': condition_id + }} + ]) + # we need to access some nested fields before returning so lets use a + # generator to avoid having to read all records on the cursor at this point + return (elem['block']['transactions'] for elem in cursor) @register_query(MongoDBConnection) def get_owned_ids(conn, owner): - return conn.db['bigchain']\ - .find({'block.transactions.transaction.conditions.owners_after': - owner}) + cursor = conn.db['bigchain'].aggregate([ + {'$unwind': '$block.transactions'}, + {'$match': { + 'block.transactions.conditions.owners_after': { + '$elemMatch': {'$eq': owner} + } + }} + ]) + # we need to access some nested fields before returning so lets use a + # generator to avoid having to read all records on the cursor at this point + return (elem['block']['transactions'] for elem in cursor) @register_query(MongoDBConnection) @@ -121,7 +176,8 @@ def write_block(conn, block): @register_query(MongoDBConnection) def get_block(conn, block_id): - return conn.db['bigchain'].find_one({'id': block_id}) + return conn.db['bigchain'].find_one({'id': block_id}, + projection={'_id': False}) @register_query(MongoDBConnection) @@ -184,4 +240,18 @@ def get_last_voted_block(conn, node_pubkey): @register_query(MongoDBConnection) def get_unvoted_blocks(conn, node_pubkey): - pass + return conn.db['bigchain'].aggregate([ + {'$lookup': { + 'from': 'votes', + 'localField': 'id', + 'foreignField': 'vote.voting_for_block', + 'as': 'votes' + }}, + {'$match': { + 'votes.node_pubkey': {'$ne': node_pubkey}, + 'block.transactions.operation': {'$ne': 'GENESIS'} + }}, + {'$project': { + 'votes': False, '_id': False + }} + ]) diff --git a/tests/pipelines/test_vote.py b/tests/pipelines/test_vote.py index 1c46c065..e0b27f50 100644 --- a/tests/pipelines/test_vote.py +++ b/tests/pipelines/test_vote.py @@ -279,7 +279,7 @@ def test_valid_block_voting_with_transfer_transactions(monkeypatch, tx = Transaction.create([b.me], [([test_user_pub], 1)]) tx = tx.sign([b.me_private]) - monkeypatch.setattr('time.time', lambda: 1111111111) + monkeypatch.setattr('time.time', lambda: 1000000000) block = b.create_block([tx]) b.write_block(block) @@ -289,7 +289,7 @@ def test_valid_block_voting_with_transfer_transactions(monkeypatch, asset_id=tx.id) tx2 = tx2.sign([test_user_priv]) - monkeypatch.setattr('time.time', lambda: 2222222222) + monkeypatch.setattr('time.time', lambda: 2000000000) block2 = b.create_block([tx2]) b.write_block(block2) @@ -314,7 +314,7 @@ def test_valid_block_voting_with_transfer_transactions(monkeypatch, 'previous_block': genesis_block.id, 'is_block_valid': True, 'invalid_reason': None, - 'timestamp': '2222222222'} + 
'timestamp': '2000000000'} serialized_vote = utils.serialize(vote_doc['vote']).encode() assert vote_doc['node_pubkey'] == b.me @@ -328,7 +328,7 @@ def test_valid_block_voting_with_transfer_transactions(monkeypatch, 'previous_block': block.id, 'is_block_valid': True, 'invalid_reason': None, - 'timestamp': '2222222222'} + 'timestamp': '2000000000'} serialized_vote2 = utils.serialize(vote2_doc['vote']).encode() assert vote2_doc['node_pubkey'] == b.me @@ -498,15 +498,15 @@ def test_voter_considers_unvoted_blocks_when_single_node(monkeypatch, b): outpipe = Pipe() - monkeypatch.setattr('time.time', lambda: 1111111111) + monkeypatch.setattr('time.time', lambda: 1000000000) block_ids = [] # insert blocks in the database while the voter process is not listening # (these blocks won't appear in the changefeed) - monkeypatch.setattr('time.time', lambda: 2222222222) + monkeypatch.setattr('time.time', lambda: 1000000020) block_1 = dummy_block(b) block_ids.append(block_1.id) - monkeypatch.setattr('time.time', lambda: 3333333333) + monkeypatch.setattr('time.time', lambda: 1000000030) b.write_block(block_1) block_2 = dummy_block(b) block_ids.append(block_2.id) @@ -522,7 +522,7 @@ def test_voter_considers_unvoted_blocks_when_single_node(monkeypatch, b): outpipe.get() # create a new block that will appear in the changefeed - monkeypatch.setattr('time.time', lambda: 4444444444) + monkeypatch.setattr('time.time', lambda: 1000000040) block_3 = dummy_block(b) block_ids.append(block_3.id) b.write_block(block_3) @@ -546,15 +546,15 @@ def test_voter_chains_blocks_with_the_previous_ones(monkeypatch, b): outpipe = Pipe() - monkeypatch.setattr('time.time', lambda: 1111111111) + monkeypatch.setattr('time.time', lambda: 1000000000) block_ids = [] - monkeypatch.setattr('time.time', lambda: 2222222222) + monkeypatch.setattr('time.time', lambda: 1000000020) block_1 = dummy_block(b) block_ids.append(block_1.id) b.write_block(block_1) - monkeypatch.setattr('time.time', lambda: 3333333333) + monkeypatch.setattr('time.time', lambda: 1000000030) block_2 = dummy_block(b) block_ids.append(block_2.id) b.write_block(block_2) @@ -588,9 +588,9 @@ def test_voter_checks_for_previous_vote(monkeypatch, b): inpipe = Pipe() outpipe = Pipe() - monkeypatch.setattr('time.time', lambda: 1111111111) + monkeypatch.setattr('time.time', lambda: 1000000000) - monkeypatch.setattr('time.time', lambda: 2222222222) + monkeypatch.setattr('time.time', lambda: 1000000020) block_1 = dummy_block(b) inpipe.put(block_1.to_dict()) assert len(list(query.get_votes_by_block_id(b.connection, block_1.id))) == 0 @@ -603,11 +603,11 @@ def test_voter_checks_for_previous_vote(monkeypatch, b): outpipe.get() # queue block for voting AGAIN - monkeypatch.setattr('time.time', lambda: 3333333333) + monkeypatch.setattr('time.time', lambda: 1000000030) inpipe.put(block_1.to_dict()) # queue another block - monkeypatch.setattr('time.time', lambda: 4444444444) + monkeypatch.setattr('time.time', lambda: 1000000040) block_2 = dummy_block(b) inpipe.put(block_2.to_dict()) diff --git a/tests/test_config_utils.py b/tests/test_config_utils.py index 2a326147..e190f622 100644 --- a/tests/test_config_utils.py +++ b/tests/test_config_utils.py @@ -102,7 +102,10 @@ def test_env_config(monkeypatch): def test_autoconfigure_read_both_from_file_and_env(monkeypatch, request): file_config = { - 'database': {'host': 'test-host'}, + 'database': { + 'host': 'test-host', + 'backend': request.config.getoption('--database-backend') + }, 'backlog_reassign_delay': 5 } 
monkeypatch.setattr('bigchaindb.config_utils.file_config', lambda *args, **kwargs: file_config) From 7f05974f0f9da311baae73a05c2a40ae1f84b9fd Mon Sep 17 00:00:00 2001 From: Rodolphe Marques Date: Fri, 6 Jan 2017 15:30:25 +0100 Subject: [PATCH 13/34] Added a secondary index with uniqueness constraint in backlog. Several test fixes --- bigchaindb/backend/mongodb/changefeed.py | 6 ++--- bigchaindb/backend/mongodb/query.py | 6 ++++- bigchaindb/backend/mongodb/schema.py | 6 +++++ tests/backend/mongodb/test_changefeed.py | 1 + tests/backend/mongodb/test_schema.py | 11 +++++++-- .../{ => rethinkdb}/test_changefeed.py | 0 tests/db/test_bigchain_api.py | 23 +++++++++---------- 7 files changed, 35 insertions(+), 18 deletions(-) create mode 100644 tests/backend/mongodb/test_changefeed.py rename tests/backend/{ => rethinkdb}/test_changefeed.py (100%) diff --git a/bigchaindb/backend/mongodb/changefeed.py b/bigchaindb/backend/mongodb/changefeed.py index d52927b9..05ae7150 100644 --- a/bigchaindb/backend/mongodb/changefeed.py +++ b/bigchaindb/backend/mongodb/changefeed.py @@ -67,10 +67,10 @@ class MongoDBChangeFeed(ChangeFeed): # See https://github.com/bigchaindb/bigchaindb/issues/992 if is_insert and (self.operation & ChangeFeed.INSERT): record['o'].pop('_id', None) - doc = record['o'] + self.outqueue.put(record['o']) elif is_delete and (self.operation & ChangeFeed.DELETE): # on delete it only returns the id of the document - doc = record['o'] + self.outqueue.put(record['o']) elif is_update and (self.operation & ChangeFeed.UPDATE): # the oplog entry for updates only returns the update # operations to apply to the document and not the @@ -78,7 +78,7 @@ class MongoDBChangeFeed(ChangeFeed): # and then return it. doc = self.connection.conn[dbname][table]\ .find_one(record['o2'], projection={'_id': False}) - self.outqueue.put(doc) + self.outqueue.put(doc) @register_changefeed(MongoDBConnection) diff --git a/bigchaindb/backend/mongodb/query.py b/bigchaindb/backend/mongodb/query.py index 658eb3ca..fdccda68 100644 --- a/bigchaindb/backend/mongodb/query.py +++ b/bigchaindb/backend/mongodb/query.py @@ -4,6 +4,7 @@ from time import time from itertools import chain from pymongo import ReturnDocument +from pymongo import errors from bigchaindb import backend from bigchaindb.common.exceptions import CyclicBlockchainError @@ -16,7 +17,10 @@ register_query = module_dispatch_registrar(backend.query) @register_query(MongoDBConnection) def write_transaction(conn, signed_transaction): - return conn.db['backlog'].insert_one(signed_transaction) + try: + return conn.db['backlog'].insert_one(signed_transaction) + except errors.DuplicateKeyError: + return @register_query(MongoDBConnection) diff --git a/bigchaindb/backend/mongodb/schema.py b/bigchaindb/backend/mongodb/schema.py index fed2d1e4..50674b12 100644 --- a/bigchaindb/backend/mongodb/schema.py +++ b/bigchaindb/backend/mongodb/schema.py @@ -72,6 +72,12 @@ def create_bigchain_secondary_index(conn, dbname): def create_backlog_secondary_index(conn, dbname): logger.info('Create `backlog` secondary index.') + # secondary index on the transaction id with a uniqueness constraint + # to make sure there are no duplicated transactions in the backlog + conn.conn[dbname]['backlog'].create_index('id', + name='transaction_id', + unique=True) + # compound index to read transactions from the backlog per assignee conn.conn[dbname]['backlog']\ .create_index([('assignee', ASCENDING), diff --git a/tests/backend/mongodb/test_changefeed.py b/tests/backend/mongodb/test_changefeed.py new file 
mode 100644 index 00000000..90179ab8 --- /dev/null +++ b/tests/backend/mongodb/test_changefeed.py @@ -0,0 +1 @@ +"""MongoDB changefeed tests""" diff --git a/tests/backend/mongodb/test_schema.py b/tests/backend/mongodb/test_schema.py index 033d4113..8a97ddd8 100644 --- a/tests/backend/mongodb/test_schema.py +++ b/tests/backend/mongodb/test_schema.py @@ -25,7 +25,8 @@ def test_init_creates_db_tables_and_indexes(): 'transaction_id'] indexes = conn.conn[dbname]['backlog'].index_information().keys() - assert sorted(indexes) == ['_id_', 'assignee__transaction_timestamp'] + assert sorted(indexes) == ['_id_', 'assignee__transaction_timestamp', + 'transaction_id'] indexes = conn.conn[dbname]['votes'].index_information().keys() assert sorted(indexes) == ['_id_', 'block_and_voter'] @@ -85,13 +86,19 @@ def test_create_secondary_indexes(): # Backlog table indexes = conn.conn[dbname]['backlog'].index_information().keys() - assert sorted(indexes) == ['_id_', 'assignee__transaction_timestamp'] + assert sorted(indexes) == ['_id_', 'assignee__transaction_timestamp', + 'transaction_id'] # Votes table indexes = conn.conn[dbname]['votes'].index_information().keys() assert sorted(indexes) == ['_id_', 'block_and_voter'] +# The database is set up with a session scope. +# If we run this test we will remove secondary indexes that are nedeed for +# the rest of the tests +@pytest.mark.skipif(reason='This will remove the secondary indexes needed' + ' for the rest of the tests') def test_drop(): import bigchaindb from bigchaindb import backend diff --git a/tests/backend/test_changefeed.py b/tests/backend/rethinkdb/test_changefeed.py similarity index 100% rename from tests/backend/test_changefeed.py rename to tests/backend/rethinkdb/test_changefeed.py diff --git a/tests/db/test_bigchain_api.py b/tests/db/test_bigchain_api.py index 2d4c48e5..241c12a0 100644 --- a/tests/db/test_bigchain_api.py +++ b/tests/db/test_bigchain_api.py @@ -97,18 +97,18 @@ class TestBigchainApi(object): tx = Transaction.create([b.me], [([b.me], 1)]) tx = tx.sign([b.me_private]) - monkeypatch.setattr('time.time', lambda: 1) + monkeypatch.setattr('time.time', lambda: 1000000000) block1 = b.create_block([tx]) b.write_block(block1) - monkeypatch.setattr('time.time', lambda: 2) + monkeypatch.setattr('time.time', lambda: 1000000020) transfer_tx = Transaction.transfer(tx.to_inputs(), [([b.me], 1)], asset_id=tx.id) transfer_tx = transfer_tx.sign([b.me_private]) block2 = b.create_block([transfer_tx]) b.write_block(block2) - monkeypatch.setattr('time.time', lambda: 3333333333) + monkeypatch.setattr('time.time', lambda: 1000000030) transfer_tx2 = Transaction.transfer(tx.to_inputs(), [([b.me], 1)], asset_id=tx.id) transfer_tx2 = transfer_tx2.sign([b.me_private]) @@ -132,11 +132,11 @@ class TestBigchainApi(object): tx = Transaction.create([b.me], [([b.me], 1)]) tx = tx.sign([b.me_private]) - monkeypatch.setattr('time.time', lambda: 1) + monkeypatch.setattr('time.time', lambda: 1000000000) block1 = b.create_block([tx]) b.write_block(block1) - monkeypatch.setattr('time.time', lambda: 2222222222) + monkeypatch.setattr('time.time', lambda: 1000000020) block2 = b.create_block([tx]) b.write_block(block2) @@ -160,7 +160,7 @@ class TestBigchainApi(object): block1 = b.create_block([tx1]) b.write_block(block1) - monkeypatch.setattr('time.time', lambda: 2000000000) + monkeypatch.setattr('time.time', lambda: 1000000020) tx2 = Transaction.create([b.me], [([b.me], 1)], metadata={'msg': random.random()}) tx2 = tx2.sign([b.me_private]) @@ -180,6 +180,7 @@ class 
TestBigchainApi(object): @pytest.mark.usefixtures('inputs') def test_write_transaction(self, b, user_pk, user_sk): + from bigchaindb import Bigchain from bigchaindb.models import Transaction input_tx = b.get_owned_ids(user_pk).pop() @@ -190,12 +191,10 @@ class TestBigchainApi(object): tx = tx.sign([user_sk]) response = b.write_transaction(tx) - assert response['skipped'] == 0 - assert response['deleted'] == 0 - assert response['unchanged'] == 0 - assert response['errors'] == 0 - assert response['replaced'] == 0 - assert response['inserted'] == 1 + tx_from_db, status = b.get_transaction(tx.id, include_status=True) + + assert tx_from_db.to_dict() == tx.to_dict() + assert status == Bigchain.TX_IN_BACKLOG @pytest.mark.usefixtures('inputs') def test_read_transaction(self, b, user_pk, user_sk): From 8926178e529262ee46e3af4a58dd77e365ec5903 Mon Sep 17 00:00:00 2001 From: Rodolphe Marques Date: Mon, 9 Jan 2017 13:32:17 +0100 Subject: [PATCH 14/34] initialize replica set for tests --- tests/conftest.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 9a6b7a73..88747b03 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -136,11 +136,18 @@ def _configure_bigchaindb(request): def _setup_database(_configure_bigchaindb): from bigchaindb import config from bigchaindb.backend import connect, schema + from bigchaindb.backend.mongodb.schema import initialize_replica_set from bigchaindb.common.exceptions import DatabaseDoesNotExist print('Initializing test db') dbname = config['database']['name'] conn = connect() + # if we are setting up mongodb for the first time we need to make sure + # that the replica set is initialized before doing any operation in the + # database + if config['database']['backend'] == 'mongodb': + initialize_replica_set(conn) + try: schema.drop_database(conn, dbname) except DatabaseDoesNotExist: From c6ea345d86d20e016e880b6492bbe31f66641f85 Mon Sep 17 00:00:00 2001 From: Rodolphe Marques Date: Mon, 9 Jan 2017 13:49:55 +0100 Subject: [PATCH 15/34] Updated mongodb queries to work with new transaction model. 
--- bigchaindb/backend/mongodb/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bigchaindb/backend/mongodb/query.py b/bigchaindb/backend/mongodb/query.py index fdccda68..4cb82ef6 100644 --- a/bigchaindb/backend/mongodb/query.py +++ b/bigchaindb/backend/mongodb/query.py @@ -130,12 +130,12 @@ def get_asset_by_id(conn, asset_id): @register_query(MongoDBConnection) -def get_spent(conn, transaction_id, condition_id): +def get_spent(conn, transaction_id, output): cursor = conn.db['bigchain'].aggregate([ {'$unwind': '$block.transactions'}, {'$match': { - 'block.transactions.fulfillments.input.txid': transaction_id, - 'block.transactions.fulfillments.input.cid': condition_id + 'block.transactions.inputs.fulfills.txid': transaction_id, + 'block.transactions.inputs.fulfills.output': output }} ]) # we need to access some nested fields before returning so lets use a @@ -148,7 +148,7 @@ def get_owned_ids(conn, owner): cursor = conn.db['bigchain'].aggregate([ {'$unwind': '$block.transactions'}, {'$match': { - 'block.transactions.conditions.owners_after': { + 'block.transactions.outputs.public_keys': { '$elemMatch': {'$eq': owner} } }} From 9bc658b83387115bb97821604154bfbdbe68193f Mon Sep 17 00:00:00 2001 From: Rodolphe Marques Date: Mon, 9 Jan 2017 14:03:57 +0100 Subject: [PATCH 16/34] fix travis mongodb integration --- .ci/travis-before-script.sh | 7 +++++-- .ci/travis_script.sh | 2 ++ .travis.yml | 3 ++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/.ci/travis-before-script.sh b/.ci/travis-before-script.sh index c6c5fe2e..5f4e0bae 100755 --- a/.ci/travis-before-script.sh +++ b/.ci/travis-before-script.sh @@ -4,6 +4,9 @@ set -e -x if [[ "${TOXENV}" == *-rdb ]]; then rethinkdb --daemon -elif [[ "${TOXENV}" == *-mdb ]]; then - sudo service mongod start +elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb ]]; then + wget http://downloads.mongodb.org/linux/mongodb-linux-x86_64-3.4.1.tgz -O /tmp/mongodb.tgz + tar -xvf /tmp/mongodb.tgz + mkdir /tmp/mongodb-data + ${PWD}/mongodb-linux-x86_64-3.4.1/bin/mongod --dbpath=/tmp/mongodb-data --replSet=rs0 & fi diff --git a/.ci/travis_script.sh b/.ci/travis_script.sh index ad5a2ddc..25d0da30 100755 --- a/.ci/travis_script.sh +++ b/.ci/travis_script.sh @@ -4,6 +4,8 @@ set -e -x if [[ -n ${TOXENV} ]]; then tox -e ${TOXENV} +elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb ]]; then + pytest -vs --database-backend=mongodb --cov=bigchaindb else pytest -v -n auto --cov=bigchaindb fi diff --git a/.travis.yml b/.travis.yml index 5704b3a6..719d6f26 100644 --- a/.travis.yml +++ b/.travis.yml @@ -27,7 +27,6 @@ matrix: rethinkdb: '2.3.5' env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb - python: 3.5 - services: mongodb env: BIGCHAINDB_DATABASE_BACKEND=mongodb - python: 3.5 addons: @@ -38,6 +37,8 @@ before_install: sudo .ci/travis-before-install.sh install: .ci/travis-install.sh +before_script: .ci/travis-before-script.sh + script: .ci/travis_script.sh after_success: .ci/travis-after-success.sh From abf77c04eea86b183890838b07b55af2c5486475 Mon Sep 17 00:00:00 2001 From: Rodolphe Marques Date: Mon, 9 Jan 2017 14:10:49 +0100 Subject: [PATCH 17/34] Omit mongod log messages. 
MongoDB failures no longer allowed --- .ci/travis-before-script.sh | 2 +- .ci/travis_script.sh | 2 +- .travis.yml | 2 -- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.ci/travis-before-script.sh b/.ci/travis-before-script.sh index 5f4e0bae..06319d47 100755 --- a/.ci/travis-before-script.sh +++ b/.ci/travis-before-script.sh @@ -8,5 +8,5 @@ elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb ]]; then wget http://downloads.mongodb.org/linux/mongodb-linux-x86_64-3.4.1.tgz -O /tmp/mongodb.tgz tar -xvf /tmp/mongodb.tgz mkdir /tmp/mongodb-data - ${PWD}/mongodb-linux-x86_64-3.4.1/bin/mongod --dbpath=/tmp/mongodb-data --replSet=rs0 & + ${PWD}/mongodb-linux-x86_64-3.4.1/bin/mongod --dbpath=/tmp/mongodb-data --replSet=rs0 &> /dev/null & fi diff --git a/.ci/travis_script.sh b/.ci/travis_script.sh index 25d0da30..8b638906 100755 --- a/.ci/travis_script.sh +++ b/.ci/travis_script.sh @@ -5,7 +5,7 @@ set -e -x if [[ -n ${TOXENV} ]]; then tox -e ${TOXENV} elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb ]]; then - pytest -vs --database-backend=mongodb --cov=bigchaindb + pytest -v --database-backend=mongodb --cov=bigchaindb else pytest -v -n auto --cov=bigchaindb fi diff --git a/.travis.yml b/.travis.yml index 719d6f26..6089260b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,8 +12,6 @@ env: matrix: fast_finish: true - allow_failures: - - env: BIGCHAINDB_DATABASE_BACKEND=mongodb exclude: - python: 3.4 env: TOXENV=flake8 From 24c1f8d8046b7c010aa12a71b5f82314929cf462 Mon Sep 17 00:00:00 2001 From: Rodolphe Marques Date: Mon, 9 Jan 2017 14:15:01 +0100 Subject: [PATCH 18/34] mongodb at the end of travis matrix --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 6089260b..da7ae05f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,12 +24,12 @@ matrix: addons: rethinkdb: '2.3.5' env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb - - python: 3.5 - env: BIGCHAINDB_DATABASE_BACKEND=mongodb - python: 3.5 addons: rethinkdb: '2.3.5' env: BIGCHAINDB_DATABASE_BACKEND=rethinkdb + - python: 3.5 + env: BIGCHAINDB_DATABASE_BACKEND=mongodb before_install: sudo .ci/travis-before-install.sh From f4bbf278156863c421d27c32a5fcde9154be4a1a Mon Sep 17 00:00:00 2001 From: Rodolphe Marques Date: Mon, 9 Jan 2017 16:28:20 +0100 Subject: [PATCH 19/34] fix indentation error --- .ci/travis_script.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/travis_script.sh b/.ci/travis_script.sh index 8b638906..83d1731e 100755 --- a/.ci/travis_script.sh +++ b/.ci/travis_script.sh @@ -5,7 +5,7 @@ set -e -x if [[ -n ${TOXENV} ]]; then tox -e ${TOXENV} elif [[ "${BIGCHAINDB_DATABASE_BACKEND}" == mongodb ]]; then - pytest -v --database-backend=mongodb --cov=bigchaindb + pytest -v --database-backend=mongodb --cov=bigchaindb else pytest -v -n auto --cov=bigchaindb fi From 322d6bde129e28e846e3af4b7824952989670362 Mon Sep 17 00:00:00 2001 From: diminator Date: Tue, 10 Jan 2017 14:45:29 +0100 Subject: [PATCH 20/34] (fix): each import on its own line --- bigchaindb/web/routes.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bigchaindb/web/routes.py b/bigchaindb/web/routes.py index dca7a518..75f2ffac 100644 --- a/bigchaindb/web/routes.py +++ b/bigchaindb/web/routes.py @@ -3,7 +3,8 @@ from flask_restful import Api from bigchaindb.web.views import ( info, statuses, - transactions as tx, unspents, + transactions as tx, + unspents, ) From aa6d4fdcc81f7eaab1bcc9126aab372706538413 Mon Sep 17 00:00:00 2001 From: Rodolphe Marques Date: Tue, 10 Jan 
2017 15:31:16 +0100 Subject: [PATCH 21/34] small fix to test --- tests/backend/mongodb/test_schema.py | 17 ++++------------- tests/conftest.py | 4 ++-- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/tests/backend/mongodb/test_schema.py b/tests/backend/mongodb/test_schema.py index 8a97ddd8..33ddb719 100644 --- a/tests/backend/mongodb/test_schema.py +++ b/tests/backend/mongodb/test_schema.py @@ -94,24 +94,15 @@ def test_create_secondary_indexes(): assert sorted(indexes) == ['_id_', 'block_and_voter'] -# The database is set up with a session scope. -# If we run this test we will remove secondary indexes that are nedeed for -# the rest of the tests -@pytest.mark.skipif(reason='This will remove the secondary indexes needed' - ' for the rest of the tests') -def test_drop(): +def test_drop(dummy_db): import bigchaindb from bigchaindb import backend from bigchaindb.backend import schema conn = backend.connect() - dbname = bigchaindb.config['database']['name'] - - # The db is set up by fixtures - assert dbname in conn.conn.database_names() - - schema.drop_database(conn, dbname) - assert dbname not in conn.conn.database_names() + assert dummy_db in conn.conn.database_names() + schema.drop_database(conn, dummy_db) + assert dummy_db not in conn.conn.database_names() def test_get_replica_set_name_not_enabled(): diff --git a/tests/conftest.py b/tests/conftest.py index 88747b03..8b7c5cb1 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -322,10 +322,10 @@ def dummy_db(request): if xdist_suffix: dbname = '{}_{}'.format(dbname, xdist_suffix) try: - schema.create_database(conn, dbname) + schema.init_database(conn, dbname) except DatabaseAlreadyExists: schema.drop_database(conn, dbname) - schema.create_database(conn, dbname) + schema.init_database(conn, dbname) yield dbname try: schema.drop_database(conn, dbname) From c91eff70e5da551bc7d5f6a59bbc6791eafbe2b3 Mon Sep 17 00:00:00 2001 From: Rodolphe Marques Date: Tue, 10 Jan 2017 16:49:30 +0100 Subject: [PATCH 22/34] Feat/960/test mongodb queries (#1030) * Created tests to mongodb queries Small fix to queries * cleanup pytest markers --- bigchaindb/backend/mongodb/query.py | 35 ++- tests/backend/mongodb/test_queries.py | 367 ++++++++++++++++++++++++++ 2 files changed, 388 insertions(+), 14 deletions(-) create mode 100644 tests/backend/mongodb/test_queries.py diff --git a/bigchaindb/backend/mongodb/query.py b/bigchaindb/backend/mongodb/query.py index 4cb82ef6..745ae731 100644 --- a/bigchaindb/backend/mongodb/query.py +++ b/bigchaindb/backend/mongodb/query.py @@ -47,19 +47,25 @@ def get_stale_transactions(conn, reassign_delay): @register_query(MongoDBConnection) def get_transaction_from_block(conn, transaction_id, block_id): - return conn.db['bigchain'].aggregate([ - {'$match': {'id': block_id}}, - {'$project': { - 'block.transactions': { - '$filter': { - 'input': '$block.transactions', - 'as': 'transaction', - 'cond': { - '$eq': ['$$transaction.id', transaction_id] + try: + return conn.db['bigchain'].aggregate([ + {'$match': {'id': block_id}}, + {'$project': { + 'block.transactions': { + '$filter': { + 'input': '$block.transactions', + 'as': 'transaction', + 'cond': { + '$eq': ['$$transaction.id', transaction_id] + } } } - } - }}]).next()['block']['transactions'][0] + }}]).next()['block']['transactions'].pop() + except (StopIteration, IndexError): + # StopIteration is raised if the block was not found + # IndexError is returned if the block is found but no transactions + # match + return @register_query(MongoDBConnection) @@ -207,9 
+213,10 @@ def write_vote(conn, vote): @register_query(MongoDBConnection) def get_genesis_block(conn): - return conn.db['bigchain'].find_one({ - 'block.transactions.0.operation': 'GENESIS' - }) + return conn.db['bigchain'].find_one( + {'block.transactions.0.operation': 'GENESIS'}, + {'_id': False} + ) @register_query(MongoDBConnection) diff --git a/tests/backend/mongodb/test_queries.py b/tests/backend/mongodb/test_queries.py new file mode 100644 index 00000000..5b38e473 --- /dev/null +++ b/tests/backend/mongodb/test_queries.py @@ -0,0 +1,367 @@ +import pytest + +pytestmark = pytest.mark.bdb + + +def test_write_transaction(signed_create_tx): + from bigchaindb.backend import connect, query + conn = connect() + + # write the transaction + query.write_transaction(conn, signed_create_tx.to_dict()) + + # get the transaction + tx_db = conn.db.backlog.find_one({'id': signed_create_tx.id}, + {'_id': False}) + + assert tx_db == signed_create_tx.to_dict() + + +def test_update_transaction(signed_create_tx): + from bigchaindb.backend import connect, query + conn = connect() + + # update_transaction can update any field we want, but lets update the + # same fields that are updated by bigchaindb core. + signed_create_tx = signed_create_tx.to_dict() + signed_create_tx.update({'assignee': 'aaa', 'assignment_timestamp': 10}) + conn.db.backlog.insert_one(signed_create_tx) + + query.update_transaction(conn, signed_create_tx['id'], + {'assignee': 'bbb', 'assignment_timestamp': 20}) + + tx_db = conn.db.backlog.find_one({'id': signed_create_tx['id']}, + {'_id': False}) + + assert tx_db['assignee'] == 'bbb' + assert tx_db['assignment_timestamp'] == 20 + + +def test_delete_transaction(signed_create_tx): + from bigchaindb.backend import connect, query + conn = connect() + + # write_the transaction + result = conn.db.backlog.insert_one(signed_create_tx.to_dict()) + + # delete transaction + query.delete_transaction(conn, signed_create_tx.id) + + tx_db = conn.db.backlog.find_one({'_id': result.inserted_id}) + assert tx_db is None + + +def test_get_stale_transactions(signed_create_tx): + import time + from bigchaindb.backend import connect, query + conn = connect() + + # create two transaction, one of them stale + tx1 = signed_create_tx.to_dict() + tx1.update({'id': 'notstale', 'assignment_timestamp': time.time()}) + tx2 = signed_create_tx.to_dict() + tx2.update({'id': 'stale', 'assignment_timestamp': time.time() - 60}) + + # write the transactions + conn.db.backlog.insert_one(tx1) + conn.db.backlog.insert_one(tx2) + + # get stale transactions + stale_txs = list(query.get_stale_transactions(conn, 30)) + + assert len(stale_txs) == 1 + assert stale_txs[0]['id'] == 'stale' + + +def test_get_transaction_from_block(user_pk): + from bigchaindb.backend import connect, query + from bigchaindb.models import Transaction, Block + conn = connect() + + # create a block with 2 transactions + txs = [ + Transaction.create([user_pk], [([user_pk], 1)]), + Transaction.create([user_pk], [([user_pk], 1)]), + ] + block = Block(transactions=txs) + conn.db.bigchain.insert_one(block.to_dict()) + + tx_db = query.get_transaction_from_block(conn, txs[0].id, block.id) + assert tx_db == txs[0].to_dict() + + assert query.get_transaction_from_block(conn, txs[0].id, 'aaa') is None + assert query.get_transaction_from_block(conn, 'aaa', block.id) is None + + +def test_get_transaction_from_backlog(create_tx): + from bigchaindb.backend import connect, query + conn = connect() + + # insert transaction + conn.db.backlog.insert_one(create_tx.to_dict()) + + # 
query the backlog + tx_db = query.get_transaction_from_backlog(conn, create_tx.id) + + assert tx_db == create_tx.to_dict() + + +def test_get_block_status_from_transaction(create_tx): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block + conn = connect() + + # create a block + block = Block(transactions=[create_tx], voters=['aaa', 'bbb', 'ccc']) + # insert block + conn.db.bigchain.insert_one(block.to_dict()) + + block_db = list(query.get_blocks_status_from_transaction(conn, + create_tx.id)) + + assert len(block_db) == 1 + block_db = block_db.pop() + assert block_db['id'] == block.id + assert block_db['block']['voters'] == block.voters + + +def test_get_txids_by_asset_id(signed_create_tx, signed_transfer_tx): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block + conn = connect() + + # create and insert two blocks, one for the create and one for the + # transfer transaction + block = Block(transactions=[signed_create_tx]) + conn.db.bigchain.insert_one(block.to_dict()) + block = Block(transactions=[signed_transfer_tx]) + conn.db.bigchain.insert_one(block.to_dict()) + + txids = list(query.get_txids_by_asset_id(conn, signed_create_tx.id)) + + assert len(txids) == 2 + assert txids == [signed_create_tx.id, signed_transfer_tx.id] + + +def test_get_asset_by_id(create_tx): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block + conn = connect() + + # create asset and block + create_tx.asset = {'msg': 'aaa'} + block = Block(transactions=[create_tx]) + conn.db.bigchain.insert_one(block.to_dict()) + + asset = list(query.get_asset_by_id(conn, create_tx.id)) + + assert len(asset) == 1 + assert asset[0]['asset'] == create_tx.asset + + +def test_get_spent(signed_create_tx, signed_transfer_tx): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block + conn = connect() + + # create and insert two blocks, one for the create and one for the + # transfer transaction + block = Block(transactions=[signed_create_tx]) + conn.db.bigchain.insert_one(block.to_dict()) + block = Block(transactions=[signed_transfer_tx]) + conn.db.bigchain.insert_one(block.to_dict()) + + spents = list(query.get_spent(conn, signed_create_tx.id, 0)) + + assert len(spents) == 1 + assert spents[0] == signed_transfer_tx.to_dict() + + +def test_get_owned_ids(signed_create_tx, user_pk): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block + conn = connect() + + # create and insert a block + block = Block(transactions=[signed_create_tx]) + conn.db.bigchain.insert_one(block.to_dict()) + + owned_ids = list(query.get_owned_ids(conn, user_pk)) + + assert len(owned_ids) == 1 + assert owned_ids[0] == signed_create_tx.to_dict() + + +def test_get_votes_by_block_id(signed_create_tx, structurally_valid_vote): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block + conn = connect() + + # create and insert a block + block = Block(transactions=[signed_create_tx]) + conn.db.bigchain.insert_one(block.to_dict()) + # create and insert some votes + structurally_valid_vote['vote']['voting_for_block'] = block.id + conn.db.votes.insert_one(structurally_valid_vote) + structurally_valid_vote['vote']['voting_for_block'] = block.id + structurally_valid_vote.pop('_id') + conn.db.votes.insert_one(structurally_valid_vote) + + votes = list(query.get_votes_by_block_id(conn, block.id)) + + assert len(votes) == 2 + assert votes[0]['vote']['voting_for_block'] == block.id + assert 
votes[1]['vote']['voting_for_block'] == block.id + + +def test_get_votes_by_block_id_and_voter(signed_create_tx, + structurally_valid_vote): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block + conn = connect() + + # create and insert a block + block = Block(transactions=[signed_create_tx]) + conn.db.bigchain.insert_one(block.to_dict()) + # create and insert some votes + structurally_valid_vote['vote']['voting_for_block'] = block.id + structurally_valid_vote['node_pubkey'] = 'aaa' + conn.db.votes.insert_one(structurally_valid_vote) + structurally_valid_vote['vote']['voting_for_block'] = block.id + structurally_valid_vote['node_pubkey'] = 'bbb' + structurally_valid_vote.pop('_id') + conn.db.votes.insert_one(structurally_valid_vote) + + votes = list(query.get_votes_by_block_id_and_voter(conn, block.id, 'aaa')) + + assert len(votes) == 1 + assert votes[0]['node_pubkey'] == 'aaa' + + +def test_write_block(signed_create_tx): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block + conn = connect() + + # create and write block + block = Block(transactions=[signed_create_tx]) + query.write_block(conn, block) + + block_db = conn.db.bigchain.find_one({'id': block.id}, {'_id': False}) + + assert block_db == block.to_dict() + + +def test_get_block(signed_create_tx): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block + conn = connect() + + # create and insert block + block = Block(transactions=[signed_create_tx]) + conn.db.bigchain.insert_one(block.to_dict()) + + block_db = query.get_block(conn, block.id) + + assert block_db == block.to_dict() + + +def test_has_transaction(signed_create_tx): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block + conn = connect() + + # create and insert block + block = Block(transactions=[signed_create_tx]) + conn.db.bigchain.insert_one(block.to_dict()) + + assert query.has_transaction(conn, signed_create_tx.id) + assert query.has_transaction(conn, 'aaa') is False + + +def test_count_blocks(signed_create_tx): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block + conn = connect() + + # create and insert some blocks + block = Block(transactions=[signed_create_tx]) + conn.db.bigchain.insert_one(block.to_dict()) + conn.db.bigchain.insert_one(block.to_dict()) + + assert query.count_blocks(conn) == 2 + + +def test_count_backlog(signed_create_tx): + from bigchaindb.backend import connect, query + conn = connect() + + # create and insert some transations + conn.db.backlog.insert_one(signed_create_tx.to_dict()) + signed_create_tx.metadata = {'msg': 'aaa'} + conn.db.backlog.insert_one(signed_create_tx.to_dict()) + + assert query.count_backlog(conn) == 2 + + +def test_write_vote(structurally_valid_vote): + from bigchaindb.backend import connect, query + conn = connect() + + # write a vote + query.write_vote(conn, structurally_valid_vote) + # retrieve the vote + vote_db = conn.db.votes.find_one( + {'node_pubkey': structurally_valid_vote['node_pubkey']} + ) + + assert vote_db == structurally_valid_vote + + +def test_get_genesis_block(genesis_block): + from bigchaindb.backend import connect, query + conn = connect() + + assert query.get_genesis_block(conn) == genesis_block.to_dict() + + +def test_get_last_voted_block(genesis_block, signed_create_tx, b): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block + from bigchaindb.common.exceptions import CyclicBlockchainError + conn = 
connect() + + # check that the last voted block is the genesis block + assert query.get_last_voted_block(conn, b.me) == genesis_block.to_dict() + + # create and insert a new vote and block + block = Block(transactions=[signed_create_tx]) + conn.db.bigchain.insert_one(block.to_dict()) + vote = b.vote(block.id, genesis_block.id, True) + conn.db.votes.insert_one(vote) + + assert query.get_last_voted_block(conn, b.me) == block.to_dict() + + # force a bad chain + vote.pop('_id') + vote['vote']['voting_for_block'] = genesis_block.id + vote['vote']['previous_block'] = block.id + conn.db.votes.insert_one(vote) + + with pytest.raises(CyclicBlockchainError): + query.get_last_voted_block(conn, b.me) + + +def test_get_unvoted_blocks(signed_create_tx): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block + conn = connect() + + # create and insert a block + block = Block(transactions=[signed_create_tx], node_pubkey='aaa') + conn.db.bigchain.insert_one(block.to_dict()) + + unvoted_blocks = list(query.get_unvoted_blocks(conn, 'aaa')) + + assert len(unvoted_blocks) == 1 + assert unvoted_blocks[0] == block.to_dict() From 0233a38f0b90cf2a830bdec3a50a16b43281cc5d Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Thu, 22 Dec 2016 22:00:44 +0100 Subject: [PATCH 23/34] Closes #996 --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index ec8ea941..fee61c05 100644 --- a/tox.ini +++ b/tox.ini @@ -23,7 +23,7 @@ deps = flake8 skip_install = True extras = None -commands = flake8 --max-line-length 119 bigchaindb +commands = flake8 --max-line-length 119 bigchaindb tests [testenv:docsroot] basepython = {[base]basepython} From c02478e0b2d1146b3d54be2f546feb22d967b017 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Thu, 22 Dec 2016 22:52:12 +0100 Subject: [PATCH 24/34] Add flake8 option to setup.cfg --- setup.cfg | 3 +++ tox.ini | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 0eb958cf..cdec23f1 100644 --- a/setup.cfg +++ b/setup.cfg @@ -4,3 +4,6 @@ test=pytest [coverage:run] source = . 
omit = *test* + +[flake8] +max_line_length = 119 diff --git a/tox.ini b/tox.ini index fee61c05..d2cd2a2c 100644 --- a/tox.ini +++ b/tox.ini @@ -23,7 +23,7 @@ deps = flake8 skip_install = True extras = None -commands = flake8 --max-line-length 119 bigchaindb tests +commands = flake8 bigchaindb tests [testenv:docsroot] basepython = {[base]basepython} From 1cfd3de60e282f7a8ed80a3f17c7d55b18ee9abb Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Thu, 22 Dec 2016 22:53:09 +0100 Subject: [PATCH 25/34] Fix flake8 errors in tests --- tests/backend/mongodb/test_schema.py | 1 - tests/common/test_transaction.py | 13 +++++-------- tests/conftest.py | 16 ++++++++++------ tests/db/test_bigchain_api.py | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/backend/mongodb/test_schema.py b/tests/backend/mongodb/test_schema.py index 33ddb719..c806f433 100644 --- a/tests/backend/mongodb/test_schema.py +++ b/tests/backend/mongodb/test_schema.py @@ -95,7 +95,6 @@ def test_create_secondary_indexes(): def test_drop(dummy_db): - import bigchaindb from bigchaindb import backend from bigchaindb.backend import schema diff --git a/tests/common/test_transaction.py b/tests/common/test_transaction.py index 038d82df..769ab002 100644 --- a/tests/common/test_transaction.py +++ b/tests/common/test_transaction.py @@ -154,8 +154,7 @@ def test_invalid_output_initialization(cond_uri, user_pub): Output(cond_uri, [user_pub], 0) -def test_generate_output_split_half_recursive(user_pub, user2_pub, - user3_pub): +def test_generate_output_split_half_recursive(user_pub, user2_pub, user3_pub): from bigchaindb.common.transaction import Output from cryptoconditions import Ed25519Fulfillment, ThresholdSha256Fulfillment @@ -174,8 +173,8 @@ def test_generate_output_split_half_recursive(user_pub, user2_pub, assert cond.fulfillment.to_dict() == expected.to_dict() -def test_generate_outputs_split_half_single_owner(user_pub, user2_pub, - user3_pub): +def test_generate_outputs_split_half_single_owner(user_pub, + user2_pub, user3_pub): from bigchaindb.common.transaction import Output from cryptoconditions import Ed25519Fulfillment, ThresholdSha256Fulfillment @@ -231,8 +230,7 @@ def test_generate_output_single_owner_with_output(user_pub): assert cond.fulfillment.to_dict() == expected.to_dict() -def test_generate_output_invalid_parameters(user_pub, user2_pub, - user3_pub): +def test_generate_output_invalid_parameters(user_pub, user2_pub, user3_pub): from bigchaindb.common.transaction import Output with raises(ValueError): @@ -289,7 +287,6 @@ def test_create_default_asset_on_tx_initialization(asset_definition): def test_transaction_serialization(user_input, user_output, data): from bigchaindb.common.transaction import Transaction - from .utils import validate_transaction_model tx_id = 'l0l' @@ -559,7 +556,7 @@ def test_validate_multiple_inputs(user_input, user_output, user_priv, expected_first_bytes = str(expected_first).encode() expected_first.inputs[0].fulfillment.sign(expected_first_bytes, - PrivateKey(user_priv)) + PrivateKey(user_priv)) expected_second_bytes = str(expected_second).encode() expected_second.inputs[0].fulfillment.sign(expected_second_bytes, PrivateKey(user_priv)) diff --git a/tests/conftest.py b/tests/conftest.py index 8b7c5cb1..ea28521d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -275,9 +275,11 @@ def inputs(user_pk, b, genesis_block): prev_block_id = genesis_block.id for block in range(4): transactions = [ - Transaction.create([b.me], [([user_pk], 1)], - metadata={'msg': 
random.random()}) - .sign([b.me_private]) + Transaction.create( + [b.me], + [([user_pk], 1)], + metadata={'msg': random.random()}, + ).sign([b.me_private]) for _ in range(10) ] block = b.create_block(transactions) @@ -297,9 +299,11 @@ def inputs_shared(user_pk, user2_pk, genesis_block): prev_block_id = genesis_block.id for block in range(4): transactions = [ - Transaction.create([b.me], [user_pk, user2_pk], - metadata={'msg': random.random()}) - .sign([b.me_private]) + Transaction.create( + [b.me], + [user_pk, user2_pk], + metadata={'msg': random.random()}, + ).sign([b.me_private]) for _ in range(10) ] block = b.create_block(transactions) diff --git a/tests/db/test_bigchain_api.py b/tests/db/test_bigchain_api.py index 241c12a0..32487dbb 100644 --- a/tests/db/test_bigchain_api.py +++ b/tests/db/test_bigchain_api.py @@ -189,7 +189,7 @@ class TestBigchainApi(object): tx = Transaction.transfer(inputs, [([user_pk], 1)], asset_id=input_tx.id) tx = tx.sign([user_sk]) - response = b.write_transaction(tx) + b.write_transaction(tx) tx_from_db, status = b.get_transaction(tx.id, include_status=True) From 376172ec18168195fd1c8a56131065280a196def Mon Sep 17 00:00:00 2001 From: Rodolphe Marques Date: Tue, 10 Jan 2017 14:44:48 +0100 Subject: [PATCH 26/34] Update the README with instructions to run the tests against mongodb --- tests/README.md | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/tests/README.md b/tests/README.md index e84516a1..f00d55e6 100644 --- a/tests/README.md +++ b/tests/README.md @@ -8,7 +8,8 @@ A few notes: - [`tests/common/`](./common/) contains self-contained tests only testing [`bigchaindb/common/`](../bigchaindb/common/) -- [`tests/db/`](./db/) contains tests requiring the database backend (e.g. RethinkDB) +- [`tests/backend/`](./backend/) contains tests requiring + the database backend (RethinkDB or MongoDB) ## Writing Tests @@ -20,9 +21,24 @@ We write unit and integration tests for our Python code using the [pytest](http: ### Running Tests Directly -If you installed BigchainDB Server using `pip install bigchaindb`, then you didn't install the tests. Before you can run all the tests, you must install BigchainDB from source. The [`CONTRIBUTING.md` file](../CONTRIBUTING.md) has instructions for how to do that. +If you installed BigchainDB Server using `pip install bigchaindb`, then you +didn't install the tests. Before you can run all the tests, you must install +BigchainDB from source. The [`CONTRIBUTING.md` file](../CONTRIBUTING.md) has +instructions for how to do that. -Next, make sure you have RethinkDB running in the background (e.g. using `rethinkdb --daemon`). +Next, make sure you have RethinkDB or MongoDB running in the background. You +can run RethinkDB using `rethinkdb --daemon` or MongoDB using `mongod +--replSet=rs0`. + +The `pytest` command has many options. If you want to learn about all the +things you can do with pytest, see [the pytest +documentation](http://pytest.org/latest/). We've also added a customization to +pytest: + +`--database-backend`: Defines the backend to use for the tests. It defaults to +`rethinkdb` +It must be one of the backends available in the [server +configuration](https://docs.bigchaindb.com/projects/server/en/latest/server-reference/configuration.html). Now you can run all tests using: ```text @@ -39,13 +55,14 @@ or: python setup.py test ``` +**Note**: the above pytest commands default to use RethinkDB as the backend. 
If +you wish to run the tests against MongoDB add the `--database-backend=mongodb` +to the `pytest` command. + How does `python setup.py test` work? The documentation for [pytest-runner](https://pypi.python.org/pypi/pytest-runner) explains. The `pytest` command has many options. If you want to learn about all the things you can do with pytest, see [the pytest documentation](http://pytest.org/latest/). We've also added a customization to pytest: -`--database-backend`: Defines the backend to use for the tests. -It must be one of the backends available in the [server configuration](https://docs.bigchaindb.com/projects/server/en/latest/server-reference/configuration.html). - ### Running Tests with Docker Compose From f57990c9eac188ea01b4ccff7c5449a83de80dc1 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Tue, 10 Jan 2017 18:28:20 +0100 Subject: [PATCH 27/34] Pin mongodb version to 3.4.1 in docker-compose.yml --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 0d20f868..9ccd4737 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,7 @@ version: '2' services: mdb: - image: mongo + image: mongo:3.4.1 ports: - "27017" From e696883f66dbf812f98bfc6cabf094d9e1db7e01 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Tue, 10 Jan 2017 18:47:21 +0100 Subject: [PATCH 28/34] Pin rethinkdb version to 2.3.5 in docker-compose --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 9ccd4737..16dbddd7 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -15,7 +15,7 @@ services: - rdb-data rdb-data: - image: rethinkdb + image: rethinkdb:2.3.5 volumes: - /data command: "true" From 4994e30f0cf478b3417086c4cb83901e9c9719c7 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Tue, 10 Jan 2017 18:48:30 +0100 Subject: [PATCH 29/34] Run mongod with --replSet=rs0 option --- docker-compose.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/docker-compose.yml b/docker-compose.yml index 16dbddd7..072b8725 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -5,6 +5,7 @@ services: image: mongo:3.4.1 ports: - "27017" + command: mongod --replSet=rs0 rdb: image: rethinkdb From 4c8d1c6e8caa7cc27f5b903b1176ccd5ac113dac Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Tue, 10 Jan 2017 18:49:32 +0100 Subject: [PATCH 30/34] Document how to run tests for mongodb with docker --- tests/README.md | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/tests/README.md b/tests/README.md index f00d55e6..ce4ac22c 100644 --- a/tests/README.md +++ b/tests/README.md @@ -68,6 +68,8 @@ The `pytest` command has many options. If you want to learn about all the things You can also use [Docker Compose](https://docs.docker.com/compose/) to run all the tests. +#### With RethinkDB as the backend + First, start `RethinkDB` in the background: ```text @@ -80,14 +82,29 @@ then run the tests using: $ docker-compose run --rm bdb py.test -v ``` -If you've upgraded to a newer version of BigchainDB, you might have to rebuild the images before -being able to run the tests. Run: +#### With MongoDB as the backend + +First, start `MongoDB` in the background: + +```text +$ docker-compose up -d mdb +``` + +then run the tests using: + +```text +$ docker-compose run --rm bdb-mdb py.test -v +``` + +If you've upgraded to a newer version of BigchainDB, you might have to rebuild +the images before being able to run the tests. 
Run: ```text $ docker-compose build ``` -to rebuild all the images (usually you only need to rebuild the `bdb` image). +to rebuild all the images (usually you only need to rebuild the `bdb` and + `bdb-mdb` images). ## Automated Testing of All Pull Requests From ceeba0e89a25c87ef27f40b7aa6bc63dfa712572 Mon Sep 17 00:00:00 2001 From: diminator Date: Wed, 11 Jan 2017 13:45:47 +0100 Subject: [PATCH 31/34] logical xor & status flag --- bigchaindb/web/views/statuses.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/bigchaindb/web/views/statuses.py b/bigchaindb/web/views/statuses.py index 8ded2f29..06b768e6 100644 --- a/bigchaindb/web/views/statuses.py +++ b/bigchaindb/web/views/statuses.py @@ -23,25 +23,25 @@ class StatusApi(Resource): parser.add_argument('block_id', type=str) args = parser.parse_args(strict=True) + tx_id = args['tx_id'] + block_id = args['block_id'] - if sum(arg is not None for arg in args.values()) != 1: + # logical xor - exactly one query argument required + if bool(tx_id) == bool(block_id): return make_error(400, "Provide exactly one query parameter. Choices are: block_id, tx_id") pool = current_app.config['bigchain_pool'] status, links = None, None with pool() as bigchain: - if args['tx_id']: - status = bigchain.get_status(args['tx_id']) + if tx_id: + status = bigchain.get_status(tx_id) links = { - "tx": "/transactions/{}".format(args['tx_id']) + "tx": "/transactions/{}".format(tx_id) } - elif args['block_id']: - block = bigchain.get_block(block_id=args['block_id']) - if not block: - return make_error(404) - status = bigchain.block_election_status(block['id'], block['block']['voters']) + elif block_id: + _, status = bigchain.get_block(block_id=block_id, include_status=True) # TODO: enable once blocks endpoint is available # links = { # "block": "/blocks/{}".format(args['block_id']) From fdf2a85c979e544b9c043c324b8ea2b75ea3e30a Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Wed, 21 Dec 2016 17:46:57 +0100 Subject: [PATCH 32/34] Add a BigchaindDB base exception class --- bigchaindb/exceptions.py | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 bigchaindb/exceptions.py diff --git a/bigchaindb/exceptions.py b/bigchaindb/exceptions.py new file mode 100644 index 00000000..d8a4cd73 --- /dev/null +++ b/bigchaindb/exceptions.py @@ -0,0 +1,2 @@ +class BigchainDBError(Exception): + """Base class for BigchainDB exceptions.""" From c036b1490b8657225ff4ce1b5db65af5308b7c94 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Wed, 11 Jan 2017 13:34:45 +0100 Subject: [PATCH 33/34] Inherit from BigchainDBError for common exceptions --- bigchaindb/common/exceptions.py | 39 +++++++++++++++++---------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/bigchaindb/common/exceptions.py b/bigchaindb/common/exceptions.py index c4ccd083..60340492 100644 --- a/bigchaindb/common/exceptions.py +++ b/bigchaindb/common/exceptions.py @@ -1,28 +1,29 @@ """Custom exceptions used in the `bigchaindb` package. 
""" +from bigchaindb.exceptions import BigchainDBError -class ConfigurationError(Exception): +class ConfigurationError(BigchainDBError): """Raised when there is a problem with server configuration""" -class OperationError(Exception): +class OperationError(BigchainDBError): """Raised when an operation cannot go through""" -class TransactionDoesNotExist(Exception): +class TransactionDoesNotExist(BigchainDBError): """Raised if the transaction is not in the database""" -class TransactionOwnerError(Exception): +class TransactionOwnerError(BigchainDBError): """Raised if a user tries to transfer a transaction they don't own""" -class DoubleSpend(Exception): +class DoubleSpend(BigchainDBError): """Raised if a double spend is found""" -class ValidationError(Exception): +class ValidationError(BigchainDBError): """Raised if there was an error in validation""" @@ -35,56 +36,56 @@ class SchemaValidationError(ValidationError): """Raised if there was any error validating an object's schema""" -class InvalidSignature(Exception): +class InvalidSignature(BigchainDBError): """Raised if there was an error checking the signature for a particular operation""" -class DatabaseAlreadyExists(Exception): +class DatabaseAlreadyExists(BigchainDBError): """Raised when trying to create the database but the db is already there""" -class DatabaseDoesNotExist(Exception): +class DatabaseDoesNotExist(BigchainDBError): """Raised when trying to delete the database but the db is not there""" -class KeypairNotFoundException(Exception): +class KeypairNotFoundException(BigchainDBError): """Raised if operation cannot proceed because the keypair was not given""" -class KeypairMismatchException(Exception): +class KeypairMismatchException(BigchainDBError): """Raised if the private key(s) provided for signing don't match any of the current owner(s)""" -class StartupError(Exception): +class StartupError(BigchainDBError): """Raised when there is an error starting up the system""" -class ImproperVoteError(Exception): +class ImproperVoteError(BigchainDBError): """Raised if a vote is not constructed correctly, or signed incorrectly""" -class MultipleVotesError(Exception): +class MultipleVotesError(BigchainDBError): """Raised if a voter has voted more than once""" -class GenesisBlockAlreadyExistsError(Exception): +class GenesisBlockAlreadyExistsError(BigchainDBError): """Raised when trying to create the already existing genesis block""" -class CyclicBlockchainError(Exception): +class CyclicBlockchainError(BigchainDBError): """Raised when there is a cycle in the blockchain""" -class TransactionNotInValidBlock(Exception): +class TransactionNotInValidBlock(BigchainDBError): """Raised when a transfer transaction is attempting to fulfill the outputs of a transaction that is in an invalid or undecided block""" -class AssetIdMismatch(Exception): +class AssetIdMismatch(BigchainDBError): """Raised when multiple transaction inputs related to different assets""" -class AmountError(Exception): +class AmountError(BigchainDBError): """Raised when there is a problem with a transaction's output amounts""" From 8aa37277c2c4134107b9b4726b3dcec1881d1659 Mon Sep 17 00:00:00 2001 From: diminator Date: Wed, 11 Jan 2017 17:17:20 +0100 Subject: [PATCH 34/34] remove blank line for travis --- tests/web/test_transactions.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/web/test_transactions.py b/tests/web/test_transactions.py index b744dd32..ebb440d6 100644 --- a/tests/web/test_transactions.py +++ b/tests/web/test_transactions.py @@ -158,4 +158,3 @@ def 
test_post_invalid_transfer_transaction_returns_400(b, client, user_pk, user_ res = client.post(TX_ENDPOINT, data=json.dumps(transfer_tx.to_dict())) assert res.status_code == 400 -