Mirror of https://github.com/kaspanet/kaspad.git (synced 2026-03-22 16:13:45 +00:00)

Compare commits: v1.2.3-tes...v0.6.3-rc2 (430 commits)
| SHA1 |
|---|
| 31c0399484 |
| f2a3ccd9ab |
| 31b5cd8d28 |
| 96bd1fa99b |
| 48d498e820 |
| 32c5cfeaf5 |
| d55f4e8164 |
| 1927e81202 |
| 8a4ece1101 |
| 0bf1052abf |
| 2af03c1ccf |
| a2aa58c8a4 |
| 7e74fc0b2b |
| 0653e59e16 |
| 32463ce906 |
| 23a3594c18 |
| ca3172dad0 |
| 22dc3f998f |
| 91f4ed9825 |
| aa9556aa59 |
| 91f0fe5740 |
| b0fecc9f87 |
| 53cccd405f |
| 5b84184921 |
| af1df425a2 |
| 8e170cf327 |
| b55cfee8c8 |
| 420c3d4258 |
| b92943a98c |
| e1318aa326 |
| 2bd4a71913 |
| 5b206f4c9d |
| 3f969a2921 |
| 90be14fd57 |
| 1a5d9fc65c |
| ec03a094e5 |
| 9d60bb1ee7 |
| cd10de2dce |
| 658fb08c02 |
| 3b40488877 |
| d3d0ad0cf3 |
| 473cc37a75 |
| 966cba4a4e |
| da90755530 |
| fa58623815 |
| 26af4da507 |
| b527470153 |
| e70561141d |
| 20b547984e |
| 16a658a5be |
| 42e50e6dc2 |
| 3d942ce355 |
| 94f617b06a |
| 211c4d05e8 |
| a9f3bdf4ab |
| 2303aecab4 |
| 7655841e9f |
| c4bbcf9de6 |
| 0cec1ce23e |
| 089fe828aa |
| 24a09fb3df |
| b2901454d6 |
| 6cf589dc9b |
| 683ceda3a7 |
| 6a18b56587 |
| 2c9e5be816 |
| 5d5a0ef335 |
| 428f16ffef |
| f93e54b63c |
| c30b350e8e |
| 8fdb5aa024 |
| 83a3c30d01 |
| 63646c8c92 |
| 097e7ab42a |
| 3d45c8de50 |
| 8e1958c20b |
| 3e6c1792ef |
| 6b5b4bfb2a |
| b797436884 |
| 2de3c1d0d4 |
| 7e81757e2f |
| 4773f87875 |
| aa5bc34280 |
| b9a25c1141 |
| b42b8b16fd |
| e0aac68759 |
| 9939671ccc |
| eaa8515442 |
| 04b578cee1 |
| f8e53d309c |
| 6076309b3e |
| 05db135d23 |
| 433cdb6006 |
| 4a4dca1926 |
| 6d591dde74 |
| 8e624e057e |
| eb2642ba90 |
| 1a43cabfb9 |
| 580e37943b |
| 749775c7ea |
| 8ff8c30fb4 |
| 9893b7396c |
| 8c90344f28 |
| e4955729d2 |
| 8a7b0314e5 |
| e87d00c9cf |
| 336347b3c5 |
| 15d0899406 |
| ad096f9781 |
| d3c6a3dffc |
| 57b1653383 |
| a86255ba51 |
| 0a7a4ce7d6 |
| 4c3735a897 |
| 22fd38c053 |
| 895f67a8d4 |
| 56e807b663 |
| af64c7dc2d |
| 1e6458973b |
| 7bf8bb5436 |
| 1358911d95 |
| 1271d2f113 |
| bc0227b49b |
| dc643c2d76 |
| 0744e8ebc0 |
| d4c9fdf6ac |
| 829979b6c7 |
| 32cd29bf70 |
| 03cb6cbd4d |
| ba4a89488e |
| b0d4a92e47 |
| 3e5a840c5a |
| d6d34238d2 |
| 8bbced5925 |
| 20da1b9c9a |
| b6a6e577c4 |
| 84888221ae |
| 222477b33e |
| 4a50d94633 |
| b4dba782fb |
| 9c78a797e4 |
| 35c733a4c1 |
| e5810d023e |
| 96930bd6ea |
| e09ce32146 |
| d15c009b3c |
| 95c8b8e9d8 |
| 2d798a5611 |
| 3a22249be9 |
| a4c1898624 |
| 672f02490a |
| fc00275d9c |
| 6219b93430 |
| 3a4571d671 |
| 96052ac69a |
| 6463a4b5d0 |
| 0ca127853d |
| b884ba128e |
| fe25ea3d8c |
| e0f587f599 |
| e9e1ef4772 |
| eb8b841850 |
| 28681affda |
| 378f0b659a |
| 35b943e04f |
| 65f75c17fc |
| 806eab817c |
| 585510d76c |
| c8a381d5bb |
| 3d04e6bded |
| f8e851a6ed |
| e70a615135 |
| 73ad0adf72 |
| 5b74e51db1 |
| 2e2492cc5d |
| 2ef5c2cbac |
| 3c89e1f7b3 |
| 2910724b49 |
| 3af945692e |
| 5fe9dae557 |
| 42c53ec3e2 |
| 291df8bfef |
| d015286f65 |
| fe91b4c878 |
| 7609c50641 |
| df934990d7 |
| 3c4a80f16d |
| a31139d4a5 |
| 6da3606721 |
| bfbc72724d |
| 956b6f7d95 |
| c1a039de3f |
| f8b18e09d6 |
| b20a7a679b |
| 36d866375e |
| 024edc30a3 |
| 6aa5e0b5a8 |
| 1a38550fdd |
| 3e7ebb5a84 |
| 4bca7342d3 |
| f80908fb4e |
| e000e10738 |
| d83862f36c |
| 1020402b34 |
| bc6ce6ed53 |
| d3b1953deb |
| 3c67215e76 |
| 586624c836 |
| 49855e6333 |
| 624249c0f3 |
| 1cf443a63b |
| 8909679f44 |
| e58efbf0ea |
| 34fb066590 |
| 299826f392 |
| 3d8dd8724d |
| b8a00f7519 |
| 4dfc8cf5b0 |
| 5a99e4d2f3 |
| 606cd668ff |
| dd537f5143 |
| a1c631be62 |
| 707a728656 |
| 80b5631a48 |
| 2373965551 |
| 65cbb6655b |
| cdd96d0670 |
| ad04bbde83 |
| 5374d95416 |
| de9aa39cc5 |
| 98987f4a8f |
| 9745f31b69 |
| ee08531a52 |
| 61baf7b260 |
| 650e4f735e |
| 550b12b041 |
| a4bb070722 |
| 30fe0c279b |
| e405dd5981 |
| 243b4b8021 |
| dd4c93e1ef |
| a07335d74d |
| 7567cd4cb9 |
| 51ff9e2562 |
| 5b8ab63890 |
| 3dd7dc4496 |
| d90a08ecfa |
| 45dc1a3e7b |
| 4ffb5daa37 |
| b9138b720d |
| d8954f1339 |
| eb953286ec |
| 41c8178ad3 |
| aa74b51e6f |
| f7800eb5c4 |
| 193add502f |
| 44c55900f8 |
| 4c0ea78026 |
| 03a93fe51e |
| eca0514465 |
| aadbebb720 |
| 5daab45947 |
| 607b838ded |
| 25bdaeed31 |
| 8b2d3f07ce |
| a3dc2f7da7 |
| bf36f9ceb6 |
| 11de12304e |
| a10320ad7b |
| fd2bbf3557 |
| 7f9cf17274 |
| ba0e239557 |
| ed606bfda3 |
| c0463a8a68 |
| 52e0a0967d |
| 29bcc271b5 |
| 94ec159147 |
| 9d434de4a5 |
| 49418f4222 |
| 38b4749f20 |
| 045984e6b9 |
| 38883d1a98 |
| b5f365d282 |
| a7d3a40465 |
| 359b16fca9 |
| 8b8e73feb5 |
| 6044b6ac1a |
| a177ea4f15 |
| 3a15aa4bae |
| 427185b6a8 |
| b282734a3f |
| 6d765f58ba |
| 20819ca4cd |
| 2174a0a7f2 |
| ea6f7a28c2 |
| ac9aa74a75 |
| d46857677f |
| cd719b1d5b |
| 7cf15ac93b |
| d8e3191469 |
| 784d3de4ca |
| 733d06af5a |
| df91643976 |
| ebf635e6ff |
| e41d9866c3 |
| d984151549 |
| 6099ce56bd |
| e0b5c145f7 |
| cf37f733ef |
| 66a92a243c |
| 4a88eea57e |
| fbaf360a42 |
| 1346810af8 |
| 9cbab94264 |
| 48f29cc11f |
| e2b57e6231 |
| f72afc8bbb |
| 0d1f447cb7 |
| 818f8c93eb |
| 264ffaae93 |
| 03b7af9a13 |
| e3d7e83d44 |
| 07651e51c8 |
| 1cd2eb9308 |
| a140327dd2 |
| c1f7ae72e0 |
| 3a12fe9b1d |
| c25c9b25bd |
| f46dec449d |
| 60ab6330ff |
| 89dee3e005 |
| 70d7009985 |
| 3322a892e9 |
| 61d066e958 |
| 7b9ffc6c25 |
| 7a163d4dd7 |
| 189a3380a2 |
| 8680231e5a |
| 30f0e95969 |
| c94becf144 |
| 369ec449a8 |
| f4c6859e51 |
| 683dd52fcf |
| 11e936d109 |
| 9adb105e37 |
| 7b6ed9a778 |
| 3218fc5a04 |
| 3f94f8ca4c |
| 0842778c2c |
| 1332e1aa68 |
| e872ebc7b3 |
| e68b242243 |
| 9cc2a7260b |
| bcd73012de |
| 1fea2a9421 |
| bb7d68deda |
| 3ab861227d |
| 8f0d98ef9b |
| dbd8bf3d2c |
| 1b6b02e0d2 |
| 2402bae1ff |
| 3dcf8d88b8 |
| dbf9c09a2e |
| 5e9fc2defc |
| bdc3cbceaa |
| a71528fefb |
| 6725742d2c |
| 9a510e2e23 |
| 08a4b0dbf6 |
| 0c9e55a358 |
| 532e57b61c |
| b1f59914d2 |
| 9a54b286c9 |
| 6e4b18a498 |
| b5f8a0452e |
| fab043ef14 |
| 8e0e62f21a |
| 9a1c2e2641 |
| 8cbc6670cc |
| 28ee6a8026 |
| af39e96e3e |
| db6e9c773f |
| 47214121a7 |
| 7b07609fd8 |
| acb4b3f260 |
| e0221aa8ab |
| cba346d753 |
| 0f34cfb1a2 |
| ea846a3284 |
| 63bfac9740 |
| 7284815c21 |
| 80307d108b |
| 722437afe9 |
| 684cf4b5fa |
| c95a7b13a6 |
| 1ce7f21026 |
| 7d7df10493 |
| 8179862e0b |
| 6828f623b4 |
| 2c88a5b2fe |
| a7f08598f3 |
| 83bad65d3a |
| 1f35378a4d |
| 39eab7a6d5 |
| 9dd025d4da |
| bb75ea5020 |
| 8dbd4a2bed |
| 24305cda68 |
| 770dfd147d |
| a9ff9b0e70 |
| 3cc6f2d648 |
| a8f0d7b05b |
| 13f06ca293 |
| c88fa1492e |
| 40657a83f5 |
| 44dd58b461 |
| 47891b17ab |
| f7fbfbf5c4 |
| 0e278ca22b |
| c66fb294c8 |
| 88b7e7ca03 |
| a9b659a36f |
| 90fc6ba3e7 |
| 8ea97aa3fd |
| 7c9f5a65d8 |
| e2d3c4c821 |
| 92578e2853 |
| 3018c18616 |
| 3ac9fa83c1 |
| c5b0398dac |
.gitignore (vendored): 2 changes

@@ -2,7 +2,7 @@
 *~
 # Databases
-btcd.db
+kaspad.db
 *-shm
 *-wal
CHANGES: 955 deletions

@@ -1,955 +0,0 @@
============================================================================
User visible changes for btcd
A full-node bitcoin implementation written in Go
============================================================================

Changes in 0.12.0 (Fri Nov 20 2015)
- Protocol and network related changes:
  - Add a new checkpoint at block height 382320 (#555)
  - Implement BIP0065 which includes support for version 4 blocks, a new consensus opcode (OP_CHECKLOCKTIMEVERIFY) that enforces transaction lock times, and a double-threshold switchover mechanism (#535, #459, #455)
  - Implement BIP0111 which provides a new bloom filter service flag and hence provides support for protocol version 70011 (#499)
  - Add a new parameter --nopeerbloomfilters to allow disabling bloom filter support (#499)
  - Reject non-canonically encoded variable length integers (#507)
  - Add mainnet peer discovery DNS seed (seed.bitcoin.jonasschnelli.ch) (#496)
  - Correct reconnect handling for persistent peers (#463, #464)
  - Ignore requests for block headers if not fully synced (#444)
  - Add CLI support for specifying the zone id on IPv6 addresses (#538)
  - Fix a couple of issues where the initial block sync could stall (#518, #229, #486)
  - Fix an issue which prevented the --onion option from working as intended (#446)
- Transaction relay (memory pool) changes:
  - Require transactions to only include signatures encoded with the canonical 'low-s' encoding (#512)
  - Add a new parameter --minrelaytxfee to allow the minimum transaction fee in BTC/kB to be overridden (#520)
  - Retain memory pool transactions when they redeem another one that is removed when a block is accepted (#539)
  - Do not send reject messages for a transaction if it is valid but causes an orphan transaction which depends on it to be determined as invalid (#546)
  - Refrain from attempting to add orphans to the memory pool multiple times when the transaction they redeem is added (#551)
  - Modify minimum transaction fee calculations to scale based on bytes instead of full kilobyte boundaries (#521, #537)
- Implement signature cache:
  - Provides a limited memory cache of validated signatures which is a huge optimization when verifying blocks for transactions that are already in the memory pool (#506)
  - Add a new parameter '--sigcachemaxsize' which allows the size of the new cache to be manually changed if desired (#506)
- Mining support changes:
  - Notify getblocktemplate long polling clients when a block is pushed via submitblock (#488)
  - Speed up getblocktemplate by making use of the new signature cache (#506)
- RPC changes:
  - Implement getmempoolinfo command (#453)
  - Implement getblockheader command (#461)
  - Modify createrawtransaction command to accept a new optional parameter 'locktime' (#529)
  - Modify listunspent result to include the 'spendable' field (#440)
  - Modify getinfo command to include 'errors' field (#511)
  - Add timestamps to blockconnected and blockdisconnected notifications (#450)
  - Several modifications to searchrawtransactions command:
    - Accept a new optional parameter 'vinextra' which causes the results to include information about the outputs referenced by a transaction's inputs (#485, #487)
    - Skip entries in the mempool too (#495)
    - Accept a new optional parameter 'reverse' to return the results in reverse order (most recent to oldest) (#497)
    - Accept a new optional parameter 'filteraddrs' which causes the results to only include inputs and outputs which involve the provided addresses (#516)
  - Change the notification order to notify clients about mined transactions (recvtx, redeemingtx) before the blockconnected notification (#449)
  - Update verifymessage RPC to use the standard algorithm so it is compatible with other implementations (#515)
  - Improve ping statistics by pinging on an interval (#517)
- Websocket changes:
  - Implement session command which returns a per-session unique id (#500, #503)
- btcctl utility changes:
  - Add getmempoolinfo command (#453)
  - Add getblockheader command (#461)
  - Add getwalletinfo command (#471)
- Notable developer-related package changes:
  - Introduce a new peer package which acts as a common base for creating and concurrently managing bitcoin network peers (#445)
  - Various cleanup of the new peer package (#528, #531, #524, #534, #549)
  - Block heights now consistently use int32 everywhere (#481)
  - The BlockHeader type in the wire package now provides the BtcDecode and BtcEncode methods (#467)
  - Update wire package to recognize BIP0064 (getutxo) service bit (#489)
  - Export LockTimeThreshold constant from txscript package (#454)
  - Export MaxDataCarrierSize constant from txscript package (#466)
  - Provide new IsUnspendable function from the txscript package (#478)
  - Export variable length string functions from the wire package (#514)
  - Export DNS Seeds for each network from the chaincfg package (#544)
  - Preliminary work towards separating the memory pool into a separate package (#525, #548)
- Misc changes:
  - Various documentation updates (#442, #462, #465, #460, #470, #473, #505, #530, #545)
  - Add installation instructions for gentoo (#542)
  - Ensure an error is shown if OS limits can't be set at startup (#498)
  - Tighten the standardness checks for multisig scripts (#526)
  - Test coverage improvement (#468, #494, #527, #543, #550)
  - Several optimizations (#457, #474, #475, #476, #508, #509)
  - Minor code cleanup and refactoring (#472, #479, #482, #519, #540)
- Contributors (alphabetical order):
  - Ben Echols
  - Bruno Clermont
  - danda
  - Daniel Krawisz
  - Dario Nieuwenhuis
  - Dave Collins
  - David Hill
  - Javed Khan
  - Jonathan Gillham
  - Joseph Becher
  - Josh Rickmar
  - Justus Ranvier
  - Mawuli Adzoe
  - Olaoluwa Osuntokun
  - Rune T. Aune

Changes in 0.11.1 (Wed May 27 2015)
- Protocol and network related changes:
  - Use correct sub-command in reject message for rejected transactions (#436, #437)
  - Add a new parameter --torisolation which forces new circuits for each connection when using tor (#430)
- Transaction relay (memory pool) changes:
  - Reduce the default maximum number of allowed orphan transactions to 1000 (#419)
  - Add a new parameter --maxorphantx which allows the maximum number of orphan transactions stored in the mempool to be specified (#419)
- RPC changes:
  - Modify listtransactions result to include the 'involveswatchonly' and 'vout' fields (#427)
  - Update getrawtransaction result to omit the 'confirmations' field when it is 0 (#420, #422)
  - Update signrawtransaction result to include errors (#423)
- btcctl utility changes:
  - Add gettxoutproof command (#428)
  - Add verifytxoutproof command (#428)
- Notable developer-related package changes:
  - The btcec package now provides the ability to perform ECDH encryption and decryption (#375)
  - The block and header validation in the blockchain package has been split to help pave the way toward concurrent downloads (#386)
- Misc changes:
  - Minor peer optimization (#433)
- Contributors (alphabetical order):
  - Dave Collins
  - David Hill
  - Federico Bond
  - Ishbir Singh
  - Josh Rickmar

Changes in 0.11.0 (Wed May 06 2015)
- Protocol and network related changes:
  - **IMPORTANT: Update is required due to the following point**
  - Correct a few corner cases in script handling which could result in forking from the network on non-standard transactions (#425)
  - Add a new checkpoint at block height 352940 (#418)
  - Optimized script execution (#395, #400, #404, #409)
  - Fix a case that could lead to stalled syncs (#138, #296)
- Network address manager changes:
  - Implement eclipse attack countermeasures as proposed in http://cs-people.bu.edu/heilman/eclipse (#370, #373)
- Optional address indexing changes:
  - Fix an issue where a reorg could cause an orderly shutdown when the address index is active (#340, #357)
- Transaction relay (memory pool) changes:
  - Increase maximum allowed space for nulldata transactions to 80 bytes (#331)
  - Implement support for the following rules specified by BIP0062:
    - The S value in ECDSA signatures must be at most half the curve order (rule 5) (#349)
    - Script execution must result in a single non-zero value on the stack (rule 6) (#347)
    - NOTE: All 7 rules of BIP0062 are now implemented
  - Use network adjusted time in finalized transaction checks to improve consistency across nodes (#332)
  - Process orphan transactions on acceptance of new transactions (#345)
- RPC changes:
  - Add support for a limited RPC user which is not allowed admin level operations on the server (#363)
  - Implement node command for more unified control over connected peers (#79, #341)
  - Implement generate command for regtest/simnet to support deterministically mining a specified number of blocks (#362, #407)
  - Update searchrawtransactions to return the matching transactions in order (#354)
  - Correct an issue with searchrawtransactions where it could return duplicates (#346, #354)
  - Increase precision of 'difficulty' field in getblock result to 8 (#414, #415)
  - Omit 'nextblockhash' field from getblock result when it is empty (#416, #417)
  - Add 'id' and 'timeoffset' fields to getpeerinfo result (#335)
- Websocket changes:
  - Implement new commands stopnotifyspent, stopnotifyreceived, stopnotifyblocks, and stopnotifynewtransactions to allow clients to cancel notification registrations (#122, #342)
- btcctl utility changes:
  - A single dash can now be used as an argument to cause that argument to be read from stdin (#348)
  - Add generate command
- Notable developer-related package changes:
  - The new version 2 btcjson package has now replaced the deprecated version 1 package (#368)
  - The btcec package now performs all signing using RFC6979 deterministic signatures (#358, #360)
  - The txscript package has been significantly cleaned up and had a few API changes (#387, #388, #389, #390, #391, #392, #393, #395, #396, #400, #403, #404, #405, #406, #408, #409, #410, #412)
  - A new PkScriptLocs function has been added to the wire package MsgTx type which provides callers that deal with scripts optimization opportunities (#343)
- Misc changes:
  - Minor wire hashing optimizations (#366, #367)
  - Other minor internal optimizations
- Contributors (alphabetical order):
  - Alex Akselrod
  - Arne Brutschy
  - Chris Jepson
  - Daniel Krawisz
  - Dave Collins
  - David Hill
  - Jimmy Song
  - Jonas Nick
  - Josh Rickmar
  - Olaoluwa Osuntokun
  - Oleg Andreev

Changes in 0.10.0 (Sun Mar 01 2015)
- Protocol and network related changes:
  - Add a new checkpoint at block height 343185
  - Implement BIP066 which includes support for version 3 blocks, a new consensus rule which prevents non-DER encoded signatures, and a double-threshold switchover mechanism
  - Rather than announcing all known addresses on getaddr requests which can possibly result in multiple messages, randomize the results and limit them to the max allowed by a single message (1000 addresses)
  - Add more reserved IP spaces to the address manager
- Transaction relay (memory pool) changes:
  - Make transactions which contain reserved opcodes nonstandard
  - No longer accept or relay free and low-fee transactions that have insufficient priority to be mined in the next block
  - Implement support for the following rules specified by BIP0062:
    - ECDSA signature must use strict DER encoding (rule 1)
    - The signature script must only contain push operations (rule 2)
    - All push operations must use the smallest possible encoding (rule 3)
    - All stack values interpreted as a number must be encoded using the shortest possible form (rule 4)
    - NOTE: Rule 1 was already enforced, however the entire script now evaluates to false rather than only the signature verification as required by BIP0062
  - Allow transactions with nulldata transaction outputs to be treated as standard
- Mining support changes:
  - Modify the getblocktemplate RPC to generate and return block templates for version 3 blocks which are compatible with BIP0066
  - Allow getblocktemplate to serve blocks when the current time is less than the minimum allowed time for a generated block template (https://github.com/btcsuite/btcd/issues/209)
- Crypto changes:
  - Optimize scalar multiplication by the base point by using a pre-computed table which results in approximately a 35% speedup (https://github.com/btcsuite/btcec/issues/2)
  - Optimize general scalar multiplication by using the secp256k1 endomorphism which results in approximately a 17-20% speedup (https://github.com/btcsuite/btcec/issues/1)
  - Optimize general scalar multiplication by using non-adjacent form which results in approximately an additional 8% speedup (https://github.com/btcsuite/btcec/issues/3)
- Implement optional address indexing:
  - Add a new parameter --addrindex which will enable the creation of an address index which can be queried to determine all transactions which involve a given address (https://github.com/btcsuite/btcd/issues/190)
  - Add a new logging subsystem for address index related operations
  - Support new searchrawtransactions RPC (https://github.com/btcsuite/btcd/issues/185)
- RPC changes:
  - Require TLS version 1.2 as the minimum version for all TLS connections
  - Provide support for disabling TLS when only listening on localhost (https://github.com/btcsuite/btcd/pull/192)
  - Modify help output for all commands to provide much more consistent and detailed information
  - Correct case in getrawtransaction which would refuse to serve certain transactions with invalid scripts (https://github.com/btcsuite/btcd/issues/210)
  - Correct error handling in the getrawtransaction RPC which could lead to a crash in rare cases (https://github.com/btcsuite/btcd/issues/196)
  - Update getinfo RPC to include the appropriate 'timeoffset' calculated from the median network time
  - Modify listreceivedbyaddress result type to include txids field so it is compatible
  - Add 'iswatchonly' field to validateaddress result
  - Add 'startingpriority' and 'currentpriority' fields to getrawmempool (https://github.com/btcsuite/btcd/issues/178)
  - Don't omit the 'confirmations' field from getrawtransaction when it is zero
- Websocket changes:
  - Modify the behavior of the rescan command to automatically register for notifications about transactions paying to rescanned addresses or spending outputs from the final rescan utxo set when the rescan is through the best block in the chain
- btcctl utility changes:
  - Make the list of commands available via the -l option rather than dumping the entire list on usage errors
  - Alphabetize and categorize the list of commands by chain and wallet
  - Make the help option only show the help options instead of also dumping all of the commands
  - Make the usage syntax much more consistent and correct a few cases of misnamed fields (https://github.com/btcsuite/btcd/issues/305)
  - Improve usage errors to show the specific parameter number, reason, and error code
  - Only show the usage for a specific command when a valid command is provided with invalid parameters
  - Add support for a SOCKS5 proxy
  - Modify output for integer fields (such as timestamps) to display normally instead of in scientific notation
  - Add invalidateblock command
  - Add reconsiderblock command
  - Add createnewaccount command
  - Add renameaccount command
  - Add searchrawtransactions command
  - Add importaddress command
  - Add importpubkey command
- showblock utility changes:
  - Remove utility in favor of the RPC getblock method
- Notable developer-related package changes:
  - Many of the core packages have been relocated into the btcd repository (https://github.com/btcsuite/btcd/issues/214)
  - A new version of the btcjson package that has been completely redesigned from the ground up based upon how the project has evolved and lessons learned while using it since it was first written is now available in the btcjson/v2/btcjson directory
    - This will ultimately replace the current version so anyone making use of this package will need to update their code accordingly
  - The btcec package now provides better facilities for working directly with its public and private keys without having to mix elements from the ecdsa package
  - Update the script builder to ensure all rules specified by BIP0062 are adhered to when creating scripts
  - The blockchain package now provides a MedianTimeSource interface and concrete implementation for providing time samples from remote peers and using that data to calculate an offset against the local time
- Misc changes:
  - Fix a slow memory leak due to tickers not being stopped (https://github.com/btcsuite/btcd/issues/189)
  - Fix an issue where a mix of orphans and SPV clients could trigger a condition where peers would no longer be served (https://github.com/btcsuite/btcd/issues/231)
  - The RPC username and password can now contain symbols which previously conflicted with special symbols used in URLs
  - Improve handling of obtaining random nonces to prevent cases where it could error when not enough entropy was available
  - Improve handling of home directory creation errors such as in the case of unmounted symlinks (https://github.com/btcsuite/btcd/issues/193)
  - Improve the error reporting for rejected transactions to include the inputs which are missing and/or being double spent
  - Update sample config file with new options and correct a comment regarding the fact the RPC server only listens on localhost by default (https://github.com/btcsuite/btcd/issues/218)
  - Update the continuous integration builds to run several tools which help keep code quality high
  - Significant amount of internal code cleanup and improvements
  - Other minor internal optimizations
- Code Contributors (alphabetical order):
  - Beldur
  - Ben Holden-Crowther
  - Dave Collins
  - David Evans
  - David Hill
  - Guilherme Salgado
  - Javed Khan
  - Jimmy Song
  - John C. Vernaleo
  - Jonathan Gillham
  - Josh Rickmar
  - Michael Ford
  - Michail Kargakis
  - kac
  - Olaoluwa Osuntokun

Changes in 0.9.0 (Sat Sep 20 2014)
- Protocol and network related changes:
  - Add a new checkpoint at block height 319400
  - Add support for BIP0037 bloom filters (https://github.com/conformal/btcd/issues/132)
  - Implement BIP0061 reject handling and hence support for protocol version 70002 (https://github.com/conformal/btcd/issues/133)
  - Add testnet DNS seeds for peer discovery (testnet-seed.alexykot.me and testnet-seed.bitcoin.schildbach.de)
  - Add mainnet DNS seed for peer discovery (seeds.bitcoin.open-nodes.org)
  - Make multisig transactions with non-null dummy data nonstandard (https://github.com/conformal/btcd/issues/131)
  - Make transactions with an excessive number of signature operations nonstandard
  - Perform initial DNS lookups concurrently which allows connections to be established more quickly
  - Improve the address manager to significantly reduce memory usage and add tests
  - Remove orphan transactions when they appear in a mined block (https://github.com/conformal/btcd/issues/166)
  - Apply incremental back off on connection retries for persistent peers that give invalid replies to mirror the logic used for failed connections (https://github.com/conformal/btcd/issues/103)
  - Correct rate-limiting of free and low-fee transactions
- Mining support changes:
  - Implement getblocktemplate RPC with the following support: (https://github.com/conformal/btcd/issues/124)
    - BIP0022 Non-Optional Sections
    - BIP0022 Long Polling
    - BIP0023 Basic Pool Extensions
    - BIP0023 Mutation coinbase/append
    - BIP0023 Mutations time, time/increment, and time/decrement
    - BIP0023 Mutation transactions/add
    - BIP0023 Mutations prevblock, coinbase, and generation
    - BIP0023 Block Proposals
  - Implement built-in concurrent CPU miner (https://github.com/conformal/btcd/issues/137). NOTE: CPU mining on mainnet is pointless; this has been provided for testing purposes such as for the new simulation test network
  - Add --generate flag to enable CPU mining
  - Deprecate the --getworkkey flag in favor of --miningaddr which specifies which addresses generated blocks will choose from to pay the subsidy to
- RPC changes:
  - Implement gettxout command (https://github.com/conformal/btcd/issues/141)
  - Implement validateaddress command
  - Implement verifymessage command
  - Mark getunconfirmedbalance RPC as wallet-only
  - Mark getwalletinfo RPC as wallet-only
  - Update getgenerate, setgenerate, gethashespersec, and getmininginfo to return the appropriate information about new CPU mining status
  - Modify getpeerinfo pingtime and pingwait field types to float64 so they are compatible
  - Improve disconnect handling for normal HTTP clients
  - Make error code returns for invalid hex more consistent
- Websocket changes:
  - Switch to a new more efficient websocket package (https://github.com/conformal/btcd/issues/134)
  - Add rescanfinished notification
  - Modify the rescanprogress notification to include block hash as well as height (https://github.com/conformal/btcd/issues/151)
- btcctl utility changes:
  - Accept --simnet flag which automatically selects the appropriate port and TLS certificates needed to communicate with btcd and btcwallet on the simulation test network
  - Fix createrawtransaction command to send amounts denominated in BTC
  - Add estimatefee command
  - Add estimatepriority command
  - Add getmininginfo command
  - Add getnetworkinfo command
  - Add gettxout command
  - Add lockunspent command
  - Add signrawtransaction command
- addblock utility changes:
  - Accept --simnet flag which automatically selects the appropriate port and TLS certificates needed to communicate with btcd and btcwallet on the simulation test network
- Notable developer-related package changes:
  - Provide a new bloom package in btcutil which allows creating and working with BIP0037 bloom filters
  - Provide a new hdkeychain package in btcutil which allows working with BIP0032 hierarchical deterministic key chains
  - Introduce a new btcnet package which houses network parameters
  - Provide new simnet network (--simnet) which is useful for private simulation testing
  - Enforce low S values in serialized signatures as detailed in BIP0062
  - Return errors from all methods on the btcdb.Db interface (https://github.com/conformal/btcdb/issues/5)
  - Allow behavior flags to alter btcchain.ProcessBlock (https://github.com/conformal/btcchain/issues/5)
  - Provide a new SerializeSize API for blocks (https://github.com/conformal/btcwire/issues/19)
  - Several of the core packages now work with Google App Engine
- Misc changes:
  - Correct an issue where the database could corrupt under certain circumstances which would require a new chain download
  - Slightly optimize deserialization
  - Use the correct IP block for he.net
  - Fix an issue where it was possible the block manager could hang on shutdown
  - Update sample config file so the comments are on a separate line rather than the end of a line so they are not interpreted as settings (https://github.com/conformal/btcd/issues/135)
  - Correct an issue where getdata requests were not being properly throttled which could lead to larger than necessary memory usage
  - Always show help when given the help flag even when the config file contains invalid entries
  - General code cleanup and minor optimizations

Changes in 0.8.0-beta (Sun May 25 2014)
- Btcd is now Beta (https://github.com/conformal/btcd/issues/130)
- Add a new checkpoint at block height 300255
- Protocol and network related changes:
  - Lower the minimum transaction relay fee to 1000 satoshi to match recent reference client changes (https://github.com/conformal/btcd/issues/100)
  - Raise the maximum signature script size to support standard 15-of-15 multi-signature pay-to-script-hash transactions with compressed pubkeys to remain compatible with the reference client (https://github.com/conformal/btcd/issues/128)
  - Reduce max bytes allowed for a standard nulldata transaction to 40 for compatibility with the reference client
  - Introduce a new btcnet package which houses all of the network params for each network (mainnet, testnet, regtest) to ultimately enable easier addition and tweaking of networks without needing to change several packages
  - Fix several script discrepancies found by reference client test data
  - Add new DNS seed for peer discovery (seed.bitnodes.io)
  - Reduce the max known inventory cache from 20000 items to 1000 items
  - Fix an issue where unknown inventory types could lead to a hung peer
  - Implement inventory rebroadcast handler for sendrawtransaction (https://github.com/conformal/btcd/issues/99)
  - Update user agent to fully support BIP0014 (https://github.com/conformal/btcwire/issues/10)
- Implement initial mining support:
  - Add a new logging subsystem for mining related operations
  - Implement infrastructure for creating block templates
  - Provide options to control block template creation settings
  - Support the getwork RPC
  - Allow address identifiers to apply to more than one network since both testnet and the regression test network unfortunately use the same identifier
- RPC changes:
  - Set the content type for HTTP POST RPC connections to application/json (https://github.com/conformal/btcd/issues/121)
  - Modified the RPC server startup so it only requires at least one valid listen interface
  - Correct an error path where it was possible certain errors would not be returned
  - Implement getwork command (https://github.com/conformal/btcd/issues/125)
  - Update sendrawtransaction command to reject orphans
  - Update sendrawtransaction command to include the reason a transaction was rejected
  - Update getinfo command to populate connection count field
  - Update getinfo command to include relay fee field (https://github.com/conformal/btcd/issues/107)
  - Allow transactions submitted with sendrawtransaction to bypass the rate limiter
  - Allow the getcurrentnet and getbestblock extensions to be accessed via HTTP POST in addition to Websockets (https://github.com/conformal/btcd/issues/127)
- Websocket changes:
  - Rework notifications to ensure they are delivered in the order they occur
  - Rename notifynewtxs command to notifyreceived (funds received)
  - Rename notifyallnewtxs command to notifynewtransactions
  - Rename alltx notification to txaccepted
  - Rename allverbosetx notification to txacceptedverbose (https://github.com/conformal/btcd/issues/98)
  - Add rescan progress notification
  - Add recvtx notification
  - Add redeemingtx notification
  - Modify notifyspent command to accept an array of outpoints (https://github.com/conformal/btcd/issues/123)
  - Significantly optimize the rescan command to yield up to a 60x speed increase
- btcctl utility changes:
  - Add createencryptedwallet command
  - Add getblockchaininfo command
  - Add importwallet command
  - Add addmultisigaddress command
  - Add setgenerate command
  - Accept --testnet and --wallet flags which automatically select the appropriate port and TLS certificates needed to communicate with btcd and btcwallet (https://github.com/conformal/btcd/issues/112)
  - Allow path expansion from config file entries (https://github.com/conformal/btcd/issues/113)
  - Minor refactoring to simplify handling of options
- addblock utility changes:
  - Improve logging by making it consistent with the logging provided by btcd (https://github.com/conformal/btcd/issues/90)
- Improve several package APIs for developers:
  - Add new amount type for consistently handling monetary values
  - Add new coin selector API
  - Add new WIF (Wallet Import Format) API
  - Add new crypto types for private keys and signatures
  - Add new API to sign transactions including script merging and hash types
  - Expose function to extract all pushed data from a script (https://github.com/conformal/btcscript/issues/8)
- Misc changes:
  - Optimize address manager shuffling to do 67% less work on average
  - Resolve a couple of benign data races found by the race detector (https://github.com/conformal/btcd/issues/101)
  - Add IP address to all peer related errors to clarify which peer is the cause (https://github.com/conformal/btcd/issues/102)
  - Fix a UPNP case issue that prevented the --upnp option from working with some UPNP servers
  - Update documentation in the sample config file regarding debug levels
  - Adjust some logging levels to improve debug messages
  - Improve the throughput of query messages to the block manager
  - Several minor optimizations to reduce GC churn and enhance speed
  - Other minor refactoring
  - General code cleanup

Changes in 0.7.0 (Thu Feb 20 2014)
- Fix an issue when parsing scripts which contain a multi-signature script which requires zero signatures such as testnet block 000000001881dccfeda317393c261f76d09e399e15e27d280e5368420f442632 (https://github.com/conformal/btcscript/issues/7)
- Add check to ensure all transactions accepted to mempool only contain canonical data pushes (https://github.com/conformal/btcscript/issues/6)
- Fix an issue causing excessive memory consumption
- Significantly rework and improve the websocket notification system:
  - Each client is now independent so slow clients no longer limit the speed of other connected clients
  - Potentially long-running operations such as rescans are now run in their own handler and rate-limited to one operation at a time without preventing simultaneous requests from the same client for the faster requests or notifications
  - A couple of scenarios which could cause shutdown to hang have been resolved
  - Update notifynewtx notifications to support all address types instead of only pay-to-pubkey-hash
  - Provide a --rpcmaxwebsockets option to allow limiting the number of concurrent websocket clients
  - Add a new websocket command notifyallnewtxs to request notifications (https://github.com/conformal/btcd/issues/86) (thanks @flammit)
- Improve btcctl utility in the following ways:
  - Add getnetworkhashps command
  - Add gettransaction command (wallet-specific)
  - Add signmessage command (wallet-specific)
  - Update getwork command to accept
- Continue cleanup and work on implementing the RPC API:
  - Implement getnettotals command (https://github.com/conformal/btcd/issues/84)
  - Implement networkhashps command (https://github.com/conformal/btcd/issues/87)
  - Update getpeerinfo to always include syncnode field even when false
  - Remove help addenda for getpeerinfo now that it supports all fields
  - Close standard RPC connections on auth failure
- Provide a --rpcmaxclients option to allow limiting the number of concurrent RPC clients (https://github.com/conformal/btcd/issues/68)
- Include IP address in RPC auth failure log messages
- Resolve a rather harmless data race found by the race detector (https://github.com/conformal/btcd/issues/94)
- Increase block priority size and max standard transaction size to 50k and 100k, respectively (https://github.com/conformal/btcd/issues/71)
- Add rate limiting of free transactions to the memory pool to prevent penny flooding (https://github.com/conformal/btcd/issues/40)
- Provide a --logdir option (https://github.com/conformal/btcd/issues/95)
- Change the default log file path to include the network
- Add a new ScriptBuilder interface to btcscript to support creation of custom scripts (https://github.com/conformal/btcscript/issues/5)
- General code cleanup

Changes in 0.6.0 (Tue Feb 04 2014)
- Fix an issue when parsing scripts which contain invalid signatures that caused a chain fork on block 0000000000000001e4241fd0b3469a713f41c5682605451c05d3033288fb2244
- Correct an issue which could lead to an error in removeBlockNode (https://github.com/conformal/btcchain/issues/4)
- Improve addblock utility as follows:
  - Check imported blocks against all chain rules and checkpoints
  - Skip blocks which are already known so you can stop and restart the import or start the import after you have already downloaded a portion of the chain
  - Correct an issue where the utility did not shutdown cleanly after processing all blocks
  - Add error on attempt to import orphan blocks
  - Improve error handling and reporting
  - Display statistics after input file has been fully processed
- Rework, optimize, and improve headers-first mode:
  - Resuming the chain sync from any point before the final checkpoint will now use headers-first mode (https://github.com/conformal/btcd/issues/69)
  - Verify all checkpoints as opposed to only the final one
  - Reduce and bound memory usage
  - Rollback to the last known good point when a header does not match a checkpoint
  - Log information about what is happening with headers
- Improve btcctl utility in the following ways:
  - Add getaddednodeinfo command
  - Add getnettotals command
  - Add getblocktemplate command (wallet-specific)
  - Add getwork command (wallet-specific)
  - Add getnewaddress command (wallet-specific)
  - Add walletpassphrasechange command (wallet-specific)
  - Add walletlock command (wallet-specific)
  - Add sendfrom command (wallet-specific)
  - Add sendmany command (wallet-specific)
  - Add settxfee command (wallet-specific)
  - Add listsinceblock command (wallet-specific)
  - Add listaccounts command (wallet-specific)
  - Add keypoolrefill command (wallet-specific)
  - Add getreceivedbyaccount command (wallet-specific)
  - Add getrawchangeaddress command (wallet-specific)
  - Add gettxoutsetinfo command (wallet-specific)
  - Add listaddressgroupings command (wallet-specific)
  - Add listlockunspent command (wallet-specific)
  - Add listlock command (wallet-specific)
  - Add listreceivedbyaccount command (wallet-specific)
  - Add validateaddress command (wallet-specific)
  - Add verifymessage command (wallet-specific)
  - Add sendtoaddress command (wallet-specific)
- Continue cleanup and work on implementing the RPC API:
  - Implement submitblock command (https://github.com/conformal/btcd/issues/61)
  - Implement help command
  - Implement ping command
  - Implement getaddednodeinfo command (https://github.com/conformal/btcd/issues/78)
  - Implement getinfo command
  - Update getpeerinfo to support bytesrecv and bytessent (https://github.com/conformal/btcd/issues/83)
- Improve and correct several RPC server and websocket areas:
  - Change the connection endpoint for websockets from /wallet to /ws (https://github.com/conformal/btcd/issues/80)
  - Implement an alternative authentication for websockets so clients such as javascript from browsers that don't support setting HTTP headers can authenticate (https://github.com/conformal/btcd/issues/77)
  - Add an authentication deadline for RPC connections (https://github.com/conformal/btcd/issues/68)
  - Use standard authentication failure responses for RPC connections
  - Make automatically generated certificate more standard so it works from clients such as node.js and Firefox
  - Correct some minor issues which could prevent the RPC server from shutting down in an orderly fashion
  - Make all websocket notifications require registration
  - Change the data sent over websockets to text since it is JSON-RPC
  - Allow connections that do not have an Origin header set
- Expose and track the number of bytes read and written per peer (https://github.com/conformal/btcwire/issues/6)
- Correct an issue with sendrawtransaction when invoked via websockets which prevented a minedtx notification from being added
- Rescan operations issued from remote wallets are now stopped when the wallet disconnects mid-operation (https://github.com/conformal/btcd/issues/66)
- Several optimizations related to fetching block information from the database
- General code cleanup

Changes in 0.5.0 (Mon Jan 13 2014)
- Optimize initial block download by introducing a new mode which downloads the block headers first (up to the final checkpoint)
- Improve peer handling to remove the potential for slow peers to cause sluggishness amongst all peers (https://github.com/conformal/btcd/issues/63)
- Fix an issue where the initial block sync could stall when the sync peer disconnects (https://github.com/conformal/btcd/issues/62)
- Correct an issue where --externalip was doing a DNS lookup on the full host:port instead of just the host portion (https://github.com/conformal/btcd/issues/38)
- Fix an issue which could lead to a panic on chain switches (https://github.com/conformal/btcd/issues/70)
- Improve btcctl utility in the following ways:
  - Show getdifficulty output as floating point to 6 digits of precision
  - Show all JSON object replies formatted as standard JSON
  - Allow btcctl getblock to accept optional params
  - Add getaccount command (wallet-specific)
  - Add getaccountaddress command (wallet-specific)
  - Add sendrawtransaction command
- Continue cleanup and work on implementing RPC API calls:
  - Update getrawmempool to support new optional verbose flag
  - Update getrawtransaction to match the reference client
  - Update getblock to support new optional verbose flag
  - Update raw transactions to fully match the reference client including support for all transaction types and address types
  - Correct getrawmempool fee field to return BTC instead of Satoshi
  - Correct getpeerinfo service flag to return 8 digit string so it matches the reference client
  - Correct verifychain to return a boolean
  - Implement decoderawtransaction command
  - Implement createrawtransaction command
  - Implement decodescript command
  - Implement gethashespersec command
  - Allow RPC handler overrides when invoked via a websocket versus legacy connection
- Add new DNS seed for peer discovery
- Display user agent on new valid peer log message (https://github.com/conformal/btcd/issues/64)
- Notify wallet when new transactions that pay to registered addresses show up in the mempool before being mined into a block
- Support a tor-specific proxy in addition to a normal proxy (https://github.com/conformal/btcd/issues/47)
- Remove deprecated sqlite3 imports from utilities
- Remove leftover profile write from addblock utility
- Quite a bit of code cleanup and refactoring to improve maintainability

Changes in 0.4.0 (Thu Dec 12 2013)
- Allow listen interfaces to be specified via --listen instead of only the port (https://github.com/conformal/btcd/issues/33)
- Allow listen interfaces for the RPC server to be specified via --rpclisten instead of only the port (https://github.com/conformal/btcd/issues/34)
- Only disable listening when --connect or --proxy are used when no --listen interfaces are specified (https://github.com/conformal/btcd/issues/10)
- Add several new standard transaction checks to transaction memory pool:
  - Support nulldata scripts as standard
  - Only allow a max of one nulldata output per transaction
  - Enforce a maximum of 3 public keys in multi-signature transactions
  - The number of signatures in multi-signature transactions must not exceed the number of public keys
  - The number of inputs to a signature script must match the expected number of inputs for the script type
  - The number of inputs pushed onto the stack by a redeeming signature script must match the number of inputs consumed by the referenced public key script
- When a block is connected, remove any transactions from the memory pool which are now double spends as a result of the newly connected transactions
- Don't relay transactions resurrected during a chain switch since other peers will also be switching chains and therefore already know about them
- Cleanup a few cases where rejected transactions showed as an error rather than as a rejected transaction
- Ignore the default configuration file when --regtest (regression test mode) is specified
- Implement TLS support for RPC including automatic certificate generation
- Support HTTP authentication headers for web sockets
- Update address manager to recognize and properly work with Tor addresses (https://github.com/conformal/btcd/issues/36) and (https://github.com/conformal/btcd/issues/37)
- Improve btcctl utility in the following ways:
  - Add the ability to specify a configuration file
  - Add a default entry for the RPC cert to point to the location it will likely be in the btcd home directory
  - Implement --version flag
  - Provide a --notls option to support non-TLS configurations
- Fix a couple of minor races found by the Go race detector
- Improve logging:
  - Allow logging level to be specified on a per subsystem basis (https://github.com/conformal/btcd/issues/48)
  - Allow logging levels to be dynamically changed via RPC (https://github.com/conformal/btcd/issues/15)
  - Implement a rolling log file with a max of 10MB per file and a rotation size of 3 which results in a max logging size of 30 MB
- Correct a minor issue with the rescanning websocket call (https://github.com/conformal/btcd/issues/54)
- Fix a race with pushing address messages that could lead to a panic (https://github.com/conformal/btcd/issues/58)
- Improve which external IP address is reported to peers based on which interface they are connected through (https://github.com/conformal/btcd/issues/35)
- Add --externalip option to allow an external IP address to be specified for cases such as tor hidden services or advanced network configurations (https://github.com/conformal/btcd/issues/38)
- Add --upnp option to support automatic port mapping via UPnP (https://github.com/conformal/btcd/issues/51)
- Update Ctrl+C interrupt handler to properly sync address manager and remove the UPnP port mapping (if needed)
- Continue cleanup and work on implementing RPC API calls:
  - Add importprivkey (import private key) command to btcctl
  - Update getrawtransaction to provide addresses properly, support new verbose param, and match the reference implementation with the exception of MULTISIG (thanks @flammit)
  - Update getblock with new verbose flag (thanks @flammit)
  - Add listtransactions command to btcctl
  - Add getbalance command to btcctl
- Add basic support for btcd to run as a native Windows service (https://github.com/conformal/btcd/issues/42)
- Package addblock utility with Windows MSIs
- Add support for TravisCI (continuous build integration)
- Cleanup some documentation and usage
- Several other minor bug fixes and general code cleanup

Changes in 0.3.3 (Wed Nov 13 2013)
- Significantly improve initial block chain download speed (https://github.com/conformal/btcd/issues/20)
- Add a new checkpoint at block height 267300
- Optimize most recently used inventory handling (https://github.com/conformal/btcd/issues/21)
- Optimize duplicate transaction input check (https://github.com/conformal/btcchain/issues/2)
- Optimize transaction hashing (https://github.com/conformal/btcd/issues/25)
- Rework and optimize wallet listener notifications (https://github.com/conformal/btcd/issues/22)
- Optimize serialization and deserialization (https://github.com/conformal/btcd/issues/27)
- Add support for minimum transaction fee to memory pool acceptance (https://github.com/conformal/btcd/issues/29)
- Improve leveldb database performance by removing explicit GC call
- Fix an issue where Ctrl+C was not always finishing orderly database shutdown
- Fix an issue in the script handling for OP_CHECKSIG
- Impose max limits on all variable length protocol entries to prevent abuse from malicious peers
- Enforce DER signatures for transactions allowed into the memory pool
- Separate the debug profile http server from the RPC server
- Rework of the RPC code to improve performance and make the code cleaner
- The getrawtransaction RPC call now properly checks the memory pool before consulting the db (https://github.com/conformal/btcd/issues/26)
- Add support for the following RPC calls: getpeerinfo, getconnectioncount, addnode, verifychain (https://github.com/conformal/btcd/issues/13) (https://github.com/conformal/btcd/issues/17)
- Implement rescan websocket extension to allow wallet rescans
- Use correct paths for application data storage for all supported operating systems (https://github.com/conformal/btcd/issues/30)
- Add a default redirect to the http profiling page when accessing the http profile server
- Add a new --cpuprofile option which can be used to generate CPU profiling data on platforms that support it
- Several other minor performance optimizations
- Other minor bug fixes and general code cleanup
|
||||
|
||||

Changes in 0.3.2 (Tue Oct 22 2013)
- Fix an issue that could cause the download of the block chain to stall
  (https://github.com/conformal/btcd/issues/12)
- Remove deprecated sqlite as an available database backend
- Close sqlite compile issue as sqlite has now been removed
  (https://github.com/conformal/btcd/issues/11)
- Change default RPC ports to 8334 (mainnet) and 18334 (testnet)
- Continue cleanup and work on implementing RPC API calls
  - Add support for the following RPC calls: getrawmempool,
    getbestblockhash, decoderawtransaction, getdifficulty,
    getconnectioncount, getpeerinfo, and addnode
- Improve the btcctl utility that is used to issue JSON-RPC commands
- Fix an issue preventing btcd from cleanly shutting down with the RPC
  stop command
- Add a number of database interface tests to ensure backends implement
  the expected interface
- Expose some additional information from btcscript to be used for
  identifying "standard" transactions
- Add support for plan9 - thanks @mischief
  (https://github.com/conformal/btcd/pull/19)
- Other minor bug fixes and general code cleanup

Changes in 0.3.1-alpha (Tue Oct 15 2013)
- Change default database to leveldb
  NOTE: This does mean you will have to redownload the block chain. Since we
  are still in alpha, we didn't feel writing a converter was worth the time as
  it would take away from more important issues at this stage.
- Add a warning if there are multiple block chain databases of different types
- Fix issue with unexpected EOF in leveldb -- https://github.com/conformal/btcd/issues/18
- Fix issue preventing block 21066 on testnet -- https://github.com/conformal/btcchain/issues/1
- Fix issue preventing block 96464 on testnet -- https://github.com/conformal/btcscript/issues/1
- Optimize transaction lookups
- Correct a few cases of list removal that could result in improper cleanup
  of no longer needed orphans
- Add functionality to increase ulimits on non-Windows platforms
- Add support for the mempool command which allows remote peers to query the
  transaction memory pool via the bitcoin protocol
- Clean up logging a bit
- Add a flag to disable checkpoints for developers
- Add a lot of useful debug logging such as message summaries
- Other minor bug fixes and general code cleanup

Initial Release 0.3.0-alpha (Sat Oct 05 2013):
- Initial release

Jenkinsfile (vendored)
@@ -1,10 +0,0 @@
node {
    stage 'Checkout'
    checkout scm

    stage 'Version'
    sh './deploy.sh version'

    stage 'Build'
    sh "./deploy.sh build"
}

LICENSE
@@ -1,8 +1,9 @@
ISC License

Copyright (c) 2018-2019 DAGLabs
Copyright (c) 2013-2018 The btcsuite developers
Copyright (c) 2018-2019 The kaspanet developers
Copyright (c) 2013-2018 The btcsuite developers
Copyright (c) 2015-2016 The Decred developers
Copyright (c) 2013-2014 Conformal Systems LLC.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above

README.md
@@ -1,49 +1,24 @@
btcd
Kaspad
====
Warning: This is pre-alpha software. There's no guarantee anything works.
====

[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/daglabs/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad)

btcd is an alternative full node bitcoin implementation written in Go (golang).
Kaspad is the reference full node Kaspa implementation written in Go (golang).

This project is currently under active development and is in a Beta state. It
is extremely stable and has been in production use since October 2013.

It properly downloads, validates, and serves the block chain using the exact
rules (including consensus bugs) for block acceptance as Bitcoin Core. We have
taken great care to avoid btcd causing a fork to the block chain. It includes a
full block validation testing framework which contains all of the 'official'
block acceptance tests (and some additional ones) that is run on every pull
request to help ensure it properly follows consensus. Also, it passes all of
the JSON test data in the Bitcoin Core code.

It also properly relays newly mined blocks, maintains a transaction pool, and
relays individual transactions that have not yet made it into a block. It
ensures all individual transactions admitted to the pool follow the rules
required by the block chain and also includes more strict checks which filter
transactions based on miner requirements ("standard" transactions).

One key difference between btcd and Bitcoin Core is that btcd does *NOT* include
wallet functionality and this was a very intentional design decision. See the
blog entry [here](https://blog.conformal.com/btcd-not-your-moms-bitcoin-daemon)
for more details. This means you can't actually make or receive payments
directly with btcd. That functionality is provided by the
[btcwallet](https://github.com/btcsuite/btcwallet) and
[Paymetheus](https://github.com/btcsuite/Paymetheus) (Windows-only) projects
which are both under active development.
This project is currently under active development and is in a pre-Alpha state.
Some things still don't work and APIs are far from finalized. The code is provided for reference only.

## Requirements

[Go](http://golang.org) 1.8 or newer.
Latest version of [Go](http://golang.org) (currently 1.13).

## Installation

#### Windows - MSI Available

https://github.com/daglabs/btcd/releases

#### Linux/BSD/MacOSX/POSIX - Build from Source
#### Build from Source

- Install Go according to the installation instructions here:
  http://golang.org/doc/install
@@ -55,92 +30,50 @@ $ go version
$ go env GOROOT GOPATH
```

NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
recommended that `GOPATH` is set to a directory in your home directory such as
`~/goprojects` to avoid write permission issues. It is also recommended to add
`~/dev/go` to avoid write permission issues. It is also recommended to add
`$GOPATH/bin` to your `PATH` at this point.

- Run the following commands to obtain btcd, all dependencies, and install it:
- Run the following commands to obtain and install kaspad including all dependencies:

```bash
$ # Install dep: https://golang.github.io/dep/docs/installation.html
$ git clone https://github.com/daglabs/btcd $GOPATH/src/github.com/daglabs/btcd
$ cd $GOPATH/src/github.com/daglabs/btcd
$ dep ensure
$ git clone https://github.com/kaspanet/kaspad $GOPATH/src/github.com/kaspanet/kaspad
$ cd $GOPATH/src/github.com/kaspanet/kaspad
$ ./test.sh
$ go install . ./cmd/...
```
`./test.sh` tests can be skipped, but some things might not run correctly on your system if tests fail.

- btcd (and utilities) will now be installed in ```$GOPATH/bin```. If you did
- Kaspad (and utilities) should now be installed in `$GOPATH/bin`. If you did
not already add the bin directory to your system path during Go installation,
we recommend you do so now.
you are encouraged to do so now.

## Updating

#### Windows

Install a newer MSI

#### Linux/BSD/MacOSX/POSIX - Build from Source

- Run the following commands to update btcd, all dependencies, and install it:

```bash
$ cd $GOPATH/src/github.com/daglabs/btcd
$ git pull && dep ensure
$ go install . ./cmd/...
```

## Getting Started

btcd has several configuration options available to tweak how it runs, but all
of the basic operations described in the intro section work with zero
configuration.

#### Windows (Installed from MSI)

Launch btcd from your Start menu.
Kaspad has several configuration options available to tweak how it runs, but all
of the basic operations work with zero configuration.

#### Linux/BSD/POSIX/Source

```bash
$ ./btcd
$ ./kaspad
```

## IRC

- irc.freenode.net
- channel #btcd
- [webchat](https://webchat.freenode.net/?channels=btcd)
## Discord
Join our discord server using the following link: https://discord.gg/WmGhhzk

## Issue Tracker

The [integrated github issue tracker](https://github.com/daglabs/btcd/issues)
The [integrated github issue tracker](https://github.com/kaspanet/kaspad/issues)
is used for this project.

## Documentation

The documentation is a work-in-progress. It is located in the [docs](https://github.com/daglabs/btcd/tree/master/docs) folder.

## GPG Verification Key

All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:

- Download the Conformal public key:
  https://raw.githubusercontent.com/btcsuite/btcd/master/release/GIT-GPG-KEY-conformal.txt

- Import the public key into your GPG keyring:
  ```bash
  gpg --import GIT-GPG-KEY-conformal.txt
  ```

- Verify the release tag with the following command where `TAG_NAME` is a
  placeholder for the specific tag:
  ```bash
  git tag -v TAG_NAME
  ```
The documentation is a work-in-progress. It is located in the [docs](https://github.com/kaspanet/kaspad/tree/master/docs) folder.

## License

btcd is licensed under the [copyfree](http://copyfree.org) ISC License.
Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).

File diff suppressed because it is too large
@@ -1,17 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This script uses gocov to generate a test coverage report.
|
||||
# The gocov tool my be obtained with the following command:
|
||||
# go get github.com/axw/gocov/gocov
|
||||
#
|
||||
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
|
||||
|
||||
# Check for gocov.
|
||||
type gocov >/dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
echo >&2 "This script requires the gocov tool."
|
||||
echo >&2 "You may obtain it with the following command:"
|
||||
echo >&2 "go get github.com/axw/gocov/gocov"
|
||||
exit 1
|
||||
fi
|
||||
gocov test | gocov report
|
||||
@@ -1,38 +0,0 @@
|
||||
// Copyright (c) 2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package addrmgr implements concurrency safe Bitcoin address manager.
|
||||
|
||||
Address Manager Overview
|
||||
|
||||
In order maintain the peer-to-peer Bitcoin network, there needs to be a source
|
||||
of addresses to connect to as nodes come and go. The Bitcoin protocol provides
|
||||
the getaddr and addr messages to allow peers to communicate known addresses with
|
||||
each other. However, there needs to a mechanism to store those results and
|
||||
select peers from them. It is also important to note that remote peers can't
|
||||
be trusted to send valid peers nor attempt to provide you with only peers they
|
||||
control with malicious intent.
|
||||
|
||||
With that in mind, this package provides a concurrency safe address manager for
|
||||
caching and selecting peers in a non-deterministic manner. The general idea is
|
||||
the caller adds addresses to the address manager and notifies it when addresses
|
||||
are connected, known good, and attempted. The caller also requests addresses as
|
||||
it needs them.
|
||||
|
||||
The address manager internally segregates the addresses into groups and
|
||||
non-deterministically selects groups in a cryptographically random manner. This
|
||||
reduce the chances multiple addresses from the same nets are selected which
|
||||
generally helps provide greater peer diversity, and perhaps more importantly,
|
||||
drastically reduces the chances an attacker is able to coerce your peer into
|
||||
only connecting to nodes they control.
|
||||
|
||||
The address manager also understands routability and Tor addresses and tries
|
||||
hard to only return routable addresses. In addition, it uses the information
|
||||
provided by the caller about connected, known good, and attempted addresses to
|
||||
periodically purge peers which no longer appear to be good peers as well as
|
||||
bias the selection toward known good peers. The general idea is to make a best
|
||||
effort at only providing usable addresses.
|
||||
*/
|
||||
package addrmgr
|
||||
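
As a rough illustration of the caller pattern this overview describes, here is a minimal sketch using the addrmgr API as it exists in upstream btcd-derived code (`New`, `Start`, `Stop`, `GetAddress`, `Attempt`, and `Good` follow that package; the fork may differ slightly, and the data directory and use of `net.LookupIP` as the lookup function are assumptions for the example):

```go
package main

import (
	"net"

	"github.com/daglabs/btcd/addrmgr"
)

func main() {
	// New takes a directory for the serialized peers file and a DNS
	// lookup function; net.LookupIP is used here for simplicity.
	amgr := addrmgr.New("/tmp/addrmgr-demo", net.LookupIP)
	amgr.Start() // starts the background address handler
	defer amgr.Stop()

	// Ask the manager for a candidate peer to dial.
	ka := amgr.GetAddress()
	if ka == nil {
		return // no addresses known yet
	}

	// Record the attempt, and mark the address good once the
	// connection and handshake succeed.
	amgr.Attempt(ka.NetAddress())
	amgr.Good(ka.NetAddress())
}
```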
@@ -1,25 +0,0 @@
|
||||
// Copyright (c) 2013-2015 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package addrmgr
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/daglabs/btcd/wire"
|
||||
)
|
||||
|
||||
func TstKnownAddressIsBad(ka *KnownAddress) bool {
|
||||
return ka.isBad()
|
||||
}
|
||||
|
||||
func TstKnownAddressChance(ka *KnownAddress) float64 {
|
||||
return ka.chance()
|
||||
}
|
||||
|
||||
func TstNewKnownAddress(na *wire.NetAddress, attempts int,
|
||||
lastattempt, lastsuccess time.Time, tried bool, refs int) *KnownAddress {
|
||||
return &KnownAddress{na: na, attempts: attempts, lastattempt: lastattempt,
|
||||
lastsuccess: lastsuccess, tried: tried, refs: refs}
|
||||
}
|
||||
@@ -1,114 +0,0 @@
|
||||
// Copyright (c) 2013-2015 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package addrmgr_test
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/daglabs/btcd/addrmgr"
|
||||
"github.com/daglabs/btcd/wire"
|
||||
)
|
||||
|
||||
func TestChance(t *testing.T) {
|
||||
now := time.Unix(time.Now().Unix(), 0)
|
||||
var tests = []struct {
|
||||
addr *addrmgr.KnownAddress
|
||||
expected float64
|
||||
}{
|
||||
{
|
||||
//Test normal case
|
||||
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
|
||||
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
|
||||
1.0,
|
||||
}, {
|
||||
//Test case in which lastseen < 0
|
||||
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)},
|
||||
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
|
||||
1.0,
|
||||
}, {
|
||||
//Test case in which lastattempt < 0
|
||||
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
|
||||
0, time.Now().Add(30*time.Minute), time.Now(), false, 0),
|
||||
1.0 * .01,
|
||||
}, {
|
||||
//Test case in which lastattempt < ten minutes
|
||||
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
|
||||
0, time.Now().Add(-5*time.Minute), time.Now(), false, 0),
|
||||
1.0 * .01,
|
||||
}, {
|
||||
//Test case with several failed attempts.
|
||||
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
|
||||
2, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
|
||||
1 / 1.5 / 1.5,
|
||||
},
|
||||
}
|
||||
|
||||
err := .0001
|
||||
for i, test := range tests {
|
||||
chance := addrmgr.TstKnownAddressChance(test.addr)
|
||||
if math.Abs(test.expected-chance) >= err {
|
||||
t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsBad(t *testing.T) {
|
||||
now := time.Unix(time.Now().Unix(), 0)
|
||||
future := now.Add(35 * time.Minute)
|
||||
monthOld := now.Add(-43 * time.Hour * 24)
|
||||
secondsOld := now.Add(-2 * time.Second)
|
||||
minutesOld := now.Add(-27 * time.Minute)
|
||||
hoursOld := now.Add(-5 * time.Hour)
|
||||
zeroTime := time.Time{}
|
||||
|
||||
futureNa := &wire.NetAddress{Timestamp: future}
|
||||
minutesOldNa := &wire.NetAddress{Timestamp: minutesOld}
|
||||
monthOldNa := &wire.NetAddress{Timestamp: monthOld}
|
||||
currentNa := &wire.NetAddress{Timestamp: secondsOld}
|
||||
|
||||
//Test addresses that have been tried in the last minute.
|
||||
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
|
||||
t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
|
||||
}
|
||||
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
|
||||
t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
|
||||
}
|
||||
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
|
||||
t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
|
||||
}
|
||||
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
|
||||
t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
|
||||
}
|
||||
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
|
||||
t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
|
||||
}
|
||||
|
||||
//Test address that claims to be from the future.
|
||||
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
|
||||
t.Errorf("test case 6: addresses that claim to be from the future are bad.")
|
||||
}
|
||||
|
||||
//Test address that has not been seen in over a month.
|
||||
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
|
||||
t.Errorf("test case 7: addresses more than a month old are bad.")
|
||||
}
|
||||
|
||||
//It has failed at least three times and never succeeded.
|
||||
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
|
||||
t.Errorf("test case 8: addresses that have never succeeded are bad.")
|
||||
}
|
||||
|
||||
//It has failed ten times in the last week
|
||||
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
|
||||
t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
|
||||
}
|
||||
|
||||
//Test an address that should work.
|
||||
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
|
||||
t.Errorf("test case 10: This should be a valid address.")
|
||||
}
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
|
||||
github.com/conformal/btcd/addrmgr/network.go GroupKey 100.00% (23/23)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.reset 100.00% (6/6)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRFC5737 100.00% (4/4)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRFC1918 100.00% (4/4)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go New 100.00% (3/3)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go NetAddressKey 100.00% (2/2)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRFC4862 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.numAddresses 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/log.go init 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/log.go DisableLog 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go ipNet 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsIPv4 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsLocal 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsOnionCatTor 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRFC2544 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRFC3849 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRFC3927 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRFC3964 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRFC4193 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRFC4380 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRFC4843 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRFC6052 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRFC6145 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRFC6598 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsValid 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/network.go IsRoutable 100.00% (1/1)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetBestLocalAddress 94.74% (18/19)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddLocalAddress 90.91% (10/11)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go getReachabilityFrom 51.52% (17/33)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go ipString 50.00% (2/4)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetAddress 9.30% (4/43)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.deserializePeers 0.00% (0/50)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Good 0.00% (0/44)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.savePeers 0.00% (0/39)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.updateAddress 0.00% (0/30)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.expireNew 0.00% (0/22)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddressCache 0.00% (0/16)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.HostToNetAddress 0.00% (0/15)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getNewBucket 0.00% (0/15)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddressByIP 0.00% (0/14)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getTriedBucket 0.00% (0/14)
|
||||
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.chance 0.00% (0/13)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.loadPeers 0.00% (0/11)
|
||||
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.isBad 0.00% (0/11)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Connected 0.00% (0/10)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.addressHandler 0.00% (0/9)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.pickTried 0.00% (0/8)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.DeserializeNetAddress 0.00% (0/7)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Stop 0.00% (0/7)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Attempt 0.00% (0/7)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Start 0.00% (0/6)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddresses 0.00% (0/4)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NeedMoreAddresses 0.00% (0/3)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NumAddresses 0.00% (0/3)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddress 0.00% (0/3)
|
||||
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.LastAttempt 0.00% (0/1)
|
||||
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.NetAddress 0.00% (0/1)
|
||||
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.find 0.00% (0/1)
|
||||
github.com/conformal/btcd/addrmgr/log.go UseLogger 0.00% (0/1)
|
||||
github.com/conformal/btcd/addrmgr --------------------------------- 21.04% (113/537)
|
||||
|
||||
@@ -1,126 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/daglabs/btcd/apiserver/logger"
|
||||
"github.com/daglabs/btcd/dagconfig"
|
||||
"github.com/daglabs/btcd/util"
|
||||
"github.com/jessevdk/go-flags"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultLogFilename = "apiserver.log"
|
||||
defaultErrLogFilename = "apiserver_err.log"
|
||||
)
|
||||
|
||||
var (
|
||||
// activeNetParams are the currently active net params
|
||||
activeNetParams dagconfig.Params
|
||||
)
|
||||
|
||||
var (
|
||||
// Default configuration options
|
||||
defaultLogDir = util.AppDataDir("apiserver", false)
|
||||
defaultDBAddress = "localhost:3306"
|
||||
defaultHTTPListen = "0.0.0.0:8080"
|
||||
)
|
||||
|
||||
// Config defines the configuration options for the API server.
|
||||
type Config struct {
|
||||
LogDir string `long:"logdir" description:"Directory to log output."`
|
||||
RPCUser string `short:"u" long:"rpcuser" description:"RPC username"`
|
||||
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
|
||||
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
|
||||
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
|
||||
DisableTLS bool `long:"notls" description:"Disable TLS"`
|
||||
DBAddress string `long:"dbaddress" description:"Database address"`
|
||||
DBUser string `long:"dbuser" description:"Database user" required:"true"`
|
||||
DBPassword string `long:"dbpass" description:"Database password" required:"true"`
|
||||
DBName string `long:"dbname" description:"Database name" required:"true"`
|
||||
HTTPListen string `long:"listen" description:"HTTP address to listen on (default: 0.0.0.0:8080)"`
|
||||
Migrate bool `long:"migrate" description:"Migrate the database to the latest version. The server will not start when using this flag."`
|
||||
TestNet bool `long:"testnet" description:"Connect to testnet"`
|
||||
SimNet bool `long:"simnet" description:"Connect to the simulation test network"`
|
||||
DevNet bool `long:"devnet" description:"Connect to the development test network"`
|
||||
}
|
||||
|
||||
// Parse parses the CLI arguments and returns a config struct.
|
||||
func Parse() (*Config, error) {
|
||||
cfg := &Config{
|
||||
LogDir: defaultLogDir,
|
||||
DBAddress: defaultDBAddress,
|
||||
HTTPListen: defaultHTTPListen,
|
||||
}
|
||||
parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
|
||||
_, err := parser.Parse()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !cfg.Migrate {
|
||||
if cfg.RPCUser == "" {
|
||||
return nil, errors.New("--rpcuser is required if --migrate flag is not used")
|
||||
}
|
||||
if cfg.RPCPassword == "" {
|
||||
return nil, errors.New("--rpcpass is required if --migrate flag is not used")
|
||||
}
|
||||
if cfg.RPCServer == "" {
|
||||
return nil, errors.New("--rpcserver is required if --migrate flag is not used")
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.RPCCert == "" && !cfg.DisableTLS {
|
||||
return nil, errors.New("--notls has to be disabled if --cert is used")
|
||||
}
|
||||
|
||||
if cfg.RPCCert != "" && cfg.DisableTLS {
|
||||
return nil, errors.New("--cert should be omitted if --notls is used")
|
||||
}
|
||||
|
||||
err = resolveNetwork(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logFile := filepath.Join(cfg.LogDir, defaultLogFilename)
|
||||
errLogFile := filepath.Join(cfg.LogDir, defaultErrLogFilename)
|
||||
logger.InitLog(logFile, errLogFile)
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func resolveNetwork(cfg *Config) error {
|
||||
// Multiple networks can't be selected simultaneously.
|
||||
numNets := 0
|
||||
if cfg.TestNet {
|
||||
numNets++
|
||||
}
|
||||
if cfg.SimNet {
|
||||
numNets++
|
||||
}
|
||||
if cfg.DevNet {
|
||||
numNets++
|
||||
}
|
||||
if numNets > 1 {
|
||||
return errors.New("multiple net params (testnet, simnet, devnet, etc.) can't be used " +
|
||||
"together -- choose one of them")
|
||||
}
|
||||
|
||||
activeNetParams = dagconfig.MainNetParams
|
||||
switch {
|
||||
case cfg.TestNet:
|
||||
activeNetParams = dagconfig.TestNetParams
|
||||
case cfg.SimNet:
|
||||
activeNetParams = dagconfig.SimNetParams
|
||||
case cfg.DevNet:
|
||||
activeNetParams = dagconfig.DevNetParams
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ActiveNetParams returns the currently active net params
|
||||
func ActiveNetParams() *dagconfig.Params {
|
||||
return &activeNetParams
|
||||
}
|
||||
@@ -1,79 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/daglabs/btcd/apiserver/database"
|
||||
"github.com/daglabs/btcd/apiserver/models"
|
||||
"github.com/daglabs/btcd/apiserver/utils"
|
||||
"github.com/daglabs/btcd/util/daghash"
|
||||
)
|
||||
|
||||
const (
|
||||
// OrderAscending is parameter that can be used
|
||||
// in a get list handler to get a list ordered
|
||||
// in an ascending order.
|
||||
OrderAscending = "asc"
|
||||
|
||||
// OrderDescending is parameter that can be used
|
||||
// in a get list handler to get a list ordered
|
||||
// in an ascending order.
|
||||
OrderDescending = "desc"
|
||||
)
|
||||
|
||||
const maxGetBlocksLimit = 100
|
||||
|
||||
// GetBlockByHashHandler returns a block by a given hash.
|
||||
func GetBlockByHashHandler(blockHash string) (interface{}, *utils.HandlerError) {
|
||||
if bytes, err := hex.DecodeString(blockHash); err != nil || len(bytes) != daghash.HashSize {
|
||||
return nil, utils.NewHandlerError(http.StatusUnprocessableEntity,
|
||||
fmt.Sprintf("The given block hash is not a hex-encoded %d-byte hash.", daghash.HashSize))
|
||||
}
|
||||
|
||||
db, err := database.DB()
|
||||
if err != nil {
|
||||
return nil, utils.NewInternalServerHandlerError(err.Error())
|
||||
}
|
||||
|
||||
block := &models.Block{}
|
||||
dbResult := db.Where(&models.Block{BlockHash: blockHash}).Preload("AcceptingBlock").First(block)
|
||||
dbErrors := dbResult.GetErrors()
|
||||
if utils.IsDBRecordNotFoundError(dbErrors) {
|
||||
return nil, utils.NewHandlerError(http.StatusNotFound, "No block with the given block hash was found.")
|
||||
}
|
||||
if utils.HasDBError(dbErrors) {
|
||||
return nil, utils.NewHandlerErrorFromDBErrors("Some errors where encountered when loading transactions from the database:", dbResult.GetErrors())
|
||||
}
|
||||
return convertBlockModelToBlockResponse(block), nil
|
||||
}
|
||||
|
||||
// GetBlocksHandler searches for all blocks
|
||||
func GetBlocksHandler(order string, skip uint64, limit uint64) (interface{}, *utils.HandlerError) {
|
||||
if limit > maxGetBlocksLimit {
|
||||
return nil, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("The maximum allowed value for the limit is %d", maxGetTransactionsLimit))
|
||||
}
|
||||
blocks := []*models.Block{}
|
||||
db, err := database.DB()
|
||||
if err != nil {
|
||||
return nil, utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
|
||||
}
|
||||
query := db.
|
||||
Limit(limit).
|
||||
Offset(skip).
|
||||
Preload("AcceptingBlock")
|
||||
if order == OrderAscending {
|
||||
query = query.Order("`id` ASC")
|
||||
} else if order == OrderDescending {
|
||||
query = query.Order("`id` DESC")
|
||||
} else {
|
||||
return nil, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("'%s' is not a valid order", order))
|
||||
}
|
||||
query.Find(&blocks)
|
||||
blockResponses := make([]*blockResponse, len(blocks))
|
||||
for i, block := range blocks {
|
||||
blockResponses[i] = convertBlockModelToBlockResponse(block)
|
||||
}
|
||||
return blockResponses, nil
|
||||
}
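
A hypothetical client-side view of the pagination these handlers implement, assuming the server mounts GetBlocksHandler at GET /blocks with order, skip, and limit query parameters (the exact route lives in the server package and is an assumption here):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Fetch the first 25 blocks in ascending id order; skip/limit give
	// simple offset pagination, capped server-side at maxGetBlocksLimit (100).
	resp, err := http.Get("http://localhost:8080/blocks?order=asc&skip=0&limit=25")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```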

@@ -1,13 +0,0 @@
package controllers

import "github.com/daglabs/btcd/apiserver/utils"

// GetFeeEstimatesHandler returns the fee estimates for different priorities
// for accepting a transaction in the DAG.
func GetFeeEstimatesHandler() (interface{}, *utils.HandlerError) {
	return &feeEstimateResponse{
		HighPriority:   3,
		NormalPriority: 2,
		LowPriority:    1,
	}, nil
}

@@ -1,6 +0,0 @@
package controllers

// RawTransaction represents a raw transaction posted to the API server
type RawTransaction struct {
	RawTransaction string `json:"rawTransaction"`
}
@@ -1,113 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"github.com/daglabs/btcd/apiserver/models"
|
||||
"github.com/daglabs/btcd/btcjson"
|
||||
)
|
||||
|
||||
type transactionResponse struct {
|
||||
TransactionHash string `json:"transactionHash"`
|
||||
TransactionID string `json:"transactionId"`
|
||||
AcceptingBlockHash string `json:"acceptingBlockHash,omitempty"`
|
||||
AcceptingBlockBlueScore uint64 `json:"acceptingBlockBlueScore,omitempty"`
|
||||
SubnetworkID string `json:"subnetworkId"`
|
||||
LockTime uint64 `json:"lockTime"`
|
||||
Gas uint64 `json:"gas,omitempty"`
|
||||
PayloadHash string `json:"payloadHash,omitempty"`
|
||||
Payload string `json:"payload,omitempty"`
|
||||
Inputs []*transactionInputResponse `json:"inputs"`
|
||||
Outputs []*transactionOutputResponse `json:"outputs"`
|
||||
Mass uint64 `json:"mass"`
|
||||
}
|
||||
|
||||
type transactionOutputResponse struct {
|
||||
TransactionID string `json:"transactionId,omitempty"`
|
||||
Value uint64 `json:"value"`
|
||||
ScriptPubKey string `json:"scriptPubKey"`
|
||||
Address string `json:"address,omitempty"`
|
||||
AcceptingBlockHash string `json:"acceptingBlockHash,omitempty"`
|
||||
AcceptingBlockBlueScore uint64 `json:"acceptingBlockBlueScore,omitempty"`
|
||||
}
|
||||
|
||||
type transactionInputResponse struct {
|
||||
TransactionID string `json:"transactionId,omitempty"`
|
||||
PreviousTransactionID string `json:"previousTransactionId"`
|
||||
PreviousTransactionOutputIndex uint32 `json:"previousTransactionOutputIndex"`
|
||||
SignatureScript string `json:"signatureScript"`
|
||||
Sequence uint64 `json:"sequence"`
|
||||
Address string `json:"address"`
|
||||
}
|
||||
|
||||
type blockResponse struct {
|
||||
BlockHash string
|
||||
Version int32
|
||||
HashMerkleRoot string
|
||||
AcceptedIDMerkleRoot string
|
||||
UTXOCommitment string
|
||||
Timestamp uint64
|
||||
Bits uint32
|
||||
Nonce uint64
|
||||
AcceptingBlockHash *string
|
||||
BlueScore uint64
|
||||
IsChainBlock bool
|
||||
Mass uint64
|
||||
}
|
||||
|
||||
type feeEstimateResponse struct {
|
||||
HighPriority, NormalPriority, LowPriority float64
|
||||
}
|
||||
|
||||
func convertTxModelToTxResponse(tx *models.Transaction) *transactionResponse {
|
||||
txRes := &transactionResponse{
|
||||
TransactionHash: tx.TransactionHash,
|
||||
TransactionID: tx.TransactionID,
|
||||
AcceptingBlockHash: tx.AcceptingBlock.BlockHash,
|
||||
AcceptingBlockBlueScore: tx.AcceptingBlock.BlueScore,
|
||||
SubnetworkID: tx.Subnetwork.SubnetworkID,
|
||||
LockTime: tx.LockTime,
|
||||
Gas: tx.Gas,
|
||||
PayloadHash: tx.PayloadHash,
|
||||
Payload: hex.EncodeToString(tx.Payload),
|
||||
Inputs: make([]*transactionInputResponse, len(tx.TransactionInputs)),
|
||||
Outputs: make([]*transactionOutputResponse, len(tx.TransactionOutputs)),
|
||||
Mass: tx.Mass,
|
||||
}
|
||||
for i, txOut := range tx.TransactionOutputs {
|
||||
txRes.Outputs[i] = &transactionOutputResponse{
|
||||
Value: txOut.Value,
|
||||
ScriptPubKey: hex.EncodeToString(txOut.ScriptPubKey),
|
||||
Address: txOut.Address.Address,
|
||||
}
|
||||
}
|
||||
for i, txIn := range tx.TransactionInputs {
|
||||
txRes.Inputs[i] = &transactionInputResponse{
|
||||
PreviousTransactionID: txIn.PreviousTransactionOutput.Transaction.TransactionID,
|
||||
PreviousTransactionOutputIndex: txIn.PreviousTransactionOutput.Index,
|
||||
SignatureScript: hex.EncodeToString(txIn.SignatureScript),
|
||||
Sequence: txIn.Sequence,
|
||||
Address: txIn.PreviousTransactionOutput.Address.Address,
|
||||
}
|
||||
}
|
||||
return txRes
|
||||
}
|
||||
|
||||
func convertBlockModelToBlockResponse(block *models.Block) *blockResponse {
|
||||
blockRes := &blockResponse{
|
||||
BlockHash: block.BlockHash,
|
||||
Version: block.Version,
|
||||
HashMerkleRoot: block.HashMerkleRoot,
|
||||
AcceptedIDMerkleRoot: block.AcceptedIDMerkleRoot,
|
||||
UTXOCommitment: block.UTXOCommitment,
|
||||
Timestamp: uint64(block.Timestamp.Unix()),
|
||||
Bits: block.Bits,
|
||||
Nonce: block.Nonce,
|
||||
BlueScore: block.BlueScore,
|
||||
IsChainBlock: block.IsChainBlock,
|
||||
Mass: block.Mass,
|
||||
}
|
||||
if block.AcceptingBlock != nil {
|
||||
blockRes.AcceptingBlockHash = btcjson.String(block.AcceptingBlock.BlockHash)
|
||||
}
|
||||
return blockRes
|
||||
}
|
||||
@@ -1,187 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/daglabs/btcd/apiserver/database"
|
||||
"github.com/daglabs/btcd/apiserver/jsonrpc"
|
||||
"github.com/daglabs/btcd/apiserver/models"
|
||||
"github.com/daglabs/btcd/apiserver/utils"
|
||||
"github.com/daglabs/btcd/btcjson"
|
||||
"github.com/daglabs/btcd/util/daghash"
|
||||
"github.com/daglabs/btcd/wire"
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
const maxGetTransactionsLimit = 1000
|
||||
|
||||
// GetTransactionByIDHandler returns a transaction by a given transaction ID.
|
||||
func GetTransactionByIDHandler(txID string) (interface{}, *utils.HandlerError) {
|
||||
if bytes, err := hex.DecodeString(txID); err != nil || len(bytes) != daghash.TxIDSize {
|
||||
return nil, utils.NewHandlerError(http.StatusUnprocessableEntity,
|
||||
fmt.Sprintf("The given txid is not a hex-encoded %d-byte hash.", daghash.TxIDSize))
|
||||
}
|
||||
|
||||
db, err := database.DB()
|
||||
if err != nil {
|
||||
return nil, utils.NewInternalServerHandlerError(err.Error())
|
||||
}
|
||||
|
||||
tx := &models.Transaction{}
|
||||
query := db.Where(&models.Transaction{TransactionID: txID})
|
||||
dbResult := addTxPreloadedFields(query).First(&tx)
|
||||
dbErrors := dbResult.GetErrors()
|
||||
if utils.IsDBRecordNotFoundError(dbErrors) {
|
||||
return nil, utils.NewHandlerError(http.StatusNotFound, "No transaction with the given txid was found.")
|
||||
}
|
||||
if utils.HasDBError(dbErrors) {
|
||||
return nil, utils.NewHandlerErrorFromDBErrors("Some errors where encountered when loading transaction from the database:", dbErrors)
|
||||
}
|
||||
return convertTxModelToTxResponse(tx), nil
|
||||
}
|
||||
|
||||
// GetTransactionByHashHandler returns a transaction by a given transaction hash.
|
||||
func GetTransactionByHashHandler(txHash string) (interface{}, *utils.HandlerError) {
|
||||
if bytes, err := hex.DecodeString(txHash); err != nil || len(bytes) != daghash.HashSize {
|
||||
return nil, utils.NewHandlerError(http.StatusUnprocessableEntity,
|
||||
fmt.Sprintf("The given txhash is not a hex-encoded %d-byte hash.", daghash.HashSize))
|
||||
}
|
||||
|
||||
db, err := database.DB()
|
||||
if err != nil {
|
||||
return nil, utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
|
||||
}
|
||||
|
||||
tx := &models.Transaction{}
|
||||
query := db.Where(&models.Transaction{TransactionHash: txHash})
|
||||
dbResult := addTxPreloadedFields(query).First(&tx)
|
||||
dbErrors := dbResult.GetErrors()
|
||||
if utils.IsDBRecordNotFoundError(dbErrors) {
|
||||
return nil, utils.NewHandlerError(http.StatusNotFound, "No transaction with the given txhash was found.")
|
||||
}
|
||||
if utils.HasDBError(dbErrors) {
|
||||
return nil, utils.NewHandlerErrorFromDBErrors("Some errors where encountered when loading transaction from the database:", dbErrors)
|
||||
}
|
||||
return convertTxModelToTxResponse(tx), nil
|
||||
}
|
||||
|
||||
// GetTransactionsByAddressHandler searches for all transactions
|
||||
// where the given address is either an input or an output.
|
||||
func GetTransactionsByAddressHandler(address string, skip uint64, limit uint64) (interface{}, *utils.HandlerError) {
|
||||
if limit > maxGetTransactionsLimit {
|
||||
return nil, utils.NewHandlerError(http.StatusUnprocessableEntity,
|
||||
fmt.Sprintf("The maximum allowed value for the limit is %d", maxGetTransactionsLimit))
|
||||
}
|
||||
|
||||
db, err := database.DB()
|
||||
if err != nil {
|
||||
return nil, utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
|
||||
}
|
||||
|
||||
txs := []*models.Transaction{}
|
||||
query := db.
|
||||
Joins("LEFT JOIN `transaction_outputs` ON `transaction_outputs`.`transaction_id` = `transactions`.`id`").
|
||||
Joins("LEFT JOIN `addresses` AS `out_addresses` ON `out_addresses`.`id` = `transaction_outputs`.`address_id`").
|
||||
Joins("LEFT JOIN `transaction_inputs` ON `transaction_inputs`.`transaction_id` = `transactions`.`id`").
|
||||
Joins("LEFT JOIN `transaction_outputs` AS `inputs_outs` ON `inputs_outs`.`id` = `transaction_inputs`.`transaction_output_id`").
|
||||
Joins("LEFT JOIN `addresses` AS `in_addresses` ON `in_addresses`.`id` = `inputs_outs`.`address_id`").
|
||||
Where("`out_addresses`.`address` = ?", address).
|
||||
Or("`in_addresses`.`address` = ?", address).
|
||||
Limit(limit).
|
||||
Offset(skip).
|
||||
Order("`transactions`.`id` ASC")
|
||||
dbResult := addTxPreloadedFields(query).Find(&txs)
|
||||
dbErrors := dbResult.GetErrors()
|
||||
if utils.HasDBError(dbErrors) {
|
||||
return nil, utils.NewHandlerErrorFromDBErrors("Some errors where encountered when loading transactions from the database:", dbErrors)
|
||||
}
|
||||
txResponses := make([]*transactionResponse, len(txs))
|
||||
for i, tx := range txs {
|
||||
txResponses[i] = convertTxModelToTxResponse(tx)
|
||||
}
|
||||
return txResponses, nil
|
||||
}
|
||||
|
||||
// GetUTXOsByAddressHandler searches for all UTXOs that belong to a certain address.
|
||||
func GetUTXOsByAddressHandler(address string) (interface{}, *utils.HandlerError) {
|
||||
db, err := database.DB()
|
||||
if err != nil {
|
||||
return nil, utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
|
||||
}
|
||||
|
||||
var transactionOutputs []*models.TransactionOutput
|
||||
dbErrors := db.
|
||||
Joins("LEFT JOIN `addresses` ON `addresses`.`id` = `transaction_outputs`.`address_id`").
|
||||
Where("`addresses`.`address` = ? AND `transaction_outputs`.`is_spent` = 0", address).
|
||||
Preload("Transaction.AcceptingBlock").
|
||||
Find(&transactionOutputs).GetErrors()
|
||||
if len(dbErrors) > 0 {
|
||||
return nil, utils.NewHandlerErrorFromDBErrors("Some errors where encountered when loading UTXOs from the database:", dbErrors)
|
||||
}
|
||||
|
||||
UTXOsResponses := make([]*transactionOutputResponse, len(transactionOutputs))
|
||||
for i, transactionOutput := range transactionOutputs {
|
||||
UTXOsResponses[i] = &transactionOutputResponse{
|
||||
Value: transactionOutput.Value,
|
||||
ScriptPubKey: hex.EncodeToString(transactionOutput.ScriptPubKey),
|
||||
AcceptingBlockHash: transactionOutput.Transaction.AcceptingBlock.BlockHash,
|
||||
AcceptingBlockBlueScore: transactionOutput.Transaction.AcceptingBlock.BlueScore,
|
||||
}
|
||||
}
|
||||
return UTXOsResponses, nil
|
||||
}
|
||||
|
||||
func addTxPreloadedFields(query *gorm.DB) *gorm.DB {
|
||||
return query.Preload("AcceptingBlock").
|
||||
Preload("Subnetwork").
|
||||
Preload("TransactionOutputs").
|
||||
Preload("TransactionOutputs.Address").
|
||||
Preload("TransactionInputs.PreviousTransactionOutput.Transaction").
|
||||
Preload("TransactionInputs.PreviousTransactionOutput.Address")
|
||||
}
|
||||
|
||||
// PostTransaction forwards a raw transaction to the JSON-RPC API server
|
||||
func PostTransaction(requestBody []byte) *utils.HandlerError {
|
||||
client, err := jsonrpc.GetClient()
|
||||
if err != nil {
|
||||
return utils.NewInternalServerHandlerError(err.Error())
|
||||
}
|
||||
|
||||
rawTx := &RawTransaction{}
|
||||
err = json.Unmarshal(requestBody, rawTx)
|
||||
if err != nil {
|
||||
return utils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
|
||||
fmt.Sprintf("Error unmarshalling request body: %s", err),
|
||||
"The request body is not json-formatted")
|
||||
}
|
||||
|
||||
txBytes, err := hex.DecodeString(rawTx.RawTransaction)
|
||||
if err != nil {
|
||||
return utils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
|
||||
fmt.Sprintf("Error decoding hex raw transaction: %s", err),
|
||||
"The raw transaction is not a hex-encoded transaction")
|
||||
}
|
||||
|
||||
txReader := bytes.NewReader(txBytes)
|
||||
tx := &wire.MsgTx{}
|
||||
err = tx.BtcDecode(txReader, 0)
|
||||
if err != nil {
|
||||
return utils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
|
||||
fmt.Sprintf("Error decoding raw transaction: %s", err),
|
||||
"Error decoding raw transaction")
|
||||
}
|
||||
|
||||
_, err = client.SendRawTransaction(tx, true)
|
||||
if err != nil {
|
||||
if rpcErr, ok := err.(btcjson.RPCError); ok && rpcErr.Code == btcjson.ErrRPCVerify {
|
||||
return utils.NewHandlerError(http.StatusInternalServerError, rpcErr.Message)
|
||||
}
|
||||
return utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
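
For illustration, here is a minimal client-side sketch of what PostTransaction consumes, assuming the API server exposes this handler at a POST /transaction route (the route registration lives elsewhere in the server package, so the path is an assumption):

```go
package main

import (
	"bytes"
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	// The handler expects a JSON body shaped like the RawTransaction
	// struct above: {"rawTransaction": "<hex-encoded tx>"}.
	body, err := json.Marshal(map[string]string{
		"rawTransaction": "0100000001...", // placeholder hex, not a real tx
	})
	if err != nil {
		log.Fatal(err)
	}

	// "/transaction" is an assumed route; see the server's router setup.
	resp, err := http.Post("http://localhost:8080/transaction",
		"application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}
```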

@@ -1,140 +0,0 @@
package database

import (
	"errors"
	"fmt"
	"os"

	"github.com/daglabs/btcd/apiserver/config"
	"github.com/golang-migrate/migrate/v4"
	"github.com/golang-migrate/migrate/v4/source"
	"github.com/jinzhu/gorm"
)

// db is the API server database.
var db *gorm.DB

// DB returns a reference to the database connection
func DB() (*gorm.DB, error) {
	if db == nil {
		return nil, errors.New("Database is not connected")
	}
	return db, nil
}

type gormLogger struct{}

func (l gormLogger) Print(v ...interface{}) {
	str := fmt.Sprint(v...)
	log.Errorf(str)
}

// Connect connects to the database specified in the given config.
func Connect(cfg *config.Config) error {
	connectionString := buildConnectionString(cfg)
	migrator, driver, err := openMigrator(connectionString)
	if err != nil {
		return err
	}
	isCurrent, version, err := isCurrent(migrator, driver)
	if err != nil {
		return fmt.Errorf("Error checking whether the database is current: %s", err)
	}
	if !isCurrent {
		return fmt.Errorf("Database is not current (version %d). Please migrate"+
			" the database by running the server with the --migrate flag and then run it again.", version)
	}

	db, err = gorm.Open("mysql", connectionString)
	if err != nil {
		return err
	}

	db.SetLogger(gormLogger{})
	return nil
}

// Close closes the connection to the database
func Close() error {
	if db == nil {
		return nil
	}
	err := db.Close()
	db = nil
	return err
}

func buildConnectionString(cfg *config.Config) string {
	return fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True",
		cfg.DBUser, cfg.DBPassword, cfg.DBAddress, cfg.DBName)
}

// isCurrent resolves whether the database is on the latest
// version of the schema.
func isCurrent(migrator *migrate.Migrate, driver source.Driver) (bool, uint, error) {
	// Get the current version
	version, isDirty, err := migrator.Version()
	if err == migrate.ErrNilVersion {
		return false, 0, nil
	}
	if err != nil {
		return false, 0, err
	}
	if isDirty {
		return false, 0, fmt.Errorf("Database is dirty")
	}

	// The database is current if Next returns ErrNotExist
	_, err = driver.Next(version)
	if pathErr, ok := err.(*os.PathError); ok {
		if pathErr.Err == os.ErrNotExist {
			return true, version, nil
		}
	}
	return false, version, err
}

func openMigrator(connectionString string) (*migrate.Migrate, source.Driver, error) {
	driver, err := source.Open("file://migrations")
	if err != nil {
		return nil, nil, err
	}
	migrator, err := migrate.NewWithSourceInstance(
		"migrations", driver, "mysql://"+connectionString)
	if err != nil {
		return nil, nil, err
	}
	return migrator, driver, nil
}

// Migrate migrates the database to the latest version.
func Migrate(cfg *config.Config) error {
	connectionString := buildConnectionString(cfg)
	migrator, driver, err := openMigrator(connectionString)
	if err != nil {
		return err
	}
	isCurrent, version, err := isCurrent(migrator, driver)
	if err != nil {
		return fmt.Errorf("Error checking whether the database is current: %s", err)
	}
	if isCurrent {
		log.Infof("Database is already up-to-date (version %d)", version)
		return nil
	}
	err = migrator.Up()
	if err != nil {
		return err
	}
	version, isDirty, err := migrator.Version()
	if err != nil {
		return err
	}
	if isDirty {
		return fmt.Errorf("error migrating database: database is dirty")
	}
	log.Infof("Migrated database to the latest version (version %d)", version)
	return nil
}
@@ -1,9 +0,0 @@
|
||||
package database
|
||||
|
||||
import "github.com/daglabs/btcd/util/panics"
|
||||
import "github.com/daglabs/btcd/apiserver/logger"
|
||||
|
||||
var (
|
||||
log = logger.BackendLog.Logger("DTBS")
|
||||
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
|
||||
)
|
||||
@@ -1,28 +0,0 @@
|
||||
# -- multistage docker build: stage #1: build stage
|
||||
FROM golang:1.13-alpine AS build
|
||||
|
||||
RUN mkdir -p /go/src/github.com/daglabs/btcd
|
||||
|
||||
WORKDIR /go/src/github.com/daglabs/btcd
|
||||
|
||||
RUN apk add --no-cache curl git
|
||||
|
||||
COPY go.mod .
|
||||
COPY go.sum .
|
||||
|
||||
RUN go mod download
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN cd apiserver && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o apiserver .
|
||||
|
||||
# --- multistage docker build: stage #2: runtime image
|
||||
FROM alpine
|
||||
WORKDIR /app
|
||||
|
||||
RUN apk add --no-cache tini
|
||||
|
||||
COPY --from=build /go/src/github.com/daglabs/btcd/apiserver/ /app/
|
||||
|
||||
ENTRYPOINT ["/sbin/tini", "--"]
|
||||
CMD ["/app/apiserver"]
|
||||
@@ -1,125 +0,0 @@
|
||||
package jsonrpc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/daglabs/btcd/apiserver/config"
|
||||
"github.com/daglabs/btcd/util/daghash"
|
||||
|
||||
"github.com/daglabs/btcd/rpcclient"
|
||||
"github.com/daglabs/btcd/util"
|
||||
"github.com/daglabs/btcd/wire"
|
||||
)
|
||||
|
||||
// Client represents a connection to the JSON-RPC API of a full node
|
||||
type Client struct {
|
||||
*rpcclient.Client
|
||||
OnBlockAdded chan *BlockAddedMsg
|
||||
OnChainChanged chan *ChainChangedMsg
|
||||
}
|
||||
|
||||
var client *Client
|
||||
|
||||
// GetClient returns an instance of the JSON-RPC client, in case we have an active connection
|
||||
func GetClient() (*Client, error) {
|
||||
if client == nil {
|
||||
return nil, errors.New("JSON-RPC is not connected")
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// BlockAddedMsg defines the message received in onBlockAdded
|
||||
type BlockAddedMsg struct {
|
||||
ChainHeight uint64
|
||||
Header *wire.BlockHeader
|
||||
}
|
||||
|
||||
// ChainChangedMsg defines the message received in onChainChanged
|
||||
type ChainChangedMsg struct {
|
||||
RemovedChainBlockHashes []*daghash.Hash
|
||||
AddedChainBlocks []*rpcclient.ChainBlock
|
||||
}
|
||||
|
||||
// Close closes the connection to the JSON-RPC API server
|
||||
func Close() {
|
||||
if client == nil {
|
||||
return
|
||||
}
|
||||
|
||||
client.Disconnect()
|
||||
client = nil
|
||||
}
|
||||
|
||||
// Connect initiates a connection to the JSON-RPC API Server
|
||||
func Connect(cfg *config.Config) error {
|
||||
var cert []byte
|
||||
if !cfg.DisableTLS {
|
||||
var err error
|
||||
cert, err = ioutil.ReadFile(cfg.RPCCert)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading certificates file: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
connCfg := &rpcclient.ConnConfig{
|
||||
Host: cfg.RPCServer,
|
||||
Endpoint: "ws",
|
||||
User: cfg.RPCUser,
|
||||
Pass: cfg.RPCPassword,
|
||||
DisableTLS: cfg.DisableTLS,
|
||||
RequestTimeout: time.Second * 5,
|
||||
}
|
||||
|
||||
if !cfg.DisableTLS {
|
||||
connCfg.Certificates = cert
|
||||
}
|
||||
|
||||
var err error
|
||||
client, err = newClient(connCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error connecting to address %s: %s", cfg.RPCServer, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newClient(connCfg *rpcclient.ConnConfig) (*Client, error) {
|
||||
client = &Client{
|
||||
OnBlockAdded: make(chan *BlockAddedMsg),
|
||||
OnChainChanged: make(chan *ChainChangedMsg),
|
||||
}
|
||||
notificationHandlers := &rpcclient.NotificationHandlers{
|
||||
OnFilteredBlockAdded: func(height uint64, header *wire.BlockHeader,
|
||||
txs []*util.Tx) {
|
||||
client.OnBlockAdded <- &BlockAddedMsg{
|
||||
ChainHeight: height,
|
||||
Header: header,
|
||||
}
|
||||
},
|
||||
OnChainChanged: func(removedChainBlockHashes []*daghash.Hash,
|
||||
addedChainBlocks []*rpcclient.ChainBlock) {
|
||||
client.OnChainChanged <- &ChainChangedMsg{
|
||||
RemovedChainBlockHashes: removedChainBlockHashes,
|
||||
AddedChainBlocks: addedChainBlocks,
|
||||
}
|
||||
},
|
||||
}
|
||||
var err error
|
||||
client.Client, err = rpcclient.New(connCfg, notificationHandlers)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error connecting to address %s: %s", connCfg.Host, err)
|
||||
}
|
||||
|
||||
if err = client.NotifyBlocks(); err != nil {
|
||||
return nil, fmt.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err)
|
||||
}
|
||||
if err = client.NotifyChainChanges(); err != nil {
|
||||
return nil, fmt.Errorf("Error while registering client %s for chain changes notifications: %s", client.Host(), err)
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
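For orientation, here is a minimal sketch of how another package could drive this client: connect, grab the shared instance, and drain one of the notification channels. The config field values are illustrative placeholders, not values from the repository (the real ones come from config.Parse()).

```go
package main

import (
	"fmt"

	"github.com/daglabs/btcd/apiserver/config"
	"github.com/daglabs/btcd/apiserver/jsonrpc"
)

func main() {
	// Placeholder values for illustration only.
	cfg := &config.Config{RPCServer: "localhost:16110", RPCUser: "user", RPCPassword: "pass", DisableTLS: true}
	if err := jsonrpc.Connect(cfg); err != nil {
		panic(err)
	}
	defer jsonrpc.Close()

	client, err := jsonrpc.GetClient()
	if err != nil {
		panic(err)
	}
	// OnBlockAdded is unbuffered, so a consumer must be reading it or the
	// notification handler registered in newClient will block.
	for msg := range client.OnBlockAdded {
		fmt.Printf("block added at chain height %d\n", msg.ChainHeight)
	}
}
```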
@@ -1,11 +0,0 @@
package main

import (
	"github.com/daglabs/btcd/logger"
	"github.com/daglabs/btcd/util/panics"
)

var (
	log   = logger.BackendLog.Logger("APIS")
	spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)
@@ -1,24 +0,0 @@
package logger

import (
	"fmt"
	"os"

	"github.com/daglabs/btcd/logs"
)

// BackendLog is the logging backend used to create all subsystem loggers.
var BackendLog = logs.NewBackend()

// InitLog attaches log file and error log file to the backend log.
func InitLog(logFile, errLogFile string) {
	err := BackendLog.AddLogFile(logFile, logs.LevelTrace)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logs.LevelTrace, err)
		os.Exit(1)
	}
	err = BackendLog.AddLogFile(errLogFile, logs.LevelWarn)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logs.LevelWarn, err)
		os.Exit(1)
	}
}
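A quick sketch of using this package, assuming hypothetical log file names; the real paths would come from the application's configuration:

```go
package main

import "github.com/daglabs/btcd/logger"

func main() {
	// File names here are assumptions for illustration only.
	logger.InitLog("apiserver.log", "apiserver_err.log")
	// Each subsystem gets its own tagged logger off the shared backend.
	log := logger.BackendLog.Logger("XMPL")
	log.Info("logging initialized")
}
```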
@@ -1,73 +0,0 @@
package main

import (
	"fmt"
	"os"

	"github.com/daglabs/btcd/apiserver/config"
	"github.com/daglabs/btcd/apiserver/database"
	"github.com/daglabs/btcd/apiserver/jsonrpc"
	"github.com/daglabs/btcd/apiserver/server"
	"github.com/daglabs/btcd/logger"
	"github.com/daglabs/btcd/signal"
	"github.com/daglabs/btcd/util/panics"
	_ "github.com/golang-migrate/migrate/v4/database/mysql"
	_ "github.com/golang-migrate/migrate/v4/source/file"
	_ "github.com/jinzhu/gorm/dialects/mysql"
)

func main() {
	defer panics.HandlePanic(log, logger.BackendLog)

	cfg, err := config.Parse()
	if err != nil {
		errString := fmt.Sprintf("Error parsing command-line arguments: %s", err)
		// Fprint rather than Fprintf: errString is data, not a format string.
		_, fErr := fmt.Fprint(os.Stderr, errString)
		if fErr != nil {
			panic(errString)
		}
		return
	}

	if cfg.Migrate {
		err := database.Migrate(cfg)
		if err != nil {
			panic(fmt.Errorf("Error migrating database: %s", err))
		}
		return
	}

	err = database.Connect(cfg)
	if err != nil {
		panic(fmt.Errorf("Error connecting to database: %s", err))
	}
	defer func() {
		err := database.Close()
		if err != nil {
			panic(fmt.Errorf("Error closing the database: %s", err))
		}
	}()

	err = jsonrpc.Connect(cfg)
	if err != nil {
		panic(fmt.Errorf("Error connecting to servers: %s", err))
	}
	defer jsonrpc.Close()

	shutdownServer := server.Start(cfg.HTTPListen)
	defer shutdownServer()

	doneChan := make(chan struct{}, 1)
	spawn(func() {
		err := startSync(doneChan)
		if err != nil {
			panic(err)
		}
	})

	interrupt := signal.InterruptListener()
	<-interrupt

	// Gracefully stop syncing
	doneChan <- struct{}{}
}
@@ -1 +0,0 @@
DROP TABLE `blocks`;
@@ -1,23 +0,0 @@
CREATE TABLE `blocks`
(
    `id`                      BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `block_hash`              CHAR(64)        NOT NULL,
    `accepting_block_id`      BIGINT UNSIGNED NULL,
    `version`                 INT             NOT NULL,
    `hash_merkle_root`        CHAR(64)        NOT NULL,
    `accepted_id_merkle_root` CHAR(64)        NOT NULL,
    `utxo_commitment`         CHAR(64)        NOT NULL,
    `timestamp`               DATETIME        NOT NULL,
    `bits`                    INT UNSIGNED    NOT NULL,
    `nonce`                   BIGINT UNSIGNED NOT NULL,
    `blue_score`              BIGINT UNSIGNED NOT NULL,
    `is_chain_block`          TINYINT         NOT NULL,
    `mass`                    BIGINT          NOT NULL,
    PRIMARY KEY (`id`),
    UNIQUE INDEX `idx_blocks_block_hash` (`block_hash`),
    INDEX `idx_blocks_timestamp` (`timestamp`),
    INDEX `idx_blocks_is_chain_block` (`is_chain_block`),
    CONSTRAINT `fk_blocks_accepting_block_id`
        FOREIGN KEY (`accepting_block_id`)
            REFERENCES `blocks` (`id`)
);
@@ -1 +0,0 @@
DROP TABLE `parent_blocks`;
@@ -1,12 +0,0 @@
CREATE TABLE `parent_blocks`
(
    `block_id`        BIGINT UNSIGNED NOT NULL,
    `parent_block_id` BIGINT UNSIGNED NOT NULL,
    PRIMARY KEY (`block_id`, `parent_block_id`),
    CONSTRAINT `fk_parent_blocks_block_id`
        FOREIGN KEY (`block_id`)
            REFERENCES `blocks` (`id`),
    CONSTRAINT `fk_parent_blocks_parent_block_id`
        FOREIGN KEY (`parent_block_id`)
            REFERENCES `blocks` (`id`)
);
@@ -1 +0,0 @@
DROP TABLE `raw_blocks`;
@@ -1,9 +0,0 @@
CREATE TABLE `raw_blocks`
(
    `block_id`   BIGINT UNSIGNED NOT NULL,
    `block_data` BLOB            NOT NULL,
    PRIMARY KEY (`block_id`),
    CONSTRAINT `fk_raw_blocks_block_id`
        FOREIGN KEY (`block_id`)
            REFERENCES `blocks` (`id`)
);
@@ -1 +0,0 @@
DROP TABLE `subnetworks`;
@@ -1,8 +0,0 @@
CREATE TABLE `subnetworks`
(
    `id`            BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `subnetwork_id` CHAR(64)        NOT NULL,
    `gas_limit`     BIGINT UNSIGNED NULL,
    PRIMARY KEY (`id`),
    UNIQUE INDEX `idx_subnetworks_subnetwork_id` (`subnetwork_id`)
);
@@ -1 +0,0 @@
DROP TABLE `transactions`;
@@ -1,19 +0,0 @@
CREATE TABLE `transactions`
(
    `id`                 BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `accepting_block_id` BIGINT UNSIGNED NULL,
    `transaction_hash`   CHAR(64)        NOT NULL,
    `transaction_id`     CHAR(64)        NOT NULL,
    `lock_time`          BIGINT UNSIGNED NOT NULL,
    `subnetwork_id`      BIGINT UNSIGNED NOT NULL,
    `gas`                BIGINT UNSIGNED NOT NULL,
    `payload_hash`       CHAR(64)        NOT NULL,
    `payload`            BLOB            NOT NULL,
    `mass`               BIGINT          NOT NULL,
    PRIMARY KEY (`id`),
    UNIQUE INDEX `idx_transactions_transaction_hash` (`transaction_hash`),
    INDEX `idx_transactions_transaction_id` (`transaction_id`),
    CONSTRAINT `fk_transactions_accepting_block_id`
        FOREIGN KEY (`accepting_block_id`)
            REFERENCES `blocks` (`id`)
);
@@ -1 +0,0 @@
DROP TABLE `transactions_to_blocks`;
@@ -1,14 +0,0 @@
CREATE TABLE `transactions_to_blocks`
(
    `transaction_id` BIGINT UNSIGNED NOT NULL,
    `block_id`       BIGINT UNSIGNED NOT NULL,
    `index`          INT UNSIGNED    NOT NULL,
    PRIMARY KEY (`transaction_id`, `block_id`),
    INDEX `idx_transactions_to_blocks_index` (`index`),
    CONSTRAINT `fk_transactions_to_blocks_block_id`
        FOREIGN KEY (`block_id`)
            REFERENCES `blocks` (`id`),
    CONSTRAINT `fk_transactions_to_blocks_transaction_id`
        FOREIGN KEY (`transaction_id`)
            REFERENCES `transactions` (`id`)
);
@@ -1 +0,0 @@
DROP TABLE `addresses`;
@@ -1,7 +0,0 @@
CREATE TABLE `addresses`
(
    `id`      BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `address` CHAR(50)        NOT NULL,
    PRIMARY KEY (`id`),
    UNIQUE INDEX `idx_addresses_address` (`address`)
);
@@ -1 +0,0 @@
DROP TABLE `transaction_outputs`;
@@ -1,18 +0,0 @@
CREATE TABLE `transaction_outputs`
(
    `id`             BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `transaction_id` BIGINT UNSIGNED NOT NULL,
    `index`          INT UNSIGNED    NOT NULL,
    `value`          BIGINT UNSIGNED NOT NULL,
    `script_pub_key` BLOB            NOT NULL,
    `is_spent`       TINYINT         NOT NULL,
    `address_id`     BIGINT UNSIGNED NOT NULL,
    PRIMARY KEY (`id`),
    INDEX `idx_transaction_outputs_transaction_id` (`transaction_id`),
    CONSTRAINT `fk_transaction_outputs_transaction_id`
        FOREIGN KEY (`transaction_id`)
            REFERENCES `transactions` (`id`),
    CONSTRAINT `fk_transaction_outputs_address_id`
        FOREIGN KEY (`address_id`)
            REFERENCES `addresses` (`id`)
);
@@ -1 +0,0 @@
DROP TABLE `transaction_inputs`;
@@ -1,18 +0,0 @@
CREATE TABLE `transaction_inputs`
(
    `id`                             BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `transaction_id`                 BIGINT UNSIGNED NULL,
    `previous_transaction_output_id` BIGINT UNSIGNED NOT NULL,
    `index`                          INT UNSIGNED    NOT NULL,
    `signature_script`               BLOB            NOT NULL,
    `sequence`                       BIGINT UNSIGNED NOT NULL,
    PRIMARY KEY (`id`),
    INDEX `idx_transaction_inputs_transaction_id` (`transaction_id`),
    INDEX `idx_transaction_inputs_previous_transaction_output_id` (`previous_transaction_output_id`),
    CONSTRAINT `fk_transaction_inputs_transaction_id`
        FOREIGN KEY (`transaction_id`)
            REFERENCES `transactions` (`id`),
    CONSTRAINT `fk_transaction_inputs_previous_transaction_output_id`
        FOREIGN KEY (`previous_transaction_output_id`)
            REFERENCES `transaction_outputs` (`id`)
);
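The main package above blank-imports the golang-migrate mysql and file drivers for exactly this kind of paired .up.sql/.down.sql directory. A minimal sketch of running such migrations programmatically; the DSN and migrations path are placeholder assumptions, since the actual wiring lives in the apiserver's database package:

```go
package main

import (
	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/mysql"
	_ "github.com/golang-migrate/migrate/v4/source/file"
)

func main() {
	// Placeholder DSN and path, for illustration only.
	m, err := migrate.New(
		"file://migrations",
		"mysql://user:pass@tcp(localhost:3306)/apiserver")
	if err != nil {
		panic(err)
	}
	// Up applies every pending .up.sql in version order.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		panic(err)
	}
}
```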
@@ -1,111 +0,0 @@
package models

import (
	"time"
)

// Block is the gorm model for the 'blocks' table
type Block struct {
	ID                   uint64 `gorm:"primary_key"`
	BlockHash            string
	AcceptingBlockID     *uint64
	AcceptingBlock       *Block
	Version              int32
	HashMerkleRoot       string
	AcceptedIDMerkleRoot string
	UTXOCommitment       string
	Timestamp            time.Time
	Bits                 uint32
	Nonce                uint64
	BlueScore            uint64
	IsChainBlock         bool
	Mass                 uint64
	ParentBlocks         []Block `gorm:"many2many:parent_blocks;"`
}

// ParentBlock is the gorm model for the 'parent_blocks' table
type ParentBlock struct {
	BlockID       uint64
	Block         Block
	ParentBlockID uint64
	ParentBlock   Block
}

// RawBlock is the gorm model for the 'raw_blocks' table
type RawBlock struct {
	BlockID   uint64
	Block     Block
	BlockData []byte
}

// Subnetwork is the gorm model for the 'subnetworks' table
type Subnetwork struct {
	ID           uint64 `gorm:"primary_key"`
	SubnetworkID string
	GasLimit     *uint64
}

// Transaction is the gorm model for the 'transactions' table
type Transaction struct {
	ID                 uint64 `gorm:"primary_key"`
	AcceptingBlockID   *uint64
	AcceptingBlock     *Block
	TransactionHash    string
	TransactionID      string
	LockTime           uint64
	SubnetworkID       uint64
	Subnetwork         Subnetwork
	Gas                uint64
	PayloadHash        string
	Payload            []byte
	Mass               uint64
	Blocks             []Block `gorm:"many2many:transactions_to_blocks;"`
	TransactionOutputs []TransactionOutput
	TransactionInputs  []TransactionInput
}

// TransactionBlock is the gorm model for the 'transactions_to_blocks' table
type TransactionBlock struct {
	TransactionID uint64
	Transaction   Transaction
	BlockID       uint64
	Block         Block
	Index         uint32
}

// TableName returns the table name associated to the
// TransactionBlock gorm model
func (TransactionBlock) TableName() string {
	return "transactions_to_blocks"
}

// TransactionOutput is the gorm model for the 'transaction_outputs' table
type TransactionOutput struct {
	ID            uint64 `gorm:"primary_key"`
	TransactionID uint64
	Transaction   Transaction
	Index         uint32
	Value         uint64
	ScriptPubKey  []byte
	IsSpent       bool
	AddressID     uint64
	Address       Address
}

// TransactionInput is the gorm model for the 'transaction_inputs' table
type TransactionInput struct {
	ID                          uint64 `gorm:"primary_key"`
	TransactionID               uint64
	Transaction                 Transaction
	PreviousTransactionOutputID uint64
	PreviousTransactionOutput   TransactionOutput
	Index                       uint32
	SignatureScript             []byte
	Sequence                    uint64
}

// Address is the gorm model for the 'addresses' table
type Address struct {
	ID      uint64 `gorm:"primary_key"`
	Address string
}
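A minimal sketch of querying these models through gorm v1 with an association preloaded, mirroring the queries the sync code performs; the DSN is an illustrative assumption:

```go
package main

import (
	"fmt"

	"github.com/daglabs/btcd/apiserver/models"
	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/mysql"
)

func main() {
	// Placeholder DSN; the real connection is managed by the database package.
	db, err := gorm.Open("mysql", "user:pass@tcp(localhost:3306)/apiserver?parseTime=true")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Fetch the highest-blue-score chain block together with its parents.
	var block models.Block
	db.Where(&models.Block{IsChainBlock: true}).
		Order("blue_score DESC").
		Preload("ParentBlocks").
		First(&block)
	fmt.Println(block.BlockHash, len(block.ParentBlocks))
}
```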
@@ -1,9 +0,0 @@
package server

import (
	"github.com/daglabs/btcd/apiserver/logger"
	"github.com/daglabs/btcd/util/panics"
)

var (
	log   = logger.BackendLog.Logger("REST")
	spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)
@@ -1,50 +0,0 @@
package server

import (
	"fmt"
	"net/http"
	"runtime/debug"
	"sync/atomic"

	"github.com/daglabs/btcd/apiserver/utils"
)

var nextRequestID uint64

func addRequestMetadataMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// atomic.AddUint64 keeps the counter safe under concurrent requests.
		requestID := atomic.AddUint64(&nextRequestID, 1)
		rCtx := utils.ToAPIServerContext(r.Context()).SetRequestID(requestID)
		// WithContext returns a copy; the result must be kept for the
		// request ID to reach downstream handlers.
		r = r.WithContext(rCtx)
		next.ServeHTTP(w, r)
	})
}

func loggingMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := utils.ToAPIServerContext(r.Context())
		ctx.Infof("Method: %s URI: %s", r.Method, r.RequestURI)
		next.ServeHTTP(w, r)
	})
}

func recoveryMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := utils.ToAPIServerContext(r.Context())
		defer func() {
			recoveryErr := recover()
			if recoveryErr != nil {
				recoveryErrStr := fmt.Sprintf("%v", recoveryErr)
				log.Criticalf("Fatal error: %s", recoveryErrStr)
				log.Criticalf("Stack trace: %s", debug.Stack())
				sendErr(ctx, w, utils.NewInternalServerHandlerError(recoveryErrStr))
			}
		}()
		h.ServeHTTP(w, r)
	})
}

func setJSONMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		h.ServeHTTP(w, r)
	})
}
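A quick sketch of exercising one of these middlewares in isolation with net/http/httptest; the middleware is reproduced verbatim so the sketch is self-contained, and the inner handler is a stand-in:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// Copied from the middleware file above.
func setJSONMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		h.ServeHTTP(w, r)
	})
}

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"ok":true}`)
	})
	rec := httptest.NewRecorder()
	setJSONMiddleware(inner).ServeHTTP(rec, httptest.NewRequest("GET", "/", nil))
	fmt.Println(rec.Header().Get("Content-Type")) // application/json; charset=utf-8
}
```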
@@ -1,244 +0,0 @@
package server

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"strconv"

	"github.com/daglabs/btcd/apiserver/controllers"
	"github.com/daglabs/btcd/apiserver/utils"
	"github.com/gorilla/mux"
)

const (
	routeParamTxID      = "txID"
	routeParamTxHash    = "txHash"
	routeParamAddress   = "address"
	routeParamBlockHash = "blockHash"
)

const (
	queryParamSkip  = "skip"
	queryParamLimit = "limit"
	queryParamOrder = "order"
)

const (
	defaultGetTransactionsLimit = 100
	defaultGetBlocksLimit       = 25
	defaultGetBlocksOrder       = controllers.OrderAscending
)

type handlerFunc func(ctx *utils.APIServerContext, routeParams map[string]string, queryParams map[string]string, requestBody []byte) (
	interface{}, *utils.HandlerError)

func makeHandler(handler handlerFunc) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := utils.ToAPIServerContext(r.Context())

		var requestBody []byte
		if r.Method == "POST" {
			var err error
			requestBody, err = ioutil.ReadAll(r.Body)
			if err != nil {
				sendErr(ctx, w, utils.NewHandlerError(500, "Internal server error occurred"))
				return
			}
		}

		flattenedQueryParams, hErr := flattenQueryParams(r.URL.Query())
		if hErr != nil {
			sendErr(ctx, w, hErr)
			return
		}

		response, hErr := handler(ctx, mux.Vars(r), flattenedQueryParams, requestBody)
		if hErr != nil {
			sendErr(ctx, w, hErr)
			return
		}
		if response != nil {
			sendJSONResponse(w, response)
		}
	}
}

func flattenQueryParams(queryParams map[string][]string) (map[string]string, *utils.HandlerError) {
	flattenedMap := make(map[string]string)
	for param, valuesSlice := range queryParams {
		if len(valuesSlice) > 1 {
			return nil, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("Couldn't parse the '%s' query parameter:"+
				" expected a single value but got multiple values", param))
		}
		flattenedMap[param] = valuesSlice[0]
	}
	return flattenedMap, nil
}

type clientError struct {
	ErrorCode    int    `json:"errorCode"`
	ErrorMessage string `json:"errorMessage"`
}

func sendErr(ctx *utils.APIServerContext, w http.ResponseWriter, hErr *utils.HandlerError) {
	errMsg := fmt.Sprintf("got error: %s", hErr)
	ctx.Warnf(errMsg)
	w.WriteHeader(hErr.Code)
	sendJSONResponse(w, &clientError{
		ErrorCode:    hErr.Code,
		ErrorMessage: hErr.ClientMessage,
	})
}

func sendJSONResponse(w http.ResponseWriter, response interface{}) {
	b, err := json.Marshal(response)
	if err != nil {
		panic(err)
	}
	// Fprint rather than Fprintf: the marshaled JSON is data, not a format string.
	_, err = fmt.Fprint(w, string(b))
	if err != nil {
		panic(err)
	}
}

func mainHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string, _ []byte) (interface{}, *utils.HandlerError) {
	return struct {
		Message string `json:"message"`
	}{
		Message: "API server is running",
	}, nil
}

func addRoutes(router *mux.Router) {
	router.HandleFunc("/", makeHandler(mainHandler))

	router.HandleFunc(
		fmt.Sprintf("/transaction/id/{%s}", routeParamTxID),
		makeHandler(getTransactionByIDHandler)).
		Methods("GET")

	router.HandleFunc(
		fmt.Sprintf("/transaction/hash/{%s}", routeParamTxHash),
		makeHandler(getTransactionByHashHandler)).
		Methods("GET")

	router.HandleFunc(
		fmt.Sprintf("/transactions/address/{%s}", routeParamAddress),
		makeHandler(getTransactionsByAddressHandler)).
		Methods("GET")

	router.HandleFunc(
		fmt.Sprintf("/utxos/address/{%s}", routeParamAddress),
		makeHandler(getUTXOsByAddressHandler)).
		Methods("GET")

	router.HandleFunc(
		fmt.Sprintf("/block/{%s}", routeParamBlockHash),
		makeHandler(getBlockByHashHandler)).
		Methods("GET")

	router.HandleFunc(
		"/blocks",
		makeHandler(getBlocksHandler)).
		Methods("GET")

	router.HandleFunc(
		"/fee-estimates",
		makeHandler(getFeeEstimatesHandler)).
		Methods("GET")

	router.HandleFunc(
		"/transaction",
		makeHandler(postTransactionHandler)).
		Methods("POST")
}

func convertQueryParamToInt(queryParams map[string]string, param string, defaultValue int) (int, *utils.HandlerError) {
	if _, ok := queryParams[param]; ok {
		intValue, err := strconv.Atoi(queryParams[param])
		if err != nil {
			return 0, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("Couldn't parse the '%s' query parameter: %s", param, err))
		}
		return intValue, nil
	}
	return defaultValue, nil
}

func getTransactionByIDHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	return controllers.GetTransactionByIDHandler(routeParams[routeParamTxID])
}

func getTransactionByHashHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	return controllers.GetTransactionByHashHandler(routeParams[routeParamTxHash])
}

func getTransactionsByAddressHandler(_ *utils.APIServerContext, routeParams map[string]string, queryParams map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	skip, hErr := convertQueryParamToInt(queryParams, queryParamSkip, 0)
	if hErr != nil {
		return nil, hErr
	}
	limit, hErr := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetTransactionsLimit)
	if hErr != nil {
		return nil, hErr
	}
	return controllers.GetTransactionsByAddressHandler(routeParams[routeParamAddress], uint64(skip), uint64(limit))
}

func getUTXOsByAddressHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	return controllers.GetUTXOsByAddressHandler(routeParams[routeParamAddress])
}

func getBlockByHashHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	return controllers.GetBlockByHashHandler(routeParams[routeParamBlockHash])
}

func getFeeEstimatesHandler(_ *utils.APIServerContext, _ map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	return controllers.GetFeeEstimatesHandler()
}

func getBlocksHandler(_ *utils.APIServerContext, _ map[string]string, queryParams map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	skip, hErr := convertQueryParamToInt(queryParams, queryParamSkip, 0)
	if hErr != nil {
		return nil, hErr
	}
	limit, hErr := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetBlocksLimit)
	if hErr != nil {
		return nil, hErr
	}
	order := defaultGetBlocksOrder
	if orderParamValue, ok := queryParams[queryParamOrder]; ok {
		if orderParamValue != controllers.OrderAscending && orderParamValue != controllers.OrderDescending {
			return nil, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("'%s' is not a valid value for the '%s' query parameter", orderParamValue, queryParamOrder))
		}
		order = orderParamValue
	}
	return controllers.GetBlocksHandler(order, uint64(skip), uint64(limit))
}

func postTransactionHandler(_ *utils.APIServerContext, _ map[string]string, _ map[string]string,
	requestBody []byte) (interface{}, *utils.HandlerError) {
	return nil, controllers.PostTransaction(requestBody)
}
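Given the routes above, a hedged sketch of what a consumer call could look like once the server is listening; the address and port are assumptions, not values from the repository:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Assumes the API server is listening locally; the port is illustrative.
	resp, err := http.Get("http://localhost:8080/blocks?skip=0&limit=25")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("status=%d body=%s\n", resp.StatusCode, body)
}
```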
@@ -1,39 +0,0 @@
package server

import (
	"context"
	"net/http"
	"time"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)

const gracefulShutdownTimeout = 30 * time.Second

// Start starts the HTTP REST server and returns a
// function to gracefully shut it down.
func Start(listenAddr string) func() {
	router := mux.NewRouter()
	router.Use(addRequestMetadataMiddleware)
	router.Use(recoveryMiddleware)
	router.Use(loggingMiddleware)
	router.Use(setJSONMiddleware)
	addRoutes(router)
	httpServer := &http.Server{
		Addr:    listenAddr,
		Handler: handlers.CORS()(router),
	}
	spawn(func() {
		log.Errorf("%s", httpServer.ListenAndServe())
	})

	return func() {
		ctx, cancel := context.WithTimeout(context.Background(), gracefulShutdownTimeout)
		defer cancel()
		err := httpServer.Shutdown(ctx)
		if err != nil {
			log.Errorf("Error shutting down HTTP server: %s", err)
		}
	}
}
@@ -1,877 +0,0 @@
package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"strconv"
	"time"

	"github.com/daglabs/btcd/apiserver/config"
	"github.com/daglabs/btcd/apiserver/database"
	"github.com/daglabs/btcd/apiserver/jsonrpc"
	"github.com/daglabs/btcd/apiserver/models"
	"github.com/daglabs/btcd/apiserver/utils"
	"github.com/daglabs/btcd/btcjson"
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/util/subnetworkid"
	"github.com/jinzhu/gorm"
)

// startSync keeps the node and the API server in sync. On start, it downloads
// all data that's missing from the API server, and once it's done it keeps
// in sync with the node via notifications.
func startSync(doneChan chan struct{}) error {
	client, err := jsonrpc.GetClient()
	if err != nil {
		return err
	}

	// Mass download missing data
	err = fetchInitialData(client)
	if err != nil {
		return err
	}

	// Keep the node and the API server in sync
	sync(client, doneChan)
	return nil
}

// fetchInitialData downloads all data that's currently missing from
// the database.
func fetchInitialData(client *jsonrpc.Client) error {
	err := syncBlocks(client)
	if err != nil {
		return err
	}
	err = syncSelectedParentChain(client)
	if err != nil {
		return err
	}
	return nil
}

// sync keeps the API server in sync with the node via notifications
func sync(client *jsonrpc.Client, doneChan chan struct{}) {
	// ChainChangedMsgs must be processed in order and there may be times
	// when we may not be able to process them (e.g. appropriate
	// BlockAddedMsgs haven't arrived yet). As such, we pop messages from
	// client.OnChainChanged, make sure we're able to handle them, and
	// only then push them into nextChainChangedChan for them to be
	// actually handled.
	blockAddedMsgHandledChan := make(chan struct{})
	nextChainChangedChan := make(chan *jsonrpc.ChainChangedMsg)
	spawn(func() {
		for chainChanged := range client.OnChainChanged {
			for {
				<-blockAddedMsgHandledChan
				canHandle, err := canHandleChainChangedMsg(chainChanged)
				if err != nil {
					panic(err)
				}
				if canHandle {
					break
				}
			}
			nextChainChangedChan <- chainChanged
		}
	})

	// Handle client notifications until we're told to stop
loop:
	for {
		select {
		case blockAdded := <-client.OnBlockAdded:
			handleBlockAddedMsg(client, blockAdded)
			blockAddedMsgHandledChan <- struct{}{}
		case chainChanged := <-nextChainChangedChan:
			handleChainChangedMsg(chainChanged)
		case <-doneChan:
			log.Infof("startSync stopped")
			break loop
		}
	}
}
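The ordering handshake in sync is easier to see in isolation. Below is a stripped-down sketch of the same pattern, with dummy integer messages standing in for the JSON-RPC notifications and the canHandle retry loop omitted:

```go
package main

import "fmt"

func main() {
	handled := make(chan struct{}) // one signal per handled blockAdded
	incoming := make(chan int, 3)  // stand-in for client.OnChainChanged
	ready := make(chan int)        // stand-in for nextChainChangedChan

	incoming <- 1
	incoming <- 2
	incoming <- 3
	close(incoming)

	// Gate every chainChanged message on a handled blockAdded, as sync does.
	go func() {
		for msg := range incoming {
			<-handled
			ready <- msg
		}
		close(ready)
	}()

	go func() {
		for i := 0; i < 3; i++ {
			handled <- struct{}{} // pretend a blockAdded was just handled
		}
	}()

	for msg := range ready {
		fmt.Println("applying chainChanged", msg)
	}
}
```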
// syncBlocks attempts to download all DAG blocks starting with
// the bluest block, and then inserts them into the database.
func syncBlocks(client *jsonrpc.Client) error {
	// Start syncing from the bluest block hash. We use blue score to
	// simulate the "last" block we have because blue-block order is
	// the order that the node uses in the various JSONRPC calls.
	startHash, err := findHashOfBluestBlock(false)
	if err != nil {
		return err
	}

	var blocks []string
	var rawBlocks []btcjson.GetBlockVerboseResult
	for {
		blocksResult, err := client.GetBlocks(true, false, startHash)
		if err != nil {
			return err
		}
		if len(blocksResult.Hashes) == 0 {
			break
		}

		rawBlocksResult, err := client.GetBlocks(true, true, startHash)
		if err != nil {
			return err
		}

		startHash = &blocksResult.Hashes[len(blocksResult.Hashes)-1]
		blocks = append(blocks, blocksResult.Blocks...)
		rawBlocks = append(rawBlocks, rawBlocksResult.RawBlocks...)
	}

	return addBlocks(client, blocks, rawBlocks)
}

// syncSelectedParentChain attempts to download the selected parent
// chain starting with the bluest chain-block, and then updates the
// database accordingly.
func syncSelectedParentChain(client *jsonrpc.Client) error {
	// Start syncing from the bluest chain-block hash. We use blue
	// score to simulate the "last" block we have because blue-block
	// order is the order that the node uses in the various JSONRPC
	// calls.
	startHash, err := findHashOfBluestBlock(true)
	if err != nil {
		return err
	}

	for {
		chainFromBlockResult, err := client.GetChainFromBlock(false, startHash)
		if err != nil {
			return err
		}
		if len(chainFromBlockResult.AddedChainBlocks) == 0 {
			break
		}

		startHash = &chainFromBlockResult.AddedChainBlocks[len(chainFromBlockResult.AddedChainBlocks)-1].Hash
		err = updateSelectedParentChain(chainFromBlockResult.RemovedChainBlockHashes,
			chainFromBlockResult.AddedChainBlocks)
		if err != nil {
			return err
		}
	}
	return nil
}
// findHashOfBluestBlock finds the block with the highest
// blue score in the database. If the database is empty,
// return nil.
func findHashOfBluestBlock(mustBeChainBlock bool) (*string, error) {
	dbTx, err := database.DB()
	if err != nil {
		return nil, err
	}

	var block models.Block
	dbQuery := dbTx.Order("blue_score DESC")
	if mustBeChainBlock {
		dbQuery = dbQuery.Where(&models.Block{IsChainBlock: true})
	}
	dbResult := dbQuery.First(&block)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to find hash of bluest block: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		return nil, nil
	}
	return &block.BlockHash, nil
}

// fetchBlock downloads the serialized block and raw block data of
// the block with hash blockHash.
func fetchBlock(client *jsonrpc.Client, blockHash *daghash.Hash) (
	block string, rawBlock *btcjson.GetBlockVerboseResult, err error) {
	msgBlock, err := client.GetBlock(blockHash, nil)
	if err != nil {
		return "", nil, err
	}
	writer := bytes.NewBuffer(make([]byte, 0, msgBlock.SerializeSize()))
	err = msgBlock.Serialize(writer)
	if err != nil {
		return "", nil, err
	}
	block = hex.EncodeToString(writer.Bytes())

	rawBlock, err = client.GetBlockVerboseTx(blockHash, nil)
	if err != nil {
		return "", nil, err
	}
	return block, rawBlock, nil
}

// addBlocks inserts data in the given blocks and rawBlocks pairwise
// into the database. See addBlock for further details.
func addBlocks(client *jsonrpc.Client, blocks []string, rawBlocks []btcjson.GetBlockVerboseResult) error {
	for i, rawBlock := range rawBlocks {
		block := blocks[i]
		err := addBlock(client, block, rawBlock)
		if err != nil {
			return err
		}
	}
	return nil
}

func doesBlockExist(dbTx *gorm.DB, blockHash string) (bool, error) {
	var dbBlock models.Block
	dbResult := dbTx.
		Where(&models.Block{BlockHash: blockHash}).
		First(&dbBlock)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return false, utils.NewErrorFromDBErrors("failed to find block: ", dbErrors)
	}
	return !utils.IsDBRecordNotFoundError(dbErrors), nil
}

// addBlock inserts all the data that could be gleaned out of the serialized
// block and raw block data into the database. This includes transactions,
// subnetworks, and addresses.
// Note that this function starts a database transaction by itself and
// commits it before returning.
func addBlock(client *jsonrpc.Client, block string, rawBlock btcjson.GetBlockVerboseResult) error {
	db, err := database.DB()
	if err != nil {
		return err
	}
	dbTx := db.Begin()

	// Skip this block if it already exists.
	blockExists, err := doesBlockExist(dbTx, rawBlock.Hash)
	if err != nil {
		return err
	}
	if blockExists {
		dbTx.Commit()
		return nil
	}

	dbBlock, err := insertBlock(dbTx, rawBlock)
	if err != nil {
		return err
	}
	err = insertBlockParents(dbTx, rawBlock, dbBlock)
	if err != nil {
		return err
	}
	err = insertBlockData(dbTx, block, dbBlock)
	if err != nil {
		return err
	}

	for i, transaction := range rawBlock.RawTx {
		dbSubnetwork, err := insertSubnetwork(dbTx, &transaction, client)
		if err != nil {
			return err
		}
		dbTransaction, err := insertTransaction(dbTx, &transaction, dbSubnetwork)
		if err != nil {
			return err
		}
		err = insertTransactionBlock(dbTx, dbBlock, dbTransaction, uint32(i))
		if err != nil {
			return err
		}
		err = insertTransactionInputs(dbTx, &transaction, dbTransaction)
		if err != nil {
			return err
		}
		err = insertTransactionOutputs(dbTx, &transaction, dbTransaction)
		if err != nil {
			return err
		}
	}

	dbTx.Commit()
	return nil
}

func insertBlock(dbTx *gorm.DB, rawBlock btcjson.GetBlockVerboseResult) (*models.Block, error) {
	bits, err := strconv.ParseUint(rawBlock.Bits, 16, 32)
	if err != nil {
		return nil, err
	}
	dbBlock := models.Block{
		BlockHash:            rawBlock.Hash,
		Version:              rawBlock.Version,
		HashMerkleRoot:       rawBlock.HashMerkleRoot,
		AcceptedIDMerkleRoot: rawBlock.AcceptedIDMerkleRoot,
		UTXOCommitment:       rawBlock.UTXOCommitment,
		Timestamp:            time.Unix(rawBlock.Time, 0),
		Bits:                 uint32(bits),
		Nonce:                rawBlock.Nonce,
		BlueScore:            rawBlock.BlueScore,
		IsChainBlock:         false, // This must be false for updateSelectedParentChain to work properly
		Mass:                 rawBlock.Mass,
	}
	dbResult := dbTx.Create(&dbBlock)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to insert block: ", dbErrors)
	}
	return &dbBlock, nil
}

func insertBlockParents(dbTx *gorm.DB, rawBlock btcjson.GetBlockVerboseResult, dbBlock *models.Block) error {
	// Exit early if this is the genesis block
	if len(rawBlock.ParentHashes) == 0 {
		return nil
	}

	dbWhereBlockIDsIn := make([]*models.Block, len(rawBlock.ParentHashes))
	for i, parentHash := range rawBlock.ParentHashes {
		dbWhereBlockIDsIn[i] = &models.Block{BlockHash: parentHash}
	}
	var dbParents []models.Block
	// Find rather than First: every parent is needed, and First would
	// limit the result to a single row.
	dbResult := dbTx.
		Where(dbWhereBlockIDsIn).
		Find(&dbParents)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find blocks: ", dbErrors)
	}
	if len(dbParents) != len(rawBlock.ParentHashes) {
		return fmt.Errorf("some parents are missing for block: %s", rawBlock.Hash)
	}

	for _, dbParent := range dbParents {
		dbParentBlock := models.ParentBlock{
			BlockID:       dbBlock.ID,
			ParentBlockID: dbParent.ID,
		}
		dbResult := dbTx.Create(&dbParentBlock)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to insert parentBlock: ", dbErrors)
		}
	}
	return nil
}

func insertBlockData(dbTx *gorm.DB, block string, dbBlock *models.Block) error {
	blockData, err := hex.DecodeString(block)
	if err != nil {
		return err
	}
	dbRawBlock := models.RawBlock{
		BlockID:   dbBlock.ID,
		BlockData: blockData,
	}
	dbResult := dbTx.Create(&dbRawBlock)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to insert rawBlock: ", dbErrors)
	}
	return nil
}

func insertSubnetwork(dbTx *gorm.DB, transaction *btcjson.TxRawResult, client *jsonrpc.Client) (*models.Subnetwork, error) {
	var dbSubnetwork models.Subnetwork
	dbResult := dbTx.
		Where(&models.Subnetwork{SubnetworkID: transaction.Subnetwork}).
		First(&dbSubnetwork)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to find subnetwork: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		subnetwork, err := client.GetSubnetwork(transaction.Subnetwork)
		if err != nil {
			return nil, err
		}
		dbSubnetwork = models.Subnetwork{
			SubnetworkID: transaction.Subnetwork,
			GasLimit:     subnetwork.GasLimit,
		}
		dbResult := dbTx.Create(&dbSubnetwork)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return nil, utils.NewErrorFromDBErrors("failed to insert subnetwork: ", dbErrors)
		}
	}
	return &dbSubnetwork, nil
}

func insertTransaction(dbTx *gorm.DB, transaction *btcjson.TxRawResult, dbSubnetwork *models.Subnetwork) (*models.Transaction, error) {
	var dbTransaction models.Transaction
	dbResult := dbTx.
		Where(&models.Transaction{TransactionID: transaction.TxID}).
		First(&dbTransaction)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to find transaction: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		payload, err := hex.DecodeString(transaction.Payload)
		if err != nil {
			return nil, err
		}
		dbTransaction = models.Transaction{
			TransactionHash: transaction.Hash,
			TransactionID:   transaction.TxID,
			LockTime:        transaction.LockTime,
			SubnetworkID:    dbSubnetwork.ID,
			Gas:             transaction.Gas,
			Mass:            transaction.Mass,
			PayloadHash:     transaction.PayloadHash,
			Payload:         payload,
		}
		dbResult := dbTx.Create(&dbTransaction)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return nil, utils.NewErrorFromDBErrors("failed to insert transaction: ", dbErrors)
		}
	}
	return &dbTransaction, nil
}

func insertTransactionBlock(dbTx *gorm.DB, dbBlock *models.Block, dbTransaction *models.Transaction, index uint32) error {
	var dbTransactionBlock models.TransactionBlock
	dbResult := dbTx.
		Where(&models.TransactionBlock{TransactionID: dbTransaction.ID, BlockID: dbBlock.ID}).
		First(&dbTransactionBlock)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find transactionBlock: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		dbTransactionBlock = models.TransactionBlock{
			TransactionID: dbTransaction.ID,
			BlockID:       dbBlock.ID,
			Index:         index,
		}
		dbResult := dbTx.Create(&dbTransactionBlock)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to insert transactionBlock: ", dbErrors)
		}
	}
	return nil
}

func insertTransactionInputs(dbTx *gorm.DB, transaction *btcjson.TxRawResult, dbTransaction *models.Transaction) error {
	isCoinbase, err := isTransactionCoinbase(transaction)
	if err != nil {
		return err
	}

	if !isCoinbase {
		for _, input := range transaction.Vin {
			err := insertTransactionInput(dbTx, dbTransaction, &input)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

func isTransactionCoinbase(transaction *btcjson.TxRawResult) (bool, error) {
	subnetwork, err := subnetworkid.NewFromStr(transaction.Subnetwork)
	if err != nil {
		return false, err
	}
	return subnetwork.IsEqual(subnetworkid.SubnetworkIDCoinbase), nil
}
func insertTransactionInput(dbTx *gorm.DB, dbTransaction *models.Transaction, input *btcjson.Vin) error {
	var dbPreviousTransactionOutput models.TransactionOutput
	dbResult := dbTx.
		Joins("LEFT JOIN `transactions` ON `transactions`.`id` = `transaction_outputs`.`transaction_id`").
		Where("`transactions`.`transaction_id` = ? AND `transaction_outputs`.`index` = ?", input.TxID, input.Vout).
		First(&dbPreviousTransactionOutput)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find previous transactionOutput: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		return fmt.Errorf("missing transaction output for txID: %s and index: %d", input.TxID, input.Vout)
	}

	var dbTransactionInputCount int
	dbResult = dbTx.
		Model(&models.TransactionInput{}).
		Where(&models.TransactionInput{TransactionID: dbTransaction.ID, PreviousTransactionOutputID: dbPreviousTransactionOutput.ID}).
		Count(&dbTransactionInputCount)
	dbErrors = dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find transactionInput: ", dbErrors)
	}
	if dbTransactionInputCount == 0 {
		scriptSig, err := hex.DecodeString(input.ScriptSig.Hex)
		if err != nil {
			// Propagate the decode error instead of silently returning nil
			return err
		}
		dbTransactionInput := models.TransactionInput{
			TransactionID:               dbTransaction.ID,
			PreviousTransactionOutputID: dbPreviousTransactionOutput.ID,
			Index:                       input.Vout,
			SignatureScript:             scriptSig,
			Sequence:                    input.Sequence,
		}
		dbResult := dbTx.Create(&dbTransactionInput)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to insert transactionInput: ", dbErrors)
		}
	}

	return nil
}
func insertTransactionOutputs(dbTx *gorm.DB, transaction *btcjson.TxRawResult, dbTransaction *models.Transaction) error {
	for _, output := range transaction.Vout {
		scriptPubKey, err := hex.DecodeString(output.ScriptPubKey.Hex)
		if err != nil {
			return err
		}
		dbAddress, err := insertAddress(dbTx, scriptPubKey)
		if err != nil {
			return err
		}
		err = insertTransactionOutput(dbTx, dbTransaction, &output, scriptPubKey, dbAddress)
		if err != nil {
			return err
		}
	}
	return nil
}

func insertAddress(dbTx *gorm.DB, scriptPubKey []byte) (*models.Address, error) {
	_, addr, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey, config.ActiveNetParams())
	if err != nil {
		return nil, err
	}
	hexAddress := addr.EncodeAddress()

	var dbAddress models.Address
	dbResult := dbTx.
		Where(&models.Address{Address: hexAddress}).
		First(&dbAddress)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to find address: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		dbAddress = models.Address{
			Address: hexAddress,
		}
		dbResult := dbTx.Create(&dbAddress)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return nil, utils.NewErrorFromDBErrors("failed to insert address: ", dbErrors)
		}
	}
	return &dbAddress, nil
}

func insertTransactionOutput(dbTx *gorm.DB, dbTransaction *models.Transaction,
	output *btcjson.Vout, scriptPubKey []byte, dbAddress *models.Address) error {
	var dbTransactionOutputCount int
	dbResult := dbTx.
		Model(&models.TransactionOutput{}).
		Where(&models.TransactionOutput{TransactionID: dbTransaction.ID, Index: output.N}).
		Count(&dbTransactionOutputCount)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find transactionOutput: ", dbErrors)
	}
	if dbTransactionOutputCount == 0 {
		dbTransactionOutput := models.TransactionOutput{
			TransactionID: dbTransaction.ID,
			Index:         output.N,
			Value:         output.Value,
			IsSpent:       false, // This must be false for updateSelectedParentChain to work properly
			ScriptPubKey:  scriptPubKey,
			AddressID:     dbAddress.ID,
		}
		dbResult := dbTx.Create(&dbTransactionOutput)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to insert transactionOutput: ", dbErrors)
		}
	}
	return nil
}

// updateSelectedParentChain updates the database to reflect the current selected
// parent chain. First it "unaccepts" all removedChainHashes and then it "accepts"
// all addedChainBlocks.
// Note that this function starts a database transaction by itself and
// commits it before returning.
func updateSelectedParentChain(removedChainHashes []string, addedChainBlocks []btcjson.ChainBlock) error {
	db, err := database.DB()
	if err != nil {
		return err
	}
	dbTx := db.Begin()

	for _, removedHash := range removedChainHashes {
		err := updateRemovedChainHashes(dbTx, removedHash)
		if err != nil {
			return err
		}
	}
	for _, addedBlock := range addedChainBlocks {
		err := updateAddedChainBlocks(dbTx, &addedBlock)
		if err != nil {
			return err
		}
	}

	dbTx.Commit()
	return nil
}

// updateRemovedChainHashes "unaccepts" the block of the given removedHash.
// That is to say, it marks it as not in the selected parent chain in the
// following ways:
// * All its TransactionInputs.PreviousTransactionOutputs are set IsSpent = false
// * All its Transactions are set AcceptingBlockID = nil
// * The block is set IsChainBlock = false
// This function will return an error if any of the above are in an unexpected state
func updateRemovedChainHashes(dbTx *gorm.DB, removedHash string) error {
	var dbBlock models.Block
	dbResult := dbTx.
		Where(&models.Block{BlockHash: removedHash}).
		First(&dbBlock)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find block: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		return fmt.Errorf("missing block for hash: %s", removedHash)
	}
	if !dbBlock.IsChainBlock {
		return fmt.Errorf("block erroneously marked as not a chain block: %s", removedHash)
	}

	var dbTransactions []models.Transaction
	dbResult = dbTx.
		Where(&models.Transaction{AcceptingBlockID: &dbBlock.ID}).
		Preload("TransactionInputs.PreviousTransactionOutput").
		Find(&dbTransactions)
	dbErrors = dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find transactions: ", dbErrors)
	}
	for _, dbTransaction := range dbTransactions {
		for _, dbTransactionInput := range dbTransaction.TransactionInputs {
			dbPreviousTransactionOutput := dbTransactionInput.PreviousTransactionOutput
			if !dbPreviousTransactionOutput.IsSpent {
				return fmt.Errorf("cannot de-spend an unspent transaction output: %s index: %d",
					dbTransaction.TransactionID, dbTransactionInput.Index)
			}

			dbPreviousTransactionOutput.IsSpent = false
			dbResult = dbTx.Save(&dbPreviousTransactionOutput)
			dbErrors = dbResult.GetErrors()
			if utils.HasDBError(dbErrors) {
				return utils.NewErrorFromDBErrors("failed to update transactionOutput: ", dbErrors)
			}
		}

		dbTransaction.AcceptingBlockID = nil
		dbResult := dbTx.Save(&dbTransaction)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to update transaction: ", dbErrors)
		}
	}

	dbBlock.IsChainBlock = false
	dbResult = dbTx.Save(&dbBlock)
	dbErrors = dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to update block: ", dbErrors)
	}

	return nil
}

// updateAddedChainBlocks "accepts" the given addedBlock. That is to say,
// it marks it as in the selected parent chain in the following ways:
// * All its TransactionInputs.PreviousTransactionOutputs are set IsSpent = true
// * All its Transactions are set AcceptingBlockID = addedBlock
// * The block is set IsChainBlock = true
// This function will return an error if any of the above are in an unexpected state
func updateAddedChainBlocks(dbTx *gorm.DB, addedBlock *btcjson.ChainBlock) error {
	for _, acceptedBlock := range addedBlock.AcceptedBlocks {
		var dbAcceptedBlock models.Block
		dbResult := dbTx.
			Where(&models.Block{BlockHash: acceptedBlock.Hash}).
			First(&dbAcceptedBlock)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to find block: ", dbErrors)
		}
		if utils.IsDBRecordNotFoundError(dbErrors) {
			return fmt.Errorf("missing block for hash: %s", acceptedBlock.Hash)
		}
		if dbAcceptedBlock.IsChainBlock {
			return fmt.Errorf("block erroneously marked as a chain block: %s", acceptedBlock.Hash)
		}

		dbWhereTransactionIDsIn := make([]*models.Transaction, len(acceptedBlock.AcceptedTxIDs))
		for i, acceptedTxID := range acceptedBlock.AcceptedTxIDs {
			dbWhereTransactionIDsIn[i] = &models.Transaction{TransactionID: acceptedTxID}
		}
		var dbAcceptedTransactions []models.Transaction
		// Find rather than First: every accepted transaction is needed.
		dbResult = dbTx.
			Where(dbWhereTransactionIDsIn).
			Preload("TransactionInputs.PreviousTransactionOutput").
			Find(&dbAcceptedTransactions)
		dbErrors = dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to find transactions: ", dbErrors)
		}
		if len(dbAcceptedTransactions) != len(acceptedBlock.AcceptedTxIDs) {
			return fmt.Errorf("some transactions are missing for block: %s", acceptedBlock.Hash)
		}

		for _, dbAcceptedTransaction := range dbAcceptedTransactions {
			for _, dbTransactionInput := range dbAcceptedTransaction.TransactionInputs {
				dbPreviousTransactionOutput := dbTransactionInput.PreviousTransactionOutput
				if dbPreviousTransactionOutput.IsSpent {
					return fmt.Errorf("cannot spend an already spent transaction output: %s index: %d",
						dbAcceptedTransaction.TransactionID, dbTransactionInput.Index)
				}

				dbPreviousTransactionOutput.IsSpent = true
				dbResult = dbTx.Save(&dbPreviousTransactionOutput)
				dbErrors = dbResult.GetErrors()
				if utils.HasDBError(dbErrors) {
					return utils.NewErrorFromDBErrors("failed to update transactionOutput: ", dbErrors)
				}
			}

			dbAcceptedTransaction.AcceptingBlockID = &dbAcceptedBlock.ID
			dbResult = dbTx.Save(&dbAcceptedTransaction)
			dbErrors = dbResult.GetErrors()
			if utils.HasDBError(dbErrors) {
				return utils.NewErrorFromDBErrors("failed to update transaction: ", dbErrors)
			}
		}

		dbAcceptedBlock.IsChainBlock = true
		dbResult = dbTx.Save(&dbAcceptedBlock)
		dbErrors = dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to update block: ", dbErrors)
		}
	}
	return nil
}
// handleBlockAddedMsg handles onBlockAdded messages
func handleBlockAddedMsg(client *jsonrpc.Client, blockAdded *jsonrpc.BlockAddedMsg) {
	hash := blockAdded.Header.BlockHash()
	block, rawBlock, err := fetchBlock(client, hash)
	if err != nil {
		log.Warnf("Could not fetch block %s: %s", hash, err)
		return
	}
	err = addBlock(client, block, *rawBlock)
	if err != nil {
		log.Warnf("Could not insert block %s: %s", hash, err)
		return
	}
	log.Infof("Added block %s", hash)
}

// canHandleChainChangedMsg checks whether we have all the necessary data
// to successfully handle a ChainChangedMsg.
func canHandleChainChangedMsg(chainChanged *jsonrpc.ChainChangedMsg) (bool, error) {
	dbTx, err := database.DB()
	if err != nil {
		return false, err
	}

	// Collect all unique referenced block hashes
	hashes := make(map[string]struct{})
	for _, removedHash := range chainChanged.RemovedChainBlockHashes {
		hashes[removedHash.String()] = struct{}{}
	}
	for _, addedBlock := range chainChanged.AddedChainBlocks {
		hashes[addedBlock.Hash.String()] = struct{}{}
		for _, acceptedBlock := range addedBlock.AcceptedBlocks {
			hashes[acceptedBlock.Hash.String()] = struct{}{}
		}
	}

	// Make sure that all the hashes exist in the database
	dbWhereBlockHashesIn := make([]*models.Block, len(hashes))
	i := 0
	for hash := range hashes {
		dbWhereBlockHashesIn[i] = &models.Block{BlockHash: hash}
		i++
	}
	var dbBlocksCount int
	dbResult := dbTx.
		Where(dbWhereBlockHashesIn).
		Count(&dbBlocksCount)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return false, utils.NewErrorFromDBErrors("failed to find block count: ", dbErrors)
	}
	if len(hashes) != dbBlocksCount {
		return false, nil
	}

	return true, nil
}

// handleChainChangedMsg handles onChainChanged messages
func handleChainChangedMsg(chainChanged *jsonrpc.ChainChangedMsg) {
	// Convert the data in chainChanged to something we can feed into
	// updateSelectedParentChain
	removedHashes, addedBlocks := convertChainChangedMsg(chainChanged)

	err := updateSelectedParentChain(removedHashes, addedBlocks)
	if err != nil {
		log.Warnf("Could not update selected parent chain: %s", err)
		return
	}
	log.Infof("Chain changed: removed %d blocks and added %d blocks",
		len(removedHashes), len(addedBlocks))
}

func convertChainChangedMsg(chainChanged *jsonrpc.ChainChangedMsg) (
	removedHashes []string, addedBlocks []btcjson.ChainBlock) {

	removedHashes = make([]string, len(chainChanged.RemovedChainBlockHashes))
	for i, hash := range chainChanged.RemovedChainBlockHashes {
		removedHashes[i] = hash.String()
	}

	addedBlocks = make([]btcjson.ChainBlock, len(chainChanged.AddedChainBlocks))
	for i, addedBlock := range chainChanged.AddedChainBlocks {
		acceptedBlocks := make([]btcjson.AcceptedBlock, len(addedBlock.AcceptedBlocks))
		for j, acceptedBlock := range addedBlock.AcceptedBlocks {
			acceptedTxIDs := make([]string, len(acceptedBlock.AcceptedTxIDs))
			for k, acceptedTxID := range acceptedBlock.AcceptedTxIDs {
				acceptedTxIDs[k] = acceptedTxID.String()
			}
			acceptedBlocks[j] = btcjson.AcceptedBlock{
				Hash:          acceptedBlock.Hash.String(),
				AcceptedTxIDs: acceptedTxIDs,
			}
		}
		addedBlocks[i] = btcjson.ChainBlock{
			Hash:           addedBlock.Hash.String(),
			AcceptedBlocks: acceptedBlocks,
		}
	}

	return removedHashes, addedBlocks
}
@@ -1,79 +0,0 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type contextKey string
|
||||
|
||||
const (
|
||||
contextKeyRequestID contextKey = "REQUEST_ID"
|
||||
)
|
||||
|
||||
// APIServerContext is a context.Context wrapper that
|
||||
// enables custom logs with request ID.
|
||||
type APIServerContext struct {
|
||||
context.Context
|
||||
}
|
||||
|
||||
// ToAPIServerContext takes a context.Context instance
|
||||
// and converts it to *APIServerContext.
|
||||
func ToAPIServerContext(ctx context.Context) *APIServerContext {
|
||||
if asCtx, ok := ctx.(*APIServerContext); ok {
|
||||
return asCtx
|
||||
}
|
||||
return &APIServerContext{Context: ctx}
|
||||
}
|
||||
|
||||
// SetRequestID associates a request ID for the context.
|
||||
func (ctx *APIServerContext) SetRequestID(requestID uint64) context.Context {
|
||||
ctx.Context = context.WithValue(ctx.Context, contextKeyRequestID, requestID)
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (ctx *APIServerContext) requestID() uint64 {
|
||||
id := ctx.Value(contextKeyRequestID)
|
||||
uint64ID, _ := id.(uint64)
|
||||
return uint64ID
|
||||
}
|
||||
|
||||
func (ctx *APIServerContext) getLogString(format string, params ...interface{}) string {
|
||||
return fmt.Sprintf("RID %d: ", ctx.requestID()) + fmt.Sprintf(format, params...)
|
||||
}
|
||||
|
||||
// Tracef writes a customized formatted context
|
||||
// related log with log level 'Trace'.
|
||||
func (ctx *APIServerContext) Tracef(format string, params ...interface{}) {
|
||||
log.Trace(ctx.getLogString(format, params...))
|
||||
}
|
||||
|
||||
// Debugf writes a customized formatted context
|
||||
// related log with log level 'Debug'.
|
||||
func (ctx *APIServerContext) Debugf(format string, params ...interface{}) {
|
||||
log.Debug(ctx.getLogString(format, params...))
|
||||
}
|
||||
|
||||
// Infof writes a customized formatted context
|
||||
// related log with log level 'Info'.
|
||||
func (ctx *APIServerContext) Infof(format string, params ...interface{}) {
|
||||
log.Info(ctx.getLogString(format, params...))
|
||||
}
|
||||
|
||||
// Warnf writes a customized formatted context
|
||||
// related log with log level 'Warn'.
|
||||
func (ctx *APIServerContext) Warnf(format string, params ...interface{}) {
|
||||
log.Warn(ctx.getLogString(format, params...))
|
||||
}
|
||||
|
||||
// Errorf writes a customized formatted context
|
||||
// related log with log level 'Error'.
|
||||
func (ctx *APIServerContext) Errorf(format string, params ...interface{}) {
|
||||
log.Error(ctx.getLogString(format, params...))
|
||||
}
|
||||
|
||||
// Criticalf writes a customized formatted context
|
||||
// related log with log level 'Critical'.
|
||||
func (ctx *APIServerContext) Criticalf(format string, params ...interface{}) {
|
||||
log.Critical(ctx.getLogString(format, params...))
|
||||
}
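For reference, a minimal usage sketch of the wrapper above; the handler function is hypothetical and not part of this file:

// handleRequest is a hypothetical caller that attaches a request ID
// to an incoming context and then emits request-scoped logs.
func handleRequest(baseCtx context.Context, requestID uint64) {
	ctx := ToAPIServerContext(baseCtx)
	ctx.SetRequestID(requestID)
	// With requestID 42 this logs: "RID 42: handling request".
	ctx.Infof("handling request")
}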
|
||||
@@ -1,74 +0,0 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/jinzhu/gorm"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// HandlerError is an error returned from
|
||||
// a rest route handler or a middleware.
|
||||
type HandlerError struct {
|
||||
Code int
|
||||
Message string
|
||||
ClientMessage string
|
||||
}
|
||||
|
||||
func (hErr *HandlerError) Error() string {
|
||||
return hErr.Message
|
||||
}
|
||||
|
||||
// NewHandlerError returns a HandlerError with the given code and message.
|
||||
func NewHandlerError(code int, message string) *HandlerError {
|
||||
return &HandlerError{
|
||||
Code: code,
|
||||
Message: message,
|
||||
ClientMessage: message,
|
||||
}
|
||||
}
|
||||
|
||||
// NewHandlerErrorWithCustomClientMessage returns a HandlerError with
|
||||
// the given code, message and client error message.
|
||||
func NewHandlerErrorWithCustomClientMessage(code int, message, clientMessage string) *HandlerError {
|
||||
return &HandlerError{
|
||||
Code: code,
|
||||
Message: message,
|
||||
ClientMessage: clientMessage,
|
||||
}
|
||||
}
|
||||
|
||||
// NewInternalServerHandlerError returns a HandlerError with
|
||||
// the given message, and the http.StatusInternalServerError
|
||||
// status text as client message.
|
||||
func NewInternalServerHandlerError(message string) *HandlerError {
|
||||
return NewHandlerErrorWithCustomClientMessage(http.StatusInternalServerError, message, http.StatusText(http.StatusInternalServerError))
|
||||
}
|
||||
|
||||
// NewErrorFromDBErrors takes a slice of database errors and a prefix, and
|
||||
// returns an error with all of the database errors formatted into one string with
|
||||
// the given prefix.
|
||||
func NewErrorFromDBErrors(prefix string, dbErrors []error) error {
|
||||
dbErrorsStrings := make([]string, len(dbErrors))
|
||||
for i, dbErr := range dbErrors {
|
||||
dbErrorsStrings[i] = fmt.Sprintf("\"%s\"", dbErr)
|
||||
}
|
||||
return fmt.Errorf("%s [%s]", prefix, strings.Join(dbErrorsStrings, ","))
|
||||
}
|
||||
|
||||
// NewHandlerErrorFromDBErrors takes a slice of database errors and a prefix, and
|
||||
// returns a HandlerError with the code http.StatusInternalServerError and
|
||||
// all of the database errors formatted into one string with the given prefix.
|
||||
func NewHandlerErrorFromDBErrors(prefix string, dbErrors []error) *HandlerError {
|
||||
return NewInternalServerHandlerError(NewErrorFromDBErrors(prefix, dbErrors).Error())
|
||||
}
|
||||
|
||||
// IsDBRecordNotFoundError returns true if the given dbErrors contains only a RecordNotFound error
|
||||
func IsDBRecordNotFoundError(dbErrors []error) bool {
|
||||
return len(dbErrors) == 1 && gorm.IsRecordNotFoundError(dbErrors[0])
|
||||
}
|
||||
|
||||
// HasDBError returns true if the given dbErrors contain any errors that aren't RecordNotFound
|
||||
func HasDBError(dbErrors []error) bool {
|
||||
return !IsDBRecordNotFoundError(dbErrors) && len(dbErrors) > 0
|
||||
}
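A minimal sketch of how these helpers are typically combined in a route handler; the function below is illustrative, not part of this file:

// toHandlerError maps a slice of gorm errors onto the helpers above:
// a lone RecordNotFound becomes 404, any other error becomes 500.
func toHandlerError(dbErrors []error) *HandlerError {
	switch {
	case IsDBRecordNotFoundError(dbErrors):
		return NewHandlerError(http.StatusNotFound, "record not found")
	case HasDBError(dbErrors):
		return NewHandlerErrorFromDBErrors("database query failed: ", dbErrors)
	default:
		return nil
	}
}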
|
||||
@@ -1,9 +0,0 @@
|
||||
package utils
|
||||
|
||||
import "github.com/daglabs/btcd/util/panics"
|
||||
import "github.com/daglabs/btcd/apiserver/logger"
|
||||
|
||||
var (
|
||||
log = logger.BackendLog.Logger("UTIL")
|
||||
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
|
||||
)
|
||||
247
app/app.go
Normal file
@@ -0,0 +1,247 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kaspanet/kaspad/network/addressmanager"
|
||||
|
||||
"github.com/kaspanet/kaspad/network/netadapter/id"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/blockdag"
|
||||
"github.com/kaspanet/kaspad/domain/blockdag/indexers"
|
||||
"github.com/kaspanet/kaspad/domain/mempool"
|
||||
"github.com/kaspanet/kaspad/domain/mining"
|
||||
"github.com/kaspanet/kaspad/domain/txscript"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/dbaccess"
|
||||
"github.com/kaspanet/kaspad/infrastructure/signal"
|
||||
"github.com/kaspanet/kaspad/network/connmanager"
|
||||
"github.com/kaspanet/kaspad/network/dnsseed"
|
||||
"github.com/kaspanet/kaspad/network/domainmessage"
|
||||
"github.com/kaspanet/kaspad/network/netadapter"
|
||||
"github.com/kaspanet/kaspad/network/protocol"
|
||||
"github.com/kaspanet/kaspad/network/rpc"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
// App is a wrapper for all the kaspad services
|
||||
type App struct {
|
||||
cfg *config.Config
|
||||
rpcServer *rpc.Server
|
||||
addressManager *addressmanager.AddressManager
|
||||
protocolManager *protocol.Manager
|
||||
connectionManager *connmanager.ConnectionManager
|
||||
netAdapter *netadapter.NetAdapter
|
||||
|
||||
started, shutdown int32
|
||||
}
|
||||
|
||||
// Start launches all the kaspad services.
|
||||
func (a *App) Start() {
|
||||
// Already started?
|
||||
if atomic.AddInt32(&a.started, 1) != 1 {
|
||||
return
|
||||
}
|
||||
|
||||
log.Trace("Starting kaspad")
|
||||
|
||||
err := a.protocolManager.Start()
|
||||
if err != nil {
|
||||
panics.Exit(log, fmt.Sprintf("Error starting the p2p protocol: %+v", err))
|
||||
}
|
||||
|
||||
a.maybeSeedFromDNS()
|
||||
|
||||
a.connectionManager.Start()
|
||||
|
||||
if !a.cfg.DisableRPC {
|
||||
a.rpcServer.Start()
|
||||
}
|
||||
}
|
||||
|
||||
// Stop gracefully shuts down all the kaspad services.
|
||||
func (a *App) Stop() {
|
||||
// Make sure this only happens once.
|
||||
if atomic.AddInt32(&a.shutdown, 1) != 1 {
|
||||
log.Infof("Kaspad is already in the process of shutting down")
|
||||
return
|
||||
}
|
||||
|
||||
log.Warnf("Kaspad shutting down")
|
||||
|
||||
a.connectionManager.Stop()
|
||||
|
||||
err := a.protocolManager.Stop()
|
||||
if err != nil {
|
||||
log.Errorf("Error stopping the p2p protocol: %+v", err)
|
||||
}
|
||||
|
||||
// Shutdown the RPC server if it's not disabled.
|
||||
if !a.cfg.DisableRPC {
|
||||
err := a.rpcServer.Stop()
|
||||
if err != nil {
|
||||
log.Errorf("Error stopping rpcServer: %+v", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = a.addressManager.Stop()
|
||||
if err != nil {
|
||||
log.Errorf("Error stopping address manager: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// New returns a new App instance configured to listen on addr for the
|
||||
// kaspa network type specified by dagParams. Use Start to begin accepting
|
||||
// connections from peers.
|
||||
func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{}) (*App, error) {
|
||||
indexManager, acceptanceIndex := setupIndexes(cfg)
|
||||
|
||||
sigCache := txscript.NewSigCache(cfg.SigCacheMaxSize)
|
||||
|
||||
// Create a new block DAG instance with the appropriate configuration.
|
||||
dag, err := setupDAG(cfg, databaseContext, interrupt, sigCache, indexManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txMempool := setupMempool(cfg, dag, sigCache)
|
||||
|
||||
netAdapter, err := netadapter.NewNetAdapter(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addressManager := addressmanager.New(cfg, databaseContext)
|
||||
|
||||
connectionManager, err := connmanager.New(cfg, netAdapter, addressManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
protocolManager, err := protocol.NewManager(cfg, dag, netAdapter, addressManager, txMempool, connectionManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rpcServer, err := setupRPC(
|
||||
cfg, dag, txMempool, sigCache, acceptanceIndex, connectionManager, addressManager, protocolManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &App{
|
||||
cfg: cfg,
|
||||
rpcServer: rpcServer,
|
||||
protocolManager: protocolManager,
|
||||
connectionManager: connectionManager,
|
||||
netAdapter: netAdapter,
|
||||
addressManager: addressManager,
|
||||
}, nil
|
||||
}
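A hypothetical sketch of how a daemon would drive App; cfg, databaseContext, and interrupt are assumed to come from the caller's startup code:

// Construct the App, start its services, and block until shutdown.
kaspadApp, err := app.New(cfg, databaseContext, interrupt)
if err != nil {
	return err
}
kaspadApp.Start()
defer kaspadApp.Stop()
<-interrupt // wait for a shutdown request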
|
||||
|
||||
func (a *App) maybeSeedFromDNS() {
|
||||
if !a.cfg.DisableDNSSeed {
|
||||
dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, domainmessage.SFNodeNetwork, false, nil,
|
||||
a.cfg.Lookup, func(addresses []*domainmessage.NetAddress) {
|
||||
// Kaspad uses a lookup of the DNS seeder here. Since the seeder returns
|
||||
// IPs of nodes and not its own IP, we cannot know the real IP of the
|
||||
// source, so we take the first returned address as the source.
|
||||
a.addressManager.AddAddresses(addresses, addresses[0], nil)
|
||||
})
|
||||
}
|
||||
}
|
||||
func setupDAG(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{},
|
||||
sigCache *txscript.SigCache, indexManager blockdag.IndexManager) (*blockdag.BlockDAG, error) {
|
||||
|
||||
dag, err := blockdag.New(&blockdag.Config{
|
||||
Interrupt: interrupt,
|
||||
DatabaseContext: databaseContext,
|
||||
DAGParams: cfg.NetParams(),
|
||||
TimeSource: blockdag.NewTimeSource(),
|
||||
SigCache: sigCache,
|
||||
IndexManager: indexManager,
|
||||
SubnetworkID: cfg.SubnetworkID,
|
||||
})
|
||||
return dag, err
|
||||
}
|
||||
|
||||
func setupIndexes(cfg *config.Config) (blockdag.IndexManager, *indexers.AcceptanceIndex) {
|
||||
// Create indexes if needed.
|
||||
var indexes []indexers.Indexer
|
||||
var acceptanceIndex *indexers.AcceptanceIndex
|
||||
if cfg.AcceptanceIndex {
|
||||
log.Info("acceptance index is enabled")
|
||||
acceptanceIndex = indexers.NewAcceptanceIndex()
|
||||
indexes = append(indexes, acceptanceIndex)
|
||||
}
|
||||
|
||||
// Create an index manager if any of the optional indexes are enabled.
|
||||
if len(indexes) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
indexManager := indexers.NewManager(indexes)
|
||||
return indexManager, acceptanceIndex
|
||||
}
|
||||
|
||||
func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript.SigCache) *mempool.TxPool {
|
||||
mempoolConfig := mempool.Config{
|
||||
Policy: mempool.Policy{
|
||||
AcceptNonStd: cfg.RelayNonStd,
|
||||
MaxOrphanTxs: cfg.MaxOrphanTxs,
|
||||
MaxOrphanTxSize: config.DefaultMaxOrphanTxSize,
|
||||
MinRelayTxFee: cfg.MinRelayTxFee,
|
||||
MaxTxVersion: 1,
|
||||
},
|
||||
CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
|
||||
return dag.CalcSequenceLockNoLock(tx, utxoSet)
|
||||
},
|
||||
SigCache: sigCache,
|
||||
DAG: dag,
|
||||
}
|
||||
|
||||
return mempool.New(&mempoolConfig)
|
||||
}
|
||||
|
||||
func setupRPC(cfg *config.Config,
|
||||
dag *blockdag.BlockDAG,
|
||||
txMempool *mempool.TxPool,
|
||||
sigCache *txscript.SigCache,
|
||||
acceptanceIndex *indexers.AcceptanceIndex,
|
||||
connectionManager *connmanager.ConnectionManager,
|
||||
addressManager *addressmanager.AddressManager,
|
||||
protocolManager *protocol.Manager) (*rpc.Server, error) {
|
||||
|
||||
if !cfg.DisableRPC {
|
||||
policy := mining.Policy{
|
||||
BlockMaxMass: cfg.BlockMaxMass,
|
||||
}
|
||||
blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, txMempool, dag, sigCache)
|
||||
|
||||
rpcServer, err := rpc.NewRPCServer(cfg, dag, txMempool, acceptanceIndex, blockTemplateGenerator,
|
||||
connectionManager, addressManager, protocolManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Signal process shutdown when the RPC server requests it.
|
||||
spawn("setupRPC-handleShutdownRequest", func() {
|
||||
<-rpcServer.RequestedProcessShutdown()
|
||||
signal.ShutdownRequestChannel <- struct{}{}
|
||||
})
|
||||
|
||||
return rpcServer, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// P2PNodeID returns the network ID associated with this App
|
||||
func (a *App) P2PNodeID() *id.ID {
|
||||
return a.netAdapter.ID()
|
||||
}
|
||||
|
||||
// AddressManager returns the AddressManager associated with this App
|
||||
func (a *App) AddressManager() *addressmanager.AddressManager {
|
||||
return a.addressManager
|
||||
}
|
||||
14
app/log.go
Normal file
@@ -0,0 +1,14 @@
|
||||
// Copyright (c) 2013-2017 The btcsuite developers
|
||||
// Copyright (c) 2017 The Decred developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package app
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
var log, _ = logger.Get(logger.SubsystemTags.KASD)
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
@@ -1,103 +0,0 @@
|
||||
blockchain
|
||||
==========
|
||||
|
||||
[](https://travis-ci.org/btcsuite/btcd)
|
||||
[](http://copyfree.org)
|
||||
[](http://godoc.org/github.com/daglabs/btcd/blockchain)
|
||||
|
||||
Package blockchain implements bitcoin block handling and chain selection rules.
|
||||
The test coverage is currently only around 60%, but will be increasing over
|
||||
time. See `test_coverage.txt` for the gocov coverage report. Alternatively, if
|
||||
you are running a POSIX OS, you can run the `cov_report.sh` script for a
|
||||
real-time report. Package blockchain is licensed under the liberal ISC license.
|
||||
|
||||
There is an associated blog post about the release of this package
|
||||
[here](https://blog.conformal.com/btcchain-the-bitcoin-chain-package-from-bctd/).
|
||||
|
||||
This package has intentionally been designed so it can be used as a standalone
|
||||
package for any projects needing to handle processing of blocks into the bitcoin
|
||||
block chain.
|
||||
|
||||
## Installation and Updating
|
||||
|
||||
```bash
|
||||
$ go get -u github.com/daglabs/btcd/blockchain
|
||||
```
|
||||
|
||||
## Bitcoin Chain Processing Overview
|
||||
|
||||
Before a block is allowed into the block chain, it must go through an intensive
|
||||
series of validation rules. The following list serves as a general outline of
|
||||
those rules to provide some intuition into what is going on under the hood, but
|
||||
is by no means exhaustive:
|
||||
|
||||
- Reject duplicate blocks
|
||||
- Perform a series of sanity checks on the block and its transactions such as
|
||||
verifying proof of work, timestamps, number and character of transactions,
|
||||
transaction amounts, script complexity, and merkle root calculations
|
||||
- Compare the block against predetermined checkpoints for expected timestamps
|
||||
and difficulty based on elapsed time since the checkpoint
|
||||
- Save the most recent orphan blocks for a limited time in case their parent
|
||||
blocks become available
|
||||
- Stop processing if the block is an orphan as the rest of the processing
|
||||
depends on the block's position within the block chain
|
||||
- Perform a series of more thorough checks that depend on the block's position
|
||||
within the block chain such as verifying block difficulties adhere to
|
||||
difficulty retarget rules, timestamps are after the median of the last
|
||||
several blocks, all transactions are finalized, checkpoint blocks match, and
|
||||
block versions are in line with the previous blocks
|
||||
- Determine how the block fits into the chain and perform different actions
|
||||
accordingly in order to ensure any side chains which have higher difficulty
|
||||
than the main chain become the new main chain
|
||||
- When a block is being connected to the main chain (either through
|
||||
reorganization of a side chain to the main chain or just extending the
|
||||
main chain), perform further checks on the block's transactions such as
|
||||
verifying transaction duplicates, script complexity for the combination of
|
||||
connected scripts, coinbase maturity, double spends, and connected
|
||||
transaction values
|
||||
- Run the transaction scripts to verify the spender is allowed to spend the
|
||||
coins
|
||||
- Insert the block into the block database
|
||||
|
||||
## Examples
|
||||
|
||||
* [ProcessBlock Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-BlockChain-ProcessBlock)
|
||||
Demonstrates how to create a new chain instance and use ProcessBlock to
|
||||
attempt to add a block to the chain. This example intentionally
|
||||
attempts to insert a duplicate genesis block to illustrate how an invalid
|
||||
block is handled.
|
||||
|
||||
* [CompactToBig Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-CompactToBig)
|
||||
Demonstrates how to convert the compact "bits" in a block header which
|
||||
represent the target difficulty to a big integer and display it using the
|
||||
typical hex notation. A decoding sketch follows this list.
|
||||
|
||||
* [BigToCompact Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-BigToCompact)
|
||||
Demonstrates how to convert a target difficulty into the
|
||||
compact "bits" in a block header which represent that target difficulty.
|
||||
|
||||
## GPG Verification Key
|
||||
|
||||
All official release tags are signed by Conformal so users can ensure the code
|
||||
has not been tampered with and is coming from the btcsuite developers. To
|
||||
verify the signature perform the following:
|
||||
|
||||
- Download the public key from the Conformal website at
|
||||
https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt
|
||||
|
||||
- Import the public key into your GPG keyring:
|
||||
```bash
|
||||
gpg --import GIT-GPG-KEY-conformal.txt
|
||||
```
|
||||
|
||||
- Verify the release tag with the following command where `TAG_NAME` is a
|
||||
placeholder for the specific tag:
|
||||
```bash
|
||||
git tag -v TAG_NAME
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
|
||||
Package blockchain is licensed under the [copyfree](http://copyfree.org) ISC
|
||||
License.
|
||||
@@ -1,128 +0,0 @@
|
||||
// Copyright (c) 2013-2017 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/daglabs/btcd/database"
|
||||
"github.com/daglabs/btcd/util"
|
||||
)
|
||||
|
||||
func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error {
|
||||
blockHeader := &block.MsgBlock().Header
|
||||
newNode := newBlockNode(blockHeader, newSet(), dag.dagParams.K)
|
||||
newNode.status = statusInvalidAncestor
|
||||
dag.index.AddNode(newNode)
|
||||
return dag.index.flushToDB()
|
||||
}
|
||||
|
||||
// maybeAcceptBlock potentially accepts a block into the block DAG. It
|
||||
// performs several validation checks which depend on its position within
|
||||
// the block DAG before adding it. The block is expected to have already
|
||||
// gone through ProcessBlock before calling this function with it.
|
||||
//
|
||||
// The flags are also passed to checkBlockContext and connectToDAG. See
|
||||
// their documentation for how the flags modify their behavior.
|
||||
//
|
||||
// This function MUST be called with the dagLock held (for writes).
|
||||
func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error {
|
||||
parents, err := lookupParentNodes(block, dag)
|
||||
if err != nil {
|
||||
if rErr, ok := err.(RuleError); ok && rErr.ErrorCode == ErrInvalidAncestorBlock {
|
||||
err := dag.addNodeToIndexWithInvalidAncestor(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// The block must pass all of the validation rules which depend on the
|
||||
// position of the block within the block DAG.
|
||||
err = dag.checkBlockContext(block, parents, flags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create a new block node for the block and add it to the node index.
|
||||
newNode := newBlockNode(&block.MsgBlock().Header, parents, dag.dagParams.K)
|
||||
newNode.status = statusDataStored
|
||||
dag.index.AddNode(newNode)
|
||||
|
||||
// Insert the block into the database if it's not already there. Even
|
||||
// though it is possible the block will ultimately fail to connect, it
|
||||
// has already passed all proof-of-work and validity tests which means
|
||||
// it would be prohibitively expensive for an attacker to fill up the
|
||||
// disk with a bunch of blocks that fail to connect. This is necessary
|
||||
// since it allows block download to be decoupled from the much more
|
||||
// expensive connection logic. It also has some other nice properties
|
||||
// such as making blocks that never become part of the DAG or
|
||||
// blocks that fail to connect available for further analysis.
|
||||
err = dag.db.Update(func(dbTx database.Tx) error {
|
||||
err := dbStoreBlock(dbTx, block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dag.index.flushToDBWithTx(dbTx)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Make sure that all the block's transactions are finalized
|
||||
fastAdd := flags&BFFastAdd == BFFastAdd
|
||||
bluestParent := parents.bluest()
|
||||
if !fastAdd {
|
||||
if err := dag.validateAllTxsFinalized(block, newNode, bluestParent); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
block.SetChainHeight(newNode.chainHeight)
|
||||
|
||||
// Connect the passed block to the DAG. This also handles validation of the
|
||||
// transaction scripts.
|
||||
chainUpdates, err := dag.addBlock(newNode, parents, block, flags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Notify the caller that the new block was accepted into the block
|
||||
// DAG. The caller would typically want to react by relaying the
|
||||
// inventory to other peers.
|
||||
dag.dagLock.Unlock()
|
||||
dag.sendNotification(NTBlockAdded, &BlockAddedNotificationData{
|
||||
Block: block,
|
||||
WasUnorphaned: flags&BFWasUnorphaned != 0,
|
||||
})
|
||||
dag.sendNotification(NTChainChanged, &ChainChangedNotificationData{
|
||||
RemovedChainBlockHashes: chainUpdates.removedChainBlockHashes,
|
||||
AddedChainBlockHashes: chainUpdates.addedChainBlockHashes,
|
||||
})
|
||||
dag.dagLock.Lock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error) {
|
||||
header := block.MsgBlock().Header
|
||||
parentHashes := header.ParentHashes
|
||||
|
||||
nodes := newSet()
|
||||
for _, parentHash := range parentHashes {
|
||||
node := blockDAG.index.LookupNode(parentHash)
|
||||
if node == nil {
|
||||
str := fmt.Sprintf("parent block %s is unknown", parentHashes)
|
||||
return nil, ruleError(ErrParentBlockUnknown, str)
|
||||
} else if blockDAG.index.NodeStatus(node).KnownInvalid() {
|
||||
str := fmt.Sprintf("parent block %s is known to be invalid", parentHashes)
|
||||
return nil, ruleError(ErrInvalidAncestorBlock, str)
|
||||
}
|
||||
|
||||
nodes.add(node)
|
||||
}
|
||||
|
||||
return nodes, nil
|
||||
}
|
||||
@@ -1,143 +0,0 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"bou.ke/monkey"
|
||||
"github.com/daglabs/btcd/dagconfig"
|
||||
"github.com/daglabs/btcd/database"
|
||||
"github.com/daglabs/btcd/util"
|
||||
)
|
||||
|
||||
func TestMaybeAcceptBlockErrors(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
|
||||
DAGParams: &dagconfig.SimNetParams,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestMaybeAcceptBlockErrors: Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
dag.TestSetCoinbaseMaturity(0)
|
||||
|
||||
// Test rejecting the block if its parents are missing
|
||||
orphanBlockFile := "blk_3B.dat"
|
||||
loadedBlocks, err := LoadBlocks(filepath.Join("testdata/", orphanBlockFile))
|
||||
if err != nil {
|
||||
t.Fatalf("TestMaybeAcceptBlockErrors: "+
|
||||
"Error loading file '%s': %s\n", orphanBlockFile, err)
|
||||
}
|
||||
block := loadedBlocks[0]
|
||||
|
||||
err = dag.maybeAcceptBlock(block, BFNone)
|
||||
if err == nil {
|
||||
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
|
||||
"Expected: %s, got: <nil>", ErrParentBlockUnknown)
|
||||
}
|
||||
ruleErr, ok := err.(RuleError)
|
||||
if !ok {
|
||||
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
|
||||
"Expected RuleError but got %s", err)
|
||||
} else if ruleErr.ErrorCode != ErrParentBlockUnknown {
|
||||
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
|
||||
"Unexpected error code. Want: %s, got: %s", ErrParentBlockUnknown, ruleErr.ErrorCode)
|
||||
}
|
||||
|
||||
// Test rejecting the block if its parents are invalid
|
||||
blocksFile := "blk_0_to_4.dat"
|
||||
blocks, err := LoadBlocks(filepath.Join("testdata/", blocksFile))
|
||||
if err != nil {
|
||||
t.Fatalf("TestMaybeAcceptBlockErrors: "+
|
||||
"Error loading file '%s': %s\n", blocksFile, err)
|
||||
}
|
||||
|
||||
// Add a valid block and mark it as invalid
|
||||
block1 := blocks[1]
|
||||
isOrphan, delay, err := dag.ProcessBlock(block1, BFNone)
|
||||
if err != nil {
|
||||
t.Fatalf("TestMaybeAcceptBlockErrors: Valid block unexpectedly returned an error: %s", err)
|
||||
}
|
||||
if delay != 0 {
|
||||
t.Fatalf("TestMaybeAcceptBlockErrors: block 1 is too far in the future")
|
||||
}
|
||||
if isOrphan {
|
||||
t.Fatalf("TestMaybeAcceptBlockErrors: incorrectly returned block 1 is an orphan")
|
||||
}
|
||||
blockNode1 := dag.index.LookupNode(block1.Hash())
|
||||
dag.index.SetStatusFlags(blockNode1, statusValidateFailed)
|
||||
|
||||
block2 := blocks[2]
|
||||
err = dag.maybeAcceptBlock(block2, BFNone)
|
||||
if err == nil {
|
||||
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
|
||||
"Expected: %s, got: <nil>", ErrInvalidAncestorBlock)
|
||||
}
|
||||
ruleErr, ok = err.(RuleError)
|
||||
if !ok {
|
||||
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
|
||||
"Expected RuleError but got %s", err)
|
||||
} else if ruleErr.ErrorCode != ErrInvalidAncestorBlock {
|
||||
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
|
||||
"Unexpected error. Want: %s, got: %s", ErrInvalidAncestorBlock, ruleErr.ErrorCode)
|
||||
}
|
||||
|
||||
// Set block1's status back to valid for next tests
|
||||
dag.index.UnsetStatusFlags(blockNode1, statusValidateFailed)
|
||||
|
||||
// Test rejecting the block due to bad context
|
||||
originalBits := block2.MsgBlock().Header.Bits
|
||||
block2.MsgBlock().Header.Bits = 0
|
||||
err = dag.maybeAcceptBlock(block2, BFNone)
|
||||
if err == nil {
|
||||
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
|
||||
"Expected: %s, got: <nil>", ErrUnexpectedDifficulty)
|
||||
}
|
||||
ruleErr, ok = err.(RuleError)
|
||||
if !ok {
|
||||
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
|
||||
"Expected RuleError but got %s", err)
|
||||
} else if ruleErr.ErrorCode != ErrUnexpectedDifficulty {
|
||||
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
|
||||
"Unexpected error. Want: %s, got: %s", ErrUnexpectedDifficulty, ruleErr.ErrorCode)
|
||||
}
|
||||
|
||||
// Set block2's bits back to valid for next tests
|
||||
block2.MsgBlock().Header.Bits = originalBits
|
||||
|
||||
// Test rejecting the node due to database error
|
||||
databaseErrorMessage := "database error"
|
||||
guard := monkey.Patch(dbStoreBlock, func(dbTx database.Tx, block *util.Block) error {
|
||||
return errors.New(databaseErrorMessage)
|
||||
})
|
||||
defer guard.Unpatch()
|
||||
err = dag.maybeAcceptBlock(block2, BFNone)
|
||||
if err == nil {
|
||||
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+
|
||||
"Expected: %s, got: <nil>", databaseErrorMessage)
|
||||
}
|
||||
if !strings.Contains(err.Error(), databaseErrorMessage) {
|
||||
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+
|
||||
"Unexpected error. Want: %s, got: %s", databaseErrorMessage, err)
|
||||
}
|
||||
guard.Unpatch()
|
||||
|
||||
// Test rejecting the node due to index error
|
||||
indexErrorMessage := "index error"
|
||||
guard = monkey.Patch((*blockIndex).flushToDB, func(_ *blockIndex) error {
|
||||
return errors.New(indexErrorMessage)
|
||||
})
|
||||
defer guard.Unpatch()
|
||||
err = dag.maybeAcceptBlock(block2, BFNone)
|
||||
if err == nil {
|
||||
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
|
||||
"Expected %s, got: <nil>", indexErrorMessage)
|
||||
}
|
||||
if !strings.Contains(err.Error(), indexErrorMessage) {
|
||||
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
|
||||
"Unexpected error. Want: %s, got: %s", indexErrorMessage, err)
|
||||
}
|
||||
}
|
||||
@@ -1,136 +0,0 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/daglabs/btcd/database"
|
||||
"github.com/daglabs/btcd/util/daghash"
|
||||
)
|
||||
|
||||
var (
|
||||
// idByHashIndexBucketName is the name of the db bucket used to house
|
||||
// the block hash -> block id index.
|
||||
idByHashIndexBucketName = []byte("idbyhashidx")
|
||||
|
||||
// hashByIDIndexBucketName is the name of the db bucket used to house
|
||||
// the block id -> block hash index.
|
||||
hashByIDIndexBucketName = []byte("hashbyididx")
|
||||
|
||||
currentBlockIDKey = []byte("currentblockid")
|
||||
)
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// This is a mapping between block hashes and unique IDs. The ID
|
||||
// is simply a sequentially incremented uint64 that is used instead of the block hash
|
||||
// for the indexers. This is useful because it is only 8 bytes versus a 32-byte
|
||||
// hash, and thus saves a ton of space when a block is referenced in an index.
|
||||
// It consists of three buckets: the first bucket maps the hash of each
|
||||
// block to the unique ID and the second maps that ID back to the block hash.
|
||||
// The third bucket contains the last received block ID, and is used
|
||||
// when starting the node to check that the enabled indexes are up to date
|
||||
// with the latest received block and, if not, to initiate a recovery process.
|
||||
//
|
||||
// The serialized format for keys and values in the block hash to ID bucket is:
|
||||
// <hash> = <ID>
|
||||
//
|
||||
// Field Type Size
|
||||
// hash daghash.Hash 32 bytes
|
||||
// ID uint64 8 bytes
|
||||
// -----
|
||||
// Total: 40 bytes
|
||||
//
|
||||
// The serialized format for keys and values in the ID to block hash bucket is:
|
||||
// <ID> = <hash>
|
||||
//
|
||||
// Field Type Size
|
||||
// ID uint64 8 bytes
|
||||
// hash daghash.Hash 32 bytes
|
||||
// -----
|
||||
// Total: 40 bytes
|
||||
//
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
const blockIDSize = 8 // 8 bytes for block ID
|
||||
|
||||
// DBFetchBlockIDByHash uses an existing database transaction to retrieve the
|
||||
// block id for the provided hash from the index.
|
||||
func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error) {
|
||||
hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
|
||||
serializedID := hashIndex.Get(hash[:])
|
||||
if serializedID == nil {
|
||||
return 0, fmt.Errorf("no entry in the block ID index for block with hash %s", hash)
|
||||
}
|
||||
|
||||
return DeserializeBlockID(serializedID), nil
|
||||
}
|
||||
|
||||
// DBFetchBlockHashBySerializedID uses an existing database transaction to
|
||||
// retrieve the hash for the provided serialized block id from the index.
|
||||
func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
|
||||
idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
|
||||
hashBytes := idIndex.Get(serializedID)
|
||||
if hashBytes == nil {
|
||||
return nil, fmt.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
|
||||
}
|
||||
|
||||
var hash daghash.Hash
|
||||
copy(hash[:], hashBytes)
|
||||
return &hash, nil
|
||||
}
|
||||
|
||||
// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
|
||||
// the index entries for the hash to id and id to hash mappings for the provided
|
||||
// values.
|
||||
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, serializedID []byte) error {
|
||||
// Add the block hash to ID mapping to the index.
|
||||
meta := dbTx.Metadata()
|
||||
hashIndex := meta.Bucket(idByHashIndexBucketName)
|
||||
if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add the block ID to hash mapping to the index.
|
||||
idIndex := meta.Bucket(hashByIDIndexBucketName)
|
||||
return idIndex.Put(serializedID[:], hash[:])
|
||||
}
|
||||
|
||||
// DBFetchCurrentBlockID returns the last known block ID.
|
||||
func DBFetchCurrentBlockID(dbTx database.Tx) uint64 {
|
||||
serializedID := dbTx.Metadata().Get(currentBlockIDKey)
|
||||
if serializedID == nil {
|
||||
return 0
|
||||
}
|
||||
return DeserializeBlockID(serializedID)
|
||||
}
|
||||
|
||||
// DeserializeBlockID returns a deserialized block id
|
||||
func DeserializeBlockID(serializedID []byte) uint64 {
|
||||
return byteOrder.Uint64(serializedID)
|
||||
}
|
||||
|
||||
// SerializeBlockID returns a serialized block id
|
||||
func SerializeBlockID(blockID uint64) []byte {
|
||||
serializedBlockID := make([]byte, blockIDSize)
|
||||
byteOrder.PutUint64(serializedBlockID, blockID)
|
||||
return serializedBlockID
|
||||
}
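A quick sanity sketch of the round trip between the two helpers above (test-style, illustrative only):

// ExampleBlockIDRoundTrip shows that SerializeBlockID and
// DeserializeBlockID are inverses of each other.
func ExampleBlockIDRoundTrip() {
	serialized := SerializeBlockID(42) // always blockIDSize (8) bytes
	fmt.Println(DeserializeBlockID(serialized))
	// Output: 42
}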
|
||||
|
||||
// DBFetchBlockHashByID uses an existing database transaction to retrieve the
|
||||
// hash for the provided block id from the index.
|
||||
func DBFetchBlockHashByID(dbTx database.Tx, id uint64) (*daghash.Hash, error) {
|
||||
return DBFetchBlockHashBySerializedID(dbTx, SerializeBlockID(id))
|
||||
}
|
||||
|
||||
func createBlockID(dbTx database.Tx, blockHash *daghash.Hash) (uint64, error) {
|
||||
currentBlockID := DBFetchCurrentBlockID(dbTx)
|
||||
newBlockID := currentBlockID + 1
|
||||
serializedNewBlockID := SerializeBlockID(newBlockID)
|
||||
err := dbTx.Metadata().Put(currentBlockIDKey, serializedNewBlockID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = dbPutBlockIDIndexEntry(dbTx, blockHash, serializedNewBlockID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return newBlockID, nil
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"bou.ke/monkey"
|
||||
"github.com/daglabs/btcd/dagconfig"
|
||||
"github.com/daglabs/btcd/database"
|
||||
)
|
||||
|
||||
func TestAncestorErrors(t *testing.T) {
|
||||
node := newTestNode(newSet(), int32(0x10000000), 0, time.Unix(0, 0), dagconfig.MainNetParams.K)
|
||||
node.chainHeight = 2
|
||||
ancestor := node.SelectedAncestor(3)
|
||||
if ancestor != nil {
|
||||
t.Errorf("TestAncestorErrors: Ancestor() unexpectedly returned a node. Expected: <nil>")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlushToDBErrors(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestFlushToDBErrors", Config{
|
||||
DAGParams: &dagconfig.SimNetParams,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestFlushToDBErrors: Failed to setup DAG instance: %s", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
// Call flushToDB without anything to flush. This should succeed
|
||||
err = dag.index.flushToDB()
|
||||
if err != nil {
|
||||
t.Errorf("TestFlushToDBErrors: flushToDB without anything to flush: "+
|
||||
"Unexpected flushToDB error: %s", err)
|
||||
}
|
||||
|
||||
// Mark the genesis block as dirty
|
||||
dag.index.SetStatusFlags(dag.genesis, statusValid)
|
||||
|
||||
// Test flushToDB failure due to database error
|
||||
databaseErrorMessage := "database error"
|
||||
guard := monkey.Patch(dbStoreBlockNode, func(_ database.Tx, _ *blockNode) error {
|
||||
return errors.New(databaseErrorMessage)
|
||||
})
|
||||
defer guard.Unpatch()
|
||||
err = dag.index.flushToDB()
|
||||
if err == nil {
|
||||
t.Errorf("TestFlushToDBErrors: flushToDB failure due to database error: "+
|
||||
"Expected: %s, got: <nil>", databaseErrorMessage)
|
||||
}
|
||||
if !strings.Contains(err.Error(), databaseErrorMessage) {
|
||||
t.Errorf("TestFlushToDBErrors: flushToDB failure due to database error: "+
|
||||
"Unexpected flushToDB error. Expected: %s, got: %s", databaseErrorMessage, err)
|
||||
}
|
||||
}
|
||||
@@ -1,143 +0,0 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/daglabs/btcd/util"
|
||||
"github.com/daglabs/btcd/util/daghash"
|
||||
)
|
||||
|
||||
// BlockLocator is used to help locate a specific block. The algorithm for
|
||||
// building the block locator is to add block hashes in reverse order on the
|
||||
// block's selected parent chain until the desired stop block is reached.
|
||||
// In order to keep the list of locator hashes to a reasonable number of entries,
|
||||
// the step between each entry is doubled each loop iteration to exponentially
|
||||
// decrease the number of hashes as a function of the distance from the block
|
||||
// being located.
|
||||
//
|
||||
// For example, assume a selected parent chain with IDs as depicted below, and the
|
||||
// stop block is genesis:
|
||||
// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
|
||||
//
|
||||
// The block locator for block 17 would be the hashes of blocks:
|
||||
// [17 16 14 10 2 genesis]
|
||||
type BlockLocator []*daghash.Hash
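To make the doubling-step example concrete, a standalone sketch that reproduces the chain heights such a locator selects; the real hash-producing traversal is in blockLocator below:

// locatorHeights lists the chain heights a locator starting at startHeight
// would include down to stopHeight, doubling the step after each entry.
// It assumes startHeight >= stopHeight.
func locatorHeights(startHeight, stopHeight uint64) []uint64 {
	heights := []uint64{}
	step := uint64(1)
	for height := startHeight; ; {
		heights = append(heights, height)
		if height == stopHeight {
			break
		}
		// Clamp so the final entry is exactly stopHeight.
		if height < stopHeight+step {
			height = stopHeight
		} else {
			height -= step
		}
		step *= 2
	}
	return heights
}

// locatorHeights(17, 0) yields [17 16 14 10 2 0], matching the example above.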
|
||||
|
||||
// BlockLocatorFromHashes returns a block locator from start and stop hash.
|
||||
// See BlockLocator for details on the algorithm used to create a block locator.
|
||||
//
|
||||
// In addition to the general algorithm referenced above, this function will
|
||||
// return the block locator for the selected tip if the passed hash is not currently
|
||||
// known.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (dag *BlockDAG) BlockLocatorFromHashes(startHash, stopHash *daghash.Hash) BlockLocator {
|
||||
dag.dagLock.RLock()
|
||||
defer dag.dagLock.RUnlock()
|
||||
startNode := dag.index.LookupNode(startHash)
|
||||
var stopNode *blockNode
|
||||
if !stopHash.IsEqual(&daghash.ZeroHash) {
|
||||
stopNode = dag.index.LookupNode(stopHash)
|
||||
}
|
||||
return dag.blockLocator(startNode, stopNode)
|
||||
}
|
||||
|
||||
// LatestBlockLocator returns a block locator for the current tips of the DAG.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (dag *BlockDAG) LatestBlockLocator() BlockLocator {
|
||||
dag.dagLock.RLock()
|
||||
defer dag.dagLock.RUnlock()
|
||||
return dag.blockLocator(nil, nil)
|
||||
}
|
||||
|
||||
// blockLocator returns a block locator for the passed start and stop nodes.
|
||||
// The default value for the start node is the selected tip, and the default
|
||||
// value of the stop node is the genesis block.
|
||||
//
|
||||
// See the BlockLocator type comments for more details.
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for reads).
|
||||
func (dag *BlockDAG) blockLocator(startNode, stopNode *blockNode) BlockLocator {
|
||||
// Use the selected tip if requested.
|
||||
if startNode == nil {
|
||||
startNode = dag.virtual.selectedParent
|
||||
}
|
||||
|
||||
if stopNode == nil {
|
||||
stopNode = dag.genesis
|
||||
}
|
||||
|
||||
// We use the selected parent of the start node, so the
|
||||
// block locator won't contain the start node.
|
||||
startNode = startNode.selectedParent
|
||||
|
||||
// If the start node or the stop node are not in the
|
||||
// virtual's selected parent chain, we replace them with their
|
||||
// closest selected parent that is part of the virtual's
|
||||
// selected parent chain.
|
||||
for !dag.IsInSelectedParentChain(stopNode.hash) {
|
||||
stopNode = stopNode.selectedParent
|
||||
}
|
||||
|
||||
for !dag.IsInSelectedParentChain(startNode.hash) {
|
||||
startNode = startNode.selectedParent
|
||||
}
|
||||
|
||||
// Calculate the max number of entries that will ultimately be in the
|
||||
// block locator. See the description of the algorithm for how these
|
||||
// numbers are derived.
|
||||
|
||||
// 2 entries for startNode.hash and stopNode.hash.
|
||||
// Then floor(log2(startNode.chainHeight-stopNode.chainHeight)) entries for the skip portion.
|
||||
maxEntries := 2 + util.FastLog2Floor(startNode.chainHeight-stopNode.chainHeight)
|
||||
locator := make(BlockLocator, 0, maxEntries)
|
||||
|
||||
step := uint64(1)
|
||||
for node := startNode; node != nil; {
|
||||
locator = append(locator, node.hash)
|
||||
|
||||
// Nothing more to add once the stop node has been added.
|
||||
if node.chainHeight == stopNode.chainHeight {
|
||||
break
|
||||
}
|
||||
|
||||
// Calculate chainHeight of previous node to include ensuring the
|
||||
// final node is stopNode.
|
||||
nextChainHeight := node.chainHeight - step
|
||||
if nextChainHeight < stopNode.chainHeight {
|
||||
nextChainHeight = stopNode.chainHeight
|
||||
}
|
||||
|
||||
// walk backwards through the nodes to the correct ancestor.
|
||||
node = node.SelectedAncestor(nextChainHeight)
|
||||
|
||||
// Double the distance between included hashes.
|
||||
step *= 2
|
||||
}
|
||||
|
||||
return locator
|
||||
}
|
||||
|
||||
// FindNextLocatorBoundaries returns the lowest unknown block locator hash
|
||||
// and the highest known block locator hash. This is used to create the
|
||||
// next block locator to find the highest shared known chain block with the
|
||||
// sync peer.
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for reads).
|
||||
func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (startHash, stopHash *daghash.Hash) {
|
||||
// Find the most recent locator block hash in the DAG. In the case none of
|
||||
// the hashes in the locator are in the DAG, fall back to the genesis block.
|
||||
stopNode := dag.genesis
|
||||
nextBlockLocatorIndex := int64(len(locator) - 1)
|
||||
for i, hash := range locator {
|
||||
node := dag.index.LookupNode(hash)
|
||||
if node != nil {
|
||||
stopNode = node
|
||||
nextBlockLocatorIndex = int64(i) - 1
|
||||
break
|
||||
}
|
||||
}
|
||||
if nextBlockLocatorIndex < 0 {
|
||||
return nil, stopNode.hash
|
||||
}
|
||||
return locator[nextBlockLocatorIndex], stopNode.hash
|
||||
}
|
||||
@@ -1,86 +0,0 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestChainHeight(t *testing.T) {
|
||||
phantomK := uint32(2)
|
||||
buildNode := buildNodeGenerator(phantomK, true)
|
||||
|
||||
node0 := buildNode(setFromSlice())
|
||||
node1 := buildNode(setFromSlice(node0))
|
||||
node2 := buildNode(setFromSlice(node0))
|
||||
node3 := buildNode(setFromSlice(node0))
|
||||
node4 := buildNode(setFromSlice(node1, node2, node3))
|
||||
node5 := buildNode(setFromSlice(node1, node2, node3))
|
||||
node6 := buildNode(setFromSlice(node1, node2, node3))
|
||||
node7 := buildNode(setFromSlice(node0))
|
||||
node8 := buildNode(setFromSlice(node7))
|
||||
node9 := buildNode(setFromSlice(node8))
|
||||
node10 := buildNode(setFromSlice(node9, node6))
|
||||
|
||||
// Because nodes 7 & 8 were mined secretly, node10's selected
|
||||
// parent will be node6, although node9 is higher. So in this
|
||||
// case, node10.height and node10.chainHeight will be different
|
||||
|
||||
tests := []struct {
|
||||
node *blockNode
|
||||
expectedChainHeight uint64
|
||||
}{
|
||||
{
|
||||
node: node0,
|
||||
expectedChainHeight: 0,
|
||||
},
|
||||
{
|
||||
node: node1,
|
||||
expectedChainHeight: 1,
|
||||
},
|
||||
{
|
||||
node: node2,
|
||||
expectedChainHeight: 1,
|
||||
},
|
||||
{
|
||||
node: node3,
|
||||
expectedChainHeight: 1,
|
||||
},
|
||||
{
|
||||
node: node4,
|
||||
expectedChainHeight: 2,
|
||||
},
|
||||
{
|
||||
node: node5,
|
||||
expectedChainHeight: 2,
|
||||
},
|
||||
{
|
||||
node: node6,
|
||||
expectedChainHeight: 2,
|
||||
},
|
||||
{
|
||||
node: node7,
|
||||
expectedChainHeight: 1,
|
||||
},
|
||||
{
|
||||
node: node8,
|
||||
expectedChainHeight: 2,
|
||||
},
|
||||
{
|
||||
node: node9,
|
||||
expectedChainHeight: 3,
|
||||
},
|
||||
{
|
||||
node: node10,
|
||||
expectedChainHeight: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
if test.node.chainHeight != test.expectedChainHeight {
|
||||
t.Errorf("block %v expected chain height %v but got %v", test.node, test.expectedChainHeight, test.node.chainHeight)
|
||||
}
|
||||
if calculateChainHeight(test.node) != test.expectedChainHeight {
|
||||
t.Errorf("block %v expected calculated chain height %v but got %v", test.node, test.expectedChainHeight, test.node.chainHeight)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,162 +0,0 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/daglabs/btcd/util/daghash"
|
||||
)
|
||||
|
||||
// blockSet implements a basic unsorted set of blocks
|
||||
type blockSet map[daghash.Hash]*blockNode
|
||||
|
||||
// newSet creates a new, empty BlockSet
|
||||
func newSet() blockSet {
|
||||
return map[daghash.Hash]*blockNode{}
|
||||
}
|
||||
|
||||
// setFromSlice converts a slice of blocks into an unordered set represented as a map
|
||||
func setFromSlice(blocks ...*blockNode) blockSet {
|
||||
set := newSet()
|
||||
for _, block := range blocks {
|
||||
set.add(block)
|
||||
}
|
||||
return set
|
||||
}
|
||||
|
||||
// maxHeight returns the height of the highest block in the block set
|
||||
func (bs blockSet) maxHeight() uint64 {
|
||||
var maxHeight uint64
|
||||
for _, node := range bs {
|
||||
if maxHeight < node.height {
|
||||
maxHeight = node.height
|
||||
}
|
||||
}
|
||||
return maxHeight
|
||||
}
|
||||
|
||||
// add adds a block to this BlockSet
|
||||
func (bs blockSet) add(block *blockNode) {
|
||||
bs[*block.hash] = block
|
||||
}
|
||||
|
||||
// remove removes a block from this BlockSet, if it exists.
|
||||
// Does nothing if this set does not contain the block
|
||||
func (bs blockSet) remove(block *blockNode) {
|
||||
delete(bs, *block.hash)
|
||||
}
|
||||
|
||||
// clone clones this block set
|
||||
func (bs blockSet) clone() blockSet {
|
||||
clone := newSet()
|
||||
for _, block := range bs {
|
||||
clone.add(block)
|
||||
}
|
||||
return clone
|
||||
}
|
||||
|
||||
// subtract returns the difference between the BlockSet and another BlockSet
|
||||
func (bs blockSet) subtract(other blockSet) blockSet {
|
||||
diff := newSet()
|
||||
for _, block := range bs {
|
||||
if !other.contains(block) {
|
||||
diff.add(block)
|
||||
}
|
||||
}
|
||||
return diff
|
||||
}
|
||||
|
||||
// addSet adds all blocks in other set to this set
|
||||
func (bs blockSet) addSet(other blockSet) {
|
||||
for _, block := range other {
|
||||
bs.add(block)
|
||||
}
|
||||
}
|
||||
|
||||
// addSlice adds provided slice to this set
|
||||
func (bs blockSet) addSlice(slice []*blockNode) {
|
||||
for _, block := range slice {
|
||||
bs.add(block)
|
||||
}
|
||||
}
|
||||
|
||||
// union returns a BlockSet that contains all blocks included in this set,
|
||||
// the other set, or both
|
||||
func (bs blockSet) union(other blockSet) blockSet {
|
||||
union := bs.clone()
|
||||
|
||||
union.addSet(other)
|
||||
|
||||
return union
|
||||
}
|
||||
|
||||
// contains returns true iff this set contains block
|
||||
func (bs blockSet) contains(block *blockNode) bool {
|
||||
_, ok := bs[*block.hash]
|
||||
return ok
|
||||
}
|
||||
|
||||
// containsHash returns true iff this set contains a block hash
|
||||
func (bs blockSet) containsHash(hash *daghash.Hash) bool {
|
||||
_, ok := bs[*hash]
|
||||
return ok
|
||||
}
|
||||
|
||||
// hashesEqual returns true if the given hashes are equal to the hashes
|
||||
// of the blocks in this set.
|
||||
// NOTE: The given hash slice must not contain duplicates.
|
||||
func (bs blockSet) hashesEqual(hashes []*daghash.Hash) bool {
|
||||
if len(hashes) != len(bs) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, hash := range hashes {
|
||||
if _, wasFound := bs[*hash]; !wasFound {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// hashes returns the hashes of the blocks in this set.
|
||||
func (bs blockSet) hashes() []*daghash.Hash {
|
||||
hashes := make([]*daghash.Hash, 0, len(bs))
|
||||
for _, node := range bs {
|
||||
hashes = append(hashes, node.hash)
|
||||
}
|
||||
daghash.Sort(hashes)
|
||||
return hashes
|
||||
}
|
||||
|
||||
func (bs blockSet) String() string {
|
||||
nodeStrs := make([]string, 0, len(bs))
|
||||
for _, node := range bs {
|
||||
nodeStrs = append(nodeStrs, node.String())
|
||||
}
|
||||
return strings.Join(nodeStrs, ",")
|
||||
}
|
||||
|
||||
// anyChildInSet returns true iff any child of block is contained within this set
|
||||
func (bs blockSet) anyChildInSet(block *blockNode) bool {
|
||||
for _, child := range block.children {
|
||||
if bs.contains(child) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (bs blockSet) bluest() *blockNode {
|
||||
var bluestNode *blockNode
|
||||
var maxScore uint64
|
||||
for _, node := range bs {
|
||||
if bluestNode == nil ||
|
||||
node.blueScore > maxScore ||
|
||||
(node.blueScore == maxScore && daghash.Less(node.hash, bluestNode.hash)) {
|
||||
bluestNode = node
|
||||
maxScore = node.blueScore
|
||||
}
|
||||
}
|
||||
return bluestNode
|
||||
}
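A small illustrative sketch of the set operations above (the two nodes are placeholders):

// exampleBlockSetOps exercises the basic blockSet API.
func exampleBlockSetOps(a, b *blockNode) {
	set := setFromSlice(a)
	set.add(b) // set is now {a, b}
	onlyB := set.subtract(setFromSlice(a))
	_ = onlyB.contains(b)        // true
	_ = set.containsHash(a.hash) // true
}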
|
||||
@@ -1,257 +0,0 @@
|
||||
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"fmt"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/daghash"
)

// CheckpointConfirmations is the number of blocks before the end of the current
// best block chain that a good checkpoint candidate must be.
const CheckpointConfirmations = 2016

// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
	hash, _ := daghash.NewHashFromStr(hexStr)
	return hash
}

// newTxIDFromStr converts the passed big-endian hex string into a
// daghash.TxID. It only differs from the one available in daghash in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, IDs.
func newTxIDFromStr(hexStr string) *daghash.TxID {
	txID, _ := daghash.NewTxIDFromStr(hexStr)
	return txID
}

// Checkpoints returns a slice of checkpoints (regardless of whether they are
// already known). When there are no checkpoints for the chain, it will return
// nil.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) Checkpoints() []dagconfig.Checkpoint {
	return dag.checkpoints
}

// HasCheckpoints returns whether this BlockDAG has checkpoints defined.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) HasCheckpoints() bool {
	return len(dag.checkpoints) > 0
}

// LatestCheckpoint returns the most recent checkpoint (regardless of whether it
// is already known). When there are no defined checkpoints for the active chain
// instance, it will return nil.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) LatestCheckpoint() *dagconfig.Checkpoint {
	if !dag.HasCheckpoints() {
		return nil
	}
	return &dag.checkpoints[len(dag.checkpoints)-1]
}

// verifyCheckpoint returns whether the passed block chain height and hash combination
// match the checkpoint data. It also returns true if there is no checkpoint
// data for the passed block chain height.
func (dag *BlockDAG) verifyCheckpoint(chainHeight uint64, hash *daghash.Hash) bool {
	if !dag.HasCheckpoints() {
		return true
	}

	// Nothing to check if there is no checkpoint data for this chain height.
	checkpoint, exists := dag.checkpointsByChainHeight[chainHeight]
	if !exists {
		return true
	}

	if !checkpoint.Hash.IsEqual(hash) {
		return false
	}

	log.Infof("Verified checkpoint at chainHeight %d/block %s", checkpoint.ChainHeight,
		checkpoint.Hash)
	return true
}

// findPreviousCheckpoint finds the most recent checkpoint that is already
// available in the downloaded portion of the block chain and returns the
// associated block node. It returns nil if a checkpoint can't be found (this
// should really only happen for blocks before the first checkpoint).
//
// This function MUST be called with the DAG lock held (for reads).
func (dag *BlockDAG) findPreviousCheckpoint() (*blockNode, error) {
	if !dag.HasCheckpoints() {
		return nil, nil
	}

	// Perform the initial search to find and cache the latest known
	// checkpoint if the best chain is not known yet or we haven't already
	// previously searched.
	checkpoints := dag.checkpoints
	numCheckpoints := len(checkpoints)
	if dag.checkpointNode == nil && dag.nextCheckpoint == nil {
		// Loop backwards through the available checkpoints to find one
		// that is already available.
		for i := numCheckpoints - 1; i >= 0; i-- {
			node := dag.index.LookupNode(checkpoints[i].Hash)
			if node == nil {
				continue
			}

			// Checkpoint found. Cache it for future lookups and
			// set the next expected checkpoint accordingly.
			dag.checkpointNode = node
			if i < numCheckpoints-1 {
				dag.nextCheckpoint = &checkpoints[i+1]
			}
			return dag.checkpointNode, nil
		}

		// No known latest checkpoint. This will only happen on blocks
		// before the first known checkpoint. So, set the next expected
		// checkpoint to the first checkpoint and return the fact there
		// is no latest known checkpoint block.
		dag.nextCheckpoint = &checkpoints[0]
		return nil, nil
	}

	// At this point we've already searched for the latest known checkpoint,
	// so when there is no next checkpoint, the current checkpoint lockin
	// will always be the latest known checkpoint.
	if dag.nextCheckpoint == nil {
		return dag.checkpointNode, nil
	}

	// When there is a next checkpoint and the chain height of the current
	// selected tip of the DAG does not exceed it, the current checkpoint
	// lockin is still the latest known checkpoint.
	if dag.selectedTip().chainHeight < dag.nextCheckpoint.ChainHeight {
		return dag.checkpointNode, nil
	}

	// We've reached or exceeded the next checkpoint height. Note that
	// once a checkpoint lockin has been reached, forks are prevented from
	// any blocks before the checkpoint, so we don't have to worry about the
	// checkpoint going away out from under us due to a chain reorganize.

	// Cache the latest known checkpoint for future lookups. Note that if
	// this lookup fails something is very wrong since the chain has already
	// passed the checkpoint which was verified as accurate before inserting
	// it.
	checkpointNode := dag.index.LookupNode(dag.nextCheckpoint.Hash)
	if checkpointNode == nil {
		return nil, AssertError(fmt.Sprintf("findPreviousCheckpoint "+
			"failed lookup of known good block node %s",
			dag.nextCheckpoint.Hash))
	}
	dag.checkpointNode = checkpointNode

	// Set the next expected checkpoint.
	checkpointIndex := -1
	for i := numCheckpoints - 1; i >= 0; i-- {
		if checkpoints[i].Hash.IsEqual(dag.nextCheckpoint.Hash) {
			checkpointIndex = i
			break
		}
	}
	dag.nextCheckpoint = nil
	if checkpointIndex != -1 && checkpointIndex < numCheckpoints-1 {
		dag.nextCheckpoint = &checkpoints[checkpointIndex+1]
	}

	return dag.checkpointNode, nil
}

// isNonstandardTransaction determines whether a transaction contains any
// scripts which are not one of the standard types.
func isNonstandardTransaction(tx *util.Tx) bool {
	// Check all of the output public key scripts for non-standard scripts.
	for _, txOut := range tx.MsgTx().TxOut {
		scriptClass := txscript.GetScriptClass(txOut.ScriptPubKey)
		if scriptClass == txscript.NonStandardTy {
			return true
		}
	}
	return false
}

// IsCheckpointCandidate returns whether or not the passed block is a good
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
//  - The block must be in the main chain
//  - The block must be at least 'CheckpointConfirmations' blocks prior to the
//    current end of the main chain
//  - The blocks before and after the checkpoint must have timestamps which are
//    also before and after the checkpoint, respectively (due to the median
//    time allowance this is not always the case)
//  - The block must not contain any strange transactions such as those with
//    nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
	dag.dagLock.RLock()
	defer dag.dagLock.RUnlock()

	// A checkpoint must be in the DAG.
	node := dag.index.LookupNode(block.Hash())
	if node == nil {
		return false, nil
	}

	// Ensure the chain height of the passed block and the entry for the block
	// in the DAG match. This should always be the case unless the
	// caller provided an invalid block.
	if node.chainHeight != block.ChainHeight() {
		return false, fmt.Errorf("passed block chain height of %d does not "+
			"match its height in the DAG: %d", block.ChainHeight(),
			node.chainHeight)
	}

	// A checkpoint must be at least CheckpointConfirmations blocks
	// before the end of the main chain.
	dagChainHeight := dag.selectedTip().chainHeight
	if node.chainHeight > (dagChainHeight - CheckpointConfirmations) {
		return false, nil
	}

	// A checkpoint must have at least one block after it.
	//
	// This should always succeed since the check above already made sure it
	// is CheckpointConfirmations back, but be safe in case the constant
	// changes.
	if len(node.children) == 0 {
		return false, nil
	}

	// A checkpoint must have at least one block before it.
	if node.selectedParent == nil {
		return false, nil
	}

	// A checkpoint must have transactions that only contain standard
	// scripts.
	for _, tx := range block.Transactions() {
		if isNonstandardTransaction(tx) {
			return false, nil
		}
	}

	// All of the checks passed, so the block is a candidate.
	return true, nil
}
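
// Illustrative sketch (not part of the original file): how a developer-side
// scan might use IsCheckpointCandidate to shortlist blocks for manual review.
// The dag variable and the candidateBlocks slice are assumed to exist in the
// caller's context.
//
//	for _, block := range candidateBlocks {
//		isCandidate, err := dag.IsCheckpointCandidate(block)
//		if err != nil {
//			return err
//		}
//		if isCandidate {
//			log.Infof("Checkpoint candidate: %s (chain height %d)",
//				block.Hash(), block.ChainHeight())
//		}
//	}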
@@ -1,279 +0,0 @@
package blockdag

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"math"

	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/util/subnetworkid"
	"github.com/daglabs/btcd/util/txsort"
	"github.com/daglabs/btcd/wire"
)

// compactFeeData is a specialized data type to store a compact list of fees
// inside a block.
// Every transaction gets a single uint64 value, stored as a plain binary list.
// The transactions are ordered the same way they are ordered inside the block, making it easy
// to traverse every transaction in a block and extract its fee.
//
// compactFeeFactory is used to create such a list.
// compactFeeIterator is used to iterate over such a list.

type compactFeeData []byte

func (cfd compactFeeData) Len() int {
	return len(cfd) / 8
}

type compactFeeFactory struct {
	buffer *bytes.Buffer
	writer *bufio.Writer
}

func newCompactFeeFactory() *compactFeeFactory {
	buffer := bytes.NewBuffer([]byte{})
	return &compactFeeFactory{
		buffer: buffer,
		writer: bufio.NewWriter(buffer),
	}
}

func (cfw *compactFeeFactory) add(txFee uint64) error {
	return binary.Write(cfw.writer, binary.LittleEndian, txFee)
}

func (cfw *compactFeeFactory) data() (compactFeeData, error) {
	err := cfw.writer.Flush()

	return compactFeeData(cfw.buffer.Bytes()), err
}

type compactFeeIterator struct {
	reader io.Reader
}

func (cfd compactFeeData) iterator() *compactFeeIterator {
	return &compactFeeIterator{
		reader: bufio.NewReader(bytes.NewBuffer(cfd)),
	}
}

func (cfr *compactFeeIterator) next() (uint64, error) {
	var txFee uint64

	err := binary.Read(cfr.reader, binary.LittleEndian, &txFee)

	return txFee, err
}
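
// Illustrative sketch (not part of the original file): a round trip through
// compactFeeFactory and compactFeeIterator. Each fee is written as a fixed
// 8-byte little-endian uint64, so Len() recovers the transaction count.
//
//	factory := newCompactFeeFactory()
//	for _, fee := range []uint64{1000, 0, 2500} {
//		if err := factory.add(fee); err != nil {
//			return err
//		}
//	}
//	feeData, err := factory.data() // 24 bytes, feeData.Len() == 3
//	if err != nil {
//		return err
//	}
//	iterator := feeData.iterator()
//	for i := 0; i < feeData.Len(); i++ {
//		fee, err := iterator.next() // yields 1000, 0, 2500 in order
//		...
//	}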

// The following functions relate to storing and retrieving fee data from the database
var feeBucket = []byte("fees")

// getBluesFeeData returns the compactFeeData for all of the node's blues,
// used to calculate the fees this blockNode needs to pay
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
	bluesFeeData := make(map[daghash.Hash]compactFeeData)

	err := dag.db.View(func(dbTx database.Tx) error {
		for _, blueBlock := range node.blues {
			feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
			if err != nil {
				return fmt.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
			}

			bluesFeeData[*blueBlock.hash] = feeData
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	return bluesFeeData, nil
}

func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
	feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
	if err != nil {
		return fmt.Errorf("Error creating or retrieving fee bucket: %s", err)
	}

	return feeBucket.Put(blockHash.CloneBytes(), feeData)
}

func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
	feeBucket := dbTx.Metadata().Bucket(feeBucket)
	if feeBucket == nil {
		return nil, errors.New("Fee bucket does not exist")
	}

	feeData := feeBucket.Get(blockHash.CloneBytes())
	if feeData == nil {
		return nil, fmt.Errorf("No fee data found for block %s", blockHash)
	}

	return feeData, nil
}

// The following functions deal with building and validating the coinbase transaction

func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
	if node.isGenesis() {
		return nil
	}
	blockCoinbaseTx := block.CoinbaseTransaction().MsgTx()
	scriptPubKey, extraData, err := DeserializeCoinbasePayload(blockCoinbaseTx)
	if err != nil {
		return err
	}
	expectedCoinbaseTransaction, err := node.expectedCoinbaseTransaction(dag, txsAcceptanceData, scriptPubKey, extraData)
	if err != nil {
		return err
	}

	if !expectedCoinbaseTransaction.Hash().IsEqual(block.CoinbaseTransaction().Hash()) {
		return ruleError(ErrBadCoinbaseTransaction, "Coinbase transaction is not built as expected")
	}

	return nil
}

// expectedCoinbaseTransaction returns the coinbase transaction for the current block
func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData, scriptPubKey []byte, extraData []byte) (*util.Tx, error) {
	bluesFeeData, err := node.getBluesFeeData(dag)
	if err != nil {
		return nil, err
	}

	txIns := []*wire.TxIn{}
	txOuts := []*wire.TxOut{}

	for _, blue := range node.blues {
		txIn, txOut, err := coinbaseInputAndOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
		if err != nil {
			return nil, err
		}
		txIns = append(txIns, txIn)
		if txOut != nil {
			txOuts = append(txOuts, txOut)
		}
	}
	payload, err := SerializeCoinbasePayload(scriptPubKey, extraData)
	if err != nil {
		return nil, err
	}
	coinbaseTx := wire.NewSubnetworkMsgTx(wire.TxVersion, txIns, txOuts, subnetworkid.SubnetworkIDCoinbase, 0, payload)
	sortedCoinbaseTx := txsort.Sort(coinbaseTx)
	return util.NewTx(sortedCoinbaseTx), nil
}

// SerializeCoinbasePayload builds the coinbase payload based on the provided scriptPubKey and extra data.
func SerializeCoinbasePayload(scriptPubKey []byte, extraData []byte) ([]byte, error) {
	w := &bytes.Buffer{}
	err := wire.WriteVarInt(w, uint64(len(scriptPubKey)))
	if err != nil {
		return nil, err
	}
	_, err = w.Write(scriptPubKey)
	if err != nil {
		return nil, err
	}
	_, err = w.Write(extraData)
	if err != nil {
		return nil, err
	}
	return w.Bytes(), nil
}

// DeserializeCoinbasePayload deserializes the coinbase payload into its
// components (scriptPubKey and extra data).
func DeserializeCoinbasePayload(tx *wire.MsgTx) (scriptPubKey []byte, extraData []byte, err error) {
	r := bytes.NewReader(tx.Payload)
	scriptPubKeyLen, err := wire.ReadVarInt(r)
	if err != nil {
		return nil, nil, err
	}
	scriptPubKey = make([]byte, scriptPubKeyLen)
	_, err = r.Read(scriptPubKey)
	if err != nil {
		return nil, nil, err
	}
	extraData = make([]byte, r.Len())
	if r.Len() != 0 {
		_, err = r.Read(extraData)
		if err != nil {
			return nil, nil, err
		}
	}
	return scriptPubKey, extraData, nil
}
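
// Illustrative sketch (not part of the original file): the payload written by
// SerializeCoinbasePayload is <varint scriptPubKey length><scriptPubKey><extra
// data>, and DeserializeCoinbasePayload reverses it. A round trip, assuming a
// hypothetical msgTx whose Payload field was produced by the serializer:
//
//	payload, err := SerializeCoinbasePayload(scriptPubKey, []byte("extra"))
//	if err != nil {
//		return err
//	}
//	msgTx.Payload = payload
//	gotScriptPubKey, gotExtraData, err := DeserializeCoinbasePayload(msgTx)
//	// gotScriptPubKey equals scriptPubKey, gotExtraData equals []byte("extra")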

// coinbaseInputAndOutputForBlueBlock calculates the input and output that should go into the coinbase transaction of blueBlock.
// If blueBlock gets no reward, it returns only the txIn and nil for the txOut.
func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
	txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
	*wire.TxIn, *wire.TxOut, error) {

	blockTxsAcceptanceData, ok := txsAcceptanceData[*blueBlock.hash]
	if !ok {
		return nil, nil, fmt.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
	}
	blockFeeData, ok := feeData[*blueBlock.hash]
	if !ok {
		return nil, nil, fmt.Errorf("No feeData for block %s", blueBlock.hash)
	}

	if len(blockTxsAcceptanceData) != blockFeeData.Len() {
		return nil, nil, fmt.Errorf(
			"length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
			len(blockTxsAcceptanceData), blockFeeData.Len(), blueBlock.hash)
	}

	txIn := &wire.TxIn{
		SignatureScript: []byte{},
		PreviousOutpoint: wire.Outpoint{
			TxID:  daghash.TxID(*blueBlock.hash),
			Index: math.MaxUint32,
		},
		Sequence: wire.MaxTxInSequenceNum,
	}

	totalFees := uint64(0)
	feeIterator := blockFeeData.iterator()

	for _, txAcceptanceData := range blockTxsAcceptanceData {
		fee, err := feeIterator.next()
		if err != nil {
			return nil, nil, fmt.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
		}
		if txAcceptanceData.IsAccepted {
			totalFees += fee
		}
	}

	totalReward := CalcBlockSubsidy(blueBlock.height, dag.dagParams) + totalFees

	if totalReward == 0 {
		return txIn, nil, nil
	}

	// the ScriptPubKey for the coinbase is parsed from the coinbase payload
	scriptPubKey, _, err := DeserializeCoinbasePayload(blockTxsAcceptanceData[0].Tx.MsgTx())
	if err != nil {
		return nil, nil, err
	}

	txOut := &wire.TxOut{
		Value:        totalReward,
		ScriptPubKey: scriptPubKey,
	}

	return txIn, txOut, nil
}
@@ -1,211 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"compress/bzip2"
	"encoding/binary"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/daglabs/btcd/dagconfig"
	_ "github.com/daglabs/btcd/database/ffldb"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/wire"
)

func loadBlocksWithLog(t *testing.T, filename string) ([]*util.Block, error) {
	blocks, err := LoadBlocks(filename)
	if err == nil {
		t.Logf("Loaded %d blocks from file %s", len(blocks), filename)
		for i, b := range blocks {
			t.Logf("Block #%d: %s", i, b.Hash())
		}
	}
	return blocks, err
}

// loadUTXOSet returns a utxo view loaded from a file.
func loadUTXOSet(filename string) (UTXOSet, error) {
	// The utxostore file format is:
	// <tx hash><output index><serialized utxo len><serialized utxo>
	//
	// The output index and serialized utxo len are little endian uint32s
	// and the serialized utxo uses the format described in dagio.go.

	filename = filepath.Join("testdata", filename)
	fi, err := os.Open(filename)
	if err != nil {
		return nil, err
	}

	// Choose read based on whether the file is compressed or not.
	var r io.Reader
	if strings.HasSuffix(filename, ".bz2") {
		r = bzip2.NewReader(fi)
	} else {
		r = fi
	}
	defer fi.Close()

	utxoSet := NewFullUTXOSet()
	for {
		// Tx ID of the utxo entry.
		var txID daghash.TxID
		_, err := io.ReadAtLeast(r, txID[:], len(txID[:]))
		if err != nil {
			// Expected EOF at the right offset.
			if err == io.EOF {
				break
			}
			return nil, err
		}

		// Output index of the utxo entry.
		var index uint32
		err = binary.Read(r, binary.LittleEndian, &index)
		if err != nil {
			return nil, err
		}

		// Num of serialized utxo entry bytes.
		var numBytes uint32
		err = binary.Read(r, binary.LittleEndian, &numBytes)
		if err != nil {
			return nil, err
		}

		// Serialized utxo entry.
		serialized := make([]byte, numBytes)
		_, err = io.ReadAtLeast(r, serialized, int(numBytes))
		if err != nil {
			return nil, err
		}

		// Deserialize it and add it to the view.
		entry, err := deserializeUTXOEntry(serialized)
		if err != nil {
			return nil, err
		}
		utxoSet.utxoCollection[wire.Outpoint{TxID: txID, Index: index}] = entry
	}

	return utxoSet, nil
}

// TestSetCoinbaseMaturity makes the ability to set the coinbase maturity
// available when running tests.
func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
	dag.dagParams.BlockCoinbaseMaturity = maturity
}

// newTestDAG returns a DAG that is usable for synthetic tests. It is
// important to note that this DAG has no database associated with it, so
// it is not usable with all functions and the tests must take care when making
// use of it.
func newTestDAG(params *dagconfig.Params) *BlockDAG {
	// Create a genesis block node and a block index populated with it
	// for use when creating the fake DAG below.
	node := newBlockNode(&params.GenesisBlock.Header, newSet(), params.K)
	index := newBlockIndex(nil, params)
	index.AddNode(node)

	targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
	return &BlockDAG{
		dagParams:                      params,
		timeSource:                     NewMedianTime(),
		targetTimePerBlock:             targetTimePerBlock,
		difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
		TimestampDeviationTolerance:    params.TimestampDeviationTolerance,
		powMaxBits:                     util.BigToCompact(params.PowMax),
		index:                          index,
		virtual:                        newVirtualBlock(setFromSlice(node), params.K),
		genesis:                        index.LookupNode(params.GenesisHash),
		warningCaches:                  newThresholdCaches(vbNumBits),
		deploymentCaches:               newThresholdCaches(dagconfig.DefinedDeployments),
	}
}

// newTestNode creates a block node connected to the passed parent with the
// provided fields populated and fake values for the other fields.
func newTestNode(parents blockSet, blockVersion int32, bits uint32, timestamp time.Time, phantomK uint32) *blockNode {
	// Make up a header and create a block node from it.
	header := &wire.BlockHeader{
		Version:              blockVersion,
		ParentHashes:         parents.hashes(),
		Bits:                 bits,
		Timestamp:            timestamp,
		HashMerkleRoot:       &daghash.ZeroHash,
		AcceptedIDMerkleRoot: &daghash.ZeroHash,
		UTXOCommitment:       &daghash.ZeroHash,
	}
	return newBlockNode(header, parents, phantomK)
}

func addNodeAsChildToParents(node *blockNode) {
	for _, parent := range node.parents {
		parent.children.add(node)
	}
}

func buildNodeGenerator(phantomK uint32, withChildren bool) func(parents blockSet) *blockNode {
	// For the purposes of these tests, we'll create blockNodes whose hashes are a
	// series of numbers from 1 to 255.
	hashCounter := byte(1)
	buildNode := func(parents blockSet) *blockNode {
		block := newBlockNode(nil, parents, phantomK)
		block.hash = &daghash.Hash{hashCounter}
		hashCounter++

		return block
	}
	if withChildren {
		return func(parents blockSet) *blockNode {
			node := buildNode(parents)
			addNodeAsChildToParents(node)
			return node
		}
	}
	return buildNode
}
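
// Illustrative sketch (not part of the original file): tests typically grow a
// small DAG by hand with buildNodeGenerator. This assumes setFromSlice accepts
// zero or more nodes, as its usage elsewhere in this package suggests:
//
//	buildNode := buildNodeGenerator(1, true)
//	node1 := buildNode(setFromSlice())      // hash 0x01..., no parents
//	node2 := buildNode(setFromSlice(node1)) // hash 0x02..., child of node1
//	// node1.children now contains node2 because withChildren was true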

// checkRuleError ensures the two passed errors are of the same type (either
// both nil or both of type RuleError) and that their error codes match when
// not nil.
func checkRuleError(gotErr, wantErr error) error {
	// Ensure the error code is of the expected type and the error
	// code matches the value specified in the test instance.
	if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) {
		return fmt.Errorf("wrong error - got %T (%[1]v), want %T",
			gotErr, wantErr)
	}
	if gotErr == nil {
		return nil
	}

	// Ensure the want error type is a rule error.
	werr, ok := wantErr.(RuleError)
	if !ok {
		return fmt.Errorf("unexpected test error type %T", wantErr)
	}

	// Ensure the error codes match. It's safe to use a raw type assert
	// here since the code above already proved they are the same type and
	// the want error is a rule error.
	gotErrorCode := gotErr.(RuleError).ErrorCode
	if gotErrorCode != werr.ErrorCode {
		return fmt.Errorf("mismatched error code - got %v (%v), want %v",
			gotErrorCode, gotErr, werr.ErrorCode)
	}

	return nil
}
@@ -1,586 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"github.com/daglabs/btcd/btcec"
	"github.com/daglabs/btcd/txscript"
)

// -----------------------------------------------------------------------------
// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
// of binary octets to represent an arbitrarily large integer. The scheme
// employs a most significant byte (MSB) base-128 encoding where the high bit in
// each byte indicates whether or not the byte is the final one. In addition,
// to ensure there are no redundant encodings, an offset is subtracted every
// time a group of 7 bits is shifted out. Therefore each integer can be
// represented in exactly one way, and each representation stands for exactly
// one integer.
//
// Another nice property of this encoding is that it provides a compact
// representation of values that are typically used to indicate sizes. For
// example, the values 0 - 127 are represented with a single byte, 128 - 16511
// with two bytes, and 16512 - 2113663 with three bytes.
//
// While the encoding allows arbitrarily large integers, it is artificially
// limited in this code to an unsigned 64-bit integer for efficiency purposes.
//
// Example encodings:
//           0 -> [0x00]
//         127 -> [0x7f]                 * Max 1-byte value
//         128 -> [0x80 0x00]
//         129 -> [0x80 0x01]
//         255 -> [0x80 0x7f]
//         256 -> [0x81 0x00]
//       16511 -> [0xff 0x7f]            * Max 2-byte value
//       16512 -> [0x80 0x80 0x00]
//       32895 -> [0x80 0xff 0x7f]
//     2113663 -> [0xff 0xff 0x7f]       * Max 3-byte value
//   270549119 -> [0xff 0xff 0xff 0x7f]  * Max 4-byte value
//      2^64-1 -> [0x80 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0x7f]
//
// References:
//   https://en.wikipedia.org/wiki/Variable-length_quantity
//   http://www.codecodex.com/wiki/Variable-Length_Integers
// -----------------------------------------------------------------------------

// serializeSizeVLQ returns the number of bytes it would take to serialize the
// passed number as a variable-length quantity according to the format described
// above.
func serializeSizeVLQ(n uint64) int {
	size := 1
	for ; n > 0x7f; n = (n >> 7) - 1 {
		size++
	}

	return size
}

// putVLQ serializes the provided number to a variable-length quantity according
// to the format described above and returns the number of bytes of the encoded
// value. The result is placed directly into the passed byte slice which must
// be at least large enough to handle the number of bytes returned by the
// serializeSizeVLQ function or it will panic.
func putVLQ(target []byte, n uint64) int {
	offset := 0
	for ; ; offset++ {
		// The high bit is set when another byte follows.
		highBitMask := byte(0x80)
		if offset == 0 {
			highBitMask = 0x00
		}

		target[offset] = byte(n&0x7f) | highBitMask
		if n <= 0x7f {
			break
		}
		n = (n >> 7) - 1
	}

	// Reverse the bytes so it is MSB-encoded.
	for i, j := 0, offset; i < j; i, j = i+1, j-1 {
		target[i], target[j] = target[j], target[i]
	}

	return offset + 1
}

// deserializeVLQ deserializes the provided variable-length quantity according
// to the format described above. It also returns the number of bytes
// deserialized.
func deserializeVLQ(serialized []byte) (uint64, int) {
	var n uint64
	var size int
	for _, val := range serialized {
		size++
		n = (n << 7) | uint64(val&0x7f)
		if val&0x80 != 0x80 {
			break
		}
		n++
	}

	return n, size
}
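
// Illustrative sketch (not part of the original file): a VLQ round trip for
// the value 16512, the smallest 3-byte value. Because an offset is subtracted
// per 7-bit group, 16512 encodes as [0x80 0x80 0x00] rather than the plain
// base-128 [0x81 0x81 0x00]:
//
//	n := uint64(16512)
//	buf := make([]byte, serializeSizeVLQ(n)) // 3 bytes
//	bytesWritten := putVLQ(buf, n)           // buf == []byte{0x80, 0x80, 0x00}
//	decoded, bytesRead := deserializeVLQ(buf)
//	// decoded == 16512, bytesWritten == bytesRead == 3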

// -----------------------------------------------------------------------------
// In order to reduce the size of stored scripts, a domain specific compression
// algorithm is used which recognizes standard scripts and stores them using
// fewer bytes than the original script. The compression algorithm used here was
// obtained from Bitcoin Core, so all credits for the algorithm go to it.
//
// The general serialized format is:
//
//   <script size or type><script data>
//
//   Field                 Type     Size
//   script size or type   VLQ      variable
//   script data           []byte   variable
//
// The specific serialized format for each recognized standard script is:
//
//  - Pay-to-pubkey-hash: (21 bytes) - <0><20-byte pubkey hash>
//  - Pay-to-script-hash: (21 bytes) - <1><20-byte script hash>
//  - Pay-to-pubkey**:    (33 bytes) - <2, 3, 4, or 5><32-byte pubkey X value>
//    2, 3 = compressed pubkey with bit 0 specifying the y coordinate to use
//    4, 5 = uncompressed pubkey with bit 0 specifying the y coordinate to use
//  ** Only valid public keys starting with 0x02, 0x03, and 0x04 are supported.
//
// Any scripts which are not recognized as one of the aforementioned standard
// scripts are encoded using the general serialized format and encode the script
// size as the sum of the actual size of the script and the number of special
// cases.
// -----------------------------------------------------------------------------

// The following constants specify the special constants used to identify a
// special script type in the domain-specific compressed script encoding.
//
// NOTE: This section specifically does not use iota since these values are
// serialized and must be stable for long-term storage.
const (
	// cstPayToPubKeyHash identifies a compressed pay-to-pubkey-hash script.
	cstPayToPubKeyHash = 0

	// cstPayToScriptHash identifies a compressed pay-to-script-hash script.
	cstPayToScriptHash = 1

	// cstPayToPubKeyComp2 identifies a compressed pay-to-pubkey script to
	// a compressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyComp2 = 2

	// cstPayToPubKeyComp3 identifies a compressed pay-to-pubkey script to
	// a compressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyComp3 = 3

	// cstPayToPubKeyUncomp4 identifies a compressed pay-to-pubkey script to
	// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyUncomp4 = 4

	// cstPayToPubKeyUncomp5 identifies a compressed pay-to-pubkey script to
	// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyUncomp5 = 5

	// numSpecialScripts is the number of special scripts recognized by the
	// domain-specific script compression algorithm.
	numSpecialScripts = 6
)

// isPubKeyHash returns whether or not the passed public key script is a
// standard pay-to-pubkey-hash script along with the pubkey hash it is paying to
// if it is.
func isPubKeyHash(script []byte) (bool, []byte) {
	if len(script) == 25 && script[0] == txscript.OpDup &&
		script[1] == txscript.OpHash160 &&
		script[2] == txscript.OpData20 &&
		script[23] == txscript.OpEqualVerify &&
		script[24] == txscript.OpCheckSig {

		return true, script[3:23]
	}

	return false, nil
}

// isScriptHash returns whether or not the passed public key script is a
// standard pay-to-script-hash script along with the script hash it is paying to
// if it is.
func isScriptHash(script []byte) (bool, []byte) {
	if len(script) == 23 && script[0] == txscript.OpHash160 &&
		script[1] == txscript.OpData20 &&
		script[22] == txscript.OpEqual {

		return true, script[2:22]
	}

	return false, nil
}

// isPubKey returns whether or not the passed public key script is a standard
// pay-to-pubkey script that pays to a valid compressed or uncompressed public
// key along with the serialized pubkey it is paying to if it is.
//
// NOTE: This function ensures the public key is actually valid since the
// compression algorithm requires valid pubkeys. It does not support hybrid
// pubkeys. This means that even if the script has the correct form for a
// pay-to-pubkey script, this function will only return true when it is paying
// to a valid compressed or uncompressed pubkey.
func isPubKey(script []byte) (bool, []byte) {
	// Pay-to-compressed-pubkey script.
	if len(script) == 35 && script[0] == txscript.OpData33 &&
		script[34] == txscript.OpCheckSig && (script[1] == 0x02 ||
		script[1] == 0x03) {

		// Ensure the public key is valid.
		serializedPubKey := script[1:34]
		_, err := btcec.ParsePubKey(serializedPubKey, btcec.S256())
		if err == nil {
			return true, serializedPubKey
		}
	}

	// Pay-to-uncompressed-pubkey script.
	if len(script) == 67 && script[0] == txscript.OpData65 &&
		script[66] == txscript.OpCheckSig && script[1] == 0x04 {

		// Ensure the public key is valid.
		serializedPubKey := script[1:66]
		_, err := btcec.ParsePubKey(serializedPubKey, btcec.S256())
		if err == nil {
			return true, serializedPubKey
		}
	}

	return false, nil
}

// compressedScriptSize returns the number of bytes the passed script would take
// when encoded with the domain specific compression algorithm described above.
func compressedScriptSize(scriptPubKey []byte) int {
	// Pay-to-pubkey-hash script.
	if valid, _ := isPubKeyHash(scriptPubKey); valid {
		return 21
	}

	// Pay-to-script-hash script.
	if valid, _ := isScriptHash(scriptPubKey); valid {
		return 21
	}

	// Pay-to-pubkey (compressed or uncompressed) script.
	if valid, _ := isPubKey(scriptPubKey); valid {
		return 33
	}

	// When none of the above special cases apply, encode the script as is
	// preceded by the sum of its size and the number of special cases
	// encoded as a variable length quantity.
	return serializeSizeVLQ(uint64(len(scriptPubKey)+numSpecialScripts)) +
		len(scriptPubKey)
}

// decodeCompressedScriptSize treats the passed serialized bytes as a compressed
// script, possibly followed by other data, and returns the number of bytes it
// occupies taking into account the special encoding of the script size by the
// domain specific compression algorithm described above.
func decodeCompressedScriptSize(serialized []byte) int {
	scriptSize, bytesRead := deserializeVLQ(serialized)
	if bytesRead == 0 {
		return 0
	}

	switch scriptSize {
	case cstPayToPubKeyHash:
		return 21

	case cstPayToScriptHash:
		return 21

	case cstPayToPubKeyComp2, cstPayToPubKeyComp3, cstPayToPubKeyUncomp4,
		cstPayToPubKeyUncomp5:
		return 33
	}

	scriptSize -= numSpecialScripts
	scriptSize += uint64(bytesRead)
	return int(scriptSize)
}

// putCompressedScript compresses the passed script according to the domain
// specific compression algorithm described above directly into the passed
// target byte slice. The target byte slice must be at least large enough to
// handle the number of bytes returned by the compressedScriptSize function or
// it will panic.
func putCompressedScript(target, scriptPubKey []byte) int {
	// Pay-to-pubkey-hash script.
	if valid, hash := isPubKeyHash(scriptPubKey); valid {
		target[0] = cstPayToPubKeyHash
		copy(target[1:21], hash)
		return 21
	}

	// Pay-to-script-hash script.
	if valid, hash := isScriptHash(scriptPubKey); valid {
		target[0] = cstPayToScriptHash
		copy(target[1:21], hash)
		return 21
	}

	// Pay-to-pubkey (compressed or uncompressed) script.
	if valid, serializedPubKey := isPubKey(scriptPubKey); valid {
		pubKeyFormat := serializedPubKey[0]
		switch pubKeyFormat {
		case 0x02, 0x03:
			target[0] = pubKeyFormat
			copy(target[1:33], serializedPubKey[1:33])
			return 33
		case 0x04:
			// Encode the oddness of the serialized pubkey into the
			// compressed script type.
			target[0] = pubKeyFormat | (serializedPubKey[64] & 0x01)
			copy(target[1:33], serializedPubKey[1:33])
			return 33
		}
	}

	// When none of the above special cases apply, encode the unmodified
	// script preceded by the sum of its size and the number of special
	// cases encoded as a variable length quantity.
	encodedSize := uint64(len(scriptPubKey) + numSpecialScripts)
	vlqSizeLen := putVLQ(target, encodedSize)
	copy(target[vlqSizeLen:], scriptPubKey)
	return vlqSizeLen + len(scriptPubKey)
}
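
// Illustrative sketch (not part of the original file): compressing a standard
// pay-to-pubkey-hash script. The 25-byte script collapses to 21 bytes: the
// single type byte cstPayToPubKeyHash followed by the 20-byte pubkey hash.
// The hex constant below is one of the vectors used in this package's tests.
//
//	scriptPubKey := hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac")
//	target := make([]byte, compressedScriptSize(scriptPubKey)) // 21 bytes
//	putCompressedScript(target, scriptPubKey)
//	// target == 00 1018853670f9f3b0582c5b9ee8ce93764ac32b93
//	original := decompressScript(target) // round-trips to scriptPubKey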

// decompressScript returns the original script obtained by decompressing the
// passed compressed script according to the domain specific compression
// algorithm described above.
//
// NOTE: The script parameter must already have been proven to be long enough
// to contain the number of bytes returned by decodeCompressedScriptSize or it
// will panic. This is acceptable since it is only an internal function.
func decompressScript(compressedScriptPubKey []byte) []byte {
	// In practice this function will not be called with a zero-length or
	// nil script since the nil script encoding includes the length, however
	// the code below assumes the length exists, so just return nil now if
	// the function ever ends up being called with a nil script in the
	// future.
	if len(compressedScriptPubKey) == 0 {
		return nil
	}

	// Decode the script size and examine it for the special cases.
	encodedScriptSize, bytesRead := deserializeVLQ(compressedScriptPubKey)
	switch encodedScriptSize {
	// Pay-to-pubkey-hash script. The resulting script is:
	// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
	case cstPayToPubKeyHash:
		scriptPubKey := make([]byte, 25)
		scriptPubKey[0] = txscript.OpDup
		scriptPubKey[1] = txscript.OpHash160
		scriptPubKey[2] = txscript.OpData20
		copy(scriptPubKey[3:], compressedScriptPubKey[bytesRead:bytesRead+20])
		scriptPubKey[23] = txscript.OpEqualVerify
		scriptPubKey[24] = txscript.OpCheckSig
		return scriptPubKey

	// Pay-to-script-hash script. The resulting script is:
	// <OP_HASH160><20 byte script hash><OP_EQUAL>
	case cstPayToScriptHash:
		scriptPubKey := make([]byte, 23)
		scriptPubKey[0] = txscript.OpHash160
		scriptPubKey[1] = txscript.OpData20
		copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+20])
		scriptPubKey[22] = txscript.OpEqual
		return scriptPubKey

	// Pay-to-compressed-pubkey script. The resulting script is:
	// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
	case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
		scriptPubKey := make([]byte, 35)
		scriptPubKey[0] = txscript.OpData33
		scriptPubKey[1] = byte(encodedScriptSize)
		copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+32])
		scriptPubKey[34] = txscript.OpCheckSig
		return scriptPubKey

	// Pay-to-uncompressed-pubkey script. The resulting script is:
	// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
	case cstPayToPubKeyUncomp4, cstPayToPubKeyUncomp5:
		// Change the leading byte to the appropriate compressed pubkey
		// identifier (0x02 or 0x03) so it can be decoded as a
		// compressed pubkey. This really should never fail since the
		// encoding ensures it is valid before compressing to this type.
		compressedKey := make([]byte, 33)
		compressedKey[0] = byte(encodedScriptSize - 2)
		copy(compressedKey[1:], compressedScriptPubKey[1:])
		key, err := btcec.ParsePubKey(compressedKey, btcec.S256())
		if err != nil {
			return nil
		}

		scriptPubKey := make([]byte, 67)
		scriptPubKey[0] = txscript.OpData65
		copy(scriptPubKey[1:], key.SerializeUncompressed())
		scriptPubKey[66] = txscript.OpCheckSig
		return scriptPubKey
	}

	// When none of the special cases apply, the script was encoded using
	// the general format, so reduce the script size by the number of
	// special cases and return the unmodified script.
	scriptSize := int(encodedScriptSize - numSpecialScripts)
	scriptPubKey := make([]byte, scriptSize)
	copy(scriptPubKey, compressedScriptPubKey[bytesRead:bytesRead+scriptSize])
	return scriptPubKey
}

// -----------------------------------------------------------------------------
// In order to reduce the size of stored amounts, a domain specific compression
// algorithm is used which relies on there typically being a lot of zeroes at
// the end of the amounts. The compression algorithm used here was obtained
// from Bitcoin Core, so all credits for the algorithm go to it.
//
// While this is simply exchanging one uint64 for another, the resulting value
// for typical amounts has a much smaller magnitude which results in fewer bytes
// when encoded as variable length quantity. For example, consider the amount
// of 0.1 BTC which is 10000000 satoshi. Encoding 10000000 as a VLQ would take
// 4 bytes while encoding the compressed value of 8 as a VLQ only takes 1 byte.
//
// Essentially the compression is achieved by splitting the value into an
// exponent in the range [0-9] and a digit in the range [1-9], when possible,
// and encoding them in a way that can be decoded. More specifically, the
// encoding is as follows:
// - 0 is 0
// - Find the exponent, e, as the largest power of 10 that evenly divides the
//   value up to a maximum of 9
// - When e < 9, the final digit can't be 0 so store it as d and remove it by
//   dividing the value by 10 (call the result n). The encoded value is thus:
//   1 + 10*(9*n + d-1) + e
// - When e==9, the only thing known is the amount is not 0. The encoded value
//   is thus:
//   1 + 10*(n-1) + e == 10 + 10*(n-1)
//
// Example encodings:
// (The numbers in parenthesis are the number of bytes when serialized as a VLQ)
//            0 (1) -> 0         (1)  *  0.00000000 BTC
//         1000 (2) -> 4         (1)  *  0.00001000 BTC
//        10000 (2) -> 5         (1)  *  0.00010000 BTC
//     12345678 (4) -> 111111101 (4)  *  0.12345678 BTC
//     50000000 (4) -> 48        (1)  *  0.50000000 BTC
//    100000000 (4) -> 9         (1)  *  1.00000000 BTC
//    500000000 (5) -> 49        (1)  *  5.00000000 BTC
//   1000000000 (5) -> 10        (1)  * 10.00000000 BTC
// -----------------------------------------------------------------------------

// compressTxOutAmount compresses the passed amount according to the domain
// specific compression algorithm described above.
func compressTxOutAmount(amount uint64) uint64 {
	// No need to do any work if it's zero.
	if amount == 0 {
		return 0
	}

	// Find the largest power of 10 (max of 9) that evenly divides the
	// value.
	exponent := uint64(0)
	for amount%10 == 0 && exponent < 9 {
		amount /= 10
		exponent++
	}

	// The compressed result for exponents less than 9 is:
	// 1 + 10*(9*n + d-1) + e
	if exponent < 9 {
		lastDigit := amount % 10
		amount /= 10
		return 1 + 10*(9*amount+lastDigit-1) + exponent
	}

	// The compressed result for an exponent of 9 is:
	// 1 + 10*(n-1) + e == 10 + 10*(n-1)
	return 10 + 10*(amount-1)
}

// decompressTxOutAmount returns the original amount the passed compressed
// amount represents according to the domain specific compression algorithm
// described above.
func decompressTxOutAmount(amount uint64) uint64 {
	// No need to do any work if it's zero.
	if amount == 0 {
		return 0
	}

	// The decompressed amount is either of the following two equations:
	// x = 1 + 10*(9*n + d - 1) + e
	// x = 1 + 10*(n - 1) + 9
	amount--

	// The decompressed amount is now one of the following two equations:
	// x = 10*(9*n + d - 1) + e
	// x = 10*(n - 1) + 9
	exponent := amount % 10
	amount /= 10

	// The decompressed amount is now one of the following two equations:
	// x = 9*n + d - 1  | where e < 9
	// x = n - 1        | where e = 9
	n := uint64(0)
	if exponent < 9 {
		lastDigit := amount%9 + 1
		amount /= 9
		n = amount*10 + lastDigit
	} else {
		n = amount + 1
	}

	// Apply the exponent.
	for ; exponent > 0; exponent-- {
		n *= 10
	}

	return n
}
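
// Illustrative sketch (not part of the original file): compressing 10000000
// satoshi (0.1 BTC) by hand. Dividing out trailing zeroes leaves 1 with
// exponent e = 7; since e < 9, the final digit d = 1 and the remainder n = 0,
// so the result is 1 + 10*(9*0 + 1-1) + 7 = 8, which fits in a single VLQ
// byte instead of four.
//
//	compressed := compressTxOutAmount(10000000)   // 8
//	original := decompressTxOutAmount(compressed) // 10000000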

// -----------------------------------------------------------------------------
// Compressed transaction outputs consist of an amount and a public key script
// both compressed using the domain specific compression algorithms previously
// described.
//
// The serialized format is:
//
//   <compressed amount><compressed script>
//
//   Field               Type     Size
//   compressed amount   VLQ      variable
//   compressed script   []byte   variable
// -----------------------------------------------------------------------------

// compressedTxOutSize returns the number of bytes the passed transaction output
// fields would take when encoded with the format described above.
func compressedTxOutSize(amount uint64, scriptPubKey []byte) int {
	return serializeSizeVLQ(compressTxOutAmount(amount)) +
		compressedScriptSize(scriptPubKey)
}

// putCompressedTxOut compresses the passed amount and script according to their
// domain specific compression algorithms and encodes them directly into the
// passed target byte slice with the format described above. The target byte
// slice must be at least large enough to handle the number of bytes returned by
// the compressedTxOutSize function or it will panic.
func putCompressedTxOut(target []byte, amount uint64, scriptPubKey []byte) int {
	offset := putVLQ(target, compressTxOutAmount(amount))
	offset += putCompressedScript(target[offset:], scriptPubKey)
	return offset
}

// decodeCompressedTxOut decodes the passed compressed txout, possibly followed
// by other data, into its uncompressed amount and script and returns them along
// with the number of bytes they occupied prior to decompression.
func decodeCompressedTxOut(serialized []byte) (uint64, []byte, int, error) {
	// Deserialize the compressed amount and ensure there are bytes
	// remaining for the compressed script.
	compressedAmount, bytesRead := deserializeVLQ(serialized)
	if bytesRead >= len(serialized) {
		return 0, nil, bytesRead, errDeserialize("unexpected end of " +
			"data after compressed amount")
	}

	// Decode the compressed script size and ensure there are enough bytes
	// left in the slice for it.
	scriptSize := decodeCompressedScriptSize(serialized[bytesRead:])
	if len(serialized[bytesRead:]) < scriptSize {
		return 0, nil, bytesRead, errDeserialize("unexpected end of " +
			"data after script size")
	}

	// Decompress and return the amount and script.
	amount := decompressTxOutAmount(compressedAmount)
	script := decompressScript(serialized[bytesRead : bytesRead+scriptSize])
	return amount, script, bytesRead + scriptSize, nil
}
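
// Illustrative sketch (not part of the original file): a full txout round
// trip combining both compressors. A 546-satoshi pay-to-pubkey-hash output
// serializes to the compressed amount (4911, two VLQ bytes) followed by the
// 21-byte compressed script. The hex constant below is one of the vectors
// used in this package's tests.
//
//	scriptPubKey := hexToBytes("76a914e34cce70c86373273efcc54ce7d2a491bb4a0e8488ac")
//	target := make([]byte, compressedTxOutSize(546, scriptPubKey))
//	putCompressedTxOut(target, 546, scriptPubKey)
//	amount, script, bytesRead, err := decodeCompressedTxOut(target)
//	// amount == 546, script round-trips, bytesRead == len(target)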
@@ -1,436 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// hexToBytes converts the passed hex string into bytes and will panic if there
|
||||
// is an error. This is only provided for the hard-coded constants so errors in
|
||||
// the source code can be detected. It will only (and must only) be called with
|
||||
// hard-coded values.
|
||||
func hexToBytes(s string) []byte {
|
||||
b, err := hex.DecodeString(s)
|
||||
if err != nil {
|
||||
panic("invalid hex in source file: " + s)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// TestVLQ ensures the variable length quantity serialization, deserialization,
|
||||
// and size calculation works as expected.
|
||||
func TestVLQ(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
val uint64
|
||||
serialized []byte
|
||||
}{
|
||||
{0, hexToBytes("00")},
|
||||
{1, hexToBytes("01")},
|
||||
{127, hexToBytes("7f")},
|
||||
{128, hexToBytes("8000")},
|
||||
{129, hexToBytes("8001")},
|
||||
{255, hexToBytes("807f")},
|
||||
{256, hexToBytes("8100")},
|
||||
{16383, hexToBytes("fe7f")},
|
||||
{16384, hexToBytes("ff00")},
|
||||
{16511, hexToBytes("ff7f")}, // Max 2-byte value
|
||||
{16512, hexToBytes("808000")},
|
||||
{16513, hexToBytes("808001")},
|
||||
{16639, hexToBytes("80807f")},
|
||||
{32895, hexToBytes("80ff7f")},
|
||||
{2113663, hexToBytes("ffff7f")}, // Max 3-byte value
|
||||
{2113664, hexToBytes("80808000")},
|
||||
{270549119, hexToBytes("ffffff7f")}, // Max 4-byte value
|
||||
{270549120, hexToBytes("8080808000")},
|
||||
{2147483647, hexToBytes("86fefefe7f")},
|
||||
{2147483648, hexToBytes("86fefeff00")},
|
||||
{4294967295, hexToBytes("8efefefe7f")}, // Max uint32, 5 bytes
|
||||
// Max uint64, 10 bytes
|
||||
{18446744073709551615, hexToBytes("80fefefefefefefefe7f")},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the function to calculate the serialized size without
|
||||
// actually serializing the value is calculated properly.
|
||||
gotSize := serializeSizeVLQ(test.val)
|
||||
if gotSize != len(test.serialized) {
|
||||
t.Errorf("serializeSizeVLQ: did not get expected size "+
|
||||
"for %d - got %d, want %d", test.val, gotSize,
|
||||
len(test.serialized))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the value serializes to the expected bytes.
|
||||
gotBytes := make([]byte, gotSize)
|
||||
gotBytesWritten := putVLQ(gotBytes, test.val)
|
||||
if !bytes.Equal(gotBytes, test.serialized) {
|
||||
t.Errorf("putVLQUnchecked: did not get expected bytes "+
|
||||
"for %d - got %x, want %x", test.val, gotBytes,
|
||||
test.serialized)
|
||||
continue
|
||||
}
|
||||
if gotBytesWritten != len(test.serialized) {
|
||||
t.Errorf("putVLQUnchecked: did not get expected number "+
|
||||
"of bytes written for %d - got %d, want %d",
|
||||
test.val, gotBytesWritten, len(test.serialized))
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure the serialized bytes deserialize to the expected
|
||||
// value.
|
||||
gotVal, gotBytesRead := deserializeVLQ(test.serialized)
|
||||
if gotVal != test.val {
|
||||
t.Errorf("deserializeVLQ: did not get expected value "+
|
||||
"for %x - got %d, want %d", test.serialized,
|
||||
gotVal, test.val)
|
||||
continue
|
||||
}
|
||||
if gotBytesRead != len(test.serialized) {
|
||||
t.Errorf("deserializeVLQ: did not get expected number "+
|
||||
"of bytes read for %d - got %d, want %d",
|
||||
test.serialized, gotBytesRead,
|
||||
len(test.serialized))
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestScriptCompression ensures the domain-specific script compression and
|
||||
// decompression works as expected.
|
||||
func TestScriptCompression(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
uncompressed []byte
|
||||
compressed []byte
|
||||
}{
|
||||
{
|
||||
name: "nil",
|
||||
uncompressed: nil,
|
||||
compressed: hexToBytes("06"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey-hash 1",
|
||||
uncompressed: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
|
||||
compressed: hexToBytes("001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey-hash 2",
|
||||
uncompressed: hexToBytes("76a914e34cce70c86373273efcc54ce7d2a491bb4a0e8488ac"),
|
||||
compressed: hexToBytes("00e34cce70c86373273efcc54ce7d2a491bb4a0e84"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-script-hash 1",
|
||||
uncompressed: hexToBytes("a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87"),
|
||||
compressed: hexToBytes("01da1745e9b549bd0bfa1a569971c77eba30cd5a4b"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-script-hash 2",
|
||||
uncompressed: hexToBytes("a914f815b036d9bbbce5e9f2a00abd1bf3dc91e9551087"),
|
||||
compressed: hexToBytes("01f815b036d9bbbce5e9f2a00abd1bf3dc91e95510"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey compressed 0x02",
|
||||
uncompressed: hexToBytes("2102192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4ac"),
|
||||
compressed: hexToBytes("02192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey compressed 0x03",
|
||||
uncompressed: hexToBytes("2103b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65ac"),
|
||||
compressed: hexToBytes("03b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey uncompressed 0x04 even",
|
||||
uncompressed: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
|
||||
compressed: hexToBytes("04192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey uncompressed 0x04 odd",
|
||||
uncompressed: hexToBytes("410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac"),
|
||||
compressed: hexToBytes("0511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey invalid pubkey",
|
||||
uncompressed: hexToBytes("3302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
|
||||
compressed: hexToBytes("293302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
|
||||
},
|
||||
{
|
||||
name: "requires 2 size bytes - data push 200 bytes",
|
||||
uncompressed: append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...),
|
||||
// [0x80, 0x50] = 208 as a variable length quantity
|
||||
// [0x4c, 0xc8] = OP_PUSHDATA1 200
|
||||
compressed: append(hexToBytes("80504cc8"), bytes.Repeat([]byte{0x00}, 200)...),
|
||||
},
|
||||
}
|
||||
|
||||
	for _, test := range tests {
		// Ensure the function to calculate the serialized size without
		// actually serializing the value works properly.
		gotSize := compressedScriptSize(test.uncompressed)
		if gotSize != len(test.compressed) {
			t.Errorf("compressedScriptSize (%s): did not get "+
				"expected size - got %d, want %d", test.name,
				gotSize, len(test.compressed))
			continue
		}

		// Ensure the script compresses to the expected bytes.
		gotCompressed := make([]byte, gotSize)
		gotBytesWritten := putCompressedScript(gotCompressed,
			test.uncompressed)
		if !bytes.Equal(gotCompressed, test.compressed) {
			t.Errorf("putCompressedScript (%s): did not get "+
				"expected bytes - got %x, want %x", test.name,
				gotCompressed, test.compressed)
			continue
		}
		if gotBytesWritten != len(test.compressed) {
			t.Errorf("putCompressedScript (%s): did not get "+
				"expected number of bytes written - got %d, "+
				"want %d", test.name, gotBytesWritten,
				len(test.compressed))
			continue
		}

		// Ensure the compressed script size is properly decoded from
		// the compressed script.
		gotDecodedSize := decodeCompressedScriptSize(test.compressed)
		if gotDecodedSize != len(test.compressed) {
			t.Errorf("decodeCompressedScriptSize (%s): did not get "+
				"expected size - got %d, want %d", test.name,
				gotDecodedSize, len(test.compressed))
			continue
		}

		// Ensure the script decompresses to the expected bytes.
		gotDecompressed := decompressScript(test.compressed)
		if !bytes.Equal(gotDecompressed, test.uncompressed) {
			t.Errorf("decompressScript (%s): did not get expected "+
				"bytes - got %x, want %x", test.name,
				gotDecompressed, test.uncompressed)
			continue
		}
	}
}

// TestScriptCompressionErrors ensures calling various functions related to
// script compression with incorrect data returns the expected results.
func TestScriptCompressionErrors(t *testing.T) {
	t.Parallel()

	// A nil script must result in a decoded size of 0.
	if gotSize := decodeCompressedScriptSize(nil); gotSize != 0 {
		t.Fatalf("decodeCompressedScriptSize with nil script did not "+
			"return 0 - got %d", gotSize)
	}

	// A nil script must result in a nil decompressed script.
	if gotScript := decompressScript(nil); gotScript != nil {
		t.Fatalf("decompressScript with nil script did not return nil "+
			"decompressed script - got %x", gotScript)
	}

	// A compressed script for a pay-to-pubkey (uncompressed) that results
	// in an invalid pubkey must result in a nil decompressed script.
	compressedScript := hexToBytes("04012d74d0cb94344c9569c2e77901573d8d" +
		"7903c3ebec3a957724895dca52c6b4")
	if gotScript := decompressScript(compressedScript); gotScript != nil {
		t.Fatalf("decompressScript with compressed pay-to-"+
			"uncompressed-pubkey that is invalid did not return "+
			"nil decompressed script - got %x", gotScript)
	}
}

// TestAmountCompression ensures the domain-specific transaction output amount
// compression and decompression works as expected.
func TestAmountCompression(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name         string
		uncompressed uint64
		compressed   uint64
	}{
		{
			name:         "0 BTC",
			uncompressed: 0,
			compressed:   0,
		},
		{
			name:         "546 Satoshi (current network dust value)",
			uncompressed: 546,
			compressed:   4911,
		},
		{
			name:         "0.00001 BTC (typical transaction fee)",
			uncompressed: 1000,
			compressed:   4,
		},
		{
			name:         "0.0001 BTC (typical transaction fee)",
			uncompressed: 10000,
			compressed:   5,
		},
		{
			name:         "0.12345678 BTC",
			uncompressed: 12345678,
			compressed:   111111101,
		},
		{
			name:         "0.5 BTC",
			uncompressed: 50000000,
			compressed:   48,
		},
		{
			name:         "1 BTC",
			uncompressed: 100000000,
			compressed:   9,
		},
		{
			name:         "5 BTC",
			uncompressed: 500000000,
			compressed:   49,
		},
		{
			name:         "21000000 BTC (max minted coins)",
			uncompressed: 2100000000000000,
			compressed:   21000000,
		},
	}

	for _, test := range tests {
		// Ensure the amount compresses to the expected value.
		gotCompressed := compressTxOutAmount(test.uncompressed)
		if gotCompressed != test.compressed {
			t.Errorf("compressTxOutAmount (%s): did not get "+
				"expected value - got %d, want %d", test.name,
				gotCompressed, test.compressed)
			continue
		}

		// Ensure the value decompresses to the expected value.
		gotDecompressed := decompressTxOutAmount(test.compressed)
		if gotDecompressed != test.uncompressed {
			t.Errorf("decompressTxOutAmount (%s): did not get "+
				"expected value - got %d, want %d", test.name,
				gotDecompressed, test.uncompressed)
			continue
		}
	}
}

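// The sketch below is not part of the original file: it re-derives the
// domain-specific amount compression exercised by the vectors above,
// following Bitcoin Core's scheme (factor out up to nine powers of ten,
// then pack the remaining digits). The name compressAmountSketch is
// hypothetical; the real implementation is compressTxOutAmount.
func compressAmountSketch(amount uint64) uint64 {
	if amount == 0 {
		return 0
	}
	// Factor out powers of ten, at most nine of them.
	exponent := uint64(0)
	for amount%10 == 0 && exponent < 9 {
		amount /= 10
		exponent++
	}
	if exponent < 9 {
		// Encode the last non-zero digit (1-9) alongside the exponent.
		lastDigit := amount % 10
		amount /= 10
		return 1 + 10*(9*amount+lastDigit-1) + exponent
	}
	return 10 + 10*(amount-1)
}

// Spot check against the table above: compressAmountSketch(546) == 4911,
// compressAmountSketch(1000) == 4, and compressAmountSketch(100000000) == 9.
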
// TestCompressedTxOut ensures the transaction output serialization and
// deserialization works as expected.
func TestCompressedTxOut(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name         string
		amount       uint64
		scriptPubKey []byte
		compressed   []byte
	}{
		{
			name:         "pay-to-pubkey-hash dust",
			amount:       546,
			scriptPubKey: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
			compressed:   hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
		},
		{
			name:         "pay-to-pubkey uncompressed 1 BTC",
			amount:       100000000,
			scriptPubKey: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
			compressed:   hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
		},
	}

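	// A note on the vectors above (added for clarity): a compressed txout
	// is the VLQ-encoded compressed amount followed by the compressed
	// script. In the dust case, 0xa52f is the VLQ encoding of 4911, the
	// compressed form of 546; in the 1 BTC case, 0x09 encodes the
	// compressed amount for 100000000.
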
	for _, test := range tests {
		// Ensure the function to calculate the serialized size without
		// actually serializing the txout works properly.
		gotSize := compressedTxOutSize(test.amount, test.scriptPubKey)
		if gotSize != len(test.compressed) {
			t.Errorf("compressedTxOutSize (%s): did not get "+
				"expected size - got %d, want %d", test.name,
				gotSize, len(test.compressed))
			continue
		}

		// Ensure the txout compresses to the expected value.
		gotCompressed := make([]byte, gotSize)
		gotBytesWritten := putCompressedTxOut(gotCompressed,
			test.amount, test.scriptPubKey)
		if !bytes.Equal(gotCompressed, test.compressed) {
			t.Errorf("compressTxOut (%s): did not get expected "+
				"bytes - got %x, want %x", test.name,
				gotCompressed, test.compressed)
			continue
		}
		if gotBytesWritten != len(test.compressed) {
			t.Errorf("compressTxOut (%s): did not get expected "+
				"number of bytes written - got %d, want %d",
				test.name, gotBytesWritten,
				len(test.compressed))
			continue
		}

		// Ensure the serialized bytes are decoded back to the expected
		// uncompressed values.
		gotAmount, gotScript, gotBytesRead, err := decodeCompressedTxOut(
			test.compressed)
		if err != nil {
			t.Errorf("decodeCompressedTxOut (%s): unexpected "+
				"error: %v", test.name, err)
			continue
		}
		if gotAmount != test.amount {
			t.Errorf("decodeCompressedTxOut (%s): did not get "+
				"expected amount - got %d, want %d",
				test.name, gotAmount, test.amount)
			continue
		}
		if !bytes.Equal(gotScript, test.scriptPubKey) {
			t.Errorf("decodeCompressedTxOut (%s): did not get "+
				"expected script - got %x, want %x",
				test.name, gotScript, test.scriptPubKey)
			continue
		}
		if gotBytesRead != len(test.compressed) {
			t.Errorf("decodeCompressedTxOut (%s): did not get "+
				"expected number of bytes read - got %d, want %d",
				test.name, gotBytesRead, len(test.compressed))
			continue
		}
	}
}

// TestTxOutCompressionErrors ensures calling various functions related to
// txout compression with incorrect data returns the expected results.
func TestTxOutCompressionErrors(t *testing.T) {
	t.Parallel()

	// A compressed txout with missing compressed script must error.
	compressedTxOut := hexToBytes("00")
	_, _, _, err := decodeCompressedTxOut(compressedTxOut)
	if !isDeserializeErr(err) {
		t.Fatalf("decodeCompressedTxOut with missing compressed script "+
			"did not return expected error type - got %T, want "+
			"errDeserialize", err)
	}

	// A compressed txout with short compressed script must error.
	compressedTxOut = hexToBytes("0010")
	_, _, _, err = decodeCompressedTxOut(compressedTxOut)
	if !isDeserializeErr(err) {
		t.Fatalf("decodeCompressedTxOut with short compressed script "+
			"did not return expected error type - got %T, want "+
			"errDeserialize", err)
	}
}

blockdag/dag.go: 1975 lines changed (diff suppressed because it is too large)
blockdag/dag_test.go: 1386 lines changed (diff suppressed because it is too large)
@@ -1,854 +0,0 @@
// Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"bytes"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io"
	"sync"

	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/binaryserializer"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/util/subnetworkid"
	"github.com/daglabs/btcd/wire"
)

const (
	// blockHdrSize is the size of a block header. This is simply the
	// constant from wire and is only provided here for convenience since
	// wire.MaxBlockHeaderPayload is quite long.
	blockHdrSize = wire.MaxBlockHeaderPayload

	// latestUTXOSetBucketVersion is the current version of the UTXO set
	// bucket that is used to track all unspent outputs.
	latestUTXOSetBucketVersion = 1
)

var (
	// blockIndexBucketName is the name of the db bucket used to house the
	// block headers and contextual information.
	blockIndexBucketName = []byte("blockheaderidx")

	// dagStateKeyName is the name of the db key used to store the DAG
	// tip hashes.
	dagStateKeyName = []byte("dagstate")

	// utxoSetVersionKeyName is the name of the db key used to store the
	// version of the utxo set currently in the database.
	utxoSetVersionKeyName = []byte("utxosetversion")

	// utxoSetBucketName is the name of the db bucket used to house the
	// unspent transaction output set.
	utxoSetBucketName = []byte("utxoset")

	// utxoDiffsBucketName is the name of the db bucket used to house the
	// diffs and diff children of blocks.
	utxoDiffsBucketName = []byte("utxodiffs")

	// subnetworksBucketName is the name of the db bucket used to store the
	// subnetwork registry.
	subnetworksBucketName = []byte("subnetworks")

	// localSubnetworkKeyName is the name of the db key used to store the
	// node's local subnetwork ID.
	localSubnetworkKeyName = []byte("localsubnetworkidkey")

	// byteOrder is the preferred byte order used for serializing numeric
	// fields for storage in the database.
	byteOrder = binary.LittleEndian
)

// errNotInDAG signifies that a block hash or height that is not in the
// DAG was requested.
type errNotInDAG string

// Error implements the error interface.
func (e errNotInDAG) Error() string {
	return string(e)
}

// isNotInDAGErr returns whether or not the passed error is an
// errNotInDAG error.
func isNotInDAGErr(err error) bool {
	_, ok := err.(errNotInDAG)
	return ok
}

// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string

// Error implements the error interface.
func (e errDeserialize) Error() string {
	return string(e)
}

// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
	_, ok := err.(errDeserialize)
	return ok
}

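// Illustrative usage of the classification helpers above (hypothetical
// caller, modeled on initDAGState further down): deserialization failures
// are distinguished from other errors by type assertion so they can be
// surfaced as database corruption.
//
//	if _, err := deserializeOutpoint(key); isDeserializeErr(err) {
//		// treat as database corruption rather than an I/O error
//	}
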
// dbPutVersion uses an existing database transaction to update the provided
// key in the metadata bucket to the given version. It is primarily used to
// track versions on entities such as buckets.
func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
	var serialized [4]byte
	byteOrder.PutUint32(serialized[:], version)
	return dbTx.Metadata().Put(key, serialized[:])
}

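// For illustration: since byteOrder is binary.LittleEndian, version 1 is
// stored as the four bytes 01 00 00 00 under the provided key.
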
// -----------------------------------------------------------------------------
// The unspent transaction output (UTXO) set consists of an entry for each
// unspent output using a format that is optimized to reduce space using domain
// specific compression algorithms. This format is a slightly modified version
// of the format used in Bitcoin Core.
//
// Each entry is keyed by an outpoint as specified below. It is important to
// note that the key encoding uses a VLQ, which employs an MSB encoding so
// iteration of UTXOs when doing byte-wise comparisons will produce them in
// order.
//
// The serialized key format is:
//   <hash><output index>
//
//   Field          Type          Size
//   hash           daghash.Hash  daghash.HashSize
//   output index   VLQ           variable
//
// The serialized value format is:
//
//   <header code><compressed txout>
//
//   Field                Type    Size
//   header code          VLQ     variable
//   compressed txout
//     compressed amount  VLQ     variable
//     compressed script  []byte  variable
//
// The serialized header code format is:
//   bit 0 - containing transaction is a coinbase
//   bits 1-x - height of the block that contains the unspent txout
//
// Example 1:
// From tx in main blockchain:
// Blk 1, b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
//
//    03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52
//    <><------------------------------------------------------------------>
//     |                                  |
//   header code                  compressed txout
//
//  - header code: 0x03 (coinbase, height 1)
//  - compressed txout:
//    - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC)
//    - 0x04: special script type pay-to-pubkey
//    - 0x96...52: x-coordinate of the pubkey
//
// Example 2:
// From tx in main blockchain:
// Blk 113931, 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
//
//    8cf316800900b8025be1b3efc63b0ad48e7f9f10e87544528d58
//    <----><------------------------------------------>
//      |                        |
//   header code         compressed txout
//
//  - header code: 0x8cf316 (not coinbase, height 113931)
//  - compressed txout:
//    - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 BTC)
//    - 0x00: special script type pay-to-pubkey-hash
//    - 0xb8...58: pubkey hash
//
// Example 3:
// From tx in main blockchain:
// Blk 338156, 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
//
//    a8a2588ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6
//    <----><-------------------------------------------------->
//      |                             |
//   header code              compressed txout
//
//  - header code: 0xa8a258 (not coinbase, height 338156)
//  - compressed txout:
//    - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 BTC)
//    - 0x01: special script type pay-to-script-hash
//    - 0x1d...e6: script hash
// -----------------------------------------------------------------------------

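// The sketch below is not part of the original file: it illustrates the
// MSB-base-128 VLQ encoding the format above relies on, mirroring what the
// package's own putVLQ helper (defined elsewhere in the package) is assumed
// to do. Each continuation byte carries an implicit +1, which is what keeps
// the encoding byte-wise sortable. Encoding 208 yields [0x80, 0x50],
// matching the test vectors in compress_test.go.
func putVLQSketch(target []byte, n uint64) int {
	offset := 0
	for ; ; offset++ {
		// The low 7 bits carry the payload; the high bit is set on
		// every byte except the first one written here, which after
		// the reversal below becomes the final byte.
		highBitMask := byte(0x80)
		if offset == 0 {
			highBitMask = 0x00
		}
		target[offset] = byte(n&0x7f) | highBitMask
		if n <= 0x7f {
			break
		}
		n = (n >> 7) - 1
	}
	// Reverse the bytes so the result is MSB-encoded.
	for i, j := 0, offset; i < j; i, j = i+1, j-1 {
		target[i], target[j] = target[j], target[i]
	}
	return offset + 1
}
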
// maxUint32VLQSerializeSize is the maximum number of bytes a max uint32 takes
// to serialize as a VLQ.
var maxUint32VLQSerializeSize = serializeSizeVLQ(1<<32 - 1)

// outpointKeyPool defines a concurrent safe free list of byte slices used to
// provide temporary buffers for outpoint database keys.
var outpointKeyPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, daghash.HashSize+maxUint32VLQSerializeSize)
		return &b // Pointer to slice to avoid boxing alloc.
	},
}

// outpointKey returns a key suitable for use as a database key in the UTXO set
// while making use of a free list. A new buffer is allocated if there are not
// already any available on the free list. The returned byte slice should be
// returned to the free list by using the recycleOutpointKey function when the
// caller is done with it _unless_ the slice will need to live for longer than
// the caller can calculate such as when used to write to the database.
func outpointKey(outpoint wire.Outpoint) *[]byte {
	// A VLQ employs an MSB encoding, so they are useful not only to reduce
	// the amount of storage space, but also so iteration of UTXOs when
	// doing byte-wise comparisons will produce them in order.
	key := outpointKeyPool.Get().(*[]byte)
	idx := uint64(outpoint.Index)
	*key = (*key)[:daghash.HashSize+serializeSizeVLQ(idx)]
	copy(*key, outpoint.TxID[:])
	putVLQ((*key)[daghash.HashSize:], idx)
	return key
}

// recycleOutpointKey puts the provided byte slice, which should have been
// obtained via the outpointKey function, back on the free list.
func recycleOutpointKey(key *[]byte) {
	outpointKeyPool.Put(key)
}

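// Illustrative usage of the free-list pattern above (hypothetical caller,
// modeled on dbPutUTXODiff below): recycle the key as soon as the database
// no longer needs it, but never after handing it to a call that retains it.
//
//	key := outpointKey(outpoint)
//	err := utxoBucket.Delete(*key)
//	recycleOutpointKey(key) // safe: Delete does not retain the key
//	if err != nil {
//		return err
//	}
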
// dbPutUTXODiff uses an existing database transaction to update the UTXO set
// in the database based on the provided UTXO view contents and state. In
// particular, only the entries that have been marked as modified are written
// to the database.
func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
	utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
	for outpoint := range diff.toRemove {
		key := outpointKey(outpoint)
		err := utxoBucket.Delete(*key)
		recycleOutpointKey(key)
		if err != nil {
			return err
		}
	}

	for outpoint, entry := range diff.toAdd {
		// Serialize and store the UTXO entry.
		serialized := serializeUTXOEntry(entry)

		key := outpointKey(outpoint)
		err := utxoBucket.Put(*key, serialized)
		// NOTE: The key is intentionally not recycled here since the
		// database interface contract prohibits modifications. It will
		// be garbage collected normally when the database is done with
		// it.
		if err != nil {
			return err
		}
	}

	return nil
}

type dagState struct {
	TipHashes         []*daghash.Hash
	LastFinalityPoint *daghash.Hash
}

// serializeDAGState returns the serialization of the DAG state.
// This is data to be stored in the DAG state bucket.
func serializeDAGState(state *dagState) ([]byte, error) {
	return json.Marshal(state)
}

// deserializeDAGState deserializes the passed serialized DAG state.
// This is data stored in the DAG state bucket and is updated after
// every block is connected to the DAG.
func deserializeDAGState(serializedData []byte) (*dagState, error) {
	var state *dagState
	err := json.Unmarshal(serializedData, &state)
	if err != nil {
		return nil, database.Error{
			ErrorCode:   database.ErrCorruption,
			Description: "corrupt DAG state",
		}
	}

	return state, nil
}

// dbPutDAGState uses an existing database transaction to store the latest
// tip hashes of the DAG.
func dbPutDAGState(dbTx database.Tx, state *dagState) error {
	serializedData, err := serializeDAGState(state)

	if err != nil {
		return err
	}

	return dbTx.Metadata().Put(dagStateKeyName, serializedData)
}

// createDAGState initializes both the database and the DAG state to the
// genesis block. This includes creating the necessary buckets, so it
// must only be called on an uninitialized database.
func (dag *BlockDAG) createDAGState() error {
	// Create the initial database DAG state, including creating the
	// necessary index buckets and inserting the genesis block.
	err := dag.db.Update(func(dbTx database.Tx) error {
		meta := dbTx.Metadata()

		// Create the bucket that houses the block index data.
		_, err := meta.CreateBucket(blockIndexBucketName)
		if err != nil {
			return err
		}

		// Create the buckets that house the utxo set, the utxo diffs, and their
		// version.
		_, err = meta.CreateBucket(utxoSetBucketName)
		if err != nil {
			return err
		}

		_, err = meta.CreateBucket(utxoDiffsBucketName)
		if err != nil {
			return err
		}

		err = dbPutVersion(dbTx, utxoSetVersionKeyName,
			latestUTXOSetBucketVersion)
		if err != nil {
			return err
		}

		// Create the bucket that houses the registered subnetworks.
		_, err = meta.CreateBucket(subnetworksBucketName)
		if err != nil {
			return err
		}

		if err := dbPutLocalSubnetworkID(dbTx, dag.subnetworkID); err != nil {
			return err
		}

		if _, err := meta.CreateBucketIfNotExists(idByHashIndexBucketName); err != nil {
			return err
		}
		if _, err := meta.CreateBucketIfNotExists(hashByIDIndexBucketName); err != nil {
			return err
		}
		return nil
	})

	if err != nil {
		return err
	}
	return nil
}

func (dag *BlockDAG) removeDAGState() error {
	err := dag.db.Update(func(dbTx database.Tx) error {
		meta := dbTx.Metadata()

		err := meta.DeleteBucket(blockIndexBucketName)
		if err != nil {
			return err
		}

		err = meta.DeleteBucket(utxoSetBucketName)
		if err != nil {
			return err
		}

		err = meta.DeleteBucket(utxoDiffsBucketName)
		if err != nil {
			return err
		}

		err = dbTx.Metadata().Delete(utxoSetVersionKeyName)
		if err != nil {
			return err
		}

		err = meta.DeleteBucket(subnetworksBucketName)
		if err != nil {
			return err
		}

		err = dbTx.Metadata().Delete(localSubnetworkKeyName)
		if err != nil {
			return err
		}

		return nil
	})

	if err != nil {
		return err
	}
	return nil
}

func dbPutLocalSubnetworkID(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID) error {
	if subnetworkID == nil {
		return dbTx.Metadata().Put(localSubnetworkKeyName, []byte{})
	}
	return dbTx.Metadata().Put(localSubnetworkKeyName, subnetworkID[:])
}

// initDAGState attempts to load and initialize the DAG state from the
// database. When the db does not yet contain any DAG state, both it and the
// DAG state are initialized to the genesis block.
func (dag *BlockDAG) initDAGState() error {
	// Determine the state of the DAG database. We may need to initialize
	// everything from scratch or upgrade certain buckets.
	var initialized bool
	err := dag.db.View(func(dbTx database.Tx) error {
		initialized = dbTx.Metadata().Get(dagStateKeyName) != nil
		if initialized {
			var localSubnetworkID *subnetworkid.SubnetworkID
			localSubnetworkIDBytes := dbTx.Metadata().Get(localSubnetworkKeyName)
			if len(localSubnetworkIDBytes) != 0 {
				localSubnetworkID = &subnetworkid.SubnetworkID{}
				localSubnetworkID.SetBytes(localSubnetworkIDBytes)
			}
			if !localSubnetworkID.IsEqual(dag.subnetworkID) {
				return fmt.Errorf("Cannot start btcd with subnetwork ID %s because"+
					" its database is already built with subnetwork ID %s. If you"+
					" want to switch to a new database, please reset the"+
					" database by starting btcd with --reset-db flag", dag.subnetworkID, localSubnetworkID)
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	if !initialized {
		// At this point the database has not already been initialized, so
		// initialize both it and the DAG state to the genesis block.
		return dag.createDAGState()
	}

	// Attempt to load the DAG state from the database.
	return dag.db.View(func(dbTx database.Tx) error {
		// Fetch the stored DAG tipHashes from the database metadata.
		// When it doesn't exist, it means the database hasn't been
		// initialized for use with the DAG yet, so break out now to allow
		// that to happen under a writable database transaction.
		serializedData := dbTx.Metadata().Get(dagStateKeyName)
		log.Tracef("Serialized DAG tip hashes: %x", serializedData)
		state, err := deserializeDAGState(serializedData)
		if err != nil {
			return err
		}

		// Load all of the headers from the data for the known DAG
		// and construct the block index accordingly. Since the
		// number of nodes is already known, perform a single alloc
		// for them versus a whole bunch of little ones to reduce
		// pressure on the GC.
		log.Infof("Loading block index...")

		blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)

		var i int32
		var lastNode *blockNode
		var unprocessedBlockNodes []*blockNode
		cursor := blockIndexBucket.Cursor()
		for ok := cursor.First(); ok; ok = cursor.Next() {
			node, err := dag.deserializeBlockNode(cursor.Value())
			if err != nil {
				return err
			}

			// Check to see if this node had been stored in the block DB
			// but not yet accepted. If so, add it to a slice to be processed later.
			if node.status == statusDataStored {
				unprocessedBlockNodes = append(unprocessedBlockNodes, node)
				continue
			}

			if lastNode == nil {
				if !node.hash.IsEqual(dag.dagParams.GenesisHash) {
					return AssertError(fmt.Sprintf("initDAGState: Expected "+
						"first entry in block index to be genesis block, "+
						"found %s", node.hash))
				}
			} else {
				if len(node.parents) == 0 {
					return AssertError(fmt.Sprintf("initDAGState: Could "+
						"not find any parent for block %s", node.hash))
				}
			}

			// Add the node to its parents' children, connect it,
			// and add it to the block index.
			node.updateParentsChildren()
			dag.index.addNode(node)

			if node.status.KnownValid() {
				dag.blockCount++
			}

			lastNode = node
			i++
		}

		// Load all of the known UTXO entries and construct the full
		// UTXO set accordingly. Since the number of entries is already
		// known, perform a single alloc for them versus a whole bunch
		// of little ones to reduce pressure on the GC.
		log.Infof("Loading UTXO set...")

		utxoEntryBucket := dbTx.Metadata().Bucket(utxoSetBucketName)

		// Determine how many UTXO entries will be loaded into the index so we can
		// allocate the right amount.
		var utxoEntryCount int32
		cursor = utxoEntryBucket.Cursor()
		for ok := cursor.First(); ok; ok = cursor.Next() {
			utxoEntryCount++
		}

		fullUTXOCollection := make(utxoCollection, utxoEntryCount)
		for ok := cursor.First(); ok; ok = cursor.Next() {
			// Deserialize the outpoint.
			outpoint, err := deserializeOutpoint(cursor.Key())
			if err != nil {
				// Ensure any deserialization errors are returned as database
				// corruption errors.
				if isDeserializeErr(err) {
					return database.Error{
						ErrorCode:   database.ErrCorruption,
						Description: fmt.Sprintf("corrupt outpoint: %s", err),
					}
				}

				return err
			}

			// Deserialize the utxo entry.
			entry, err := deserializeUTXOEntry(cursor.Value())
			if err != nil {
				// Ensure any deserialization errors are returned as database
				// corruption errors.
				if isDeserializeErr(err) {
					return database.Error{
						ErrorCode:   database.ErrCorruption,
						Description: fmt.Sprintf("corrupt utxo entry: %s", err),
					}
				}

				return err
			}

			fullUTXOCollection[*outpoint] = entry
		}

		// Apply the loaded utxoCollection to the virtual block.
		dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
		if err != nil {
			return AssertError(fmt.Sprintf("Error loading UTXOSet: %s", err))
		}

		// Apply the stored tips to the virtual block.
		tips := newSet()
		for _, tipHash := range state.TipHashes {
			tip := dag.index.LookupNode(tipHash)
			if tip == nil {
				return AssertError(fmt.Sprintf("initDAGState: cannot find "+
					"DAG tip %s in block index", state.TipHashes))
			}
			tips.add(tip)
		}
		dag.virtual.SetTips(tips)

		// Set the last finality point.
		dag.lastFinalityPoint = dag.index.LookupNode(state.LastFinalityPoint)
		dag.finalizeNodesBelowFinalityPoint(false)

		// Go over any unprocessed blockNodes and process them now.
		for _, node := range unprocessedBlockNodes {
			// Check to see if the block exists in the block DB. If it
			// doesn't, the database has certainly been corrupted.
			blockExists, err := dbTx.HasBlock(node.hash)
			if err != nil {
				return AssertError(fmt.Sprintf("initDAGState: HasBlock "+
					"for block %s failed: %s", node.hash, err))
			}
			if !blockExists {
				return AssertError(fmt.Sprintf("initDAGState: block %s "+
					"exists in block index but not in block db", node.hash))
			}

			// Attempt to accept the block.
			block, err := dbFetchBlockByNode(dbTx, node)
			if err != nil {
				return err
			}
			isOrphan, delay, err := dag.ProcessBlock(block, BFWasStored)
			if err != nil {
				log.Warnf("Block %s, which was not previously processed, "+
					"failed to be accepted to the DAG: %s", node.hash, err)
				continue
			}

			// If the block is an orphan or is delayed then it couldn't have
			// possibly been written to the block index in the first place.
			if isOrphan {
				return AssertError(fmt.Sprintf("Block %s, which was not "+
					"previously processed, turned out to be an orphan, which is "+
					"impossible.", node.hash))
			}
			if delay != 0 {
				return AssertError(fmt.Sprintf("Block %s, which was not "+
					"previously processed, turned out to be delayed, which is "+
					"impossible.", node.hash))
			}
		}

		return nil
	})
}

// deserializeBlockNode parses a value in the block index bucket and returns a block node.
func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
	buffer := bytes.NewReader(blockRow)

	var header wire.BlockHeader
	err := header.Deserialize(buffer)
	if err != nil {
		return nil, err
	}

	node := &blockNode{
		hash:                 header.BlockHash(),
		version:              header.Version,
		bits:                 header.Bits,
		nonce:                header.Nonce,
		timestamp:            header.Timestamp.Unix(),
		hashMerkleRoot:       header.HashMerkleRoot,
		acceptedIDMerkleRoot: header.AcceptedIDMerkleRoot,
		utxoCommitment:       header.UTXOCommitment,
	}

	node.children = newSet()
	node.parents = newSet()

	for _, hash := range header.ParentHashes {
		parent := dag.index.LookupNode(hash)
		if parent == nil {
			return nil, AssertError(fmt.Sprintf("deserializeBlockNode: Could "+
				"not find parent %s for block %s", hash, header.BlockHash()))
		}
		node.parents.add(parent)
	}

	statusByte, err := buffer.ReadByte()
	if err != nil {
		return nil, err
	}
	node.status = blockStatus(statusByte)

	selectedParentHash := &daghash.Hash{}
	if _, err := io.ReadFull(buffer, selectedParentHash[:]); err != nil {
		return nil, err
	}

	// Because genesis doesn't have a selected parent, it is serialized as
	// the zero hash.
	if !selectedParentHash.IsEqual(&daghash.ZeroHash) {
		node.selectedParent = dag.index.LookupNode(selectedParentHash)
	}

	node.blueScore, err = binaryserializer.Uint64(buffer, byteOrder)
	if err != nil {
		return nil, err
	}

	bluesCount, err := wire.ReadVarInt(buffer)
	if err != nil {
		return nil, err
	}

	node.blues = make([]*blockNode, bluesCount)
	for i := uint64(0); i < bluesCount; i++ {
		hash := &daghash.Hash{}
		if _, err := io.ReadFull(buffer, hash[:]); err != nil {
			return nil, err
		}
		node.blues[i] = dag.index.LookupNode(hash)
	}

	node.height = calculateNodeHeight(node)
	node.chainHeight = calculateChainHeight(node)

	return node, nil
}

// dbFetchBlockByNode uses an existing database transaction to retrieve the
// raw block for the provided node, deserialize it, and return a util.Block
// of it.
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error) {
	// Load the raw block bytes from the database.
	blockBytes, err := dbTx.FetchBlock(node.hash)
	if err != nil {
		return nil, err
	}

	// Create the encapsulated block.
	block, err := util.NewBlockFromBytes(blockBytes)
	if err != nil {
		return nil, err
	}
	return block, nil
}

// dbStoreBlockNode stores the block node data into the block
// index bucket. This overwrites the current entry if there exists one.
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
	// Serialize block data to be stored.
	w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
	header := node.Header()
	err := header.Serialize(w)
	if err != nil {
		return err
	}

	err = w.WriteByte(byte(node.status))
	if err != nil {
		return err
	}

	// Because genesis doesn't have a selected parent, it is serialized as
	// the zero hash.
	selectedParentHash := &daghash.ZeroHash
	if node.selectedParent != nil {
		selectedParentHash = node.selectedParent.hash
	}
	_, err = w.Write(selectedParentHash[:])
	if err != nil {
		return err
	}

	err = binaryserializer.PutUint64(w, byteOrder, node.blueScore)
	if err != nil {
		return err
	}

	err = wire.WriteVarInt(w, uint64(len(node.blues)))
	if err != nil {
		return err
	}

	for _, blue := range node.blues {
		_, err = w.Write(blue.hash[:])
		if err != nil {
			return err
		}
	}

	value := w.Bytes()

	// Write block header data to block index bucket.
	blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
	key := BlockIndexKey(node.hash, node.blueScore)
	return blockIndexBucket.Put(key, value)
}

// dbStoreBlock stores the provided block in the database if it is not already
// there. The full block data is written to ffldb.
func dbStoreBlock(dbTx database.Tx, block *util.Block) error {
	hasBlock, err := dbTx.HasBlock(block.Hash())
	if err != nil {
		return err
	}
	if hasBlock {
		return nil
	}
	return dbTx.StoreBlock(block)
}

// BlockIndexKey generates the binary key for an entry in the block index
// bucket. The key is composed of the block blue score encoded as a big-endian
// 64-bit unsigned int followed by the 32 byte block hash.
// The blue score component is important for iteration order.
func BlockIndexKey(blockHash *daghash.Hash, blueScore uint64) []byte {
	indexKey := make([]byte, daghash.HashSize+8)
	binary.BigEndian.PutUint64(indexKey[0:8], blueScore)
	copy(indexKey[8:daghash.HashSize+8], blockHash[:])
	return indexKey
}

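// For illustration (hypothetical values): blueScore 1 with block hash H
// serializes to
//
//	00 00 00 00 00 00 00 01 || H[0] ... H[31]
//
// so a byte-wise cursor walk over the bucket visits blocks in ascending
// blue-score order, which is what BlockHashesFrom below relies on.
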
func blockHashFromBlockIndexKey(blockIndexKey []byte) (*daghash.Hash, error) {
	return daghash.NewHash(blockIndexKey[8 : daghash.HashSize+8])
}

// BlockByHash returns the block from the DAG with the given hash.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
	// Look up the block hash in the block index and ensure it is in the DAG.
	node := dag.index.LookupNode(hash)
	if node == nil {
		str := fmt.Sprintf("block %s is not in the DAG", hash)
		return nil, errNotInDAG(str)
	}

	// Load the block from the database and return it.
	var block *util.Block
	err := dag.db.View(func(dbTx database.Tx) error {
		var err error
		block, err = dbFetchBlockByNode(dbTx, node)
		return err
	})
	return block, err
}

// BlockHashesFrom returns a slice of block hashes ordered by blueScore,
// starting immediately after startHash. If startHash is nil then the genesis
// block is used as the starting point and its hash is included in the result.
//
// This method MUST be called with the DAG lock held.
func (dag *BlockDAG) BlockHashesFrom(startHash *daghash.Hash, limit int) ([]*daghash.Hash, error) {
	blockHashes := make([]*daghash.Hash, 0, limit)
	if startHash == nil {
		startHash = dag.genesis.hash

		// If we're starting from the beginning we should include the
		// genesis hash in the result.
		blockHashes = append(blockHashes, dag.genesis.hash)
	}
	if !dag.BlockExists(startHash) {
		return nil, fmt.Errorf("block %s not found", startHash)
	}
	blueScore, err := dag.BlueScoreByBlockHash(startHash)
	if err != nil {
		return nil, err
	}

	err = dag.index.db.View(func(dbTx database.Tx) error {
		blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
		startKey := BlockIndexKey(startHash, blueScore)

		cursor := blockIndexBucket.Cursor()
		cursor.Seek(startKey)
		for ok := cursor.Next(); ok; ok = cursor.Next() {
			key := cursor.Key()
			blockHash, err := blockHashFromBlockIndexKey(key)
			if err != nil {
				return err
			}
			blockHashes = append(blockHashes, blockHash)
			if len(blockHashes) == limit {
				break
			}
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return blockHashes, nil
}

@@ -1,81 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package blockdag implements bitcoin block handling and chain selection rules.

The bitcoin block handling and chain selection rules are an integral, and quite
likely the most important, part of bitcoin. Unfortunately, at the time of
this writing, these rules are also largely undocumented and had to be
ascertained from the bitcoind source code. At its core, bitcoin is a
distributed consensus of which blocks are valid and which ones will comprise the
main block chain (public ledger) that ultimately determines accepted
transactions, so it is extremely important that fully validating nodes agree on
all rules.

At a high level, this package provides support for inserting new blocks into
the block chain according to the aforementioned rules. It includes
functionality such as rejecting duplicate blocks, ensuring blocks and
transactions follow all rules, orphan handling, and best chain selection along
with reorganization.

Since this package does not deal with other bitcoin specifics such as network
communication or wallets, it provides a notification system which gives the
caller a high level of flexibility in how they want to react to certain events
such as orphan blocks which need their parents requested and newly connected
main chain blocks which might result in wallet updates.

Bitcoin Chain Processing Overview

Before a block is allowed into the block chain, it must go through an intensive
series of validation rules. The following list serves as a general outline of
those rules to provide some intuition into what is going on under the hood, but
is by no means exhaustive:

 - Reject duplicate blocks
 - Perform a series of sanity checks on the block and its transactions such as
   verifying proof of work, timestamps, number and character of transactions,
   transaction amounts, script complexity, and merkle root calculations
 - Compare the block against predetermined checkpoints for expected timestamps
   and difficulty based on elapsed time since the checkpoint
 - Save the most recent orphan blocks for a limited time in case their parent
   blocks become available
 - Stop processing if the block is an orphan as the rest of the processing
   depends on the block's position within the block chain
 - Perform a series of more thorough checks that depend on the block's position
   within the block chain such as verifying block difficulties adhere to
   difficulty retarget rules, timestamps are after the median of the last
   several blocks, all transactions are finalized, checkpoint blocks match, and
   block versions are in line with the previous blocks
 - Determine how the block fits into the chain and perform different actions
   accordingly in order to ensure any side chains which have higher difficulty
   than the main chain become the new main chain
 - When a block is being connected to the main chain (either through
   reorganization of a side chain to the main chain or just extending the
   main chain), perform further checks on the block's transactions such as
   verifying transaction duplicates, script complexity for the combination of
   connected scripts, coinbase maturity, double spends, and connected
   transaction values
 - Run the transaction scripts to verify the spender is allowed to spend the
   coins
 - Insert the block into the block database

Errors

Errors returned by this package are either the raw errors provided by underlying
calls or of type blockchain.RuleError. This allows the caller to differentiate
between unexpected errors, such as database errors, versus errors due to rule
violations through type assertions. In addition, callers can programmatically
determine the specific rule violation by examining the ErrorCode field of the
type asserted blockchain.RuleError.

Bitcoin Improvement Proposals

This package includes spec changes outlined by the following BIPs:

	BIP0016 (https://en.bitcoin.it/wiki/BIP_0016)
	BIP0030 (https://en.bitcoin.it/wiki/BIP_0030)
	BIP0034 (https://en.bitcoin.it/wiki/BIP_0034)
*/
package blockdag

@@ -1,483 +0,0 @@
package blockdag_test

import (
	"fmt"
	"math"
	"testing"

	"github.com/daglabs/btcd/util/subnetworkid"

	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/util/testtools"

	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/mining"
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/wire"
)

// TestFinality checks that the finality mechanism works as expected.
// This is how the flow goes:
// 1) We build a chain of blockdag.FinalityInterval blocks and call its tip altChainTip.
// 2) We build another chain (let's call it mainChain) of 2 * blockdag.FinalityInterval
//    blocks, which points to genesis, and then we check that the block in that
//    chain with height of blockdag.FinalityInterval is marked as finality point (this is
//    very predictable, because the blue score of each new block in a chain is its
//    parent's blue score plus one).
// 3) We make a new child to the block with height (2 * blockdag.FinalityInterval - 1)
//    in mainChain, and we check that connecting it to the DAG
//    doesn't affect the last finality point.
// 4) We make a block that points to genesis, and check that it
//    gets rejected because its blue score is lower than the last finality
//    point.
// 5) We make a block that points to altChainTip, and check that it
//    gets rejected because it doesn't have the last finality point in
//    its selected parent chain.
func TestFinality(t *testing.T) {
	params := dagconfig.SimNetParams
	params.K = 1
	dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
		DAGParams: &params,
	})
	if err != nil {
		t.Fatalf("Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc()
	buildNodeToDag := func(parentHashes []*daghash.Hash) (*util.Block, error) {
		msgBlock, err := mining.PrepareBlockForTest(dag, &params, parentHashes, nil, false)
		if err != nil {
			return nil, err
		}
		block := util.NewBlock(msgBlock)

		isOrphan, delay, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
		if err != nil {
			return nil, err
		}
		if delay != 0 {
			return nil, fmt.Errorf("ProcessBlock: block " +
				"is too far in the future")
		}
		if isOrphan {
			return nil, fmt.Errorf("ProcessBlock: unexpectedly returned an orphan block")
		}

		return block, nil
	}

	genesis := util.NewBlock(params.GenesisBlock)
	currentNode := genesis

	// First we build a chain of blockdag.FinalityInterval blocks for future use.
	for i := 0; i < blockdag.FinalityInterval; i++ {
		currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
		if err != nil {
			t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
		}
	}

	altChainTip := currentNode

	// Now we build a new chain of 2 * blockdag.FinalityInterval blocks, pointing to
	// genesis, and we expect the block with height 1 * blockdag.FinalityInterval to
	// be the last finality point.
	currentNode = genesis
	for i := 0; i < blockdag.FinalityInterval; i++ {
		currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
		if err != nil {
			t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
		}
	}

	expectedFinalityPoint := currentNode

	for i := 0; i < blockdag.FinalityInterval; i++ {
		currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
		if err != nil {
			t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
		}
	}

	if !dag.LastFinalityPointHash().IsEqual(expectedFinalityPoint.Hash()) {
		t.Errorf("TestFinality: dag.lastFinalityPoint expected to be %v but got %v", expectedFinalityPoint, dag.LastFinalityPointHash())
	}

	// Here we check that even if we create a parallel tip (a new tip with
	// the same parents as the current one) with the same blue score as the
	// current tip, it still won't affect the last finality point.
	_, err = buildNodeToDag(currentNode.MsgBlock().Header.ParentHashes)
	if err != nil {
		t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
	}
	if !dag.LastFinalityPointHash().IsEqual(expectedFinalityPoint.Hash()) {
		t.Errorf("TestFinality: dag.lastFinalityPoint was unexpectedly changed")
	}

	// Here we check that a block with a lower blue score than the last finality
	// point will get rejected.
	fakeCoinbaseTx, err := dag.NextBlockCoinbaseTransaction(nil, nil)
	if err != nil {
		t.Errorf("NextBlockCoinbaseTransaction: %s", err)
	}
	merkleRoot := blockdag.BuildHashMerkleTreeStore([]*util.Tx{fakeCoinbaseTx}).Root()
	beforeFinalityBlock := wire.NewMsgBlock(&wire.BlockHeader{
		Version:              0x10000000,
		ParentHashes:         []*daghash.Hash{genesis.Hash()},
		HashMerkleRoot:       merkleRoot,
		AcceptedIDMerkleRoot: &daghash.ZeroHash,
		UTXOCommitment:       &daghash.ZeroHash,
		Timestamp:            dag.SelectedTipHeader().Timestamp,
		Bits:                 genesis.MsgBlock().Header.Bits,
	})
	beforeFinalityBlock.AddTransaction(fakeCoinbaseTx.MsgTx())
	_, _, err = dag.ProcessBlock(util.NewBlock(beforeFinalityBlock), blockdag.BFNoPoWCheck)
	if err == nil {
		t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
	}
	rErr, ok := err.(blockdag.RuleError)
	if ok {
		if rErr.ErrorCode != blockdag.ErrFinality {
			t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
		}
	} else {
		t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", err)
	}

	// Here we check that a block that doesn't have the last finality point in
	// its selected parent chain will get rejected.
	_, err = buildNodeToDag([]*daghash.Hash{altChainTip.Hash()})
	if err == nil {
		t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
	}
	rErr, ok = err.(blockdag.RuleError)
	if ok {
		if rErr.ErrorCode != blockdag.ErrFinality {
			t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
		}
	} else {
		t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", rErr)
	}
}

// TestFinalityInterval tests that the finality interval is
// smaller than wire.MaxInvPerMsg, so when a peer receives
// a getblocks message it should always be able to send
// all the necessary invs.
func TestFinalityInterval(t *testing.T) {
	if blockdag.FinalityInterval > wire.MaxInvPerMsg {
		t.Errorf("blockdag.FinalityInterval should be lower than or equal to wire.MaxInvPerMsg")
	}
}

// TestSubnetworkRegistry tests the full subnetwork registry flow
func TestSubnetworkRegistry(t *testing.T) {
	params := dagconfig.SimNetParams
	params.K = 1
	params.BlockCoinbaseMaturity = 0
	dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
		DAGParams: &params,
	})
	if err != nil {
		t.Fatalf("Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc()

	gasLimit := uint64(12345)
	subnetworkID, err := testtools.RegisterSubnetworkForTest(dag, &params, gasLimit)
	if err != nil {
		t.Fatalf("could not register network: %s", err)
	}
	limit, err := dag.SubnetworkStore.GasLimit(subnetworkID)
	if err != nil {
		t.Fatalf("could not retrieve gas limit: %s", err)
	}
	if limit != gasLimit {
		t.Fatalf("unexpected gas limit. want: %d, got: %d", gasLimit, limit)
	}
}

func TestChainedTransactions(t *testing.T) {
|
||||
params := dagconfig.SimNetParams
|
||||
params.BlockCoinbaseMaturity = 0
|
||||
// Create a new database and dag instance to run tests against.
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup dag instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
block1, err := mining.PrepareBlockForTest(dag, ¶ms, []*daghash.Hash{params.GenesisHash}, nil, false)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(block1), blockdag.BFNoPoWCheck)
|
||||
if err != nil {
|
||||
t.Fatalf("ProcessBlock: %v", err)
|
||||
}
|
||||
if delay != 0 {
|
||||
t.Fatalf("ProcessBlock: block1 " +
|
||||
"is too far in the future")
|
||||
}
|
||||
if isOrphan {
|
||||
t.Fatalf("ProcessBlock: block1 got unexpectedly orphaned")
|
||||
}
|
||||
cbTx := block1.Transactions[0]
|
||||
|
||||
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to build signature script: %s", err)
|
||||
}
|
||||
txIn := &wire.TxIn{
|
||||
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
}
|
||||
txOut := &wire.TxOut{
|
||||
ScriptPubKey: blockdag.OpTrueScript,
|
||||
Value: uint64(1),
|
||||
}
|
||||
tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
|
||||
|
||||
chainedTxIn := &wire.TxIn{
|
||||
PreviousOutpoint: wire.Outpoint{TxID: *tx.TxID(), Index: 0},
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
}
|
||||
|
||||
scriptPubKey, err := txscript.PayToScriptHashScript(blockdag.OpTrueScript)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to build public key script: %s", err)
|
||||
}
|
||||
chainedTxOut := &wire.TxOut{
|
||||
ScriptPubKey: scriptPubKey,
|
||||
Value: uint64(1),
|
||||
}
|
||||
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})
|
||||
|
||||
block2, err := mining.PrepareBlockForTest(dag, ¶ms, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
|
||||
//Checks that dag.ProcessBlock fails because we don't allow a transaction to spend another transaction from the same block
|
||||
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
|
||||
if err == nil {
|
||||
t.Errorf("ProcessBlock expected an error")
|
||||
} else if rErr, ok := err.(blockdag.RuleError); ok {
|
||||
if rErr.ErrorCode != blockdag.ErrMissingTxOut {
|
||||
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, rErr.ErrorCode)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
|
||||
}
|
||||
if delay != 0 {
|
||||
t.Fatalf("ProcessBlock: block2 " +
|
||||
"is too far in the future")
|
||||
}
|
||||
if isOrphan {
|
||||
t.Errorf("ProcessBlock: block2 got unexpectedly orphaned")
|
||||
}
|
||||
|
||||
nonChainedTxIn := &wire.TxIn{
|
||||
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
|
||||
SignatureScript: signatureScript,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
}
|
||||
nonChainedTxOut := &wire.TxOut{
|
||||
ScriptPubKey: scriptPubKey,
|
||||
Value: uint64(1),
|
||||
}
|
||||
nonChainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{nonChainedTxIn}, []*wire.TxOut{nonChainedTxOut})
|
||||
|
||||
block3, err := mining.PrepareBlockForTest(dag, ¶ms, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
|
||||
//Checks that dag.ProcessBlock doesn't fail because all of its transaction are dependant on transactions from previous blocks
|
||||
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(block3), blockdag.BFNoPoWCheck)
|
||||
if err != nil {
|
||||
t.Errorf("ProcessBlock: %v", err)
|
||||
}
|
||||
if delay != 0 {
|
||||
t.Fatalf("ProcessBlock: block3 " +
|
||||
"is too far in the future")
|
||||
}
|
||||
if isOrphan {
|
||||
t.Errorf("ProcessBlock: block3 got unexpectedly orphaned")
|
||||
}
|
||||
}

// TestGasLimit tests the gas limit rules
func TestGasLimit(t *testing.T) {
    params := dagconfig.SimNetParams
    params.K = 1
    params.BlockCoinbaseMaturity = 0
    dag, teardownFunc, err := blockdag.DAGSetup("TestGasLimit", blockdag.Config{
        DAGParams: &params,
    })
    if err != nil {
        t.Fatalf("Failed to setup DAG instance: %v", err)
    }
    defer teardownFunc()

    // First we prepare a subnetwork and a block with coinbase outputs to fund our tests
    gasLimit := uint64(12345)
    subnetworkID, err := testtools.RegisterSubnetworkForTest(dag, &params, gasLimit)
    if err != nil {
        t.Fatalf("could not register network: %s", err)
    }

    cbTxs := []*wire.MsgTx{}
    for i := 0; i < 4; i++ {
        fundsBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), nil, false)
        if err != nil {
            t.Fatalf("PrepareBlockForTest: %v", err)
        }
        isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(fundsBlock), blockdag.BFNoPoWCheck)
        if err != nil {
            t.Fatalf("ProcessBlock: %v", err)
        }
        if delay != 0 {
            t.Fatalf("ProcessBlock: the funds block " +
                "is too far in the future")
        }
        if isOrphan {
            t.Fatalf("ProcessBlock: fundsBlock got unexpectedly orphaned")
        }

        cbTxs = append(cbTxs, fundsBlock.Transactions[util.CoinbaseTransactionIndex])
    }

    signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
    if err != nil {
        t.Fatalf("Failed to build signature script: %s", err)
    }

    scriptPubKey, err := txscript.PayToScriptHashScript(blockdag.OpTrueScript)
    if err != nil {
        t.Fatalf("Failed to build public key script: %s", err)
    }

    tx1In := &wire.TxIn{
        PreviousOutpoint: *wire.NewOutpoint(cbTxs[0].TxID(), 0),
        Sequence:         wire.MaxTxInSequenceNum,
        SignatureScript:  signatureScript,
    }
    tx1Out := &wire.TxOut{
        Value:        cbTxs[0].TxOut[0].Value,
        ScriptPubKey: scriptPubKey,
    }
    tx1 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx1In}, []*wire.TxOut{tx1Out}, subnetworkID, 10000, []byte{})

    tx2In := &wire.TxIn{
        PreviousOutpoint: *wire.NewOutpoint(cbTxs[1].TxID(), 0),
        Sequence:         wire.MaxTxInSequenceNum,
        SignatureScript:  signatureScript,
    }
    tx2Out := &wire.TxOut{
        Value:        cbTxs[1].TxOut[0].Value,
        ScriptPubKey: scriptPubKey,
    }
    tx2 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx2In}, []*wire.TxOut{tx2Out}, subnetworkID, 10000, []byte{})

    // Here we check that we can't process a block that has transactions that exceed the gas limit
    overLimitBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true)
    if err != nil {
        t.Fatalf("PrepareBlockForTest: %v", err)
    }
    isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(overLimitBlock), blockdag.BFNoPoWCheck)
    if err == nil {
        t.Fatalf("ProcessBlock expected to have an error in block that exceeds gas limit")
    }
    rErr, ok := err.(blockdag.RuleError)
    if !ok {
        t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
    } else if rErr.ErrorCode != blockdag.ErrInvalidGas {
        t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
    }
    if delay != 0 {
        t.Fatalf("ProcessBlock: overLimitBlock " +
            "is too far in the future")
    }
    if isOrphan {
        t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphaned")
    }

    overflowGasTxIn := &wire.TxIn{
        PreviousOutpoint: *wire.NewOutpoint(cbTxs[2].TxID(), 0),
        Sequence:         wire.MaxTxInSequenceNum,
        SignatureScript:  signatureScript,
    }
    overflowGasTxOut := &wire.TxOut{
        Value:        cbTxs[2].TxOut[0].Value,
        ScriptPubKey: scriptPubKey,
    }
    overflowGasTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{overflowGasTxIn}, []*wire.TxOut{overflowGasTxOut},
        subnetworkID, math.MaxUint64, []byte{})

    // Here we check that we can't process a block whose transactions' total gas overflows uint64
    overflowGasBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true)
    if err != nil {
        t.Fatalf("PrepareBlockForTest: %v", err)
    }
    isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(overflowGasBlock), blockdag.BFNoPoWCheck)
    if err == nil {
        t.Fatalf("ProcessBlock expected to have an error")
    }
    rErr, ok = err.(blockdag.RuleError)
    if !ok {
        t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
    } else if rErr.ErrorCode != blockdag.ErrInvalidGas {
        t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
    }
    if isOrphan {
        t.Fatalf("ProcessBlock: overflowGasBlock got unexpectedly orphaned")
    }

    nonExistentSubnetwork := &subnetworkid.SubnetworkID{123}
    nonExistentSubnetworkTxIn := &wire.TxIn{
        PreviousOutpoint: *wire.NewOutpoint(cbTxs[3].TxID(), 0),
        Sequence:         wire.MaxTxInSequenceNum,
        SignatureScript:  signatureScript,
    }
    nonExistentSubnetworkTxOut := &wire.TxOut{
        Value:        cbTxs[3].TxOut[0].Value,
        ScriptPubKey: scriptPubKey,
    }
    nonExistentSubnetworkTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{nonExistentSubnetworkTxIn},
        []*wire.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{})

    nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
    if err != nil {
        t.Fatalf("PrepareBlockForTest: %v", err)
    }

    // Here we check that we can't process a block with a transaction from a non-existent subnetwork
    isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
    expectedErrStr := fmt.Sprintf("Error getting gas limit for subnetworkID '%s': subnetwork '%s' not found",
        nonExistentSubnetwork, nonExistentSubnetwork)
    if err == nil || err.Error() != expectedErrStr {
        t.Fatalf("ProcessBlock expected error \"%v\" but got \"%v\"", expectedErrStr, err)
    }

    // Here we check that we can process a block with a transaction that doesn't exceed the gas limit
    validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true)
    if err != nil {
        t.Fatalf("PrepareBlockForTest: %v", err)
    }
    isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(validBlock), blockdag.BFNoPoWCheck)
    if err != nil {
        t.Fatalf("ProcessBlock: %v", err)
    }
    if delay != 0 {
        t.Fatalf("ProcessBlock: validBlock " +
            "is too far in the future")
    }
    if isOrphan {
        t.Fatalf("ProcessBlock: validBlock got unexpectedly orphaned")
    }
}
@@ -1,318 +0,0 @@
// Copyright (c) 2016 The Decred developers
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag_test

import (
    "bytes"
    "fmt"
    "os"
    "path/filepath"
    "testing"

    "github.com/daglabs/btcd/blockdag"
    "github.com/daglabs/btcd/blockdag/fullblocktests"
    "github.com/daglabs/btcd/dagconfig"
    "github.com/daglabs/btcd/database"
    _ "github.com/daglabs/btcd/database/ffldb"
    "github.com/daglabs/btcd/txscript"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/daghash"
    "github.com/daglabs/btcd/wire"
)

const (
    // testDbType is the database backend type to use for the tests.
    testDbType = "ffldb"

    // testDbRoot is the root directory used to create all test databases.
    testDbRoot = "testdbs"

    // blockDataNet is the expected network in the test block data.
    blockDataNet = wire.MainNet
)

// fileExists returns whether or not the named file or directory exists.
func fileExists(name string) bool {
    if _, err := os.Stat(name); err != nil {
        if os.IsNotExist(err) {
            return false
        }
    }
    return true
}

// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
    supportedDrivers := database.SupportedDrivers()
    for _, driver := range supportedDrivers {
        if dbType == driver {
            return true
        }
    }

    return false
}

// DAGSetup is used to create a new db and DAG instance with the genesis
// block already inserted. In addition to the new DAG instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func(), error) {
    if !isSupportedDbType(testDbType) {
        return nil, nil, fmt.Errorf("unsupported db type %v", testDbType)
    }

    // Handle memory database specially since it doesn't need the disk
    // specific handling.
    var db database.DB
    var teardown func()
    if testDbType == "memdb" {
        ndb, err := database.Create(testDbType)
        if err != nil {
            return nil, nil, fmt.Errorf("error creating db: %v", err)
        }
        db = ndb

        // Setup a teardown function for cleaning up. This function is
        // returned to the caller to be invoked when it is done testing.
        teardown = func() {
            db.Close()
        }
    } else {
        // Create the root directory for test databases.
        if !fileExists(testDbRoot) {
            if err := os.MkdirAll(testDbRoot, 0700); err != nil {
                err := fmt.Errorf("unable to create test db "+
                    "root: %v", err)
                return nil, nil, err
            }
        }

        // Create a new database to store the accepted blocks into.
        dbPath := filepath.Join(testDbRoot, dbName)
        _ = os.RemoveAll(dbPath)
        ndb, err := database.Create(testDbType, dbPath, blockDataNet)
        if err != nil {
            return nil, nil, fmt.Errorf("error creating db: %v", err)
        }
        db = ndb

        // Setup a teardown function for cleaning up. This function is
        // returned to the caller to be invoked when it is done testing.
        teardown = func() {
            db.Close()
            os.RemoveAll(dbPath)
            os.RemoveAll(testDbRoot)
        }
    }

    // Copy the DAG params to ensure any modifications the tests do to
    // the DAG parameters do not affect the global instance.
    paramsCopy := *params

    // Create the main DAG instance.
    dag, err := blockdag.New(&blockdag.Config{
        DB:          db,
        DAGParams:   &paramsCopy,
        Checkpoints: nil,
        TimeSource:  blockdag.NewMedianTime(),
        SigCache:    txscript.NewSigCache(1000),
    })
    if err != nil {
        teardown()
        err := fmt.Errorf("failed to create DAG instance: %v", err)
        return nil, nil, err
    }
    return dag, teardown, nil
}
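A minimal usage sketch for DAGSetup follows (a hypothetical test, not part of the original diff): it sets up an isolated DAG, defers the teardown, and checks the post-setup state using only calls that appear elsewhere in these tests.

```go
func TestDAGSetupSketch(t *testing.T) {
    // "sketchdb" is a hypothetical database name for illustration.
    dag, teardownFunc, err := DAGSetup("sketchdb", &dagconfig.SimNetParams)
    if err != nil {
        t.Fatalf("Failed to setup DAG instance: %v", err)
    }
    // The teardown closes the database and removes the on-disk test dbs.
    defer teardownFunc()

    // The genesis block is already inserted, so the DAG starts with one tip.
    if len(dag.TipHashes()) != 1 {
        t.Fatalf("expected a single tip right after setup, got %d", len(dag.TipHashes()))
    }
}
```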
// TestFullBlocks ensures all tests generated by the fullblocktests package
// have the expected result when processed via ProcessBlock.
func TestFullBlocks(t *testing.T) {
    // TODO: (Stas) This test was disabled until we have implemented Phantom
    // Ticket: https://daglabs.atlassian.net/browse/DEV-60
    t.SkipNow()

    tests, err := fullblocktests.Generate(false)
    if err != nil {
        t.Fatalf("failed to generate tests: %v", err)
    }

    // Create a new database and DAG instance to run tests against.
    dag, teardownFunc, err := DAGSetup("fullblocktest",
        &dagconfig.RegressionNetParams)
    if err != nil {
        t.Errorf("Failed to setup DAG instance: %v", err)
        return
    }
    defer teardownFunc()

    // testAcceptedBlock attempts to process the block in the provided test
    // instance and ensures that it was accepted according to the flags
    // specified in the test.
    testAcceptedBlock := func(item fullblocktests.AcceptedBlock) {
        blockHeight := item.Height
        block := util.NewBlock(item.Block)
        block.SetChainHeight(blockHeight)
        t.Logf("Testing block %s (hash %s, height %d)",
            item.Name, block.Hash(), blockHeight)

        isOrphan, delay, err := dag.ProcessBlock(block,
            blockdag.BFNone)
        if err != nil {
            t.Fatalf("block %q (hash %s, height %d) should "+
                "have been accepted: %v", item.Name,
                block.Hash(), blockHeight, err)
        }

        if delay != item.Delay {
            t.Fatalf("block %q (hash %s, height %d) unexpected "+
                "delay -- got %v, want %v", item.Name,
                block.Hash(), blockHeight, delay,
                item.Delay)
        }

        if isOrphan != item.IsOrphan {
            t.Fatalf("block %q (hash %s, height %d) unexpected "+
                "orphan flag -- got %v, want %v", item.Name,
                block.Hash(), blockHeight, isOrphan,
                item.IsOrphan)
        }
    }

    // testRejectedBlock attempts to process the block in the provided test
    // instance and ensures that it was rejected with the reject code
    // specified in the test.
    testRejectedBlock := func(item fullblocktests.RejectedBlock) {
        blockHeight := item.Height
        block := util.NewBlock(item.Block)
        block.SetChainHeight(blockHeight)
        t.Logf("Testing block %s (hash %s, height %d)",
            item.Name, block.Hash(), blockHeight)

        _, _, err := dag.ProcessBlock(block, blockdag.BFNone)
        if err == nil {
            t.Fatalf("block %q (hash %s, height %d) should not "+
                "have been accepted", item.Name, block.Hash(),
                blockHeight)
        }

        // Ensure the error code is of the expected type and the reject
        // code matches the value specified in the test instance.
        rerr, ok := err.(blockdag.RuleError)
        if !ok {
            t.Fatalf("block %q (hash %s, height %d) returned "+
                "unexpected error type -- got %T, want "+
                "blockdag.RuleError", item.Name, block.Hash(),
                blockHeight, err)
        }
        if rerr.ErrorCode != item.RejectCode {
            t.Fatalf("block %q (hash %s, height %d) does not have "+
                "expected reject code -- got %v, want %v",
                item.Name, block.Hash(), blockHeight,
                rerr.ErrorCode, item.RejectCode)
        }
    }

    // testRejectedNonCanonicalBlock attempts to decode the block in the
    // provided test instance and ensures that it failed to decode with a
    // message error.
    testRejectedNonCanonicalBlock := func(item fullblocktests.RejectedNonCanonicalBlock) {
        headerLen := len(item.RawBlock)
        if headerLen > 80 {
            headerLen = 80
        }
        blockHash := daghash.DoubleHashH(item.RawBlock[0:headerLen])
        blockHeight := item.Height
        t.Logf("Testing block %s (hash %s, height %d)", item.Name,
            blockHash, blockHeight)

        // Ensure there is an error due to deserializing the block.
        var msgBlock wire.MsgBlock
        err := msgBlock.BtcDecode(bytes.NewReader(item.RawBlock), 0)
        if _, ok := err.(*wire.MessageError); !ok {
            t.Fatalf("block %q (hash %s, height %d) should have "+
                "failed to decode", item.Name, blockHash,
                blockHeight)
        }
    }

    // testOrphanOrRejectedBlock attempts to process the block in the
    // provided test instance and ensures that it was either accepted as an
    // orphan or rejected with a rule violation.
    testOrphanOrRejectedBlock := func(item fullblocktests.OrphanOrRejectedBlock) {
        blockHeight := item.Height
        block := util.NewBlock(item.Block)
        block.SetChainHeight(blockHeight)
        t.Logf("Testing block %s (hash %s, height %d)",
            item.Name, block.Hash(), blockHeight)

        isOrphan, delay, err := dag.ProcessBlock(block, blockdag.BFNone)
        if err != nil {
            // Ensure the error code is of the expected type.
            if _, ok := err.(blockdag.RuleError); !ok {
                t.Fatalf("block %q (hash %s, height %d) "+
                    "returned unexpected error type -- "+
                    "got %T, want blockdag.RuleError",
                    item.Name, block.Hash(), blockHeight,
                    err)
            }
        }

        if delay != 0 {
            t.Fatalf("block %q (hash %s, height %d) "+
                "is too far in the future",
                item.Name, block.Hash(), blockHeight)
        }

        if !isOrphan {
            t.Fatalf("block %q (hash %s, height %d) was accepted, "+
                "but is not considered an orphan", item.Name,
                block.Hash(), blockHeight)
        }
    }

    // testExpectedTip ensures the current tip of the DAG is the
    // block specified in the provided test instance.
    testExpectedTip := func(item fullblocktests.ExpectedTip) {
        blockHeight := item.Height
        block := util.NewBlock(item.Block)
        block.SetChainHeight(blockHeight)
        t.Logf("Testing tip for block %s (hash %s, height %d)",
            item.Name, block.Hash(), blockHeight)

        // Ensure hash and height match.
        if dag.SelectedTipHash() != item.Block.BlockHash() ||
            dag.ChainHeight() != blockHeight { // TODO: (Ori) the use of dag.ChainHeight() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation

            t.Fatalf("block %q (hash %s, height %d) should be "+
                "the current tip -- got (hash %s, height %d)",
                item.Name, block.Hash(), blockHeight, dag.SelectedTipHash(),
                dag.ChainHeight()) // TODO: (Ori) the use of dag.ChainHeight() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
        }
    }

    for testNum, test := range tests {
        for itemNum, item := range test {
            switch item := item.(type) {
            case fullblocktests.AcceptedBlock:
                testAcceptedBlock(item)
            case fullblocktests.RejectedBlock:
                testRejectedBlock(item)
            case fullblocktests.RejectedNonCanonicalBlock:
                testRejectedNonCanonicalBlock(item)
            case fullblocktests.OrphanOrRejectedBlock:
                testOrphanOrRejectedBlock(item)
            case fullblocktests.ExpectedTip:
                testExpectedTip(item)
            default:
                t.Fatalf("test #%d, item #%d is not one of "+
                    "the supported test instance types -- "+
                    "got type: %T", testNum, itemNum, item)
            }
        }
    }
}
@@ -1,29 +0,0 @@
fullblocktests
==============

[Build Status](https://travis-ci.org/btcsuite/btcd)
[ISC License](http://copyfree.org)
[GoDoc](http://godoc.org/github.com/daglabs/btcd/blockchain/fullblocktests)

Package fullblocktests provides a set of full block tests to be used for testing
the consensus validation rules. The tests are intended to be flexible enough to
allow both unit-style tests directly against the blockchain code as well as
integration-style tests over the peer-to-peer network. To achieve that goal,
each test contains additional information about the expected result; however,
that information can be ignored when doing comparison tests between two
independent versions over the peer-to-peer network. A minimal usage sketch
follows.

This package has intentionally been designed so it can be used as a standalone
package for any project needing to test its implementation against a full set
of blocks that exercise the consensus validation rules.
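For illustration, here is a hypothetical driver (not part of the original README) showing how a consumer might walk the generated tests; it assumes the standard `testing` and `wire` imports, and a `processBlock` callback supplied by the consumer:

```go
func runFullBlockTests(t *testing.T, processBlock func(*wire.MsgBlock) error) {
    tests, err := fullblocktests.Generate(false)
    if err != nil {
        t.Fatalf("failed to generate tests: %v", err)
    }
    for _, test := range tests {
        for _, item := range test {
            switch item := item.(type) {
            case fullblocktests.AcceptedBlock:
                // item.Block is expected to be accepted.
                if err := processBlock(item.Block); err != nil {
                    t.Fatalf("block %q should have been accepted: %v", item.Name, err)
                }
            case fullblocktests.RejectedBlock:
                // item.Block is expected to be rejected with item.RejectCode.
                if err := processBlock(item.Block); err == nil {
                    t.Fatalf("block %q should have been rejected", item.Name)
                }
            default:
                // Orphans, non-canonical blocks, and expected tips are
                // omitted from this sketch.
            }
        }
    }
}
```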
## Installation and Updating

```bash
$ go get -u github.com/daglabs/btcd/blockchain/fullblocktests
```

## License

Package fullblocktests is licensed under the [copyfree](http://copyfree.org) ISC
License.
@@ -1,20 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package fullblocktests provides a set of block consensus validation tests.

All of the generated test instances involve full blocks that are to be used for
testing the consensus validation rules. The tests are intended to be flexible
enough to allow both unit-style tests directly against the blockchain code as
well as integration-style tests over the peer-to-peer network. To achieve that
goal, each test contains additional information about the expected result;
however, that information can be ignored when doing comparison tests between
two independent versions over the peer-to-peer network.

This package has intentionally been designed so it can be used as a standalone
package for any project needing to test its implementation against a full set
of blocks that exercise the consensus validation rules.
*/
package fullblocktests
File diff suppressed because it is too large
@@ -1,143 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package fullblocktests

import (
    "encoding/hex"
    "math"
    "math/big"
    "time"

    "github.com/daglabs/btcd/util/hdkeychain"
    "github.com/daglabs/btcd/util/subnetworkid"

    "github.com/daglabs/btcd/dagconfig"
    "github.com/daglabs/btcd/util/daghash"
    "github.com/daglabs/btcd/wire"
)

// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
    hash, err := daghash.NewHashFromStr(hexStr)
    if err != nil {
        panic(err)
    }
    return hash
}

// newTxIDFromStr converts the passed big-endian hex string into a
// daghash.TxID. It only differs from the one available in daghash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newTxIDFromStr(hexStr string) *daghash.TxID {
    txID, err := daghash.NewTxIDFromStr(hexStr)
    if err != nil {
        panic(err)
    }
    return txID
}

// fromHex converts the passed hex string into a byte slice and will panic if
// there is an error. This is only provided for the hard-coded constants so
// errors in the source code can be detected. It will only (and must only) be
// called for initialization purposes.
func fromHex(s string) []byte {
    r, err := hex.DecodeString(s)
    if err != nil {
        panic("invalid hex in source file: " + s)
    }
    return r
}

var (
    // bigOne is 1 represented as a big.Int. It is defined here to avoid
    // the overhead of creating it multiple times.
    bigOne = big.NewInt(1)

    // regressionPowLimit is the highest proof of work value a block
    // can have for the regression test network. It is the value 2^255 - 1.
    regressionPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne)

    // regTestGenesisBlock defines the genesis block of the block DAG which serves
    // as the public transaction ledger for the regression test network.
    regTestGenesisBlock = wire.MsgBlock{
        Header: wire.BlockHeader{
            Version:        1,
            ParentHashes:   []*daghash.Hash{},
            HashMerkleRoot: newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"),
            Timestamp:      time.Unix(0x5b28c636, 0), // 2018-06-19 09:00:38 +0000 UTC
            Bits:           0x207fffff,               // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
            Nonce:          1,
        },
        Transactions: []*wire.MsgTx{{
            Version: 1,
            TxIn: []*wire.TxIn{{
                PreviousOutpoint: wire.Outpoint{
                    TxID:  daghash.TxID{},
                    Index: 0xffffffff,
                },
                SignatureScript: fromHex("04ffff001d010445" +
                    "5468652054696d65732030332f4a616e2f" +
                    "32303039204368616e63656c6c6f72206f" +
                    "6e206272696e6b206f66207365636f6e64" +
                    "206261696c6f757420666f72206261686b73"),
                Sequence: math.MaxUint64,
            }},
            TxOut: []*wire.TxOut{{
                Value: 0,
                ScriptPubKey: fromHex("4104678afdb0fe5548271967f1" +
                    "a67130b7105cd6a828e03909a67962e0ea1f" +
                    "61deb649f6bc3f4cef38c4f35504e51ec138" +
                    "c4f35504e51ec112de5c384df7ba0b8d578a" +
                    "4c702b6bf11d5fac"),
            }},
            LockTime:     0,
            SubnetworkID: *subnetworkid.SubnetworkIDNative,
        }},
    }
)

// regressionNetParams defines the network parameters for the regression test
// network.
//
// NOTE: The test generator intentionally does not use the existing definitions
// in the dagconfig package since the intent is to be able to generate known
// good tests which exercise that code. Using the dagconfig parameters would
// allow them to change out from under the tests, potentially invalidating them.
var regressionNetParams = &dagconfig.Params{
    Name:        "regtest",
    Net:         wire.RegTest,
    DefaultPort: "18444",

    // DAG parameters
    GenesisBlock:                   &regTestGenesisBlock,
    GenesisHash:                    newHashFromStr("5bec7567af40504e0994db3b573c186fffcc4edefe096ff2e58d00523bd7e8a6"),
    PowMax:                         regressionPowLimit,
    BlockCoinbaseMaturity:          100,
    SubsidyReductionInterval:       150,
    TargetTimePerBlock:             time.Second * 10, // 10 seconds
    DifficultyAdjustmentWindowSize: 2640,
    TimestampDeviationTolerance:    132,
    GenerateSupported:              true,

    // Checkpoints ordered from oldest to newest.
    Checkpoints: nil,

    // Mempool parameters
    RelayNonStdTxs: true,

    // Address encoding magics
    PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)

    // BIP32 hierarchical deterministic extended key magics
    HDKeyIDPair: hdkeychain.HDKeyPairRegressionNet,

    // BIP44 coin type used in the hierarchical deterministic path for
    // address generation.
    HDCoinType: 1,
}
@@ -1,225 +0,0 @@
package indexers

import (
    "bytes"
    "encoding/gob"
    "fmt"

    "github.com/daglabs/btcd/blockdag"
    "github.com/daglabs/btcd/database"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/daghash"
    "github.com/daglabs/btcd/wire"
)

const (
    // acceptanceIndexName is the human-readable name for the index.
    acceptanceIndexName = "acceptance index"
)

var (
    // acceptanceIndexKey is the key of the acceptance index and the db bucket used
    // to house it.
    acceptanceIndexKey = []byte("acceptanceidx")
)

// AcceptanceIndex implements a txAcceptanceData-by-block-hash index. That is to say,
// it stores a mapping between a block's hash and the set of transactions that the
// block accepts from among its blue blocks.
type AcceptanceIndex struct {
    db  database.DB
    dag *blockdag.BlockDAG
}

// Ensure the AcceptanceIndex type implements the Indexer interface.
var _ Indexer = (*AcceptanceIndex)(nil)

// NewAcceptanceIndex returns a new instance of an indexer that is used to create a
// mapping between block hashes and their txAcceptanceData.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockdag package. This allows the index to be
// seamlessly maintained along with the DAG.
func NewAcceptanceIndex() *AcceptanceIndex {
    return &AcceptanceIndex{}
}

// DropAcceptanceIndex drops the acceptance index from the provided database if it
// exists.
func DropAcceptanceIndex(db database.DB, interrupt <-chan struct{}) error {
    return dropIndex(db, acceptanceIndexKey, acceptanceIndexName, interrupt)
}

// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Key() []byte {
    return acceptanceIndexKey
}

// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Name() string {
    return acceptanceIndexName
}

// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the
// acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Create(dbTx database.Tx) error {
    _, err := dbTx.Metadata().CreateBucket(acceptanceIndexKey)
    return err
}

// Init initializes the hash-based acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
    idx.db = db
    idx.dag = dag
    return nil
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) ConnectBlock(dbTx database.Tx, _ *util.Block, blockID uint64, _ *blockdag.BlockDAG,
    txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
    return dbPutTxsAcceptanceData(dbTx, blockID, txsAcceptanceData)
}

// TxsAcceptanceData returns the acceptance data of all the transactions that
// were accepted by the block with hash blockHash.
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
    var txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData
    err := idx.db.View(func(dbTx database.Tx) error {
        var err error
        txsAcceptanceData, err = dbFetchTxsAcceptanceDataByHash(dbTx, blockHash)
        return err
    })
    if err != nil {
        return nil, err
    }
    return txsAcceptanceData, nil
}

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
    for blockID := currentBlockID + 1; blockID <= lastKnownBlockID; blockID++ {
        hash, err := blockdag.DBFetchBlockHashByID(dbTx, blockID)
        if err != nil {
            return err
        }
        txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(hash)
        if err != nil {
            return err
        }
        err = idx.ConnectBlock(dbTx, nil, blockID, nil, txAcceptanceData, nil)
        if err != nil {
            return err
        }
    }
    return nil
}

func dbPutTxsAcceptanceData(dbTx database.Tx, blockID uint64,
    txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
    serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(txsAcceptanceData)
    if err != nil {
        return err
    }

    bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
    return bucket.Put(blockdag.SerializeBlockID(blockID), serializedTxsAcceptanceData)
}

func dbFetchTxsAcceptanceDataByHash(dbTx database.Tx,
    hash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {

    blockID, err := blockdag.DBFetchBlockIDByHash(dbTx, hash)
    if err != nil {
        return nil, err
    }

    return dbFetchTxsAcceptanceDataByID(dbTx, blockID)
}

func dbFetchTxsAcceptanceDataByID(dbTx database.Tx,
    blockID uint64) (blockdag.MultiBlockTxsAcceptanceData, error) {
    serializedBlockID := blockdag.SerializeBlockID(blockID)
    bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
    serializedTxsAcceptanceData := bucket.Get(serializedBlockID)
    if serializedTxsAcceptanceData == nil {
        return nil, fmt.Errorf("no entry in the acceptance index for block id %d", blockID)
    }

    return deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
}

type serializableTxAcceptanceData struct {
    MsgTx      wire.MsgTx
    IsAccepted bool
}

type serializableBlockTxsAcceptanceData []serializableTxAcceptanceData

type serializableMultiBlockTxsAcceptanceData map[daghash.Hash]serializableBlockTxsAcceptanceData

func serializeMultiBlockTxsAcceptanceData(
    txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) ([]byte, error) {
    // Convert MultiBlockTxsAcceptanceData to a serializable format
    serializableData := make(serializableMultiBlockTxsAcceptanceData, len(txsAcceptanceData))
    for hash, blockTxsAcceptanceData := range txsAcceptanceData {
        serializableBlockData := make(serializableBlockTxsAcceptanceData, len(blockTxsAcceptanceData))
        for i, txAcceptanceData := range blockTxsAcceptanceData {
            serializableBlockData[i] = serializableTxAcceptanceData{
                MsgTx:      *txAcceptanceData.Tx.MsgTx(),
                IsAccepted: txAcceptanceData.IsAccepted,
            }
        }
        serializableData[hash] = serializableBlockData
    }

    // Serialize the data with gob
    var buffer bytes.Buffer
    encoder := gob.NewEncoder(&buffer)
    err := encoder.Encode(serializableData)
    if err != nil {
        return nil, err
    }
    return buffer.Bytes(), nil
}

func deserializeMultiBlockTxsAcceptanceData(
    serializedTxsAcceptanceData []byte) (blockdag.MultiBlockTxsAcceptanceData, error) {
    // Deserialize the data with gob
    buffer := bytes.NewBuffer(serializedTxsAcceptanceData)
    decoder := gob.NewDecoder(buffer)
    var serializedData serializableMultiBlockTxsAcceptanceData
    err := decoder.Decode(&serializedData)
    if err != nil {
        return nil, err
    }

    // Convert the serializable format to MultiBlockTxsAcceptanceData
    txsAcceptanceData := make(blockdag.MultiBlockTxsAcceptanceData, len(serializedData))
    for hash, serializableBlockData := range serializedData {
        blockTxsAcceptanceData := make(blockdag.BlockTxsAcceptanceData, len(serializableBlockData))
        for i, txData := range serializableBlockData {
            msgTx := txData.MsgTx
            blockTxsAcceptanceData[i] = blockdag.TxAcceptanceData{
                Tx:         util.NewTx(&msgTx),
                IsAccepted: txData.IsAccepted,
            }
        }
        txsAcceptanceData[hash] = blockTxsAcceptanceData
    }

    return txsAcceptanceData, nil
}
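A minimal sketch of the gob round-trip implemented above, as a hypothetical in-package helper (not part of the original file); it only uses the two unexported functions defined in this file:

```go
// acceptanceDataRoundTrip encodes an acceptance-data map with gob and decodes
// it back. Because gob preserves the map and slice structure, the result
// should mirror the input.
func acceptanceDataRoundTrip(data blockdag.MultiBlockTxsAcceptanceData) (blockdag.MultiBlockTxsAcceptanceData, error) {
    serialized, err := serializeMultiBlockTxsAcceptanceData(data)
    if err != nil {
        return nil, err
    }
    return deserializeMultiBlockTxsAcceptanceData(serialized)
}
```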
@@ -1,924 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
    "errors"
    "fmt"
    "sync"

    "github.com/daglabs/btcd/blockdag"
    "github.com/daglabs/btcd/dagconfig"
    "github.com/daglabs/btcd/database"
    "github.com/daglabs/btcd/txscript"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/daghash"
    "github.com/daglabs/btcd/wire"
)

const (
    // addrIndexName is the human-readable name for the index.
    addrIndexName = "address index"

    // level0MaxEntries is the maximum number of transactions that are
    // stored in level 0 of an address index entry. Subsequent levels store
    // 2^n * level0MaxEntries entries, or in other words, double the maximum
    // of the previous level.
    level0MaxEntries = 8

    // addrKeySize is the number of bytes an address key consumes in the
    // index. It consists of 1 byte address type + 20 bytes hash160.
    addrKeySize = 1 + 20

    // levelKeySize is the number of bytes a level key in the address index
    // consumes. It consists of the address key + 1 byte for the level.
    levelKeySize = addrKeySize + 1

    // levelOffset is the offset in the level key which identifies the level.
    levelOffset = levelKeySize - 1

    // addrKeyTypePubKeyHash is the address type in an address key which
    // represents both a pay-to-pubkey-hash and a pay-to-pubkey address.
    // This is done because both are identical for the purposes of the
    // address index.
    addrKeyTypePubKeyHash = 0

    // addrKeyTypeScriptHash is the address type in an address key which
    // represents a pay-to-script-hash address. This is necessary because
    // the hash of a pubkey address might be the same as that of a script
    // hash.
    addrKeyTypeScriptHash = 1

    // txEntrySize is the size of a transaction entry. It consists of 8
    // bytes block id + 4 bytes offset + 4 bytes length.
    txEntrySize = 8 + 4 + 4
)

var (
    // addrIndexKey is the key of the address index and the db bucket used
    // to house it.
    addrIndexKey = []byte("txbyaddridx")

    // errUnsupportedAddressType is an error that is used to signal an
    // unsupported address type has been used.
    errUnsupportedAddressType = errors.New("address type is not supported " +
        "by the address index")
)

// -----------------------------------------------------------------------------
// The address index maps addresses referenced in the blockchain to a list of
// all the transactions involving that address. Transactions are stored
// according to their order of appearance in the blockchain. That is to say,
// first by block height and then by offset inside the block. It is also
// important to note that this implementation requires the transaction index
// since it is needed in order to catch up old blocks due to the fact the spent
// outputs will already be pruned from the utxo set.
//
// The approach used to store the index is similar to a log-structured merge
// tree (LSM tree) and is thus similar to how leveldb works internally.
//
// Every address consists of one or more entries identified by a level starting
// from 0 where each level holds a maximum number of entries such that each
// subsequent level holds double the maximum of the previous one. In equation
// form, the number of entries each level holds is 2^n * firstLevelMaxSize.
//
// New transactions are appended to level 0 until it becomes full at which point
// the entire level 0 entry is appended to the level 1 entry and level 0 is
// cleared. This process continues until level 1 becomes full at which point it
// will be appended to level 2 and cleared and so on.
//
// The result of this is the lower levels contain newer transactions and the
// transactions within each level are ordered from oldest to newest.
//
// The intent of this approach is to provide a balance between space efficiency
// and indexing cost. Storing one entry per transaction would have the lowest
// indexing cost, but would waste a lot of space because the same address hash
// would be duplicated for every transaction key. On the other hand, storing a
// single entry with all transactions would be the most space efficient, but
// would cause indexing cost to grow quadratically with the number of
// transactions involving the same address. The approach used here provides
// logarithmic insertion and retrieval.
//
// The serialized key format is:
//
//   <addr type><addr hash><level>
//
//   Field           Type      Size
//   addr type       uint8     1 byte
//   addr hash       hash160   20 bytes
//   level           uint8     1 byte
//   -----
//   Total: 22 bytes
//
// The serialized value format is:
//
//   [<block id><start offset><tx length>,...]
//
//   Field           Type      Size
//   block id        uint64    8 bytes
//   start offset    uint32    4 bytes
//   tx length       uint32    4 bytes
//   -----
//   Total: 16 bytes per indexed tx
// -----------------------------------------------------------------------------
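To make the 22-byte key layout concrete, here is an illustrative helper (hypothetical, not in the original file) that builds a level key from a hash160 using the constants and the keyForLevel function defined in this file:

```go
// exampleLevelKey builds the documented <addr type><addr hash><level> key
// for a pay-to-pubkey-hash address hash.
func exampleLevelKey(hash160 [20]byte, level uint8) [levelKeySize]byte {
    var addrKey [addrKeySize]byte
    addrKey[0] = addrKeyTypePubKeyHash // 1-byte address type
    copy(addrKey[1:], hash160[:])      // 20-byte hash160
    // keyForLevel appends the 1-byte level, yielding 22 bytes in total.
    return keyForLevel(addrKey, level)
}
```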
// fetchBlockHashFunc defines a callback function to use in order to convert a
// serialized block ID to an associated block hash.
type fetchBlockHashFunc func(serializedID []byte) (*daghash.Hash, error)

// serializeAddrIndexEntry serializes the provided block id and transaction
// location according to the format described in detail above.
func serializeAddrIndexEntry(blockID uint64, txLoc wire.TxLoc) []byte {
    // Serialize the entry.
    serialized := make([]byte, 16)
    byteOrder.PutUint64(serialized, blockID)
    byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxStart))
    byteOrder.PutUint32(serialized[12:], uint32(txLoc.TxLen))
    return serialized
}

// deserializeAddrIndexEntry decodes the passed serialized byte slice into the
// provided region struct according to the format described in detail above and
// uses the passed block hash fetching function in order to convert the block ID
// to the associated block hash.
func deserializeAddrIndexEntry(serialized []byte, region *database.BlockRegion, fetchBlockHash fetchBlockHashFunc) error {
    // Ensure there are enough bytes to decode.
    if len(serialized) < txEntrySize {
        return errDeserialize("unexpected end of data")
    }

    hash, err := fetchBlockHash(serialized[0:8])
    if err != nil {
        return err
    }
    region.Hash = hash
    region.Offset = byteOrder.Uint32(serialized[8:12])
    region.Len = byteOrder.Uint32(serialized[12:16])
    return nil
}

// keyForLevel returns the key for a specific address and level in the address
// index entry.
func keyForLevel(addrKey [addrKeySize]byte, level uint8) [levelKeySize]byte {
    var key [levelKeySize]byte
    copy(key[:], addrKey[:])
    key[levelOffset] = level
    return key
}

// dbPutAddrIndexEntry updates the address index to include the provided entry
// according to the level-based scheme described in detail above.
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint64, txLoc wire.TxLoc) error {
    // Start with level 0 and its initial max number of entries.
    curLevel := uint8(0)
    maxLevelBytes := level0MaxEntries * txEntrySize

    // Simply append the new entry to level 0 and return now when it will
    // fit. This is the most common path.
    newData := serializeAddrIndexEntry(blockID, txLoc)
    level0Key := keyForLevel(addrKey, 0)
    level0Data := bucket.Get(level0Key[:])
    if len(level0Data)+len(newData) <= maxLevelBytes {
        mergedData := newData
        if len(level0Data) > 0 {
            mergedData = make([]byte, len(level0Data)+len(newData))
            copy(mergedData, level0Data)
            copy(mergedData[len(level0Data):], newData)
        }
        return bucket.Put(level0Key[:], mergedData)
    }

    // At this point, level 0 is full, so merge each level into higher
    // levels as many times as needed to free up level 0.
    prevLevelData := level0Data
    for {
        // Each new level holds twice as much as the previous one.
        curLevel++
        maxLevelBytes *= 2

        // Move to the next level as long as the current level is full.
        curLevelKey := keyForLevel(addrKey, curLevel)
        curLevelData := bucket.Get(curLevelKey[:])
        if len(curLevelData) == maxLevelBytes {
            prevLevelData = curLevelData
            continue
        }

        // The current level has room for the data in the previous one,
        // so merge the data from the previous level into it.
        mergedData := prevLevelData
        if len(curLevelData) > 0 {
            mergedData = make([]byte, len(curLevelData)+
                len(prevLevelData))
            copy(mergedData, curLevelData)
            copy(mergedData[len(curLevelData):], prevLevelData)
        }
        err := bucket.Put(curLevelKey[:], mergedData)
        if err != nil {
            return err
        }

        // Move all of the levels before the previous one up a level.
        for mergeLevel := curLevel - 1; mergeLevel > 0; mergeLevel-- {
            mergeLevelKey := keyForLevel(addrKey, mergeLevel)
            prevLevelKey := keyForLevel(addrKey, mergeLevel-1)
            prevData := bucket.Get(prevLevelKey[:])
            err := bucket.Put(mergeLevelKey[:], prevData)
            if err != nil {
                return err
            }
        }
        break
    }

    // Finally, insert the new entry into level 0 now that it is empty.
    return bucket.Put(level0Key[:], newData)
}

// dbFetchAddrIndexEntries returns block regions for transactions referenced by
// the given address key and the number of entries skipped, since the latter
// could be lower than requested when there are fewer total entries than the
// requested number of entries to skip.
func dbFetchAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, numToSkip, numRequested uint32, reverse bool, fetchBlockHash fetchBlockHashFunc) ([]database.BlockRegion, uint32, error) {
    // When the reverse flag is not set, all levels need to be fetched
    // because numToSkip and numRequested are counted from the oldest
    // transactions (highest level) and thus the total count is needed.
    // However, when the reverse flag is set, only enough records to satisfy
    // the requested amount are needed.
    var level uint8
    var serialized []byte
    for !reverse || len(serialized) < int(numToSkip+numRequested)*txEntrySize {
        curLevelKey := keyForLevel(addrKey, level)
        levelData := bucket.Get(curLevelKey[:])
        if levelData == nil {
            // Stop when there are no more levels.
            break
        }

        // Higher levels contain older transactions, so prepend them.
        prepended := make([]byte, len(serialized)+len(levelData))
        copy(prepended, levelData)
        copy(prepended[len(levelData):], serialized)
        serialized = prepended
        level++
    }

    // When the requested number of entries to skip is larger than the
    // number available, skip them all and return now with the actual number
    // skipped.
    numEntries := uint32(len(serialized) / txEntrySize)
    if numToSkip >= numEntries {
        return nil, numEntries, nil
    }

    // Nothing more to do when there are no requested entries.
    if numRequested == 0 {
        return nil, numToSkip, nil
    }

    // Limit the number to load based on the number of available entries,
    // the number to skip, and the number requested.
    numToLoad := numEntries - numToSkip
    if numToLoad > numRequested {
        numToLoad = numRequested
    }

    // Start the offset after all skipped entries and load the calculated
    // number.
    results := make([]database.BlockRegion, numToLoad)
    for i := uint32(0); i < numToLoad; i++ {
        // Calculate the read offset according to the reverse flag.
        var offset uint32
        if reverse {
            offset = (numEntries - numToSkip - i - 1) * txEntrySize
        } else {
            offset = (numToSkip + i) * txEntrySize
        }

        // Deserialize and populate the result.
        err := deserializeAddrIndexEntry(serialized[offset:],
            &results[i], fetchBlockHash)
        if err != nil {
            // Ensure any deserialization errors are returned as
            // database corruption errors.
            if isDeserializeErr(err) {
                err = database.Error{
                    ErrorCode: database.ErrCorruption,
                    Description: fmt.Sprintf("failed to "+
                        "deserialize address index "+
                        "for key %x: %s", addrKey, err),
                }
            }

            return nil, 0, err
        }
    }

    return results, numToSkip, nil
}

// minEntriesToReachLevel returns the minimum number of entries that are
// required to reach the given address index level.
func minEntriesToReachLevel(level uint8) int {
    maxEntriesForLevel := level0MaxEntries
    minRequired := 1
    for l := uint8(1); l <= level; l++ {
        minRequired += maxEntriesForLevel
        maxEntriesForLevel *= 2
    }
    return minRequired
}

// maxEntriesForLevel returns the maximum number of entries allowed for the
// given address index level.
func maxEntriesForLevel(level uint8) int {
    numEntries := level0MaxEntries
    for l := level; l > 0; l-- {
        numEntries *= 2
    }
    return numEntries
}
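A worked example of the two helpers above, written as a hypothetical test (not in the original file; assumes the standard `testing` import): with level0MaxEntries = 8 the level capacities double per level, so reaching level 2 requires filling level 0 (8 entries) and level 1 (16 entries) plus one more entry.

```go
func TestLevelCapacitiesSketch(t *testing.T) {
    if got := maxEntriesForLevel(0); got != 8 {
        t.Fatalf("level 0 capacity: got %d, want 8", got)
    }
    if got := maxEntriesForLevel(2); got != 32 { // 8 * 2 * 2
        t.Fatalf("level 2 capacity: got %d, want 32", got)
    }
    if got := minEntriesToReachLevel(2); got != 25 { // 1 + 8 + 16
        t.Fatalf("min entries to reach level 2: got %d, want 25", got)
    }
}
```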
// dbRemoveAddrIndexEntries removes the specified number of entries from from
|
||||
// the address index for the provided key. An assertion error will be returned
|
||||
// if the count exceeds the total number of entries in the index.
|
||||
func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, count int) error {
|
||||
// Nothing to do if no entries are being deleted.
|
||||
if count <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Make use of a local map to track pending updates and define a closure
|
||||
// to apply it to the database. This is done in order to reduce the
|
||||
// number of database reads and because there is more than one exit
|
||||
// path that needs to apply the updates.
|
||||
pendingUpdates := make(map[uint8][]byte)
|
||||
applyPending := func() error {
|
||||
for level, data := range pendingUpdates {
|
||||
curLevelKey := keyForLevel(addrKey, level)
|
||||
if len(data) == 0 {
|
||||
err := bucket.Delete(curLevelKey[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
err := bucket.Put(curLevelKey[:], data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Loop forwards through the levels while removing entries until the
|
||||
// specified number has been removed. This will potentially result in
|
||||
// entirely empty lower levels which will be backfilled below.
|
||||
var highestLoadedLevel uint8
|
||||
numRemaining := count
|
||||
for level := uint8(0); numRemaining > 0; level++ {
|
||||
// Load the data for the level from the database.
|
||||
curLevelKey := keyForLevel(addrKey, level)
|
||||
curLevelData := bucket.Get(curLevelKey[:])
|
||||
if len(curLevelData) == 0 && numRemaining > 0 {
|
||||
return AssertError(fmt.Sprintf("dbRemoveAddrIndexEntries "+
|
||||
"not enough entries for address key %x to "+
|
||||
"delete %d entries", addrKey, count))
|
||||
}
|
||||
pendingUpdates[level] = curLevelData
|
||||
highestLoadedLevel = level
|
||||
|
||||
// Delete the entire level as needed.
|
||||
numEntries := len(curLevelData) / txEntrySize
|
||||
if numRemaining >= numEntries {
|
||||
pendingUpdates[level] = nil
|
||||
numRemaining -= numEntries
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove remaining entries to delete from the level.
|
||||
offsetEnd := len(curLevelData) - (numRemaining * txEntrySize)
|
||||
pendingUpdates[level] = curLevelData[:offsetEnd]
|
||||
break
|
}

    // When level 0 still has entries after the removals, there is nothing
    // left to do other than updating the database.
    if len(pendingUpdates[0]) != 0 {
        return applyPending()
    }

    // At this point there are one or more empty levels before the current
    // level which need to be backfilled and the current level might have
    // had some entries deleted from it as well. Since all levels after
    // level 0 are required to either be empty, half full, or completely
    // full, the current level must be adjusted accordingly by backfilling
    // each previous level in a way which satisfies the requirements. Any
    // entries that are left are assigned to level 0 after the loop as they
    // are guaranteed to fit by the logic in the loop. In other words, this
    // effectively squashes all remaining entries in the current level into
    // the lowest possible levels while following the level rules.
    //
    // Note that the level after the current level might also have entries
    // and gaps are not allowed, so this also keeps track of the lowest
    // empty level so the code below knows how far to backfill in case it is
    // required.
    lowestEmptyLevel := uint8(255)
    curLevelData := pendingUpdates[highestLoadedLevel]
    curLevelMaxEntries := maxEntriesForLevel(highestLoadedLevel)
    for level := highestLoadedLevel; level > 0; level-- {
        // When there are not enough entries left in the current level
        // for the number that would be required to reach it, clear the
        // current level, which effectively moves them all up to the
        // previous level on the next iteration. Otherwise, there are
        // sufficient entries, so update the current level to contain
        // as many entries as possible while still leaving enough
        // remaining entries required to reach the level.
        numEntries := len(curLevelData) / txEntrySize
        prevLevelMaxEntries := curLevelMaxEntries / 2
        minPrevRequired := minEntriesToReachLevel(level - 1)
        if numEntries < prevLevelMaxEntries+minPrevRequired {
            lowestEmptyLevel = level
            pendingUpdates[level] = nil
        } else {
            // This level can only be completely full or half full,
            // so choose the appropriate offset to ensure enough
            // entries remain to reach the level.
            var offset int
            if numEntries-curLevelMaxEntries >= minPrevRequired {
                offset = curLevelMaxEntries * txEntrySize
            } else {
                offset = prevLevelMaxEntries * txEntrySize
            }
            pendingUpdates[level] = curLevelData[:offset]
            curLevelData = curLevelData[offset:]
        }

        curLevelMaxEntries = prevLevelMaxEntries
    }
    pendingUpdates[0] = curLevelData
    if len(curLevelData) == 0 {
        lowestEmptyLevel = 0
    }

    // When the highest loaded level is empty, it's possible the level after
    // it still has data and thus that data needs to be backfilled as well.
    for len(pendingUpdates[highestLoadedLevel]) == 0 {
        // When the next level is empty too, there is no data left to
        // continue backfilling, so there is nothing left to do.
        // Otherwise, populate the pending updates map with the newly
        // loaded data and update the highest loaded level accordingly.
        level := highestLoadedLevel + 1
        curLevelKey := keyForLevel(addrKey, level)
        levelData := bucket.Get(curLevelKey[:])
        if len(levelData) == 0 {
            break
        }
        pendingUpdates[level] = levelData
        highestLoadedLevel = level

        // At this point the highest level is not empty, but it might
        // be half full. When that is the case, move it up a level to
        // simplify the code below which backfills all lower levels that
        // are still empty. This also means the current level will be
        // empty, so the loop will perform another iteration to
        // potentially backfill this level with data from the next one.
        curLevelMaxEntries := maxEntriesForLevel(level)
        if len(levelData)/txEntrySize != curLevelMaxEntries {
            pendingUpdates[level] = nil
            pendingUpdates[level-1] = levelData
            level--
            curLevelMaxEntries /= 2
        }

        // Backfill all lower levels that are still empty by iteratively
        // halving the data until the lowest empty level is filled.
        for level > lowestEmptyLevel {
            offset := (curLevelMaxEntries / 2) * txEntrySize
            pendingUpdates[level] = levelData[:offset]
            levelData = levelData[offset:]
            pendingUpdates[level-1] = levelData
            level--
            curLevelMaxEntries /= 2
        }

        // The lowest possible empty level is now the highest loaded
        // level.
        lowestEmptyLevel = highestLoadedLevel
    }

    // Apply the pending updates.
    return applyPending()
}
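
For reference, the capacity rules the comments above describe can be made concrete. A minimal illustrative sketch (editorial, not from the original source; the real maxEntriesForLevel and minEntriesToReachLevel live elsewhere in this file, and these bodies are inferred from the comments and from the thresholds exercised in TestAddrIndexLevels further below):

// exampleMaxEntriesForLevel: each level doubles the capacity of the one
// before it, starting from level0MaxEntries at level 0.
func exampleMaxEntriesForLevel(level uint8) int {
    return level0MaxEntries * (1 << level)
}

// exampleMinEntriesToReachLevel: reaching a level takes one entry more than
// the combined capacity of all lower levels (e.g. level0MaxEntries*3 + 1
// entries yield a half-full level 2, matching the test cases).
func exampleMinEntriesToReachLevel(level uint8) int {
    minRequired := 1
    for l := uint8(0); l < level; l++ {
        minRequired += level0MaxEntries * (1 << l)
    }
    return minRequired
}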

// addrToKey converts known address types to an addrindex key. An error is
// returned for unsupported types.
func addrToKey(addr util.Address) ([addrKeySize]byte, error) {
    switch addr := addr.(type) {
    case *util.AddressPubKeyHash:
        var result [addrKeySize]byte
        result[0] = addrKeyTypePubKeyHash
        copy(result[1:], addr.Hash160()[:])
        return result, nil

    case *util.AddressScriptHash:
        var result [addrKeySize]byte
        result[0] = addrKeyTypeScriptHash
        copy(result[1:], addr.Hash160()[:])
        return result, nil
    }

    return [addrKeySize]byte{}, errUnsupportedAddressType
}
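
A usage sketch (editorial, not from the original source): the resulting key is one type byte followed by the 20-byte Hash160 of the address, so keys for different address types can never collide.

// exampleAddrKey is hypothetical illustration code: it prints the layout of
// an addrindex key for any supported address type.
func exampleAddrKey(addr util.Address) {
    key, err := addrToKey(addr)
    if err != nil {
        // Unsupported address types yield errUnsupportedAddressType.
        return
    }
    fmt.Printf("type byte: %d, hash160: %x\n", key[0], key[1:])
}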

// AddrIndex implements a transaction by address index. That is to say, it
// supports querying all transactions that reference a given address because
// they are either crediting or debiting the address. The returned transactions
// are ordered according to their order of appearance in the blockchain. In
// other words, first by block height and then by offset inside the block.
//
// In addition, support is provided for a memory-only index of unconfirmed
// transactions such as those which are kept in the memory pool before inclusion
// in a block.
type AddrIndex struct {
    // The following fields are set when the instance is created and can't
    // be changed afterwards, so there is no need to protect them with a
    // separate mutex.
    db        database.DB
    dagParams *dagconfig.Params

    // The following fields are used to quickly link transactions and
    // addresses that have not been included into a block yet when an
    // address index is being maintained. They are protected by the
    // unconfirmedLock field.
    //
    // The txnsByAddr field is used to keep an index of all transactions
    // which either create an output to a given address or spend from a
    // previous output to it keyed by the address.
    //
    // The addrsByTx field is essentially the reverse and is used to
    // keep an index of all addresses which a given transaction involves.
    // This allows fairly efficient updates when transactions are removed
    // once they are included into a block.
    unconfirmedLock sync.RWMutex
    txnsByAddr      map[[addrKeySize]byte]map[daghash.TxID]*util.Tx
    addrsByTx       map[daghash.TxID]map[[addrKeySize]byte]struct{}
}

// Ensure the AddrIndex type implements the Indexer interface.
var _ Indexer = (*AddrIndex)(nil)

// Ensure the AddrIndex type implements the NeedsInputser interface.
var _ NeedsInputser = (*AddrIndex)(nil)

// NeedsInputs signals that the index requires the referenced inputs in order
// to properly create the index.
//
// This implements the NeedsInputser interface.
func (idx *AddrIndex) NeedsInputs() bool {
    return true
}

// Init is only provided to satisfy the Indexer interface as there is nothing to
// initialize for this index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
    idx.db = db
    return nil
}

// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Key() []byte {
    return addrIndexKey
}

// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Name() string {
    return addrIndexName
}

// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the address
// index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Create(dbTx database.Tx) error {
    _, err := dbTx.Metadata().CreateBucket(addrIndexKey)
    return err
}

// writeIndexData represents the address index data to be written for one block.
// It consists of the address mapped to an ordered list of the transactions
// that involve the address in the block. It is ordered so the transactions can
// be stored in the order they appear in the block.
type writeIndexData map[[addrKeySize]byte][]int

// indexScriptPubKey extracts all standard addresses from the passed public key
// script and maps each of them to the associated transaction using the passed
// map.
func (idx *AddrIndex) indexScriptPubKey(data writeIndexData, scriptPubKey []byte, txIdx int) {
    // Nothing to index if the script is non-standard or otherwise doesn't
    // contain any addresses.
    _, addr, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
        idx.dagParams)
    if err != nil || addr == nil {
        return
    }

    addrKey, err := addrToKey(addr)
    if err != nil {
        // Ignore unsupported address types.
        return
    }

    // Avoid inserting the transaction more than once. Since the
    // transactions are indexed serially any duplicates will be
    // indexed in a row, so checking the most recent entry for the
    // address is enough to detect duplicates.
    indexedTxns := data[addrKey]
    numTxns := len(indexedTxns)
    if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
        return
    }
    indexedTxns = append(indexedTxns, txIdx)
    data[addrKey] = indexedTxns
}

// indexBlock extracts all of the standard addresses from all of the
// transactions in the passed block and maps each of them to the associated
// transaction using the passed map.
func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *blockdag.BlockDAG) {
    for txIdx, tx := range block.Transactions() {
        // Coinbases do not reference any inputs. Since the block is
        // required to have already gone through full validation, it has
        // already been proven that the first transaction in the block
        // is a coinbase.
        if txIdx > util.CoinbaseTransactionIndex {
            for _, txIn := range tx.MsgTx().TxIn {
                // The UTXO should always have the input since
                // the index contract requires it, however, be
                // safe and simply ignore any missing entries.
                entry, ok := dag.GetUTXOEntry(txIn.PreviousOutpoint)
                if !ok {
                    continue
                }

                idx.indexScriptPubKey(data, entry.ScriptPubKey(), txIdx)
            }
        }

        for _, txOut := range tx.MsgTx().TxOut {
            idx.indexScriptPubKey(data, txOut.ScriptPubKey, txIdx)
        }
    }
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
    _ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {

    // The offset and length of the transactions within the serialized
    // block.
    txLocs, err := block.TxLoc()
    if err != nil {
        return err
    }

    // Build all of the address to transaction mappings in a local map.
    addrsToTxns := make(writeIndexData)
    idx.indexBlock(addrsToTxns, block, dag)

    // Add all of the index entries for each address.
    addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
    for addrKey, txIdxs := range addrsToTxns {
        for _, txIdx := range txIdxs {
            err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
                blockID, txLocs[txIdx])
            if err != nil {
                return err
            }
        }
    }

    return nil
}

// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain. This indexer removes the address mappings
// that each transaction in the block involves.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) DisconnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG) error {
    // Build all of the address to transaction mappings in a local map.
    addrsToTxns := make(writeIndexData)
    idx.indexBlock(addrsToTxns, block, dag)

    // Remove all of the index entries for each address.
    bucket := dbTx.Metadata().Bucket(addrIndexKey)
    for addrKey, txIdxs := range addrsToTxns {
        err := dbRemoveAddrIndexEntries(bucket, addrKey, len(txIdxs))
        if err != nil {
            return err
        }
    }

    return nil
}

// TxRegionsForAddress returns a slice of block regions which identify each
// transaction that involves the passed address according to the specified
// number to skip, number requested, and whether or not the results should be
// reversed. It also returns the number actually skipped since it could be less
// in the case where there are not enough entries.
//
// NOTE: These results only include transactions confirmed in blocks. See the
// UnconfirmedTxnsForAddress method for obtaining unconfirmed transactions
// that involve a given address.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr util.Address, numToSkip, numRequested uint32, reverse bool) ([]database.BlockRegion, uint32, error) {
    addrKey, err := addrToKey(addr)
    if err != nil {
        return nil, 0, err
    }

    var regions []database.BlockRegion
    var skipped uint32
    err = idx.db.View(func(dbTx database.Tx) error {
        // Create closure to lookup the block hash given the ID using
        // the database transaction.
        fetchBlockHash := func(id []byte) (*daghash.Hash, error) {
            // Deserialize and populate the result.
            return blockdag.DBFetchBlockHashBySerializedID(dbTx, id)
        }

        var err error
        addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
        regions, skipped, err = dbFetchAddrIndexEntries(addrIdxBucket,
            addrKey, numToSkip, numRequested, reverse,
            fetchBlockHash)
        return err
    })

    return regions, skipped, err
}
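
A usage sketch for the pagination parameters (editorial; the surrounding setup is assumed): skip the first 20 matches, fetch up to 10 regions, newest first.

// exampleTxRegions is hypothetical illustration code for querying the
// confirmed side of the index.
func exampleTxRegions(idx *AddrIndex, dbTx database.Tx, addr util.Address) error {
    regions, skipped, err := idx.TxRegionsForAddress(dbTx, addr, 20, 10, true)
    if err != nil {
        return err
    }
    // skipped may be less than 20 when fewer entries exist; each region
    // identifies a block hash plus the offset and length of the raw
    // transaction within that block's serialized bytes.
    _ = skipped
    for _, region := range regions {
        _ = region
    }
    return nil
}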

// indexUnconfirmedAddresses modifies the unconfirmed (memory-only) address
// index to include mappings for the addresses encoded by the passed public key
// script to the transaction.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) indexUnconfirmedAddresses(scriptPubKey []byte, tx *util.Tx) {
    // The error is ignored here since the only reason it can fail is if the
    // script fails to parse and it was already validated before being
    // admitted to the mempool.
    _, addr, _ := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
        idx.dagParams)
    // Ignore unsupported address types.
    addrKey, err := addrToKey(addr)
    if err != nil {
        return
    }

    // Add a mapping from the address to the transaction.
    idx.unconfirmedLock.Lock()
    addrIndexEntry := idx.txnsByAddr[addrKey]
    if addrIndexEntry == nil {
        addrIndexEntry = make(map[daghash.TxID]*util.Tx)
        idx.txnsByAddr[addrKey] = addrIndexEntry
    }
    addrIndexEntry[*tx.ID()] = tx

    // Add a mapping from the transaction to the address.
    addrsByTxEntry := idx.addrsByTx[*tx.ID()]
    if addrsByTxEntry == nil {
        addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
        idx.addrsByTx[*tx.ID()] = addrsByTxEntry
    }
    addrsByTxEntry[addrKey] = struct{}{}
    idx.unconfirmedLock.Unlock()
}

// AddUnconfirmedTx adds all addresses related to the transaction to the
// unconfirmed (memory-only) address index.
//
// NOTE: This transaction MUST have already been validated by the memory pool
// before calling this function with it and have all of the inputs available in
// the provided utxo set. Failure to do so could result in some or all
// addresses not being indexed.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) AddUnconfirmedTx(tx *util.Tx, utxoSet blockdag.UTXOSet) {
    // Index addresses of all referenced previous transaction outputs.
    //
    // The existence checks are elided since this is only called after the
    // transaction has already been validated and thus all inputs are
    // already known to exist.
    for _, txIn := range tx.MsgTx().TxIn {
        entry, ok := utxoSet.Get(txIn.PreviousOutpoint)
        if !ok {
            // Ignore missing entries. This should never happen
            // in practice since the function comments specifically
            // call out all inputs must be available.
            continue
        }
        idx.indexUnconfirmedAddresses(entry.ScriptPubKey(), tx)
    }

    // Index addresses of all created outputs.
    for _, txOut := range tx.MsgTx().TxOut {
        idx.indexUnconfirmedAddresses(txOut.ScriptPubKey, tx)
    }
}

// RemoveUnconfirmedTx removes the passed transaction from the unconfirmed
// (memory-only) address index.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) RemoveUnconfirmedTx(txID *daghash.TxID) {
    idx.unconfirmedLock.Lock()
    defer idx.unconfirmedLock.Unlock()

    // Remove all address references to the transaction from the address
    // index and remove the entry for the address altogether if it no longer
    // references any transactions.
    for addrKey := range idx.addrsByTx[*txID] {
        delete(idx.txnsByAddr[addrKey], *txID)
        if len(idx.txnsByAddr[addrKey]) == 0 {
            delete(idx.txnsByAddr, addrKey)
        }
    }

    // Remove the entry from the transaction to address lookup map as well.
    delete(idx.addrsByTx, *txID)
}

// UnconfirmedTxnsForAddress returns all transactions currently in the
// unconfirmed (memory-only) address index that involve the passed address.
// Unsupported address types are ignored and will result in no results.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
    // Ignore unsupported address types.
    addrKey, err := addrToKey(addr)
    if err != nil {
        return nil
    }

    // Protect concurrent access.
    idx.unconfirmedLock.RLock()
    defer idx.unconfirmedLock.RUnlock()

    // Return a new slice with the results if there are any. This ensures
    // safe concurrency.
    if txns, exists := idx.txnsByAddr[addrKey]; exists {
        addressTxns := make([]*util.Tx, 0, len(txns))
        for _, tx := range txns {
            addressTxns = append(addressTxns, tx)
        }
        return addressTxns
    }

    return nil
}
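
An end-to-end sketch of the unconfirmed-index lifecycle (editorial; tx, utxoSet, and addr are assumed to come from mempool code):

// exampleUnconfirmedLifecycle is hypothetical illustration code: a mempool
// admits a validated transaction, a caller queries it back by address, and
// the entry is removed once the transaction is mined.
func exampleUnconfirmedLifecycle(idx *AddrIndex, tx *util.Tx,
    utxoSet blockdag.UTXOSet, addr util.Address) {

    idx.AddUnconfirmedTx(tx, utxoSet)
    for _, unconfirmedTx := range idx.UnconfirmedTxnsForAddress(addr) {
        _ = unconfirmedTx // e.g. report to a wallet client
    }
    idx.RemoveUnconfirmedTx(tx.ID())
}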

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
    return fmt.Errorf("addrindex was turned off for %d blocks and can't be recovered."+
        " To resume working drop the addrindex with --dropaddrindex", lastKnownBlockID-currentBlockID)
}

// NewAddrIndex returns a new instance of an indexer that is used to create a
// mapping of all addresses in the blockchain to the respective transactions
// that involve them.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewAddrIndex(dagParams *dagconfig.Params) *AddrIndex {
    return &AddrIndex{
        dagParams:  dagParams,
        txnsByAddr: make(map[[addrKeySize]byte]map[daghash.TxID]*util.Tx),
        addrsByTx:  make(map[daghash.TxID]map[[addrKeySize]byte]struct{}),
    }
}

// DropAddrIndex drops the address index from the provided database if it
// exists.
func DropAddrIndex(db database.DB, interrupt <-chan struct{}) error {
    return dropIndex(db, addrIndexKey, addrIndexName, interrupt)
}

@@ -1,276 +0,0 @@

// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
    "bytes"
    "fmt"
    "testing"

    "github.com/daglabs/btcd/wire"
)

// addrIndexBucket provides a mock address index database bucket by implementing
// the internalBucket interface.
type addrIndexBucket struct {
    levels map[[levelKeySize]byte][]byte
}

// Clone returns a deep copy of the mock address index bucket.
func (b *addrIndexBucket) Clone() *addrIndexBucket {
    levels := make(map[[levelKeySize]byte][]byte)
    for k, v := range b.levels {
        vCopy := make([]byte, len(v))
        copy(vCopy, v)
        levels[k] = vCopy
    }
    return &addrIndexBucket{levels: levels}
}

// Get returns the value associated with the key from the mock address index
// bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Get(key []byte) []byte {
    var levelKey [levelKeySize]byte
    copy(levelKey[:], key)
    return b.levels[levelKey]
}

// Put stores the provided key/value pair to the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Put(key []byte, value []byte) error {
    var levelKey [levelKeySize]byte
    copy(levelKey[:], key)
    b.levels[levelKey] = value
    return nil
}

// Delete removes the provided key from the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Delete(key []byte) error {
    var levelKey [levelKeySize]byte
    copy(levelKey[:], key)
    delete(b.levels, levelKey)
    return nil
}

// printLevels returns a string with a visual representation of the provided
// address key taking into account the max size of each level. It is useful
// when creating and debugging test cases.
func (b *addrIndexBucket) printLevels(addrKey [addrKeySize]byte) string {
    highestLevel := uint8(0)
    for k := range b.levels {
        if !bytes.Equal(k[:levelOffset], addrKey[:]) {
            continue
        }
        level := uint8(k[levelOffset])
        if level > highestLevel {
            highestLevel = level
        }
    }

    var levelBuf bytes.Buffer
    _, _ = levelBuf.WriteString("\n")
    maxEntries := level0MaxEntries
    for level := uint8(0); level <= highestLevel; level++ {
        data := b.levels[keyForLevel(addrKey, level)]
        numEntries := len(data) / txEntrySize
        for i := 0; i < numEntries; i++ {
            start := i * txEntrySize
            num := byteOrder.Uint32(data[start:])
            _, _ = levelBuf.WriteString(fmt.Sprintf("%02d ", num))
        }
        for i := numEntries; i < maxEntries; i++ {
            _, _ = levelBuf.WriteString("_ ")
        }
        _, _ = levelBuf.WriteString("\n")
        maxEntries *= 2
    }

    return levelBuf.String()
}

// sanityCheck ensures that all data stored in the bucket for the given address
// adheres to the level-based rules described by the address index
// documentation.
func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal int) error {
    // Find the highest level for the key.
    highestLevel := uint8(0)
    for k := range b.levels {
        if !bytes.Equal(k[:levelOffset], addrKey[:]) {
            continue
        }
        level := uint8(k[levelOffset])
        if level > highestLevel {
            highestLevel = level
        }
    }

    // Ensure the expected total number of entries are present and that
    // all levels adhere to the rules described in the address index
    // documentation.
    var totalEntries int
    maxEntries := level0MaxEntries
    for level := uint8(0); level <= highestLevel; level++ {
        // Level 0 can't have more entries than the max allowed, and it
        // can't be empty if the levels after it have data. All other
        // levels must either be half full or full.
        data := b.levels[keyForLevel(addrKey, level)]
        numEntries := len(data) / txEntrySize
        totalEntries += numEntries
        if level == 0 {
            if (highestLevel != 0 && numEntries == 0) ||
                numEntries > maxEntries {

                return fmt.Errorf("level %d has %d entries",
                    level, numEntries)
            }
        } else if numEntries != maxEntries && numEntries != maxEntries/2 {
            return fmt.Errorf("level %d has %d entries", level,
                numEntries)
        }
        maxEntries *= 2
    }
    if totalEntries != expectedTotal {
        return fmt.Errorf("expected %d entries - got %d", expectedTotal,
            totalEntries)
    }

    // Ensure all of the numbers are in order starting from the highest
    // level moving to the lowest level.
    expectedNum := uint32(0)
    for level := highestLevel + 1; level > 0; level-- {
        data := b.levels[keyForLevel(addrKey, level-1)]
        numEntries := len(data) / txEntrySize
        for i := 0; i < numEntries; i++ {
            start := i * txEntrySize
            num := byteOrder.Uint32(data[start:])
            if num != expectedNum {
                return fmt.Errorf("level %d offset %d does "+
                    "not contain the expected number of "+
                    "%d - got %d", level-1, i, expectedNum,
                    num)
            }
            expectedNum++
        }
    }

    return nil
}

// TestAddrIndexLevels ensures that adding and deleting entries to the address
// index creates multiple levels as described by the address index
// documentation.
func TestAddrIndexLevels(t *testing.T) {
    t.Parallel()

    tests := []struct {
        name        string
        key         [addrKeySize]byte
        numInsert   int
        printLevels bool // Set to help debug a specific test.
    }{
        {
            name:      "level 0 not full",
            numInsert: level0MaxEntries - 1,
        },
        {
            name:      "level 1 half",
            numInsert: level0MaxEntries + 1,
        },
        {
            name:      "level 1 full",
            numInsert: level0MaxEntries*2 + 1,
        },
        {
            name:      "level 2 half, level 1 half",
            numInsert: level0MaxEntries*3 + 1,
        },
        {
            name:      "level 2 half, level 1 full",
            numInsert: level0MaxEntries*4 + 1,
        },
        {
            name:      "level 2 full, level 1 half",
            numInsert: level0MaxEntries*5 + 1,
        },
        {
            name:      "level 2 full, level 1 full",
            numInsert: level0MaxEntries*6 + 1,
        },
        {
            name:      "level 3 half, level 2 half, level 1 half",
            numInsert: level0MaxEntries*7 + 1,
        },
        {
            name:      "level 3 full, level 2 half, level 1 full",
            numInsert: level0MaxEntries*12 + 1,
        },
    }

nextTest:
    for testNum, test := range tests {
        // Insert entries in order.
        populatedBucket := &addrIndexBucket{
            levels: make(map[[levelKeySize]byte][]byte),
        }
        for i := 0; i < test.numInsert; i++ {
            txLoc := wire.TxLoc{TxStart: i * 2}
            err := dbPutAddrIndexEntry(populatedBucket, test.key,
                uint64(i), txLoc)
            if err != nil {
                t.Errorf("dbPutAddrIndexEntry #%d (%s) - "+
                    "unexpected error: %v", testNum,
                    test.name, err)
                continue nextTest
            }
        }
        if test.printLevels {
            t.Log(populatedBucket.printLevels(test.key))
        }

        // Delete entries from the populated bucket until all entries
        // have been deleted. The bucket is reset to the fully
        // populated bucket on each iteration so every combination is
        // tested. Notice the upper limit purposely exceeds the number
        // of entries to ensure attempting to delete more entries than
        // there are works correctly.
        for numDelete := 0; numDelete <= test.numInsert+1; numDelete++ {
            // Clone populated bucket to run each delete against.
            bucket := populatedBucket.Clone()

            // Remove the number of entries for this iteration.
            err := dbRemoveAddrIndexEntries(bucket, test.key,
                numDelete)
            if err != nil {
                if numDelete <= test.numInsert {
                    t.Errorf("dbRemoveAddrIndexEntries (%s) "+
                        "delete %d - unexpected error: "+
                        "%v", test.name, numDelete, err)
                    continue nextTest
                }
            }
            if test.printLevels {
                t.Log(bucket.printLevels(test.key))
            }

            // Sanity check the levels to ensure they adhere to all
            // rules.
            numExpected := test.numInsert
            if numDelete <= test.numInsert {
                numExpected -= numDelete
            }
            err = bucket.sanityCheck(test.key, numExpected)
            if err != nil {
                t.Errorf("sanity check fail (%s) delete %d: %v",
                    test.name, numDelete, err)
                continue nextTest
            }
        }
    }
}

@@ -1,367 +0,0 @@

// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
    "errors"
    "fmt"

    "github.com/daglabs/btcd/blockdag"
    "github.com/daglabs/btcd/dagconfig"
    "github.com/daglabs/btcd/database"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/daghash"
    "github.com/daglabs/btcd/util/gcs"
    "github.com/daglabs/btcd/util/gcs/builder"
    "github.com/daglabs/btcd/wire"
)

const (
    // cfIndexName is the human-readable name for the index.
    cfIndexName = "committed filter index"
)

// Committed filters come in two flavours: basic and extended. They are
// generated and dropped in pairs, and both are indexed by a block's hash.
// Besides holding different content, they also live in different buckets.
var (
    // cfIndexParentBucketKey is the name of the parent bucket used to house
    // the index. The rest of the buckets live below this bucket.
    cfIndexParentBucketKey = []byte("cfindexparentbucket")

    // cfIndexKeys is an array of db bucket names used to house indexes of
    // block hashes to cfilters.
    cfIndexKeys = [][]byte{
        []byte("cf0byhashidx"),
        []byte("cf1byhashidx"),
    }

    // cfHeaderKeys is an array of db bucket names used to house indexes of
    // block hashes to cf headers.
    cfHeaderKeys = [][]byte{
        []byte("cf0headerbyhashidx"),
        []byte("cf1headerbyhashidx"),
    }

    // cfHashKeys is an array of db bucket names used to house indexes of
    // block hashes to cf hashes.
    cfHashKeys = [][]byte{
        []byte("cf0hashbyhashidx"),
        []byte("cf1hashbyhashidx"),
    }

    maxFilterType = uint8(len(cfHeaderKeys) - 1)
)

// dbFetchFilterIdxEntry retrieves a data blob from the filter index database.
// An entry's absence is not considered an error.
func dbFetchFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash) ([]byte, error) {
    idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
    return idx.Get(h[:]), nil
}

// dbStoreFilterIdxEntry stores a data blob in the filter index database.
func dbStoreFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash, f []byte) error {
    idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
    return idx.Put(h[:], f)
}

// dbDeleteFilterIdxEntry deletes a data blob from the filter index database.
func dbDeleteFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash) error {
    idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
    return idx.Delete(h[:])
}

// CfIndex implements a committed filter (cf) by hash index.
type CfIndex struct {
    db        database.DB
    dagParams *dagconfig.Params
}

// Ensure the CfIndex type implements the Indexer interface.
var _ Indexer = (*CfIndex)(nil)

// Init initializes the hash-based cf index. This is part of the Indexer
// interface.
func (idx *CfIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
    idx.db = db
    return nil
}

// Key returns the database key to use for the index as a byte slice. This is
// part of the Indexer interface.
func (idx *CfIndex) Key() []byte {
    return cfIndexParentBucketKey
}

// Name returns the human-readable name of the index. This is part of the
// Indexer interface.
func (idx *CfIndex) Name() string {
    return cfIndexName
}

// Create is invoked when the indexer manager determines the index needs to
// be created for the first time. It creates buckets for the two hash-based cf
// indexes (basic, extended).
func (idx *CfIndex) Create(dbTx database.Tx) error {
    meta := dbTx.Metadata()

    cfIndexParentBucket, err := meta.CreateBucket(cfIndexParentBucketKey)
    if err != nil {
        return err
    }

    for _, bucketName := range cfIndexKeys {
        _, err = cfIndexParentBucket.CreateBucket(bucketName)
        if err != nil {
            return err
        }
    }

    for _, bucketName := range cfHeaderKeys {
        _, err = cfIndexParentBucket.CreateBucket(bucketName)
        if err != nil {
            return err
        }
    }

    for _, bucketName := range cfHashKeys {
        _, err = cfIndexParentBucket.CreateBucket(bucketName)
        if err != nil {
            return err
        }
    }

    return nil
}

// storeFilter stores a given filter, and performs the steps needed to
// generate the filter's header.
func storeFilter(dbTx database.Tx, block *util.Block, f *gcs.Filter,
    filterType wire.FilterType) error {
    if uint8(filterType) > maxFilterType {
        return errors.New("unsupported filter type")
    }

    // Figure out which buckets to use.
    fkey := cfIndexKeys[filterType]
    hkey := cfHeaderKeys[filterType]
    hashkey := cfHashKeys[filterType]

    // Start by storing the filter.
    h := block.Hash()
    filterBytes, err := f.NBytes()
    if err != nil {
        return err
    }
    err = dbStoreFilterIdxEntry(dbTx, fkey, h, filterBytes)
    if err != nil {
        return err
    }

    // Next store the filter hash.
    filterHash, err := builder.GetFilterHash(f)
    if err != nil {
        return err
    }
    err = dbStoreFilterIdxEntry(dbTx, hashkey, h, filterHash[:])
    if err != nil {
        return err
    }

    // Then fetch the previous block's filter header.
    var prevHeader *daghash.Hash
    header := block.MsgBlock().Header
    if header.IsGenesis() {
        prevHeader = &daghash.ZeroHash
    } else {
        // TODO(Evgeny): Current implementation of GCS filter inherited from chain
        // (single parent) and must be ported to DAG (multiple parents)
        var parentHash *daghash.Hash
        if header.NumParentBlocks() != 0 {
            parentHash = header.ParentHashes[0]
        }
        prevFilterHashBytes, err := dbFetchFilterIdxEntry(dbTx, hkey, parentHash)
        if err != nil {
            return err
        }

        // Construct the new block's filter header, and store it.
        prevHeader, err = daghash.NewHash(prevFilterHashBytes)
        if err != nil {
            return err
        }
    }

    fh, err := builder.MakeHeaderForFilter(f, prevHeader)
    if err != nil {
        return err
    }
    return dbStoreFilterIdxEntry(dbTx, hkey, h, fh[:])
}
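
A sketch of the commitment chain that storeFilter maintains (editorial): each block's filter header commits to both the block's own filter and the parent's filter header, so a light client holding one trusted header can verify the entire chain of filters.

// exampleFilterHeaderChain is hypothetical illustration code mirroring the
// call shape used in storeFilter above.
func exampleFilterHeaderChain(f *gcs.Filter, prevHeader *daghash.Hash) {
    fh, err := builder.MakeHeaderForFilter(f, prevHeader)
    if err != nil {
        return
    }
    // fh is header(N) = H(filter(N), header(parent(N))); the genesis
    // block uses daghash.ZeroHash as its parent header.
    _ = fh
}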

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a hash-to-cf mapping for
// every passed block. This is part of the Indexer interface.
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *util.Block, _ uint64,
    _ *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {

    f, err := builder.BuildBasicFilter(block.MsgBlock())
    if err != nil {
        return err
    }

    err = storeFilter(dbTx, block, f, wire.GCSFilterRegular)
    if err != nil {
        return err
    }

    f, err = builder.BuildExtFilter(block.MsgBlock())
    if err != nil {
        return err
    }

    return storeFilter(dbTx, block, f, wire.GCSFilterExtended)
}

// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain. This indexer removes the hash-to-cf
// mapping for every passed block. This is part of the Indexer interface.
func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *util.Block,
    _ *blockdag.BlockDAG) error {

    for _, key := range cfIndexKeys {
        err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
        if err != nil {
            return err
        }
    }

    for _, key := range cfHeaderKeys {
        err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
        if err != nil {
            return err
        }
    }

    for _, key := range cfHashKeys {
        err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
        if err != nil {
            return err
        }
    }

    return nil
}

// entryByBlockHash fetches a filter index entry of a particular type
// (e.g. filter, filter header, etc.) for a filter type and block hash.
func (idx *CfIndex) entryByBlockHash(filterTypeKeys [][]byte,
    filterType wire.FilterType, h *daghash.Hash) ([]byte, error) {

    if uint8(filterType) > maxFilterType {
        return nil, errors.New("unsupported filter type")
    }
    key := filterTypeKeys[filterType]

    var entry []byte
    err := idx.db.View(func(dbTx database.Tx) error {
        var err error
        entry, err = dbFetchFilterIdxEntry(dbTx, key, h)
        return err
    })
    return entry, err
}

// entriesByBlockHashes batch fetches a filter index entry of a particular type
// (e.g. filter, filter header, etc.) for a filter type and slice of block
// hashes.
func (idx *CfIndex) entriesByBlockHashes(filterTypeKeys [][]byte,
    filterType wire.FilterType, blockHashes []*daghash.Hash) ([][]byte, error) {

    if uint8(filterType) > maxFilterType {
        return nil, errors.New("unsupported filter type")
    }
    key := filterTypeKeys[filterType]

    entries := make([][]byte, 0, len(blockHashes))
    err := idx.db.View(func(dbTx database.Tx) error {
        for _, blockHash := range blockHashes {
            entry, err := dbFetchFilterIdxEntry(dbTx, key, blockHash)
            if err != nil {
                return err
            }
            entries = append(entries, entry)
        }
        return nil
    })
    return entries, err
}

// FilterByBlockHash returns the serialized contents of a block's basic or
// extended committed filter.
func (idx *CfIndex) FilterByBlockHash(h *daghash.Hash,
    filterType wire.FilterType) ([]byte, error) {
    return idx.entryByBlockHash(cfIndexKeys, filterType, h)
}

// FiltersByBlockHashes returns the serialized contents of a block's basic or
// extended committed filter for a set of blocks by hash.
func (idx *CfIndex) FiltersByBlockHashes(blockHashes []*daghash.Hash,
    filterType wire.FilterType) ([][]byte, error) {
    return idx.entriesByBlockHashes(cfIndexKeys, filterType, blockHashes)
}

// FilterHeaderByBlockHash returns the serialized contents of a block's basic
// or extended committed filter header.
func (idx *CfIndex) FilterHeaderByBlockHash(h *daghash.Hash,
    filterType wire.FilterType) ([]byte, error) {
    return idx.entryByBlockHash(cfHeaderKeys, filterType, h)
}

// FilterHeadersByBlockHashes returns the serialized contents of a block's basic
// or extended committed filter header for a set of blocks by hash.
func (idx *CfIndex) FilterHeadersByBlockHashes(blockHashes []*daghash.Hash,
    filterType wire.FilterType) ([][]byte, error) {
    return idx.entriesByBlockHashes(cfHeaderKeys, filterType, blockHashes)
}

// FilterHashByBlockHash returns the serialized contents of a block's basic
// or extended committed filter hash.
func (idx *CfIndex) FilterHashByBlockHash(h *daghash.Hash,
    filterType wire.FilterType) ([]byte, error) {
    return idx.entryByBlockHash(cfHashKeys, filterType, h)
}

// FilterHashesByBlockHashes returns the serialized contents of a block's basic
// or extended committed filter hash for a set of blocks by hash.
func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*daghash.Hash,
    filterType wire.FilterType) ([][]byte, error) {
    return idx.entriesByBlockHashes(cfHashKeys, filterType, blockHashes)
}

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *CfIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
    return fmt.Errorf("cfindex was turned off for %d blocks and can't be recovered."+
        " To resume working drop the cfindex with --dropcfindex", lastKnownBlockID-currentBlockID)
}

// NewCfIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all blocks in the blockchain to their respective
// committed filters.
//
// It implements the Indexer interface which plugs into the IndexManager that
// in turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewCfIndex(dagParams *dagconfig.Params) *CfIndex {
    return &CfIndex{dagParams: dagParams}
}

// DropCfIndex drops the CF index from the provided database if it exists.
func DropCfIndex(db database.DB, interrupt <-chan struct{}) error {
    return dropIndex(db, cfIndexParentBucketKey, cfIndexName, interrupt)
}

@@ -1,113 +0,0 @@

// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package indexers implements optional block chain indexes.
*/
package indexers

import (
    "encoding/binary"
    "errors"

    "github.com/daglabs/btcd/blockdag"
    "github.com/daglabs/btcd/database"
    "github.com/daglabs/btcd/util"
)

var (
    // byteOrder is the preferred byte order used for serializing numeric
    // fields for storage in the database.
    byteOrder = binary.LittleEndian

    // errInterruptRequested indicates that an operation was cancelled due
    // to a user-requested interrupt.
    errInterruptRequested = errors.New("interrupt requested")
)

// NeedsInputser provides a generic interface for an indexer to specify that it
// requires the ability to look up inputs for a transaction.
type NeedsInputser interface {
    NeedsInputs() bool
}

// Indexer provides a generic interface for an indexer that is managed by an
// index manager such as the Manager type provided by this package.
type Indexer interface {
    // Key returns the key of the index as a byte slice.
    Key() []byte

    // Name returns the human-readable name of the index.
    Name() string

    // Create is invoked when the indexer manager determines the index needs
    // to be created for the first time.
    Create(dbTx database.Tx) error

    // Init is invoked when the index manager is first initializing the
    // index. This differs from the Create method in that it is called on
    // every load, including the case the index was just created.
    Init(db database.DB, dag *blockdag.BlockDAG) error

    // ConnectBlock is invoked when the index manager is notified that a new
    // block has been connected to the DAG.
    ConnectBlock(dbTx database.Tx,
        block *util.Block,
        blockID uint64,
        dag *blockdag.BlockDAG,
        acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
        virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error

    // Recover is invoked when the indexer wasn't turned on for several blocks
    // and the indexer needs to close the gaps.
    Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error
}
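
A minimal sketch of an Indexer implementation (editorial; the type and its behavior are hypothetical): it satisfies the interface exactly as declared above but indexes nothing, which makes it a useful starting template.

type noopIndex struct{}

func (idx *noopIndex) Key() []byte  { return []byte("noopidx") }
func (idx *noopIndex) Name() string { return "noop index" }

func (idx *noopIndex) Create(dbTx database.Tx) error {
    _, err := dbTx.Metadata().CreateBucket(idx.Key())
    return err
}

func (idx *noopIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
    return nil
}

func (idx *noopIndex) ConnectBlock(dbTx database.Tx, block *util.Block,
    blockID uint64, dag *blockdag.BlockDAG,
    acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
    virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
    return nil
}

func (idx *noopIndex) Recover(dbTx database.Tx, currentBlockID,
    lastKnownBlockID uint64) error {
    return nil
}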

// AssertError identifies an error that indicates an internal code consistency
// issue and should be treated as a critical and unrecoverable error.
type AssertError string

// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e AssertError) Error() string {
    return "assertion failed: " + string(e)
}

// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string

// Error implements the error interface.
func (e errDeserialize) Error() string {
    return string(e)
}

// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
    _, ok := err.(errDeserialize)
    return ok
}

// internalBucket is an abstraction over a database bucket. It is used to make
// the code easier to test since it allows mock objects in the tests to only
// implement these functions instead of everything a database.Bucket supports.
type internalBucket interface {
    Get(key []byte) []byte
    Put(key []byte, value []byte) error
    Delete(key []byte) error
}

// interruptRequested returns true when the provided channel has been closed.
// This simplifies early shutdown slightly since the caller can just use an if
// statement instead of a select.
func interruptRequested(interrupted <-chan struct{}) bool {
    select {
    case <-interrupted:
        return true
    default:
    }

    return false
}

@@ -1,389 +0,0 @@

// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
    "github.com/daglabs/btcd/blockdag"
    "github.com/daglabs/btcd/database"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/daghash"
)

var (
    // indexTipsBucketName is the name of the db bucket used to house the
    // current tip of each index.
    indexTipsBucketName = []byte("idxtips")

    indexCurrentBlockIDBucketName = []byte("idxcurrentblockid")
)

// Manager defines an index manager that manages multiple optional indexes and
// implements the blockchain.IndexManager interface so it can be seamlessly
// plugged into normal chain processing.
type Manager struct {
    db             database.DB
    enabledIndexes []Indexer
}

// Ensure the Manager type implements the blockchain.IndexManager interface.
var _ blockdag.IndexManager = (*Manager)(nil)

// indexDropKey returns the key for an index which indicates it is in the
// process of being dropped.
func indexDropKey(idxKey []byte) []byte {
    dropKey := make([]byte, len(idxKey)+1)
    dropKey[0] = 'd'
    copy(dropKey[1:], idxKey)
    return dropKey
}

// maybeFinishDrops determines if each of the enabled indexes is in the middle
// of being dropped and finishes dropping them when they are. This is necessary
// because dropping an index has to be done in several atomic steps rather than
// one big atomic step due to the massive number of entries.
func (m *Manager) maybeFinishDrops(interrupt <-chan struct{}) error {
    indexNeedsDrop := make([]bool, len(m.enabledIndexes))
    err := m.db.View(func(dbTx database.Tx) error {
        // None of the indexes needs to be dropped if the index tips
        // bucket hasn't been created yet.
        indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
        if indexesBucket == nil {
            return nil
        }

        // Mark the indexer as requiring a drop if one is already in
        // progress.
        for i, indexer := range m.enabledIndexes {
            dropKey := indexDropKey(indexer.Key())
            if indexesBucket.Get(dropKey) != nil {
                indexNeedsDrop[i] = true
            }
        }

        return nil
    })
    if err != nil {
        return err
    }

    if interruptRequested(interrupt) {
        return errInterruptRequested
    }

    // Finish dropping any of the enabled indexes that are already in the
    // middle of being dropped.
    for i, indexer := range m.enabledIndexes {
        if !indexNeedsDrop[i] {
            continue
        }

        log.Infof("Resuming %s drop", indexer.Name())
        err := dropIndex(m.db, indexer.Key(), indexer.Name(), interrupt)
        if err != nil {
            return err
        }
    }

    return nil
}

// maybeCreateIndexes determines if each of the enabled indexes has already
// been created and creates them if not.
func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error {
    indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
    for _, indexer := range m.enabledIndexes {
        // Nothing to do if the index tip already exists.
        idxKey := indexer.Key()
        if indexesBucket.Get(idxKey) != nil {
            continue
        }

        // The tip for the index does not exist, so create it and
        // invoke the create callback for the index so it can perform
        // any one-time initialization it requires.
        if err := indexer.Create(dbTx); err != nil {
            return err
        }

        // TODO (Mike): this is a temporary solution to prevent the node
        // from refusing to start because it thinks indexers are not
        // initialized. Indexers, however, do not work properly, and a
        // general solution to their operation is required.
        if err := indexesBucket.Put(idxKey, []byte{0}); err != nil {
            return err
        }
    }

    return nil
}

// Init initializes the enabled indexes. This is called during chain
// initialization and primarily consists of catching up all indexes to the
// current best chain tip. This is necessary since each index can be disabled
// and re-enabled at any time and attempting to catch-up indexes at the same
// time new blocks are being downloaded would lead to an overall longer time to
// catch up due to the I/O contention.
//
// This is part of the blockchain.IndexManager interface.
func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) error {
    // Nothing to do when no indexes are enabled.
    if len(m.enabledIndexes) == 0 {
        return nil
    }

    if interruptRequested(interrupt) {
        return errInterruptRequested
    }

    m.db = db

    // Finish any drops that were previously interrupted.
    if err := m.maybeFinishDrops(interrupt); err != nil {
        return err
    }

    // Create the initial state for the indexes as needed.
    err := m.db.Update(func(dbTx database.Tx) error {
        // Create the bucket for the current tips as needed.
        meta := dbTx.Metadata()
        _, err := meta.CreateBucketIfNotExists(indexTipsBucketName)
        if err != nil {
            return err
        }
        if _, err := meta.CreateBucketIfNotExists(indexCurrentBlockIDBucketName); err != nil {
            return err
        }

        return m.maybeCreateIndexes(dbTx)
    })
    if err != nil {
        return err
    }

    // Initialize each of the enabled indexes.
    for _, indexer := range m.enabledIndexes {
        if err := indexer.Init(db, blockDAG); err != nil {
            return err
        }
    }

    return m.recoverIfNeeded()
}

// recoverIfNeeded checks if the node worked for some time
// without one of the currently enabled indexes, and if that is
// the case, recovers the missing blocks from the index.
func (m *Manager) recoverIfNeeded() error {
    return m.db.Update(func(dbTx database.Tx) error {
        lastKnownBlockID := blockdag.DBFetchCurrentBlockID(dbTx)
        for _, indexer := range m.enabledIndexes {
            serializedCurrentIdxBlockID := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Get(indexer.Key())
            currentIdxBlockID := uint64(0)
            if serializedCurrentIdxBlockID != nil {
                currentIdxBlockID = blockdag.DeserializeBlockID(serializedCurrentIdxBlockID)
            }
            if lastKnownBlockID > currentIdxBlockID {
                err := indexer.Recover(dbTx, currentIdxBlockID, lastKnownBlockID)
                if err != nil {
                    return err
                }
            }
        }
        return nil
    })
}

// ConnectBlock must be invoked when a block is extending the main chain. It
// keeps track of the state of each index it is managing, performs some sanity
// checks, and invokes each indexer.
//
// This is part of the blockchain.IndexManager interface.
func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
    txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {

    // Call each of the currently active optional indexes with the block
    // being connected so they can update accordingly.
    for _, index := range m.enabledIndexes {
        // Notify the indexer with the connected block so it can index it.
        if err := index.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData); err != nil {
            return err
        }
    }

    // Add the new block ID index entry for the block being connected and
    // update the current internal block ID accordingly.
    return m.updateIndexersWithCurrentBlockID(dbTx, block.Hash(), blockID)
}

func (m *Manager) updateIndexersWithCurrentBlockID(dbTx database.Tx, blockHash *daghash.Hash, blockID uint64) error {
    serializedBlockID := blockdag.SerializeBlockID(blockID)
    for _, index := range m.enabledIndexes {
        err := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Put(index.Key(), serializedBlockID)
        if err != nil {
            return err
        }
    }
    return nil
}

// NewManager returns a new index manager with the provided indexes enabled.
//
// The manager returned satisfies the blockchain.IndexManager interface and thus
// cleanly plugs into the normal blockchain processing path.
func NewManager(enabledIndexes []Indexer) *Manager {
    return &Manager{
        enabledIndexes: enabledIndexes,
    }
}
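
An illustrative wiring sketch (editorial; the setup function is hypothetical and assumes the dagconfig import): enable the address and committed filter indexes and hand the resulting manager to DAG initialization.

func exampleEnableIndexes(dagParams *dagconfig.Params) *Manager {
    indexes := []Indexer{
        NewAddrIndex(dagParams),
        NewCfIndex(dagParams),
    }
    return NewManager(indexes)
}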

// dropIndex drops the passed index from the database. Since indexes can be
// massive, it deletes the index in multiple database transactions in order to
// keep memory usage to reasonable levels. It also marks the drop in progress
// so the drop can be resumed if it is stopped before it is done, and so the
// index cannot be used again until the drop is complete.
func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan struct{}) error {
	// Nothing to do if the index doesn't already exist.
	var needsDelete bool
	err := db.View(func(dbTx database.Tx) error {
		indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
		if indexesBucket != nil && indexesBucket.Get(idxKey) != nil {
			needsDelete = true
		}
		return nil
	})
	if err != nil {
		return err
	}
	if !needsDelete {
		log.Infof("Not dropping %s because it does not exist", idxName)
		return nil
	}

	// Mark that the index is in the process of being dropped so that it
	// can be resumed on the next start if interrupted before the process is
	// complete.
	log.Infof("Dropping all %s entries. This might take a while...",
		idxName)
	err = db.Update(func(dbTx database.Tx) error {
		indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
		return indexesBucket.Put(indexDropKey(idxKey), idxKey)
	})
	if err != nil {
		return err
	}

	// Since the indexes can be so large, attempting to simply delete
	// the bucket in a single database transaction would result in massive
	// memory usage and likely crash many systems due to ulimits. In order
	// to avoid this, use a cursor to delete a maximum number of entries out
	// of the bucket at a time. Recurse buckets depth-first to delete any
	// sub-buckets.
	const maxDeletions = 2000000
	var totalDeleted uint64

	// Recurse through all buckets in the index, cataloging each for
	// later deletion.
	var subBuckets [][][]byte
	var subBucketClosure func(database.Tx, []byte, [][]byte) error
	subBucketClosure = func(dbTx database.Tx,
		subBucket []byte, tlBucket [][]byte) error {
		// Get full bucket name and append to subBuckets for later
		// deletion.
		var bucketName [][]byte
		if (tlBucket == nil) || (len(tlBucket) == 0) {
			bucketName = append(bucketName, subBucket)
		} else {
			bucketName = append(tlBucket, subBucket)
		}
		subBuckets = append(subBuckets, bucketName)
		// Recurse sub-buckets to append to subBuckets slice.
		bucket := dbTx.Metadata()
		for _, subBucketName := range bucketName {
			bucket = bucket.Bucket(subBucketName)
		}
		return bucket.ForEachBucket(func(k []byte) error {
			return subBucketClosure(dbTx, k, bucketName)
		})
	}

	// Call subBucketClosure with top-level bucket.
	err = db.View(func(dbTx database.Tx) error {
		return subBucketClosure(dbTx, idxKey, nil)
	})
	if err != nil {
		return err
	}

	// Iterate through each sub-bucket in reverse, deepest-first, deleting
	// all keys inside them and then dropping the buckets themselves.
	for i := range subBuckets {
		bucketName := subBuckets[len(subBuckets)-1-i]
		// Delete maxDeletions key/value pairs at a time.
		for numDeleted := maxDeletions; numDeleted == maxDeletions; {
			numDeleted = 0
			err := db.Update(func(dbTx database.Tx) error {
				subBucket := dbTx.Metadata()
				for _, subBucketName := range bucketName {
					subBucket = subBucket.Bucket(subBucketName)
				}
				cursor := subBucket.Cursor()
				for ok := cursor.First(); ok; ok = cursor.Next() &&
					numDeleted < maxDeletions {

					if err := cursor.Delete(); err != nil {
						return err
					}
					numDeleted++
				}
				return nil
			})
			if err != nil {
				return err
			}

			if numDeleted > 0 {
				totalDeleted += uint64(numDeleted)
				log.Infof("Deleted %d keys (%d total) from %s",
					numDeleted, totalDeleted, idxName)
			}
		}

		if interruptRequested(interrupt) {
			return errInterruptRequested
		}

		// Drop the bucket itself.
		err = db.Update(func(dbTx database.Tx) error {
			bucket := dbTx.Metadata()
			for j := 0; j < len(bucketName)-1; j++ {
				bucket = bucket.Bucket(bucketName[j])
			}
			return bucket.DeleteBucket(bucketName[len(bucketName)-1])
		})
		if err != nil {
			return err
		}
	}

	// Remove the index tip, index bucket, and in-progress drop flag now
	// that all index entries have been removed.
	err = db.Update(func(dbTx database.Tx) error {
		meta := dbTx.Metadata()
		indexesBucket := meta.Bucket(indexTipsBucketName)
		if err := indexesBucket.Delete(idxKey); err != nil {
			return err
		}

		if err := meta.Bucket(indexCurrentBlockIDBucketName).Delete(idxKey); err != nil {
			return err
		}

		return indexesBucket.Delete(indexDropKey(idxKey))
	})
	if err != nil {
		return err
	}

	log.Infof("Dropped %s", idxName)
	return nil
}
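
// Editor's sketch (not part of the original file): dropping a single index,
// as DropTxIndex in txindex.go does for each of the transaction-index
// buckets. The interrupt channel lets a shutdown abort the (potentially
// long-running) deletion between batches.
//
//	interrupt := make(chan struct{})
//	if err := dropIndex(db, includingBlocksIndexKey, txIndexName, interrupt); err != nil {
//		// handle the error
//	}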
@@ -1,434 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"fmt"

	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/wire"
)

const (
	// txIndexName is the human-readable name for the index.
	txIndexName = "transaction index"

	includingBlocksIndexKeyEntrySize = 8 // 4 bytes for offset + 4 bytes for transaction length
)

var (
	includingBlocksIndexKey = []byte("includingblocksidx")

	acceptingBlocksIndexKey = []byte("acceptingblocksidx")
)

// txsAcceptedByVirtual is the in-memory index of txIDs that were accepted
// by the current virtual.
var txsAcceptedByVirtual map[daghash.TxID]bool

// -----------------------------------------------------------------------------
// The transaction index consists of an entry for every transaction in the DAG.
//
// There are two buckets used in total. The first bucket maps the hash of
// each transaction to its location in each block it's included in. The second
// bucket contains all of the blocks from whose viewpoint the transaction has
// been accepted (i.e. the transaction is found in their blue set without
// double spends), along with the blue block (or the block itself) that
// included the transaction.
//
// NOTE: Although it is technically possible for multiple transactions to have
// the same hash as long as the previous transaction with the same hash is
// fully spent, this code only stores the most recent one because doing
// otherwise would add a non-trivial amount of space and overhead for
// something that will realistically never happen, and even if it did, the
// old one must be fully spent and so the most likely transaction a caller
// would want for a given hash is the most recent one anyway.
//
// The including blocks index contains a sub-bucket for each transaction hash
// (32 bytes each), whose serialized format is:
//
//   <block id> = <start offset><tx length>
//
//   Field           Type      Size
//   block id        uint64    8 bytes
//   start offset    uint32    4 bytes
//   tx length       uint32    4 bytes
//   -----
//   Total: 16 bytes
//
// The accepting blocks index contains a sub-bucket for each transaction hash
// (32 bytes each), whose serialized format is:
//
//   <accepting block id> = <including block id>
//
//   Field               Type      Size
//   accepting block id  uint64    8 bytes
//   including block id  uint64    8 bytes
//   -----
//   Total: 16 bytes
//
// -----------------------------------------------------------------------------

func putIncludingBlocksEntry(target []byte, txLoc wire.TxLoc) {
	byteOrder.PutUint32(target, uint32(txLoc.TxStart))
	byteOrder.PutUint32(target[4:], uint32(txLoc.TxLen))
}
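
// Editor's sketch (not part of the original file): how an including-blocks
// entry round-trips. putIncludingBlocksEntry writes the offset and length
// into an 8-byte value, and dbFetchFirstTxRegion below decodes it the same
// way; the concrete numbers here are made up for illustration.
//
//	entry := make([]byte, includingBlocksIndexKeyEntrySize)
//	putIncludingBlocksEntry(entry, wire.TxLoc{TxStart: 80, TxLen: 250})
//	offset := byteOrder.Uint32(entry[:4]) // 80
//	length := byteOrder.Uint32(entry[4:]) // 250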

func dbPutIncludingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
	bucket, err := dbTx.Metadata().Bucket(includingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
	if err != nil {
		return err
	}
	return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}

func dbPutAcceptingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
	bucket, err := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
	if err != nil {
		return err
	}
	return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}

// dbFetchFirstTxRegion uses an existing database transaction to fetch the block
// region for the provided transaction hash from the transaction index. When
// there is no entry for the provided hash, nil will be returned for both the
// region and the error.
//
// NOTE: Because the transaction can be found in multiple blocks, this function
// arbitrarily returns the first block region that is stored in the txindex.
func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.BlockRegion, error) {
	// Load the record from the database and return now if it doesn't exist.
	txBucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txID[:])
	if txBucket == nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No block region "+
				"was found for %s", txID),
		}
	}
	cursor := txBucket.Cursor()
	if ok := cursor.First(); !ok {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No block region "+
				"was found for %s", txID),
		}
	}
	serializedBlockID := cursor.Key()
	serializedData := cursor.Value()
	if len(serializedData) == 0 {
		return nil, nil
	}

	// Ensure the serialized data has enough bytes to properly deserialize.
	if len(serializedData) < includingBlocksIndexKeyEntrySize {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("corrupt transaction index "+
				"entry for %s", txID),
		}
	}

	// Load the block hash associated with the block ID.
	hash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
	if err != nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("corrupt transaction index "+
				"entry for %s: %s", txID, err),
		}
	}

	// Deserialize the final entry.
	region := database.BlockRegion{Hash: &daghash.Hash{}}
	copy(region.Hash[:], hash[:])
	region.Offset = byteOrder.Uint32(serializedData[:4])
	region.Len = byteOrder.Uint32(serializedData[4:])

	return &region, nil
}

// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint64, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	// The offset and length of the transactions within the serialized
	// block.
	txLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	// As an optimization, allocate a single slice big enough to hold all
	// of the serialized transaction index entries for the block and
	// serialize them directly into the slice. Then, pass the appropriate
	// subslice to the database to be written. This approach significantly
	// cuts down on the number of required allocations.
	includingBlocksOffset := 0
	serializedIncludingBlocksValues := make([]byte, len(block.Transactions())*includingBlocksIndexKeyEntrySize)
	for i, tx := range block.Transactions() {
		putIncludingBlocksEntry(serializedIncludingBlocksValues[includingBlocksOffset:], txLocs[i])
		endOffset := includingBlocksOffset + includingBlocksIndexKeyEntrySize
		err := dbPutIncludingBlocksEntry(dbTx, tx.ID(), blockID,
			serializedIncludingBlocksValues[includingBlocksOffset:endOffset:endOffset])
		if err != nil {
			return err
		}
		includingBlocksOffset += includingBlocksIndexKeyEntrySize
	}

	for includingBlockHash, blockTxsAcceptanceData := range txsAcceptanceData {
		var includingBlockID uint64
		if includingBlockHash.IsEqual(block.Hash()) {
			includingBlockID = blockID
		} else {
			includingBlockID, err = blockdag.DBFetchBlockIDByHash(dbTx, &includingBlockHash)
			if err != nil {
				return err
			}
		}

		serializedIncludingBlockID := blockdag.SerializeBlockID(includingBlockID)

		for _, txAcceptanceData := range blockTxsAcceptanceData {
			err = dbPutAcceptingBlocksEntry(dbTx, txAcceptanceData.Tx.ID(), blockID, serializedIncludingBlockID)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

func updateTxsAcceptedByVirtual(virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	// Initialize a new txsAcceptedByVirtual.
	entries := 0
	for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
		entries += len(blockTxsAcceptanceData)
	}
	txsAcceptedByVirtual = make(map[daghash.TxID]bool, entries)

	// Copy virtualTxsAcceptanceData to txsAcceptedByVirtual.
	for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
		for _, txAcceptanceData := range blockTxsAcceptanceData {
			txsAcceptedByVirtual[*txAcceptanceData.Tx.ID()] = true
		}
	}

	return nil
}

// TxIndex implements a transaction by hash index. That is to say, it supports
// querying all transactions by their hash.
type TxIndex struct {
	db database.DB
}

// Ensure the TxIndex type implements the Indexer interface.
var _ Indexer = (*TxIndex)(nil)

// Init initializes the hash-based transaction index. In particular, it finds
// the highest used block ID and stores it for later use when connecting or
// disconnecting blocks.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
	idx.db = db

	// Initialize the txsAcceptedByVirtual index.
	virtualTxsAcceptanceData, err := dag.TxsAcceptedByVirtual()
	if err != nil {
		return err
	}
	err = updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
	if err != nil {
		return err
	}
	return nil
}

// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Key() []byte {
	return includingBlocksIndexKey
}

// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Name() string {
	return txIndexName
}

// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the buckets for the hash-based
// transaction index and the internal block ID indexes.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Create(dbTx database.Tx) error {
	meta := dbTx.Metadata()
	if _, err := meta.CreateBucket(includingBlocksIndexKey); err != nil {
		return err
	}
	_, err := meta.CreateBucket(acceptingBlocksIndexKey)
	return err
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG. This indexer adds a hash-to-transaction mapping
// for every transaction in the passed block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
	acceptedTxsData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	if err := dbAddTxIndexEntries(dbTx, block, blockID, acceptedTxsData); err != nil {
		return err
	}

	err := updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
	if err != nil {
		return err
	}
	return nil
}

// TxFirstBlockRegion returns the first block region for the provided transaction hash
// from the transaction index. The block region can in turn be used to load the
// raw transaction bytes. When there is no entry for the provided hash, nil
// will be returned for both the entry and the error.
//
// This function is safe for concurrent access.
func (idx *TxIndex) TxFirstBlockRegion(txID *daghash.TxID) (*database.BlockRegion, error) {
	var region *database.BlockRegion
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		region, err = dbFetchFirstTxRegion(dbTx, txID)
		return err
	})
	return region, err
}

// TxBlocks returns the hashes of the blocks where the transaction exists.
func (idx *TxIndex) TxBlocks(txHash *daghash.Hash) ([]*daghash.Hash, error) {
	blockHashes := make([]*daghash.Hash, 0)
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		blockHashes, err = dbFetchTxBlocks(dbTx, txHash)
		if err != nil {
			return err
		}
		return nil
	})
	return blockHashes, err
}

func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]*daghash.Hash, error) {
	blockHashes := make([]*daghash.Hash, 0)
	bucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txHash[:])
	if bucket == nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No including blocks "+
				"were found for %s", txHash),
		}
	}
	err := bucket.ForEach(func(serializedBlockID, _ []byte) error {
		blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
		if err != nil {
			return err
		}
		blockHashes = append(blockHashes, blockHash)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return blockHashes, nil
}

// BlockThatAcceptedTx returns the hash of the block where the transaction got
// accepted (from the virtual block's point of view).
func (idx *TxIndex) BlockThatAcceptedTx(dag *blockdag.BlockDAG, txID *daghash.TxID) (*daghash.Hash, error) {
	var acceptingBlock *daghash.Hash
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		acceptingBlock, err = dbFetchTxAcceptingBlock(dbTx, txID, dag)
		return err
	})
	return acceptingBlock, err
}

func dbFetchTxAcceptingBlock(dbTx database.Tx, txID *daghash.TxID, dag *blockdag.BlockDAG) (*daghash.Hash, error) {
	// If the transaction was accepted by the current virtual,
	// return the zeroHash immediately.
	if _, ok := txsAcceptedByVirtual[*txID]; ok {
		return &daghash.ZeroHash, nil
	}

	bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txID[:])
	if bucket == nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No accepting blocks bucket "+
				"exists for %s", txID),
		}
	}
	cursor := bucket.Cursor()
	if !cursor.First() {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("Accepting blocks bucket is "+
				"empty for %s", txID),
		}
	}
	for ; cursor.Key() != nil; cursor.Next() {
		blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, cursor.Key())
		if err != nil {
			return nil, err
		}
		if dag.IsInSelectedParentChain(blockHash) {
			return blockHash, nil
		}
	}
	return nil, nil
}
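
// Editor's usage sketch (not part of the original file), mirroring the
// package tests: a daghash.ZeroHash result from BlockThatAcceptedTx means the
// transaction is currently accepted only by the virtual block, i.e. no
// concrete block in the selected parent chain has accepted it yet. The txID
// value here is an assumption.
//
//	acceptingBlock, err := txIndex.BlockThatAcceptedTx(dag, txID)
//	if err != nil {
//		// handle the error
//	}
//	if acceptingBlock.IsEqual(&daghash.ZeroHash) {
//		// accepted by the virtual block only
//	}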

// NewTxIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all transactions in the blockchain to the respective
// block, location within the block, and size of the transaction.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewTxIndex() *TxIndex {
	return &TxIndex{}
}

// DropTxIndex drops the transaction index from the provided database if it
// exists. Since the address index relies on it, the address index will also be
// dropped when it exists.
func DropTxIndex(db database.DB, interrupt <-chan struct{}) error {
	err := dropIndex(db, addrIndexKey, addrIndexName, interrupt)
	if err != nil {
		return err
	}

	err = dropIndex(db, includingBlocksIndexKey, txIndexName, interrupt)
	if err != nil {
		return err
	}

	return dropIndex(db, acceptingBlocksIndexKey, txIndexName, interrupt)
}

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
	return fmt.Errorf("txindex was turned off for %d blocks and can't be recovered."+
		" To resume working drop the txindex with --droptxindex", lastKnownBlockID-currentBlockID)
}
@@ -1,144 +0,0 @@
package indexers

import (
	"bytes"
	"reflect"
	"testing"

	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/mining"
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/wire"
)

func createTransaction(t *testing.T, value uint64, originTx *wire.MsgTx, outputIndex uint32) *wire.MsgTx {
	signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
	if err != nil {
		t.Fatalf("Error creating signature script: %s", err)
	}
	txIn := &wire.TxIn{
		PreviousOutpoint: wire.Outpoint{
			TxID:  *originTx.TxID(),
			Index: outputIndex,
		},
		Sequence:        wire.MaxTxInSequenceNum,
		SignatureScript: signatureScript,
	}
	txOut := wire.NewTxOut(value, blockdag.OpTrueScript)
	tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})

	return tx
}

func TestTxIndexConnectBlock(t *testing.T) {
	blocks := make(map[daghash.Hash]*util.Block)

	txIndex := NewTxIndex()
	indexManager := NewManager([]Indexer{txIndex})

	params := dagconfig.SimNetParams
	params.BlockCoinbaseMaturity = 0
	params.K = 1

	config := blockdag.Config{
		IndexManager: indexManager,
		DAGParams:    &params,
	}

	dag, teardown, err := blockdag.DAGSetup("TestTxIndexConnectBlock", config)
	if err != nil {
		t.Fatalf("TestTxIndexConnectBlock: Failed to setup DAG instance: %v", err)
	}
	if teardown != nil {
		defer teardown()
	}

	prepareAndProcessBlock := func(parentHashes []*daghash.Hash, transactions []*wire.MsgTx, blockName string) *wire.MsgBlock {
		block, err := mining.PrepareBlockForTest(dag, &params, parentHashes, transactions, false)
		if err != nil {
			t.Fatalf("TestTxIndexConnectBlock: block %v got unexpected error from PrepareBlockForTest: %v", blockName, err)
		}
		utilBlock := util.NewBlock(block)
		blocks[*block.BlockHash()] = utilBlock
		isOrphan, delay, err := dag.ProcessBlock(utilBlock, blockdag.BFNoPoWCheck)
		if err != nil {
			t.Fatalf("TestTxIndexConnectBlock: dag.ProcessBlock got unexpected error for block %v: %v", blockName, err)
		}
		if delay != 0 {
			t.Fatalf("TestTxIndexConnectBlock: block %s "+
				"is too far in the future", blockName)
		}
		if isOrphan {
			t.Fatalf("TestTxIndexConnectBlock: block %v was unexpectedly orphan", blockName)
		}
		return block
	}

	block1 := prepareAndProcessBlock([]*daghash.Hash{params.GenesisHash}, nil, "1")
	block2Tx := createTransaction(t, block1.Transactions[0].TxOut[0].Value, block1.Transactions[0], 0)
	block2 := prepareAndProcessBlock([]*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{block2Tx}, "2")
	block3Tx := createTransaction(t, block2.Transactions[0].TxOut[0].Value, block2.Transactions[0], 0)
	block3 := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3")

	block2TxID := block2Tx.TxID()
	block2TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
	if err != nil {
		t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
	}
	block3Hash := block3.BlockHash()
	if !block2TxNewAcceptedBlock.IsEqual(block3Hash) {
		t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
			"been accepted in block %v but instead got accepted in block %v", block3Hash, block2TxNewAcceptedBlock)
	}

	block3TxID := block3Tx.TxID()
	block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block3TxID)
	if err != nil {
		t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
	}
	if !block3TxNewAcceptedBlock.IsEqual(&daghash.ZeroHash) {
		t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
			"been accepted by the virtual block but instead got accepted in block %v", block3TxNewAcceptedBlock)
	}

	block3A := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3A")
	block4 := prepareAndProcessBlock([]*daghash.Hash{block3.BlockHash()}, nil, "4")
	prepareAndProcessBlock([]*daghash.Hash{block3A.BlockHash(), block4.BlockHash()}, nil, "5")

	block2TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
	if err != nil {
		t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
	}
	block3AHash := block3A.BlockHash()
	if !block2TxAcceptedBlock.IsEqual(block3AHash) {
		t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
			"been accepted in block %v but instead got accepted in block %v", block3AHash, block2TxAcceptedBlock)
	}

	region, err := txIndex.TxFirstBlockRegion(block3TxID)
	if err != nil {
		t.Fatalf("TestTxIndexConnectBlock: no block region was found for block3Tx")
	}
	regionBlock, ok := blocks[*region.Hash]
	if !ok {
		t.Fatalf("TestTxIndexConnectBlock: couldn't find block with hash %v", region.Hash)
	}

	regionBlockBytes, err := regionBlock.Bytes()
	if err != nil {
		t.Fatalf("TestTxIndexConnectBlock: Couldn't serialize block to bytes")
	}
	block3TxInBlock := regionBlockBytes[region.Offset : region.Offset+region.Len]

	block3TxBuf := bytes.NewBuffer(make([]byte, 0, block3Tx.SerializeSize()))
	block3Tx.BtcEncode(block3TxBuf, 0)
	blockTxBytes := block3TxBuf.Bytes()

	if !reflect.DeepEqual(blockTxBytes, block3TxInBlock) {
		t.Errorf("TestTxIndexConnectBlock: the block region that was in the bucket doesn't match block3Tx")
	}
}
@@ -1,218 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"math"
	"sort"
	"sync"
	"time"
)

const (
	// maxAllowedOffsetSecs is the maximum number of seconds in either
	// direction that the local clock will be adjusted. When the median time
	// of the network is outside of this range, no offset will be applied.
	maxAllowedOffsetSecs = 70 * 60 // 1 hour 10 minutes

	// similarTimeSecs is the number of seconds in either direction from the
	// local clock that is used to determine that it is likely wrong and
	// hence to show a warning.
	similarTimeSecs = 5 * 60 // 5 minutes
)

var (
	// maxMedianTimeEntries is the maximum number of entries allowed in the
	// median time data. This is a variable as opposed to a constant so the
	// test code can modify it.
	maxMedianTimeEntries = 200
)

// MedianTimeSource provides a mechanism to add several time samples which are
// used to determine a median time which is then used as an offset to the local
// clock.
type MedianTimeSource interface {
	// AdjustedTime returns the current time adjusted by the median time
	// offset as calculated from the time samples added by AddTimeSample.
	AdjustedTime() time.Time

	// AddTimeSample adds a time sample that is used when determining the
	// median time of the added samples.
	AddTimeSample(id string, timeVal time.Time)

	// Offset returns the number of seconds to adjust the local clock based
	// upon the median of the time samples added by AddTimeSample.
	Offset() time.Duration
}

// int64Sorter implements sort.Interface to allow a slice of 64-bit integers to
// be sorted.
type int64Sorter []int64

// Len returns the number of 64-bit integers in the slice. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Len() int {
	return len(s)
}

// Swap swaps the 64-bit integers at the passed indices. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less returns whether the 64-bit integer with index i should sort before the
// 64-bit integer with index j. It is part of the sort.Interface
// implementation.
func (s int64Sorter) Less(i, j int) bool {
	return s[i] < s[j]
}

// medianTime provides an implementation of the MedianTimeSource interface.
// It is limited to maxMedianTimeEntries and includes the same buggy behavior
// as the time offset mechanism in Bitcoin Core. This is necessary because it
// is used in the consensus code.
type medianTime struct {
	mtx                sync.Mutex
	knownIDs           map[string]struct{}
	offsets            []int64
	offsetSecs         int64
	invalidTimeChecked bool
}

// Ensure the medianTime type implements the MedianTimeSource interface.
var _ MedianTimeSource = (*medianTime)(nil)

// AdjustedTime returns the current time adjusted by the median time offset as
// calculated from the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AdjustedTime() time.Time {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	// Limit the adjusted time to 1 second precision.
	now := time.Unix(time.Now().Unix(), 0)
	return now.Add(time.Duration(m.offsetSecs) * time.Second)
}

// AddTimeSample adds a time sample that is used when determining the median
// time of the added samples.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AddTimeSample(sourceID string, timeVal time.Time) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	// Don't add time data from the same source.
	if _, exists := m.knownIDs[sourceID]; exists {
		return
	}
	m.knownIDs[sourceID] = struct{}{}

	// Truncate the provided offset to seconds and append it to the slice
	// of offsets while respecting the maximum number of allowed entries by
	// replacing the oldest entry with the new entry once the maximum number
	// of entries is reached.
	now := time.Unix(time.Now().Unix(), 0)
	offsetSecs := int64(timeVal.Sub(now).Seconds())
	numOffsets := len(m.offsets)
	if numOffsets == maxMedianTimeEntries && maxMedianTimeEntries > 0 {
		m.offsets = m.offsets[1:]
		numOffsets--
	}
	m.offsets = append(m.offsets, offsetSecs)
	numOffsets++

	// Sort the offsets so the median can be obtained as needed later.
	sortedOffsets := make([]int64, numOffsets)
	copy(sortedOffsets, m.offsets)
	sort.Sort(int64Sorter(sortedOffsets))

	offsetDuration := time.Duration(offsetSecs) * time.Second
	log.Debugf("Added time sample of %s (total: %d)", offsetDuration,
		numOffsets)

	// NOTE: The following code intentionally has a bug to mirror the
	// buggy behavior in Bitcoin Core since the median time is used in the
	// consensus rules.
	//
	// In particular, the offset is only updated when the number of entries
	// is odd, but the max number of entries is 200, an even number. Thus,
	// the offset will never be updated again once the max number of entries
	// is reached.

	// The median offset is only updated when there are enough offsets and
	// the number of offsets is odd so the middle value is the true median.
	// Thus, there is nothing to do when those conditions are not met.
	if numOffsets < 5 || numOffsets&0x01 != 1 {
		return
	}

	// At this point the number of offsets in the list is odd, so the
	// middle value of the sorted offsets is the median.
	median := sortedOffsets[numOffsets/2]

	// Set the new offset when the median offset is within the allowed
	// offset range.
	if math.Abs(float64(median)) < maxAllowedOffsetSecs {
		m.offsetSecs = median
	} else {
		// The median offset of all added time data is larger than the
		// maximum allowed offset, so don't use an offset. This
		// effectively limits how far the local clock can be skewed.
		m.offsetSecs = 0

		if !m.invalidTimeChecked {
			m.invalidTimeChecked = true

			// Find if any time samples have a time that is close
			// to the local time.
			var remoteHasCloseTime bool
			for _, offset := range sortedOffsets {
				if math.Abs(float64(offset)) < similarTimeSecs {
					remoteHasCloseTime = true
					break
				}
			}

			// Warn if none of the time samples are close.
			if !remoteHasCloseTime {
				log.Warnf("Please check your date and time " +
					"are correct! btcd will not work " +
					"properly with an invalid time")
			}
		}
	}

	medianDuration := time.Duration(m.offsetSecs) * time.Second
	log.Debugf("New time offset: %v", medianDuration)
}

// Offset returns the number of seconds to adjust the local clock based upon
// the median of the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) Offset() time.Duration {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	return time.Duration(m.offsetSecs) * time.Second
}

// NewMedianTime returns a new instance of a concurrency-safe implementation
// of the MedianTimeSource interface. The returned implementation contains the
// rules necessary for proper time handling in the chain consensus rules and
// expects the time samples to be added from the timestamp field of the version
// message received from remote peers that successfully connect and negotiate.
func NewMedianTime() MedianTimeSource {
	return &medianTime{
		knownIDs: make(map[string]struct{}),
		offsets:  make([]int64, 0, maxMedianTimeEntries),
	}
}
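
// Editor's sketch (not part of the original file), mirroring TestMedianTime
// below: samples are added per-source, and the offset only changes once at
// least five samples from distinct sources are present and their count is
// odd. With the offsets below, the median (and hence the offset) is -12s.
//
//	timeSource := NewMedianTime()
//	now := time.Unix(time.Now().Unix(), 0)
//	for i, offsetSecs := range []int64{-13, 57, -4, -23, -12} {
//		timeSource.AddTimeSample(strconv.Itoa(i), now.Add(time.Duration(offsetSecs)*time.Second))
//	}
//	// timeSource.Offset() is now roughly -12 * time.Second.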
@@ -1,104 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"strconv"
	"testing"
	"time"
)

// TestMedianTime tests the medianTime implementation.
func TestMedianTime(t *testing.T) {
	tests := []struct {
		in         []int64
		wantOffset int64
		useDupID   bool
	}{
		// Not enough samples must result in an offset of 0.
		{in: []int64{1}, wantOffset: 0},
		{in: []int64{1, 2}, wantOffset: 0},
		{in: []int64{1, 2, 3}, wantOffset: 0},
		{in: []int64{1, 2, 3, 4}, wantOffset: 0},

		// Various numbers of entries. The expected offset is only
		// updated on an odd number of elements.
		{in: []int64{-13, 57, -4, -23, -12}, wantOffset: -12},
		{in: []int64{55, -13, 61, -52, 39, 55}, wantOffset: 39},
		{in: []int64{-62, -58, -30, -62, 51, -30, 15}, wantOffset: -30},
		{in: []int64{29, -47, 39, 54, 42, 41, 8, -33}, wantOffset: 39},
		{in: []int64{37, 54, 9, -21, -56, -36, 5, -11, -39}, wantOffset: -11},
		{in: []int64{57, -28, 25, -39, 9, 63, -16, 19, -60, 25}, wantOffset: 9},
		{in: []int64{-5, -4, -3, -2, -1}, wantOffset: -3, useDupID: true},

		// The offset stops being updated once the max number of entries
		// has been reached. This is actually a bug from Bitcoin Core,
		// but since the time is ultimately used as a part of the
		// consensus rules, it must be mirrored.
		{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52}, wantOffset: 17},
		{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45}, wantOffset: 17},
		{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45, 4}, wantOffset: 17},

		// Offsets that are too far away from the local time should
		// be ignored.
		{in: []int64{-4201, 4202, -4203, 4204, -4205}, wantOffset: 0},

		// Exercise the condition where the median offset is greater
		// than the max allowed adjustment, but there is at least one
		// sample that is close enough to the current time to avoid
		// triggering a warning about an invalid local clock.
		{in: []int64{4201, 4202, 4203, 4204, -299}, wantOffset: 0},
	}

	// Modify the max number of allowed median time entries for these tests.
	maxMedianTimeEntries = 10
	defer func() { maxMedianTimeEntries = 200 }()

	for i, test := range tests {
		filter := NewMedianTime()
		for j, offset := range test.in {
			id := strconv.Itoa(j)
			now := time.Unix(time.Now().Unix(), 0)
			tOffset := now.Add(time.Duration(offset) * time.Second)
			filter.AddTimeSample(id, tOffset)

			// Ensure the duplicate IDs are ignored.
			if test.useDupID {
				// Modify the offsets to ensure the final median
				// would be different if the duplicate is added.
				tOffset = tOffset.Add(time.Duration(offset) *
					time.Second)
				filter.AddTimeSample(id, tOffset)
			}
		}

		// Since it is possible that the time.Now call in AddTimeSample
		// and the time.Now calls here in the tests will be off by one
		// second, allow a fudge factor to compensate.
		gotOffset := filter.Offset()
		wantOffset := time.Duration(test.wantOffset) * time.Second
		wantOffset2 := time.Duration(test.wantOffset-1) * time.Second
		if gotOffset != wantOffset && gotOffset != wantOffset2 {
			t.Errorf("Offset #%d: unexpected offset -- got %v, "+
				"want %v or %v", i, gotOffset, wantOffset,
				wantOffset2)
			continue
		}

		// Since it is possible that the time.Now call in AdjustedTime
		// and the time.Now call here in the tests will be off by one
		// second, allow a fudge factor to compensate.
		adjustedTime := filter.AdjustedTime()
		now := time.Unix(time.Now().Unix(), 0)
		wantTime := now.Add(filter.Offset())
		wantTime2 := now.Add(filter.Offset() - time.Second)
		if !adjustedTime.Equal(wantTime) && !adjustedTime.Equal(wantTime2) {
			t.Errorf("AdjustedTime #%d: unexpected result -- got %v, "+
				"want %v or %v", i, adjustedTime, wantTime,
				wantTime2)
			continue
		}
	}
}
@@ -1,105 +0,0 @@
package blockdag

import (
	"github.com/daglabs/btcd/util/daghash"
)

// phantom calculates and returns the block's blue set, selected parent and blue score.
// The chain start is determined by going down the DAG through the selected path
// (following the selected parent of each block) k + 1 steps.
// The blue set of a block is all of the blue blocks in its past.
// To optimize memory usage, for each block we store only the blue blocks in
// its selected parent's anticone that are in the future of the chain start,
// as well as the selected parent itself - the rest of the blue set can be
// restored by traversing the selected parent chain and combining the .blues
// of all blocks in it.
// The blue score is the total number of blocks in this block's blue set: the
// blue score of the selected parent plus the number of blues this block adds.
// (The blue score of the genesis block is defined as 0.)
// The selected parent is chosen by determining which of the block's parents
// will give this block the highest blue score.
func phantom(block *blockNode, k uint32) (blues []*blockNode, selectedParent *blockNode, score uint64) {
	bestScore := uint64(0)
	var bestParent *blockNode
	var bestBlues []*blockNode
	var bestHash *daghash.Hash
	for _, parent := range block.parents {
		chainStart := digToChainStart(parent, k)
		candidates := blueCandidates(chainStart)
		blues := traverseCandidates(block, candidates, parent)
		score := uint64(len(blues)) + parent.blueScore

		if score > bestScore || (score == bestScore && (bestHash == nil || daghash.Less(parent.hash, bestHash))) {
			bestScore = score
			bestBlues = blues
			bestParent = parent
			bestHash = parent.hash
		}
	}

	return bestBlues, bestParent, bestScore
}
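
// Editor's sketch (not part of the original file) of the restoration the
// comment above describes: the full blue set of a block is its stored .blues
// plus the .blues of every block along its selected parent chain. This
// assumes the genesis block's selectedParent is nil.
//
//	func fullBlueSet(block *blockNode) []*blockNode {
//		blues := []*blockNode{}
//		for current := block; current != nil; current = current.selectedParent {
//			blues = append(blues, current.blues...)
//		}
//		return blues
//	}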

// digToChainStart digs through the selected path and returns the block at depth k+1.
func digToChainStart(parent *blockNode, k uint32) *blockNode {
	current := parent

	for i := uint32(0); i < k; i++ {
		if current.isGenesis() {
			break
		}
		current = current.selectedParent
	}

	return current
}

func blueCandidates(chainStart *blockNode) blockSet {
	candidates := newSet()
	candidates.add(chainStart)

	queue := []*blockNode{chainStart}
	for len(queue) > 0 {
		var current *blockNode
		current, queue = queue[0], queue[1:]

		children := current.children
		for _, child := range children {
			if !candidates.contains(child) {
				candidates.add(child)
				queue = append(queue, child)
			}
		}
	}

	return candidates
}

// traverseCandidates returns all the blocks that are in the future of the
// chain start and in the anticone of the selected parent.
func traverseCandidates(newBlock *blockNode, candidates blockSet, selectedParent *blockNode) []*blockNode {
	blues := []*blockNode{}
	selectedParentPast := newSet()
	queue := newDownHeap()
	visited := newSet()

	for _, parent := range newBlock.parents {
		queue.Push(parent)
	}

	for queue.Len() > 0 {
		current := queue.pop()
		if candidates.contains(current) {
			if current == selectedParent || selectedParentPast.anyChildInSet(current) {
				selectedParentPast.add(current)
			} else {
				blues = append(blues, current)
			}
			for _, parent := range current.parents {
				if !visited.contains(parent) {
					visited.add(parent)
					queue.Push(parent)
				}
			}
		}
	}

	return append(blues, selectedParent)
}
|
||||
@@ -1,892 +0,0 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/daglabs/btcd/util/daghash"
|
||||
|
||||
"github.com/daglabs/btcd/dagconfig"
|
||||
)
|
||||
|
||||
type testBlockData struct {
|
||||
parents []string
|
||||
id string //id is a virtual entity that is used only for tests so we can define relations between blocks without knowing their hash
|
||||
expectedScore uint64
|
||||
expectedSelectedParent string
|
||||
expectedBlues []string
|
||||
}
|
||||
|
||||
//TestPhantom iterate over several dag simulations, and checks
|
||||
//that the blue score, blue set and selected parent of each
|
||||
//block calculated as expected
|
||||
func TestPhantom(t *testing.T) {
|
||||
netParams := dagconfig.SimNetParams
|
||||
|
||||
blockVersion := int32(0x10000000)
|
||||
|
||||
tests := []struct {
|
||||
k uint32
|
||||
dagData []*testBlockData
|
||||
virtualBlockID string
|
||||
expectedReds []string
|
||||
}{
|
||||
{
|
||||
//Block hash order:AKJIHGFEDCB
|
||||
k: 1,
|
||||
virtualBlockID: "K",
|
||||
expectedReds: []string{"D"},
|
||||
dagData: []*testBlockData{
|
||||
{
|
||||
parents: []string{"A"},
|
||||
id: "B",
|
||||
expectedScore: 1,
|
||||
expectedSelectedParent: "A",
|
||||
expectedBlues: []string{"A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"A"},
|
||||
id: "C",
|
||||
expectedScore: 1,
|
||||
expectedSelectedParent: "A",
|
||||
expectedBlues: []string{"A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"B"},
|
||||
id: "D",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "B",
|
||||
expectedBlues: []string{"B"},
|
||||
},
|
||||
{
|
||||
parents: []string{"B"},
|
||||
id: "E",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "B",
|
||||
expectedBlues: []string{"B"},
|
||||
},
|
||||
{
|
||||
parents: []string{"C"},
|
||||
id: "F",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "C",
|
||||
expectedBlues: []string{"C"},
|
||||
},
|
||||
{
|
||||
parents: []string{"C", "D"},
|
||||
id: "G",
|
||||
expectedScore: 4,
|
||||
expectedSelectedParent: "C",
|
||||
expectedBlues: []string{"D", "B", "C"},
|
||||
},
|
||||
{
|
||||
parents: []string{"C", "E"},
|
||||
id: "H",
|
||||
expectedScore: 4,
|
||||
expectedSelectedParent: "C",
|
||||
expectedBlues: []string{"E", "B", "C"},
|
||||
},
|
||||
{
|
||||
parents: []string{"E", "G"},
|
||||
id: "I",
|
||||
expectedScore: 5,
|
||||
expectedSelectedParent: "E",
|
||||
expectedBlues: []string{"G", "D", "E"},
|
||||
},
|
||||
{
|
||||
parents: []string{"F"},
|
||||
id: "J",
|
||||
expectedScore: 3,
|
||||
expectedSelectedParent: "F",
|
||||
expectedBlues: []string{"F"},
|
||||
},
|
||||
{
|
||||
parents: []string{"H", "I", "J"},
|
||||
id: "K",
|
||||
expectedScore: 9,
|
||||
expectedSelectedParent: "H",
|
||||
expectedBlues: []string{"I", "G", "J", "F", "H"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
//block hash order:AVUTSRQPONMLKJIHGFEDCB
|
||||
k: 2,
|
||||
virtualBlockID: "V",
|
||||
expectedReds: []string{"D", "J", "P"},
|
||||
dagData: []*testBlockData{
|
||||
{
|
||||
parents: []string{"A"},
|
||||
id: "B",
|
||||
expectedScore: 1,
|
||||
expectedSelectedParent: "A",
|
||||
expectedBlues: []string{"A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"A"},
|
||||
id: "C",
|
||||
expectedScore: 1,
|
||||
expectedSelectedParent: "A",
|
||||
expectedBlues: []string{"A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"B"},
|
||||
id: "D",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "B",
|
||||
expectedBlues: []string{"B"},
|
||||
},
|
||||
{
|
||||
parents: []string{"B"},
|
||||
id: "E",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "B",
|
||||
expectedBlues: []string{"B"},
|
||||
},
|
||||
{
|
||||
parents: []string{"C"},
|
||||
id: "F",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "C",
|
||||
expectedBlues: []string{"C"},
|
||||
},
|
||||
{
|
||||
parents: []string{"C"},
|
||||
id: "G",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "C",
|
||||
expectedBlues: []string{"C"},
|
||||
},
|
||||
{
|
||||
parents: []string{"G"},
|
||||
id: "H",
|
||||
expectedScore: 3,
|
||||
expectedSelectedParent: "G",
|
||||
expectedBlues: []string{"G"},
|
||||
},
|
||||
{
|
||||
parents: []string{"E"},
|
||||
id: "I",
|
||||
expectedScore: 3,
|
||||
expectedSelectedParent: "E",
|
||||
expectedBlues: []string{"E"},
|
||||
},
|
||||
{
|
||||
parents: []string{"E"},
|
||||
id: "J",
|
||||
expectedScore: 3,
|
||||
expectedSelectedParent: "E",
|
||||
expectedBlues: []string{"E"},
|
||||
},
|
||||
{
|
||||
parents: []string{"I"},
|
||||
id: "K",
|
||||
expectedScore: 4,
|
||||
expectedSelectedParent: "I",
|
||||
expectedBlues: []string{"I"},
|
||||
},
|
||||
{
|
||||
parents: []string{"K", "H"},
|
||||
id: "L",
|
||||
expectedScore: 5,
|
||||
expectedSelectedParent: "K",
|
||||
expectedBlues: []string{"K"},
|
||||
},
|
||||
{
|
||||
parents: []string{"F", "L"},
|
||||
id: "M",
|
||||
expectedScore: 10,
|
||||
expectedSelectedParent: "F",
|
||||
expectedBlues: []string{"L", "K", "I", "H", "G", "E", "B", "F"},
|
||||
},
|
||||
{
|
||||
parents: []string{"G", "K"},
|
||||
id: "N",
|
||||
expectedScore: 7,
|
||||
expectedSelectedParent: "G",
|
||||
expectedBlues: []string{"K", "I", "E", "B", "G"},
|
||||
},
|
||||
{
|
||||
parents: []string{"J", "N"},
|
||||
id: "O",
|
||||
expectedScore: 8,
|
||||
expectedSelectedParent: "N",
|
||||
expectedBlues: []string{"N"},
|
||||
},
|
||||
{
|
||||
parents: []string{"D"},
|
||||
id: "P",
|
||||
expectedScore: 3,
|
||||
expectedSelectedParent: "D",
|
||||
expectedBlues: []string{"D"},
|
||||
},
|
||||
{
|
||||
parents: []string{"O", "P"},
|
||||
id: "Q",
|
||||
expectedScore: 10,
|
||||
expectedSelectedParent: "P",
|
||||
expectedBlues: []string{"O", "N", "K", "J", "I", "E", "P"},
|
||||
},
|
||||
{
|
||||
parents: []string{"L", "Q"},
|
||||
id: "R",
|
||||
expectedScore: 11,
|
||||
expectedSelectedParent: "Q",
|
||||
expectedBlues: []string{"Q"},
|
||||
},
|
||||
{
|
||||
parents: []string{"M", "R"},
|
||||
id: "S",
|
||||
expectedScore: 15,
|
||||
expectedSelectedParent: "M",
|
||||
expectedBlues: []string{"R", "Q", "O", "N", "M"},
|
||||
},
|
||||
{
|
||||
parents: []string{"H", "F"},
|
||||
id: "T",
|
||||
expectedScore: 5,
|
||||
expectedSelectedParent: "F",
|
||||
expectedBlues: []string{"H", "G", "F"},
|
||||
},
|
||||
{
|
||||
parents: []string{"M", "T"},
|
||||
id: "U",
|
||||
expectedScore: 12,
|
||||
expectedSelectedParent: "M",
|
||||
expectedBlues: []string{"T", "M"},
|
||||
},
|
||||
{
|
||||
parents: []string{"S", "U"},
|
||||
id: "V",
|
||||
expectedScore: 18,
|
||||
expectedSelectedParent: "S",
|
||||
expectedBlues: []string{"U", "T", "S"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
//Block hash order:AXWVUTSRQPONMLKJIHGFEDCB
|
||||
k: 1,
|
||||
virtualBlockID: "X",
|
||||
expectedReds: []string{"D", "F", "G", "H", "J", "K", "L", "N", "O", "Q", "R", "S", "U", "V"},
|
||||
dagData: []*testBlockData{
|
||||
{
|
||||
parents: []string{"A"},
|
||||
id: "B",
|
||||
expectedScore: 1,
|
||||
expectedSelectedParent: "A",
|
||||
expectedBlues: []string{"A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"A"},
|
||||
id: "C",
|
||||
expectedScore: 1,
|
||||
expectedSelectedParent: "A",
|
||||
expectedBlues: []string{"A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"A"},
|
||||
id: "D",
|
||||
expectedScore: 1,
|
||||
expectedSelectedParent: "A",
|
||||
expectedBlues: []string{"A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"A"},
|
||||
id: "E",
|
||||
expectedScore: 1,
|
||||
expectedSelectedParent: "A",
|
||||
expectedBlues: []string{"A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"B"},
|
||||
id: "F",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "B",
|
||||
expectedBlues: []string{"B"},
|
||||
},
|
||||
{
|
||||
parents: []string{"B"},
|
||||
id: "G",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "B",
|
||||
expectedBlues: []string{"B"},
|
||||
},
|
||||
{
|
||||
parents: []string{"C"},
|
||||
id: "H",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "C",
|
||||
expectedBlues: []string{"C"},
|
||||
},
|
||||
{
|
||||
parents: []string{"C"},
|
||||
id: "I",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "C",
|
||||
expectedBlues: []string{"C"},
|
||||
},
|
||||
{
|
||||
parents: []string{"B"},
|
||||
id: "J",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "B",
|
||||
expectedBlues: []string{"B"},
|
||||
},
|
||||
{
|
||||
parents: []string{"D"},
|
||||
id: "K",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "D",
|
||||
expectedBlues: []string{"D"},
|
||||
},
|
||||
{
|
||||
parents: []string{"D"},
|
||||
id: "L",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "D",
|
||||
expectedBlues: []string{"D"},
|
||||
},
|
||||
{
|
||||
parents: []string{"E"},
|
||||
id: "M",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "E",
|
||||
expectedBlues: []string{"E"},
|
||||
},
|
||||
{
|
||||
parents: []string{"E"},
|
||||
id: "N",
|
||||
expectedScore: 2,
|
||||
expectedSelectedParent: "E",
|
||||
expectedBlues: []string{"E"},
|
||||
},
|
||||
{
|
||||
parents: []string{"F", "G", "J"},
|
||||
id: "O",
|
||||
expectedScore: 5,
|
||||
expectedSelectedParent: "F",
|
||||
expectedBlues: []string{"J", "G", "F"},
|
||||
},
|
||||
{
|
||||
parents: []string{"B", "M", "I"},
|
||||
id: "P",
|
||||
expectedScore: 6,
|
||||
expectedSelectedParent: "B",
|
||||
expectedBlues: []string{"M", "I", "E", "C", "B"},
|
||||
},
|
||||
{
|
||||
parents: []string{"K", "E"},
|
||||
id: "Q",
|
||||
expectedScore: 4,
|
||||
expectedSelectedParent: "E",
|
||||
expectedBlues: []string{"K", "D", "E"},
|
||||
},
|
||||
{
|
||||
parents: []string{"L", "N"},
|
||||
id: "R",
|
||||
expectedScore: 3,
|
||||
expectedSelectedParent: "L",
|
||||
expectedBlues: []string{"L"},
|
||||
},
|
||||
{
|
||||
parents: []string{"I", "Q"},
|
||||
id: "S",
|
||||
expectedScore: 5,
|
||||
expectedSelectedParent: "Q",
|
||||
expectedBlues: []string{"Q"},
|
||||
},
|
||||
{
|
||||
parents: []string{"K", "P"},
|
||||
id: "T",
|
||||
expectedScore: 7,
|
||||
expectedSelectedParent: "P",
|
||||
expectedBlues: []string{"P"},
|
||||
},
|
||||
{
|
||||
parents: []string{"K", "L"},
|
||||
id: "U",
|
||||
expectedScore: 4,
|
||||
expectedSelectedParent: "K",
|
||||
expectedBlues: []string{"L", "K"},
|
||||
},
|
||||
{
|
||||
parents: []string{"U", "R"},
|
||||
id: "V",
|
||||
expectedScore: 5,
|
||||
expectedSelectedParent: "R",
|
||||
expectedBlues: []string{"U", "R"},
|
||||
},
|
||||
{
|
||||
parents: []string{"S", "U", "T"},
|
||||
id: "W",
|
||||
expectedScore: 8,
|
||||
expectedSelectedParent: "T",
|
||||
expectedBlues: []string{"T"},
|
||||
},
|
||||
{
|
||||
parents: []string{"V", "W", "H"},
|
||||
id: "X",
|
||||
expectedScore: 9,
|
||||
expectedSelectedParent: "W",
|
||||
expectedBlues: []string{"W"},
|
||||
},
|
||||
},
|
||||
},
|
		{
			// Secret mining attack: The attacker mines blocks
			// B, C, D, E, F, G, and T in secret, without propagating
			// them. All of them except T should be red, because they
			// break the PHANTOM rules that require a block to point
			// at all the parents its miner knows of and to be
			// propagated as soon as it is mined.

			// Block hash order: AYXWVUTSRQPONMLKJIHGFEDCB
			k: 1,
			virtualBlockID: "Y",
			expectedReds: []string{"B", "C", "D", "E", "F", "G", "L"},
			dagData: []*testBlockData{
				{
					parents: []string{"A"},
					id: "B",
					expectedScore: 1,
					expectedSelectedParent: "A",
					expectedBlues: []string{"A"},
				},
				{
					parents: []string{"B"},
					id: "C",
					expectedScore: 2,
					expectedSelectedParent: "B",
					expectedBlues: []string{"B"},
				},
				{
					parents: []string{"C"},
					id: "D",
					expectedScore: 3,
					expectedSelectedParent: "C",
					expectedBlues: []string{"C"},
				},
				{
					parents: []string{"D"},
					id: "E",
					expectedScore: 4,
					expectedSelectedParent: "D",
					expectedBlues: []string{"D"},
				},
				{
					parents: []string{"E"},
					id: "F",
					expectedScore: 5,
					expectedSelectedParent: "E",
					expectedBlues: []string{"E"},
				},
				{
					parents: []string{"F"},
					id: "G",
					expectedScore: 6,
					expectedSelectedParent: "F",
					expectedBlues: []string{"F"},
				},
				{
					parents: []string{"A"},
					id: "H",
					expectedScore: 1,
					expectedSelectedParent: "A",
					expectedBlues: []string{"A"},
				},
				{
					parents: []string{"A"},
					id: "I",
					expectedScore: 1,
					expectedSelectedParent: "A",
					expectedBlues: []string{"A"},
				},
				{
					parents: []string{"H", "I"},
					id: "J",
					expectedScore: 3,
					expectedSelectedParent: "H",
					expectedBlues: []string{"I", "H"},
				},
				{
					parents: []string{"H", "I"},
					id: "K",
					expectedScore: 3,
					expectedSelectedParent: "H",
					expectedBlues: []string{"I", "H"},
				},
				{
					parents: []string{"I"},
					id: "L",
					expectedScore: 2,
					expectedSelectedParent: "I",
					expectedBlues: []string{"I"},
				},
				{
					parents: []string{"J", "K", "L"},
					id: "M",
					expectedScore: 5,
					expectedSelectedParent: "J",
					expectedBlues: []string{"K", "J"},
				},
				{
					parents: []string{"J", "K", "L"},
					id: "N",
					expectedScore: 5,
					expectedSelectedParent: "J",
					expectedBlues: []string{"K", "J"},
				},
				{
					parents: []string{"N", "M"},
					id: "O",
					expectedScore: 7,
					expectedSelectedParent: "M",
					expectedBlues: []string{"N", "M"},
				},
				{
					parents: []string{"N", "M"},
					id: "P",
					expectedScore: 7,
					expectedSelectedParent: "M",
					expectedBlues: []string{"N", "M"},
				},
				{
					parents: []string{"N", "M"},
					id: "Q",
					expectedScore: 7,
					expectedSelectedParent: "M",
					expectedBlues: []string{"N", "M"},
				},
				{
					parents: []string{"O", "P", "Q"},
					id: "R",
					expectedScore: 10,
					expectedSelectedParent: "O",
					expectedBlues: []string{"Q", "P", "O"},
				},
				{
					parents: []string{"O", "P", "Q"},
					id: "S",
					expectedScore: 10,
					expectedSelectedParent: "O",
					expectedBlues: []string{"Q", "P", "O"},
				},
				{
					parents: []string{"G", "S", "R"},
					id: "T",
					expectedScore: 12,
					expectedSelectedParent: "R",
					expectedBlues: []string{"S", "R"},
				},
				{
					parents: []string{"S", "R"},
					id: "U",
					expectedScore: 12,
					expectedSelectedParent: "R",
					expectedBlues: []string{"S", "R"},
				},
				{
					parents: []string{"T", "U"},
					id: "V",
					expectedScore: 14,
					expectedSelectedParent: "T",
					expectedBlues: []string{"U", "T"},
				},
				{
					parents: []string{"T", "U"},
					id: "W",
					expectedScore: 14,
					expectedSelectedParent: "T",
					expectedBlues: []string{"U", "T"},
				},
				{
					parents: []string{"U", "T"},
					id: "X",
					expectedScore: 14,
					expectedSelectedParent: "T",
					expectedBlues: []string{"U", "T"},
				},
				{
					parents: []string{"V", "W", "X"},
					id: "Y",
					expectedScore: 17,
					expectedSelectedParent: "V",
					expectedBlues: []string{"X", "W", "V"},
				},
			},
		},
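		// Illustrative note (not from the source): with k=1, a block stays
		// blue only if at most one already-blue block lies in its anticone.
		// Each withheld block B..G has the entire honest side of the DAG in
		// its anticone by the time T finally publishes it, far more than k,
		// so the coloring rule has no choice but to mark B..G red, exactly
		// as expectedReds above asserts.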
		{
			// Censorship mining attack: The attacker mines blocks
			// B, C, D, E, F, and G in secret, without propagating them.
			// All of them except B and C should be red, because they
			// break the PHANTOM rule that requires a block to point at
			// all the parents its miner knows of.

			// Block hash order: AYXWVUTSRQPONMLKJIHGFEDCB
			k: 1,
			virtualBlockID: "Y",
			expectedReds: []string{"D", "E", "F", "G", "L"},
			dagData: []*testBlockData{
				{
					parents: []string{"A"},
					id: "B",
					expectedScore: 1,
					expectedSelectedParent: "A",
					expectedBlues: []string{"A"},
				},
				{
					parents: []string{"B"},
					id: "C",
					expectedScore: 2,
					expectedSelectedParent: "B",
					expectedBlues: []string{"B"},
				},
				{
					parents: []string{"C"},
					id: "D",
					expectedScore: 3,
					expectedSelectedParent: "C",
					expectedBlues: []string{"C"},
				},
				{
					parents: []string{"D"},
					id: "E",
					expectedScore: 4,
					expectedSelectedParent: "D",
					expectedBlues: []string{"D"},
				},
				{
					parents: []string{"E"},
					id: "F",
					expectedScore: 5,
					expectedSelectedParent: "E",
					expectedBlues: []string{"E"},
				},
				{
					parents: []string{"F"},
					id: "G",
					expectedScore: 6,
					expectedSelectedParent: "F",
					expectedBlues: []string{"F"},
				},
				{
					parents: []string{"A"},
					id: "H",
					expectedScore: 1,
					expectedSelectedParent: "A",
					expectedBlues: []string{"A"},
				},
				{
					parents: []string{"A"},
					id: "I",
					expectedScore: 1,
					expectedSelectedParent: "A",
					expectedBlues: []string{"A"},
				},
				{
					parents: []string{"H", "I", "B"},
					id: "J",
					expectedScore: 4,
					expectedSelectedParent: "B",
					expectedBlues: []string{"I", "H", "B"},
				},
				{
					parents: []string{"H", "I", "B"},
					id: "K",
					expectedScore: 4,
					expectedSelectedParent: "B",
					expectedBlues: []string{"I", "H", "B"},
				},
				{
					parents: []string{"I"},
					id: "L",
					expectedScore: 2,
					expectedSelectedParent: "I",
					expectedBlues: []string{"I"},
				},
				{
					parents: []string{"J", "K", "L", "C"},
					id: "M",
					expectedScore: 7,
					expectedSelectedParent: "J",
					expectedBlues: []string{"K", "C", "J"},
				},
				{
					parents: []string{"J", "K", "L", "C"},
					id: "N",
					expectedScore: 7,
					expectedSelectedParent: "J",
					expectedBlues: []string{"K", "C", "J"},
				},
				{
					parents: []string{"N", "M", "D"},
					id: "O",
					expectedScore: 9,
					expectedSelectedParent: "M",
					expectedBlues: []string{"N", "M"},
				},
				{
					parents: []string{"N", "M", "D"},
					id: "P",
					expectedScore: 9,
					expectedSelectedParent: "M",
					expectedBlues: []string{"N", "M"},
				},
				{
					parents: []string{"N", "M", "D"},
					id: "Q",
					expectedScore: 9,
					expectedSelectedParent: "M",
					expectedBlues: []string{"N", "M"},
				},
				{
					parents: []string{"O", "P", "Q", "E"},
					id: "R",
					expectedScore: 12,
					expectedSelectedParent: "O",
					expectedBlues: []string{"Q", "P", "O"},
				},
				{
					parents: []string{"O", "P", "Q", "E"},
					id: "S",
					expectedScore: 12,
					expectedSelectedParent: "O",
					expectedBlues: []string{"Q", "P", "O"},
				},
				{
					parents: []string{"G", "S", "R"},
					id: "T",
					expectedScore: 14,
					expectedSelectedParent: "R",
					expectedBlues: []string{"S", "R"},
				},
				{
					parents: []string{"S", "R", "F"},
					id: "U",
					expectedScore: 14,
					expectedSelectedParent: "R",
					expectedBlues: []string{"S", "R"},
				},
				{
					parents: []string{"T", "U"},
					id: "V",
					expectedScore: 16,
					expectedSelectedParent: "T",
					expectedBlues: []string{"U", "T"},
				},
				{
					parents: []string{"T", "U"},
					id: "W",
					expectedScore: 16,
					expectedSelectedParent: "T",
					expectedBlues: []string{"U", "T"},
				},
				{
					parents: []string{"T", "U"},
					id: "X",
					expectedScore: 16,
					expectedSelectedParent: "T",
					expectedBlues: []string{"U", "T"},
				},
				{
					parents: []string{"V", "W", "X"},
					id: "Y",
					expectedScore: 19,
					expectedSelectedParent: "V",
					expectedBlues: []string{"X", "W", "V"},
				},
			},
		},
	}

	for i, test := range tests {
		netParams.K = test.k
		// Generate enough synthetic blocks for the rest of the test
		blockDAG := newTestDAG(&netParams)
		genesisNode := blockDAG.genesis
		blockTime := genesisNode.Header().Timestamp
		blockByIDMap := make(map[string]*blockNode)
		idByBlockMap := make(map[*blockNode]string)
		blockByIDMap["A"] = genesisNode
		idByBlockMap[genesisNode] = "A"

		for _, blockData := range test.dagData {
			blockTime = blockTime.Add(time.Second)
			parents := blockSet{}
			for _, parentID := range blockData.parents {
				parent := blockByIDMap[parentID]
				parents.add(parent)
			}
			node := newTestNode(parents, blockVersion, 0, blockTime, test.k)
			node.hash = &daghash.Hash{} // Helps to make the hash order predictable
			for i, char := range blockData.id {
				node.hash[i] = byte(char)
			}

			blockDAG.index.AddNode(node)
			addNodeAsChildToParents(node)

			blockByIDMap[blockData.id] = node
			idByBlockMap[node] = blockData.id

			bluesIDs := make([]string, 0, len(node.blues))
			for _, blue := range node.blues {
				bluesIDs = append(bluesIDs, idByBlockMap[blue])
			}
			selectedParentID := idByBlockMap[node.selectedParent]
			fullDataStr := fmt.Sprintf("blues: %v, selectedParent: %v, score: %v",
				bluesIDs, selectedParentID, node.blueScore)
			if blockData.expectedScore != node.blueScore {
				t.Errorf("Test %d: Block %v expected to have score %v but got %v (fulldata: %v)",
					i, blockData.id, blockData.expectedScore, node.blueScore, fullDataStr)
			}
			if blockData.expectedSelectedParent != selectedParentID {
				t.Errorf("Test %d: Block %v expected to have selected parent %v but got %v (fulldata: %v)",
					i, blockData.id, blockData.expectedSelectedParent, selectedParentID, fullDataStr)
			}
			if !reflect.DeepEqual(blockData.expectedBlues, bluesIDs) {
				t.Errorf("Test %d: Block %v expected to have blues %v but got %v (fulldata: %v)",
					i, blockData.id, blockData.expectedBlues, bluesIDs, fullDataStr)
			}
		}

		// Start with every block marked red, then clear every block that is
		// on the virtual's selected-parent chain or blue from its point of view.
		reds := make(map[string]bool)

		for id := range blockByIDMap {
			reds[id] = true
		}

		for tip := blockByIDMap[test.virtualBlockID]; tip.selectedParent != nil; tip = tip.selectedParent {
			tipID := idByBlockMap[tip]
			delete(reds, tipID)
			for _, blue := range tip.blues {
				blueID := idByBlockMap[blue]
				delete(reds, blueID)
			}
		}
		if !checkReds(test.expectedReds, reds) {
			redsIDs := make([]string, 0, len(reds))
			for id := range reds {
				redsIDs = append(redsIDs, id)
			}
			sort.Strings(redsIDs)
			sort.Strings(test.expectedReds)
			t.Errorf("Test %d: Expected reds %v but got %v", i, test.expectedReds, redsIDs)
		}
	}
}

// checkReds returns whether the set of red blocks matches expectedReds exactly.
func checkReds(expectedReds []string, reds map[string]bool) bool {
	if len(expectedReds) != len(reds) {
		return false
	}
	for _, redID := range expectedReds {
		if !reds[redID] {
			return false
		}
	}
	return true
}
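The red-set derivation in the loop above can be restated compactly: every block is red unless it lies on the virtual block's selected-parent chain or is blue from a chain block's point of view. A hypothetical helper with the same logic (the standalone shape and names are mine, not the source's):

// redsOf is a hypothetical restatement of the red-set derivation used in
// the test above.
func redsOf(virtual *blockNode, idByBlock map[*blockNode]string, allIDs []string) map[string]bool {
	reds := make(map[string]bool, len(allIDs))
	for _, id := range allIDs {
		reds[id] = true
	}
	// Walk the selected-parent chain from the virtual block down to genesis;
	// chain blocks, and the blues of chain blocks, are by definition not red.
	for tip := virtual; tip != nil; tip = tip.selectedParent {
		delete(reds, idByBlock[tip])
		for _, blue := range tip.blues {
			delete(reds, idByBlock[blue])
		}
	}
	return reds
}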
@@ -1,214 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"fmt"
	"time"

	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/daghash"
)

// BehaviorFlags is a bitmask defining tweaks to the normal behavior when
// performing DAG processing and consensus rules checks.
type BehaviorFlags uint32

const (
	// BFFastAdd may be set to indicate that several checks can be avoided
	// for the block since it is already known to fit into the chain due to
	// already proving that it correctly links into the chain up to a known
	// checkpoint. This is primarily used for headers-first mode.
	BFFastAdd BehaviorFlags = 1 << iota

	// BFNoPoWCheck may be set to indicate that the proof of work check, which
	// ensures a block hashes to a value less than the required target, will
	// not be performed.
	BFNoPoWCheck

	// BFWasUnorphaned may be set to indicate that a block was just now
	// unorphaned
	BFWasUnorphaned

	// BFAfterDelay may be set to indicate that a block had a timestamp too
	// far in the future and has just finished its delay
	BFAfterDelay

	// BFIsSync may be set to indicate that the block was sent as part of the
	// netsync process
	BFIsSync

	// BFWasStored is set to indicate that the block was previously stored
	// in the block index but was never fully processed
	BFWasStored

	// BFNone is a convenience value to specifically indicate no flags.
	BFNone BehaviorFlags = 0
)
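Since BehaviorFlags is a plain bitmask, callers compose flags with bitwise OR and test them by masking, exactly as ProcessBlock does below. A small usage sketch (not from the original file; the function name is mine):

// exampleBehaviorFlags shows how the flags above compose and are tested.
func exampleBehaviorFlags() bool {
	// Compose flags for a block arriving during initial sync with the
	// proof-of-work check disabled.
	flags := BFIsSync | BFNoPoWCheck

	// Test a flag by masking, the same way ProcessBlock does below.
	return flags&BFNoPoWCheck == BFNoPoWCheck // true: the PoW check is skipped
}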

// BlockExists determines whether a block with the given hash exists in
// the DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockExists(hash *daghash.Hash) bool {
	return dag.index.HaveBlock(hash)
}

// processOrphans determines if there are any orphans which depend on the passed
// block hash (they are no longer orphans if true) and potentially accepts them.
// It repeats the process for the newly accepted blocks (to detect further
// orphans which may no longer be orphans) until there are no more.
//
// The flags do not modify the behavior of this function directly, however they
// are needed to pass along to maybeAcceptBlock.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) error {
	// Start with processing at least the passed hash. Leave a little room
	// for additional orphan blocks that need to be processed without
	// needing to grow the array in the common case.
	processHashes := make([]*daghash.Hash, 0, 10)
	processHashes = append(processHashes, hash)
	for len(processHashes) > 0 {
		// Pop the first hash to process from the slice.
		processHash := processHashes[0]
		processHashes[0] = nil // Prevent GC leak.
		processHashes = processHashes[1:]

		// Look up all orphans that are parented by the block we just
		// accepted. This will typically only be one, but it could
		// be multiple if multiple blocks are mined and broadcast
		// around the same time. The one with the most proof of work
		// will eventually win out. An indexing for loop is
		// intentionally used over a range here as range does not
		// reevaluate the slice on each iteration nor does it adjust the
		// index for the modified slice.
		for i := 0; i < len(dag.prevOrphans[*processHash]); i++ {
			orphan := dag.prevOrphans[*processHash][i]
			if orphan == nil {
				log.Warnf("Found a nil entry at index %d in the "+
					"orphan dependency list for block %s", i,
					processHash)
				continue
			}

			// Skip this orphan if one or more of its parents are
			// still missing.
			_, err := lookupParentNodes(orphan.block, dag)
			if err != nil {
				if ruleErr, ok := err.(RuleError); ok && ruleErr.ErrorCode == ErrParentBlockUnknown {
					continue
				}
				return err
			}

			// Remove the orphan from the orphan pool.
			orphanHash := orphan.block.Hash()
			dag.removeOrphanBlock(orphan)
			i--

			// Potentially accept the block into the block DAG.
			err = dag.maybeAcceptBlock(orphan.block, flags|BFWasUnorphaned)
			if err != nil {
				return err
			}

			// Add this block to the list of blocks to process so
			// any orphan blocks that depend on this block are
			// handled too.
			processHashes = append(processHashes, orphanHash)
		}
	}
	return nil
}

// ProcessBlock is the main workhorse for handling insertion of new blocks into
// the block DAG. It includes functionality such as rejecting duplicate
// blocks, ensuring blocks follow all rules, orphan handling, and insertion into
// the block DAG.
//
// When no errors occurred during processing, the first return value indicates
// whether or not the block is an orphan.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) ProcessBlock(block *util.Block, flags BehaviorFlags) (isOrphan bool, delay time.Duration, err error) {
	dag.dagLock.Lock()
	defer dag.dagLock.Unlock()

	isDelayedBlock := flags&BFAfterDelay == BFAfterDelay
	wasBlockStored := flags&BFWasStored == BFWasStored

	blockHash := block.Hash()
	log.Tracef("Processing block %s", blockHash)

	// The block must not already exist in the DAG.
	if dag.BlockExists(blockHash) && !wasBlockStored {
		str := fmt.Sprintf("already have block %s", blockHash)
		return false, 0, ruleError(ErrDuplicateBlock, str)
	}

	// The block must not already exist as an orphan.
	if _, exists := dag.orphans[*blockHash]; exists {
		str := fmt.Sprintf("already have block (orphan) %s", blockHash)
		return false, 0, ruleError(ErrDuplicateBlock, str)
	}

	if !isDelayedBlock {
		// Perform preliminary sanity checks on the block and its transactions.
		delay, err := dag.checkBlockSanity(block, flags)
		if err != nil {
			return false, 0, err
		}

		if delay != 0 {
			return false, delay, err
		}
	}

	// Handle orphan blocks.
	allParentsExist := true
	for _, parentHash := range block.MsgBlock().Header.ParentHashes {
		if !dag.BlockExists(parentHash) {
			allParentsExist = false
		}
	}

	if !allParentsExist {
		// Some orphans during netsync are a normal part of the process, since the anticone
		// of the chain-split is never explicitly requested.
		// Therefore, if we are in netsync - don't report orphans to default logs.
		//
		// The number K*2 was chosen because in normal conditions the anticone is limited
		// to K blocks, and red blocks can make it somewhat bigger; much more than that
		// indicates there might be some problem with the netsync process.
		if flags&BFIsSync == BFIsSync && uint32(len(dag.orphans)) < dag.dagParams.K*2 {
			log.Debugf("Adding orphan block %s. This is a normal part of the netsync process", blockHash)
		} else {
			log.Infof("Adding orphan block %s", blockHash)
		}
		dag.addOrphanBlock(block)

		return true, 0, nil
	}

	// The block has passed all context independent checks and appears sane
	// enough to potentially accept it into the block DAG.
	err = dag.maybeAcceptBlock(block, flags)
	if err != nil {
		return false, 0, err
	}

	// Accept any orphan blocks that depend on this block (they are
	// no longer orphans) and repeat for those accepted blocks until
	// there are no more.
	err = dag.processOrphans(blockHash, flags)
	if err != nil {
		return false, 0, err
	}

	log.Debugf("Accepted block %s", blockHash)

	return false, 0, nil
}
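A caller of ProcessBlock has three outcomes to handle: an error, a delay (timestamp too far in the future), or an orphan waiting for its parents. A hedged usage sketch (the wrapper function and the retry scheduling are assumptions, not shown in this file):

// exampleProcessBlockCaller illustrates handling the three return values.
func exampleProcessBlockCaller(dag *BlockDAG, block *util.Block) error {
	isOrphan, delay, err := dag.ProcessBlock(block, BFNone)
	if err != nil {
		return err // rule violation or database failure
	}
	if delay != 0 {
		// The block's timestamp is too far in the future: reprocess it with
		// BFAfterDelay once the delay has elapsed (scheduling elided here).
	}
	if isOrphan {
		// One or more parents are unknown: the block now sits in the orphan
		// pool and is revisited by processOrphans when a parent arrives.
	}
	return nil
}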
@@ -1,67 +0,0 @@
package blockdag

import (
	"bou.ke/monkey"
	"fmt"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/util"
	"testing"
	"time"
)

func TestProcessBlock(t *testing.T) {
	dag, teardownFunc, err := DAGSetup("TestProcessBlock", Config{
		DAGParams: &dagconfig.SimNetParams,
	})
	if err != nil {
		t.Errorf("Failed to setup dag instance: %v", err)
		return
	}
	defer teardownFunc()

	// Check that BFAfterDelay skips checkBlockSanity
	called := false
	guard := monkey.Patch((*BlockDAG).checkBlockSanity, func(_ *BlockDAG, _ *util.Block, _ BehaviorFlags) (time.Duration, error) {
		called = true
		return 0, nil
	})
	defer guard.Unpatch()

	isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(&Block100000), BFNoPoWCheck)
	if err != nil {
		t.Errorf("ProcessBlock: %s", err)
	}
	if delay != 0 {
		t.Errorf("ProcessBlock: block is too far in the future")
	}
	if !isOrphan {
		t.Errorf("ProcessBlock: unexpectedly returned a non-orphan block")
	}
	if !called {
		t.Errorf("ProcessBlock: expected checkBlockSanity to be called")
	}

	Block100000Copy := Block100000
	// Change the nonce to change the block hash
	Block100000Copy.Header.Nonce++
	called = false
	isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(&Block100000Copy), BFAfterDelay|BFNoPoWCheck)
	if err != nil {
		t.Errorf("ProcessBlock: %s", err)
	}
	if delay != 0 {
		t.Errorf("ProcessBlock: block is too far in the future")
	}
	if !isOrphan {
		t.Errorf("ProcessBlock: unexpectedly returned a non-orphan block")
	}
	if called {
		t.Errorf("ProcessBlock: didn't expect checkBlockSanity to be called")
	}

	isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(dagconfig.SimNetParams.GenesisBlock), BFNone)
	expectedErrMsg := fmt.Sprintf("already have block %s", dagconfig.SimNetParams.GenesisHash)
	if err == nil || err.Error() != expectedErrMsg {
		t.Errorf("ProcessBlock: Expected error \"%s\" but got \"%s\"", expectedErrMsg, err)
	}
}
@@ -1,193 +0,0 @@
package blockdag

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/daglabs/btcd/util"

	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util/subnetworkid"
	"github.com/daglabs/btcd/wire"
)

// SubnetworkStore stores the subnetworks data
type SubnetworkStore struct {
	db database.DB
}

func newSubnetworkStore(db database.DB) *SubnetworkStore {
	return &SubnetworkStore{
		db: db,
	}
}

// registerSubnetworks scans a list of transactions, singles out
// subnetwork registry transactions, validates them, and registers a new
// subnetwork based on each of them.
// This function returns an error if one or more transactions are invalid
func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
	subnetworkRegistryTxs := make([]*wire.MsgTx, 0)
	for _, tx := range txs {
		msgTx := tx.MsgTx()

		if msgTx.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDRegistry) {
			subnetworkRegistryTxs = append(subnetworkRegistryTxs, msgTx)
		}

		if subnetworkid.Less(subnetworkid.SubnetworkIDRegistry, &msgTx.SubnetworkID) {
			// Transactions are ordered by subnetwork, so we can safely assume
			// that the rest of the transactions will not be subnetwork registry
			// transactions.
			break
		}
	}

	for _, registryTx := range subnetworkRegistryTxs {
		subnetworkID, err := TxToSubnetworkID(registryTx)
		if err != nil {
			return err
		}
		sNet, err := dbGetSubnetwork(dbTx, subnetworkID)
		if err != nil {
			return err
		}
		if sNet == nil {
			createdSubnetwork := newSubnetwork(registryTx)
			err := dbRegisterSubnetwork(dbTx, subnetworkID, createdSubnetwork)
			if err != nil {
				return fmt.Errorf("failed registering subnetwork "+
					"for tx '%s': %s", registryTx.TxHash(), err)
			}
		}
	}

	return nil
}

// validateSubnetworkRegistryTransaction makes sure that a given subnetwork registry
// transaction is valid. Such a transaction is valid iff:
// - Its entire payload is a uint64 (8 bytes)
func validateSubnetworkRegistryTransaction(tx *wire.MsgTx) error {
	if len(tx.Payload) != 8 {
		return ruleError(ErrSubnetworkRegistry, fmt.Sprintf("validation failed: subnetwork registry "+
			"tx '%s' has an invalid payload", tx.TxHash()))
	}

	return nil
}

// TxToSubnetworkID creates a subnetwork ID from a subnetwork registry transaction
func TxToSubnetworkID(tx *wire.MsgTx) (*subnetworkid.SubnetworkID, error) {
	txHash := tx.TxHash()
	return subnetworkid.New(util.Hash160(txHash[:]))
}

// subnetwork returns a registered subnetwork. If the subnetwork does not exist
// this method returns an error.
func (s *SubnetworkStore) subnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
	var sNet *subnetwork
	var err error
	dbErr := s.db.View(func(dbTx database.Tx) error {
		sNet, err = dbGetSubnetwork(dbTx, subnetworkID)
		return nil
	})
	if dbErr != nil {
		return nil, fmt.Errorf("could not retrieve subnetwork '%s': %s", subnetworkID, dbErr)
	}
	if err != nil {
		return nil, fmt.Errorf("could not retrieve subnetwork '%s': %s", subnetworkID, err)
	}

	return sNet, nil
}

// GasLimit returns the gas limit of a registered subnetwork. If the subnetwork does not
// exist this method returns an error.
func (s *SubnetworkStore) GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
	sNet, err := s.subnetwork(subnetworkID)
	if err != nil {
		return 0, err
	}
	if sNet == nil {
		return 0, fmt.Errorf("subnetwork '%s' not found", subnetworkID)
	}

	return sNet.gasLimit, nil
}

// dbRegisterSubnetwork stores mappings from the ID of the subnetwork to the subnetwork data.
func dbRegisterSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
	// Serialize the subnetwork
	serializedSubnetwork, err := serializeSubnetwork(network)
	if err != nil {
		return fmt.Errorf("failed to serialize sub-network '%s': %s", subnetworkID, err)
	}

	// Store the subnetwork
	subnetworksBucket := dbTx.Metadata().Bucket(subnetworksBucketName)
	err = subnetworksBucket.Put(subnetworkID[:], serializedSubnetwork)
	if err != nil {
		return fmt.Errorf("failed to write sub-network '%s': %s", subnetworkID, err)
	}

	return nil
}

// dbGetSubnetwork returns the subnetwork associated with subnetworkID or nil if the subnetwork was not found.
func dbGetSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
	bucket := dbTx.Metadata().Bucket(subnetworksBucketName)
	serializedSubnetwork := bucket.Get(subnetworkID[:])
	if serializedSubnetwork == nil {
		return nil, nil
	}

	return deserializeSubnetwork(serializedSubnetwork)
}

type subnetwork struct {
	gasLimit uint64
}

func newSubnetwork(tx *wire.MsgTx) *subnetwork {
	return &subnetwork{
		gasLimit: ExtractGasLimit(tx),
	}
}

// ExtractGasLimit extracts the gas limit from the transaction payload
func ExtractGasLimit(tx *wire.MsgTx) uint64 {
	return binary.LittleEndian.Uint64(tx.Payload[:8])
}

// serializeSubnetwork serializes a subnetwork into the following binary format:
// | gasLimit (8 bytes) |
func serializeSubnetwork(sNet *subnetwork) ([]byte, error) {
	serializedSNet := bytes.NewBuffer(make([]byte, 0, 8))

	// Write the gas limit
	err := binary.Write(serializedSNet, byteOrder, sNet.gasLimit)
	if err != nil {
		return nil, fmt.Errorf("failed to serialize subnetwork: %s", err)
	}

	return serializedSNet.Bytes(), nil
}

// deserializeSubnetwork deserializes a byte slice into a subnetwork.
// See serializeSubnetwork for the binary format.
func deserializeSubnetwork(serializedSNetBytes []byte) (*subnetwork, error) {
	serializedSNet := bytes.NewBuffer(serializedSNetBytes)

	// Read the gas limit
	var gasLimit uint64
	err := binary.Read(serializedSNet, byteOrder, &gasLimit)
	if err != nil {
		return nil, fmt.Errorf("failed to deserialize subnetwork: %s", err)
	}

	return &subnetwork{
		gasLimit: gasLimit,
	}, nil
}
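Since the serialized form is just the 8-byte gas limit, a round trip is easy to sanity-check. A hypothetical in-package test sketch (not part of the source; it assumes byteOrder is little-endian, consistent with ExtractGasLimit above):

func TestSubnetworkSerializationRoundTrip(t *testing.T) {
	original := &subnetwork{gasLimit: 1000}

	serialized, err := serializeSubnetwork(original)
	if err != nil {
		t.Fatalf("serializeSubnetwork: %s", err)
	}
	if len(serialized) != 8 {
		t.Errorf("expected an 8-byte payload, got %d bytes", len(serialized))
	}

	deserialized, err := deserializeSubnetwork(serialized)
	if err != nil {
		t.Fatalf("deserializeSubnetwork: %s", err)
	}
	if deserialized.gasLimit != original.gasLimit {
		t.Errorf("gas limit changed in round trip: got %d, want %d",
			deserialized.gasLimit, original.gasLimit)
	}
}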
@@ -1,262 +0,0 @@
package blockdag

// The functions in this file are not considered safe for regular use, and should be used for test purposes only.

import (
	"compress/bzip2"
	"encoding/binary"
	"fmt"
	"github.com/daglabs/btcd/util"
	"io"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/daglabs/btcd/util/subnetworkid"

	"github.com/daglabs/btcd/database"
	_ "github.com/daglabs/btcd/database/ffldb" // blank import ffldb so that its init() function runs before tests
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/wire"
)

const (
	// testDbType is the database backend type to use for the tests.
	testDbType = "ffldb"

	// testDbRoot is the root directory used to create all test databases.
	testDbRoot = "testdbs"

	// blockDataNet is the expected network in the test block data.
	blockDataNet = wire.MainNet
)

// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
	supportedDrivers := database.SupportedDrivers()
	for _, driver := range supportedDrivers {
		if dbType == driver {
			return true
		}
	}

	return false
}

// FileExists returns whether or not the named file or directory exists.
func FileExists(name string) bool {
	if _, err := os.Stat(name); err != nil {
		if os.IsNotExist(err) {
			return false
		}
	}
	return true
}

// DAGSetup is used to create a new db and DAG instance with the genesis
// block already inserted. In addition to the new DAG instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
	if !isSupportedDbType(testDbType) {
		return nil, nil, fmt.Errorf("unsupported db type %s", testDbType)
	}

	var teardown func()

	// To make sure that the teardown function is not called before all
	// goroutines have finished running, overwrite `spawn` to count the
	// number of running goroutines
	spawnWaitGroup := sync.WaitGroup{}
	realSpawn := spawn
	spawn = func(f func()) {
		spawnWaitGroup.Add(1)
		realSpawn(func() {
			f()
			spawnWaitGroup.Done()
		})
	}

	if config.DB == nil {
		// Create the root directory for test databases.
		if !FileExists(testDbRoot) {
			if err := os.MkdirAll(testDbRoot, 0700); err != nil {
				err := fmt.Errorf("unable to create test db "+
					"root: %s", err)
				return nil, nil, err
			}
		}

		dbPath := filepath.Join(testDbRoot, dbName)
		_ = os.RemoveAll(dbPath)
		var err error
		config.DB, err = database.Create(testDbType, dbPath, blockDataNet)
		if err != nil {
			return nil, nil, fmt.Errorf("error creating db: %s", err)
		}

		// Setup a teardown function for cleaning up. This function is
		// returned to the caller to be invoked when it is done testing.
		teardown = func() {
			spawnWaitGroup.Wait()
			spawn = realSpawn
			config.DB.Close()
			os.RemoveAll(dbPath)
			os.RemoveAll(testDbRoot)
		}
	} else {
		teardown = func() {
			spawnWaitGroup.Wait()
			spawn = realSpawn
			config.DB.Close()
		}
	}

	config.TimeSource = NewMedianTime()
	config.SigCache = txscript.NewSigCache(1000)

	// Create the DAG instance.
	dag, err := New(&config)
	if err != nil {
		teardown()
		err := fmt.Errorf("failed to create dag instance: %s", err)
		return nil, nil, err
	}
	return dag, teardown, nil
}

// OpTrueScript is a script that returns TRUE
var OpTrueScript = []byte{txscript.OpTrue}

type txSubnetworkData struct {
	subnetworkID *subnetworkid.SubnetworkID
	Gas uint64
	Payload []byte
}

func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, subnetworkData *txSubnetworkData) *wire.MsgTx {
	txIns := []*wire.TxIn{}
	txOuts := []*wire.TxOut{}

	for i := uint32(0); i < numInputs; i++ {
		txIns = append(txIns, &wire.TxIn{
			PreviousOutpoint: *wire.NewOutpoint(&daghash.TxID{}, i),
			SignatureScript: []byte{},
			Sequence: wire.MaxTxInSequenceNum,
		})
	}

	for i := uint32(0); i < numOutputs; i++ {
		txOuts = append(txOuts, &wire.TxOut{
			ScriptPubKey: OpTrueScript,
			Value: outputValue,
		})
	}

	if subnetworkData != nil {
		return wire.NewSubnetworkMsgTx(wire.TxVersion, txIns, txOuts, subnetworkData.subnetworkID, subnetworkData.Gas, subnetworkData.Payload)
	}

	return wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
}

// VirtualForTest is an exported version of virtualBlock, so that it can be returned by exported test_util methods
type VirtualForTest *virtualBlock

// SetVirtualForTest replaces the dag's virtual block. This function is used for test purposes only
func SetVirtualForTest(dag *BlockDAG, virtual VirtualForTest) VirtualForTest {
	oldVirtual := dag.virtual
	dag.virtual = virtual
	return VirtualForTest(oldVirtual)
}

// GetVirtualFromParentsForTest generates a virtual block with the given parents.
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (VirtualForTest, error) {
	parents := newSet()
	for _, hash := range parentHashes {
		parent := dag.index.LookupNode(hash)
		if parent == nil {
			return nil, fmt.Errorf("GetVirtualFromParentsForTest: couldn't find node for hash %s", hash)
		}
		parents.add(parent)
	}
	virtual := newVirtualBlock(parents, dag.dagParams.K)

	pastUTXO, acceptanceData, err := dag.pastUTXO(&virtual.blockNode)
	if err != nil {
		return nil, err
	}
	diffFromAcceptanceData, err := virtual.blockNode.diffFromAcceptanceData(pastUTXO, acceptanceData)
	if err != nil {
		return nil, err
	}
	utxo, err := pastUTXO.WithDiff(diffFromAcceptanceData)
	if err != nil {
		return nil, err
	}
	diffUTXO := utxo.clone().(*DiffUTXOSet)
	err = diffUTXO.meldToBase()
	if err != nil {
		return nil, err
	}
	virtual.utxoSet = diffUTXO.base

	return VirtualForTest(virtual), nil
}

// LoadBlocks reads files containing bitcoin block data (optionally bzip2-compressed
// but otherwise in the format bitcoind writes) from disk and returns them as an
// array of util.Block. This is largely borrowed from the test code in btcdb.
func LoadBlocks(filename string) (blocks []*util.Block, err error) {
	var network = wire.MainNet
	var dr io.Reader
	var fi io.ReadCloser

	fi, err = os.Open(filename)
	if err != nil {
		return
	}

	if strings.HasSuffix(filename, ".bz2") {
		dr = bzip2.NewReader(fi)
	} else {
		dr = fi
	}
	defer fi.Close()

	var block *util.Block

	err = nil
	for height := uint64(0); err == nil; height++ {
		var rintbuf uint32
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err == io.EOF {
			// hit end of file at expected offset: no warning
			height--
			err = nil
			break
		}
		if err != nil {
			break
		}
		if rintbuf != uint32(network) {
			break
		}
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		blocklen := rintbuf

		rbytes := make([]byte, blocklen)

		// read the block, failing on a short read
		_, err = io.ReadFull(dr, rbytes)
		if err != nil {
			return
		}

		block, err = util.NewBlockFromBytes(rbytes)
		if err != nil {
			return
		}
		blocks = append(blocks, block)
	}

	return
}
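From the read loop, the on-disk layout LoadBlocks expects is a simple sequence of records: a little-endian uint32 network magic, a little-endian uint32 block length, then the raw block bytes. A hypothetical writer for generating such a file (not part of the source; callers would pass uint32(wire.MainNet) for the network):

// writeTestBlock appends one [network][length][bytes] record in the layout
// that LoadBlocks above reads back.
func writeTestBlock(w io.Writer, network uint32, blockBytes []byte) error {
	if err := binary.Write(w, binary.LittleEndian, network); err != nil {
		return err
	}
	if err := binary.Write(w, binary.LittleEndian, uint32(len(blockBytes))); err != nil {
		return err
	}
	_, err := w.Write(blockBytes)
	return err
}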
@@ -1,56 +0,0 @@
package blockdag

import (
	"errors"
	"os"
	"strings"
	"testing"

	"bou.ke/monkey"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/database"
)

func TestIsSupportedDbType(t *testing.T) {
	if !isSupportedDbType("ffldb") {
		t.Errorf("ffldb should be a supported DB driver")
	}
	if isSupportedDbType("madeUpDb") {
		t.Errorf("madeUpDb should not be a supported DB driver")
	}
}

// TestDAGSetupErrors tests all error cases in DAGSetup.
// The non-error cases are tested in the more general tests.
func TestDAGSetupErrors(t *testing.T) {
	os.RemoveAll(testDbRoot)
	testDAGSetupErrorThroughPatching(t, "unable to create test db root: ", os.MkdirAll, func(path string, perm os.FileMode) error {
		return errors.New("Made up error")
	})

	testDAGSetupErrorThroughPatching(t, "failed to create dag instance: ", New, func(config *Config) (*BlockDAG, error) {
		return nil, errors.New("Made up error")
	})

	testDAGSetupErrorThroughPatching(t, "unsupported db type ", isSupportedDbType, func(dbType string) bool {
		return false
	})

	testDAGSetupErrorThroughPatching(t, "error creating db: ", database.Create, func(dbType string, args ...interface{}) (database.DB, error) {
		return nil, errors.New("Made up error")
	})
}

func testDAGSetupErrorThroughPatching(t *testing.T, expectedErrorMessage string, targetFunction interface{}, replacementFunction interface{}) {
	guard := monkey.Patch(targetFunction, replacementFunction)
	defer guard.Unpatch()
	_, tearDown, err := DAGSetup("TestDAGSetup", Config{
		DAGParams: &dagconfig.MainNetParams,
	})
	if tearDown != nil {
		defer tearDown()
	}
	if err == nil || !strings.HasPrefix(err.Error(), expectedErrorMessage) {
		t.Errorf("DAGSetup: expected error to have prefix '%s' but got error '%v'", expectedErrorMessage, err)
	}
}
BIN blockdag/testdata/blk_0_to_4.dat vendored (binary file not shown)
BIN blockdag/testdata/blk_3A.dat vendored (binary file not shown)
BIN blockdag/testdata/blk_3B.dat vendored (binary file not shown)
BIN blockdag/testdata/blk_3C.dat vendored (binary file not shown)
BIN blockdag/testdata/blk_3D.dat vendored (binary file not shown)
@@ -1,356 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"fmt"

	"github.com/daglabs/btcd/util/daghash"
)

// ThresholdState defines the various threshold states used when voting on
// consensus changes.
type ThresholdState byte

// These constants are used to identify specific threshold states.
const (
	// ThresholdDefined is the first state for each deployment and is the
	// state that the genesis block has, by definition, for all deployments.
	ThresholdDefined ThresholdState = iota

	// ThresholdStarted is the state for a deployment once its start time
	// has been reached.
	ThresholdStarted

	// ThresholdLockedIn is the state for a deployment during the retarget
	// period which is after the ThresholdStarted state period and the
	// number of blocks that have voted for the deployment equal or exceed
	// the required number of votes for the deployment.
	ThresholdLockedIn

	// ThresholdActive is the state for a deployment for all blocks after a
	// retarget period in which the deployment was in the ThresholdLockedIn
	// state.
	ThresholdActive

	// ThresholdFailed is the state for a deployment once its expiration
	// time has been reached and it did not reach the ThresholdLockedIn
	// state.
	ThresholdFailed

	// numThresholdsStates is the maximum number of threshold states used in
	// tests.
	numThresholdsStates
)

// thresholdStateStrings is a map of ThresholdState values back to their
// constant names for pretty printing.
var thresholdStateStrings = map[ThresholdState]string{
	ThresholdDefined: "ThresholdDefined",
	ThresholdStarted: "ThresholdStarted",
	ThresholdLockedIn: "ThresholdLockedIn",
	ThresholdActive: "ThresholdActive",
	ThresholdFailed: "ThresholdFailed",
}

// String returns the ThresholdState as a human-readable name.
func (t ThresholdState) String() string {
	if s := thresholdStateStrings[t]; s != "" {
		return s
	}
	return fmt.Sprintf("Unknown ThresholdState (%d)", int(t))
}

// thresholdConditionChecker provides a generic interface that is invoked to
// determine when a consensus rule change threshold should be changed.
type thresholdConditionChecker interface {
	// BeginTime returns the unix timestamp for the median block time after
	// which voting on a rule change starts (at the next window).
	BeginTime() uint64

	// EndTime returns the unix timestamp for the median block time after
	// which an attempted rule change fails if it has not already been
	// locked in or activated.
	EndTime() uint64

	// RuleChangeActivationThreshold is the number of blocks for which the
	// condition must be true in order to lock in a rule change.
	RuleChangeActivationThreshold() uint64

	// MinerConfirmationWindow is the number of blocks in each threshold
	// state retarget window.
	MinerConfirmationWindow() uint64

	// Condition returns whether or not the rule change activation condition
	// has been met. This typically involves checking whether or not the
	// bit associated with the condition is set, but can be more complex as
	// needed.
	Condition(*blockNode) (bool, error)
}

// thresholdStateCache provides a type to cache the threshold states of each
// threshold window for a set of IDs.
type thresholdStateCache struct {
	entries map[daghash.Hash]ThresholdState
}

// Lookup returns the threshold state associated with the given hash along with
// a boolean that indicates whether or not it is valid.
func (c *thresholdStateCache) Lookup(hash *daghash.Hash) (ThresholdState, bool) {
	state, ok := c.entries[*hash]
	return state, ok
}

// Update updates the cache to contain the provided hash to threshold state
// mapping.
func (c *thresholdStateCache) Update(hash *daghash.Hash, state ThresholdState) {
	c.entries[*hash] = state
}

// newThresholdCaches returns a new array of caches to be used when calculating
// threshold states.
func newThresholdCaches(numCaches uint32) []thresholdStateCache {
	caches := make([]thresholdStateCache, numCaches)
	for i := 0; i < len(caches); i++ {
		caches[i] = thresholdStateCache{
			entries: make(map[daghash.Hash]ThresholdState),
		}
	}
	return caches
}
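thresholdState below consults one such cache per deployment: look up a window's final block before recomputing, and memoize the result afterwards. A minimal usage sketch (the function and the assumption that hash names the last block of some window are mine):

// exampleThresholdCacheUse shows the lookup-then-update pattern.
func exampleThresholdCacheUse(hash *daghash.Hash) ThresholdState {
	caches := newThresholdCaches(1)
	cache := &caches[0]

	// Look up before recomputing: a hit means this window's state is known.
	if state, ok := cache.Lookup(hash); ok {
		return state
	}

	// Miss: compute the state (elided here), then memoize it.
	cache.Update(hash, ThresholdStarted)
	return ThresholdStarted
}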

// thresholdState returns the current rule change threshold state for the block
// AFTER the given node and deployment ID. The cache is used to ensure the
// threshold states for previous windows are only calculated once.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdConditionChecker, cache *thresholdStateCache) (ThresholdState, error) {
	// The threshold state for the window that contains the genesis block is
	// defined by definition.
	confirmationWindow := checker.MinerConfirmationWindow()
	if prevNode == nil || (prevNode.chainHeight+1) < confirmationWindow {
		return ThresholdDefined, nil
	}

	// Get the ancestor that is the last block of the previous confirmation
	// window in order to get its threshold state. This can be done because
	// the state is the same for all blocks within a given window.
	prevNode = prevNode.SelectedAncestor(prevNode.chainHeight -
		(prevNode.chainHeight+1)%confirmationWindow)

	// Iterate backwards through each of the previous confirmation windows
	// to find the most recently cached threshold state.
	var neededStates []*blockNode
	for prevNode != nil {
		// Nothing more to do if the state of the block is already
		// cached.
		if _, ok := cache.Lookup(prevNode.hash); ok {
			break
		}

		// The start and expiration times are based on the median block
		// time, so calculate it now.
		medianTime := prevNode.PastMedianTime(dag)

		// The state is simply defined if the start time hasn't
		// been reached yet.
		if uint64(medianTime.Unix()) < checker.BeginTime() {
			cache.Update(prevNode.hash, ThresholdDefined)
			break
		}

		// Add this node to the list of nodes that need the state
		// calculated and cached.
		neededStates = append(neededStates, prevNode)

		// Get the ancestor that is the last block of the previous
		// confirmation window.
		prevNode = prevNode.RelativeAncestor(confirmationWindow)
	}

	// Start with the threshold state for the most recent confirmation
	// window that has a cached state.
	state := ThresholdDefined
	if prevNode != nil {
		var ok bool
		state, ok = cache.Lookup(prevNode.hash)
		if !ok {
			return ThresholdFailed, AssertError(fmt.Sprintf(
				"thresholdState: cache lookup failed for %s",
				prevNode.hash))
		}
	}

	// Since each threshold state depends on the state of the previous
	// window, iterate starting from the oldest unknown window.
	for neededNum := len(neededStates) - 1; neededNum >= 0; neededNum-- {
		prevNode := neededStates[neededNum]

		switch state {
		case ThresholdDefined:
			// The deployment of the rule change fails if it expires
			// before it is accepted and locked in.
			medianTime := prevNode.PastMedianTime(dag)
			medianTimeUnix := uint64(medianTime.Unix())
			if medianTimeUnix >= checker.EndTime() {
				state = ThresholdFailed
				break
			}

			// The state for the rule moves to the started state
			// once its start time has been reached (and it hasn't
			// already expired per the above).
			if medianTimeUnix >= checker.BeginTime() {
				state = ThresholdStarted
			}

		case ThresholdStarted:
			// The deployment of the rule change fails if it expires
			// before it is accepted and locked in.
			medianTime := prevNode.PastMedianTime(dag)
			if uint64(medianTime.Unix()) >= checker.EndTime() {
				state = ThresholdFailed
				break
			}

			// At this point, the rule change is still being voted
			// on by the miners, so iterate backwards through the
			// confirmation window to count all of the votes in it.
			var count uint64
			countNode := prevNode
			for i := uint64(0); i < confirmationWindow; i++ {
				condition, err := checker.Condition(countNode)
				if err != nil {
					return ThresholdFailed, err
				}
				if condition {
					count++
				}

				// Get the previous block node.
				countNode = countNode.selectedParent
			}

			// The state is locked in if the number of blocks in the
			// period that voted for the rule change meets the
			// activation threshold.
			if count >= checker.RuleChangeActivationThreshold() {
				state = ThresholdLockedIn
			}

		case ThresholdLockedIn:
			// The new rule becomes active when its previous state
			// was locked in.
			state = ThresholdActive

		// Nothing to do if the previous state is active or failed since
		// they are both terminal states.
		case ThresholdActive:
		case ThresholdFailed:
		}

		// Update the cache to avoid recalculating the state in the
		// future.
		cache.Update(prevNode.hash, state)
	}

	return state, nil
}
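The per-window transition applied by the switch above can be summarized as a pure function. A hedged restatement (the function and parameter names are mine; all times are median block times, and the vote count only matters in the Started state):

// nextThresholdState is an illustrative restatement of the switch above.
func nextThresholdState(state ThresholdState, medianTime, begin, end, votes, threshold uint64) ThresholdState {
	switch state {
	case ThresholdDefined:
		if medianTime >= end {
			return ThresholdFailed
		}
		if medianTime >= begin {
			return ThresholdStarted
		}
	case ThresholdStarted:
		if medianTime >= end {
			return ThresholdFailed
		}
		if votes >= threshold {
			return ThresholdLockedIn
		}
	case ThresholdLockedIn:
		return ThresholdActive
	}
	return state // ThresholdActive and ThresholdFailed are terminal
}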

// ThresholdState returns the current rule change threshold state of the given
// deployment ID for the block AFTER the end of the current best chain.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) ThresholdState(deploymentID uint32) (ThresholdState, error) {
	dag.dagLock.Lock()
	state, err := dag.deploymentState(dag.selectedTip(), deploymentID)
	dag.dagLock.Unlock()

	return state, err
}

// IsDeploymentActive returns true if the target deploymentID is active, and
// false otherwise.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
	dag.dagLock.Lock()
	state, err := dag.deploymentState(dag.selectedTip(), deploymentID)
	dag.dagLock.Unlock()
	if err != nil {
		return false, err
	}

	return state == ThresholdActive, nil
}

// deploymentState returns the current rule change threshold for a given
// deploymentID. The threshold is evaluated from the point of view of the block
// node passed in as the first argument to this method.
//
// It is important to note that, as the variable name indicates, this function
// expects the block node prior to the block for which the deployment state is
// desired. In other words, the returned deployment state is for the block
// AFTER the passed node.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) {
	if deploymentID >= uint32(len(dag.dagParams.Deployments)) {
		return ThresholdFailed, DeploymentError(deploymentID)
	}

	deployment := &dag.dagParams.Deployments[deploymentID]
	checker := deploymentChecker{deployment: deployment, chain: dag}
	cache := &dag.deploymentCaches[deploymentID]

	return dag.thresholdState(prevNode, checker, cache)
}

// initThresholdCaches initializes the threshold state caches for each warning
// bit and defined deployment and provides warnings if the chain is current per
// the warnUnknownVersions and warnUnknownRuleActivations functions.
func (dag *BlockDAG) initThresholdCaches() error {
	// Initialize the warning and deployment caches by calculating the
	// threshold state for each of them. This will ensure the caches are
	// populated and any states that need to be recalculated due to
	// definition changes are done now.
	prevNode := dag.selectedTip().selectedParent
	for bit := uint32(0); bit < vbNumBits; bit++ {
		checker := bitConditionChecker{bit: bit, chain: dag}
		cache := &dag.warningCaches[bit]
		_, err := dag.thresholdState(prevNode, checker, cache)
		if err != nil {
			return err
		}
	}
	for id := 0; id < len(dag.dagParams.Deployments); id++ {
		deployment := &dag.dagParams.Deployments[id]
		cache := &dag.deploymentCaches[id]
		checker := deploymentChecker{deployment: deployment, chain: dag}
		_, err := dag.thresholdState(prevNode, checker, cache)
		if err != nil {
			return err
		}
	}

	// No warnings about unknown rules or versions until the chain is
	// current.
	if dag.isCurrent() {
		// Warn if a high enough percentage of the last blocks have
		// unexpected versions.
		bestNode := dag.selectedTip()
		if err := dag.warnUnknownVersions(bestNode); err != nil {
			return err
		}

		// Warn if any unknown new rules are either about to activate or
		// have already been activated.
		if err := dag.warnUnknownRuleActivations(bestNode); err != nil {
			return err
		}
	}

	return nil
}
@@ -1,134 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"testing"

	"github.com/daglabs/btcd/util/daghash"
)

// TestThresholdStateStringer tests the stringized output for the
// ThresholdState type.
func TestThresholdStateStringer(t *testing.T) {
	t.Parallel()

	tests := []struct {
		in ThresholdState
		want string
	}{
		{ThresholdDefined, "ThresholdDefined"},
		{ThresholdStarted, "ThresholdStarted"},
		{ThresholdLockedIn, "ThresholdLockedIn"},
		{ThresholdActive, "ThresholdActive"},
		{ThresholdFailed, "ThresholdFailed"},
		{0xff, "Unknown ThresholdState (255)"},
	}

	// Detect additional threshold states that don't have the stringer added.
	if len(tests)-1 != int(numThresholdsStates) {
		t.Errorf("It appears a threshold state was added without " +
			"adding an associated stringer test")
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		result := test.in.String()
		if result != test.want {
			t.Errorf("String #%d\n got: %s want: %s", i, result,
				test.want)
			continue
		}
	}
}

// TestThresholdStateCache ensures the threshold state cache works as intended,
// including adding entries, updating existing entries, and flushing.
func TestThresholdStateCache(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name string
		numEntries int
		state ThresholdState
	}{
		{name: "2 entries defined", numEntries: 2, state: ThresholdDefined},
		{name: "7 entries started", numEntries: 7, state: ThresholdStarted},
		{name: "10 entries active", numEntries: 10, state: ThresholdActive},
		{name: "5 entries locked in", numEntries: 5, state: ThresholdLockedIn},
		{name: "3 entries failed", numEntries: 3, state: ThresholdFailed},
	}

nextTest:
	for _, test := range tests {
		cache := &newThresholdCaches(1)[0]
		for i := 0; i < test.numEntries; i++ {
			var hash daghash.Hash
			hash[0] = uint8(i + 1)

			// Ensure the hash isn't available in the cache already.
			_, ok := cache.Lookup(&hash)
			if ok {
				t.Errorf("Lookup (%s): has entry for hash %v",
					test.name, hash)
				continue nextTest
			}

			// Ensure a hash that was added to the cache reports that
			// it's available and that the state is the expected value.
			cache.Update(&hash, test.state)
			state, ok := cache.Lookup(&hash)
			if !ok {
				t.Errorf("Lookup (%s): missing entry for hash "+
					"%v", test.name, hash)
				continue nextTest
			}
			if state != test.state {
				t.Errorf("Lookup (%s): state mismatch - got "+
					"%v, want %v", test.name, state,
					test.state)
				continue nextTest
			}

			// Ensure adding an existing hash with the same state
			// doesn't break the existing entry.
			cache.Update(&hash, test.state)
			state, ok = cache.Lookup(&hash)
			if !ok {
				t.Errorf("Lookup (%s): missing entry after "+
					"second add for hash %v", test.name,
					hash)
				continue nextTest
			}
			if state != test.state {
				t.Errorf("Lookup (%s): state mismatch after "+
					"second add - got %v, want %v",
					test.name, state, test.state)
				continue nextTest
			}

			// Ensure adding an existing hash with a different state
			// updates the existing entry.
			newState := ThresholdFailed
			if newState == test.state {
				newState = ThresholdStarted
			}
			cache.Update(&hash, newState)
			state, ok = cache.Lookup(&hash)
			if !ok {
				t.Errorf("Lookup (%s): missing entry after "+
					"state change for hash %v", test.name,
					hash)
				continue nextTest
			}
			if state != newState {
				t.Errorf("Lookup (%s): state mismatch after "+
					"state change - got %v, want %v",
					test.name, state, newState)
				continue nextTest
			}
		}
	}
}
Some files were not shown because too many files have changed in this diff.