Mirror of https://github.com/kaspanet/kaspad.git, synced 2026-02-21 03:03:08 +00:00

Compare commits: 389 commits in the range v0.0.6-dev...v0.1.2-rc2
| SHA1 |
|---|
| 34fb066590 |
| 299826f392 |
| 3d8dd8724d |
| b8a00f7519 |
| 4dfc8cf5b0 |
| 5a99e4d2f3 |
| 606cd668ff |
| dd537f5143 |
| a1c631be62 |
| 707a728656 |
| 80b5631a48 |
| 2373965551 |
| 65cbb6655b |
| cdd96d0670 |
| ad04bbde83 |
| 5374d95416 |
| de9aa39cc5 |
| 98987f4a8f |
| 9745f31b69 |
| ee08531a52 |
| 61baf7b260 |
| 650e4f735e |
| 550b12b041 |
| a4bb070722 |
| 30fe0c279b |
| e405dd5981 |
| 243b4b8021 |
| dd4c93e1ef |
| a07335d74d |
| 7567cd4cb9 |
| 51ff9e2562 |
| 5b8ab63890 |
| 3dd7dc4496 |
| d90a08ecfa |
| 45dc1a3e7b |
| 4ffb5daa37 |
| b9138b720d |
| d8954f1339 |
| eb953286ec |
| 41c8178ad3 |
| aa74b51e6f |
| f7800eb5c4 |
| 193add502f |
| 44c55900f8 |
| 4c0ea78026 |
| 03a93fe51e |
| eca0514465 |
| aadbebb720 |
| 5daab45947 |
| 607b838ded |
| 25bdaeed31 |
| 8b2d3f07ce |
| a3dc2f7da7 |
| bf36f9ceb6 |
| 11de12304e |
| a10320ad7b |
| fd2bbf3557 |
| 7f9cf17274 |
| ba0e239557 |
| ed606bfda3 |
| c0463a8a68 |
| 52e0a0967d |
| 29bcc271b5 |
| 94ec159147 |
| 9d434de4a5 |
| 49418f4222 |
| 38b4749f20 |
| 045984e6b9 |
| 38883d1a98 |
| b5f365d282 |
| a7d3a40465 |
| 359b16fca9 |
| 8b8e73feb5 |
| 6044b6ac1a |
| a177ea4f15 |
| 3a15aa4bae |
| 427185b6a8 |
| b282734a3f |
| 6d765f58ba |
| 20819ca4cd |
| 2174a0a7f2 |
| ea6f7a28c2 |
| ac9aa74a75 |
| d46857677f |
| cd719b1d5b |
| 7cf15ac93b |
| d8e3191469 |
| 784d3de4ca |
| 733d06af5a |
| df91643976 |
| ebf635e6ff |
| e41d9866c3 |
| d984151549 |
| 6099ce56bd |
| e0b5c145f7 |
| cf37f733ef |
| 66a92a243c |
| 4a88eea57e |
| fbaf360a42 |
| 1346810af8 |
| 9cbab94264 |
| 48f29cc11f |
| e2b57e6231 |
| f72afc8bbb |
| 0d1f447cb7 |
| 818f8c93eb |
| 264ffaae93 |
| 03b7af9a13 |
| e3d7e83d44 |
| 07651e51c8 |
| 1cd2eb9308 |
| a140327dd2 |
| c1f7ae72e0 |
| 3a12fe9b1d |
| c25c9b25bd |
| f46dec449d |
| 60ab6330ff |
| 89dee3e005 |
| 70d7009985 |
| 3322a892e9 |
| 61d066e958 |
| 7b9ffc6c25 |
| 7a163d4dd7 |
| 189a3380a2 |
| 8680231e5a |
| 30f0e95969 |
| c94becf144 |
| 369ec449a8 |
| f4c6859e51 |
| 683dd52fcf |
| 11e936d109 |
| 9adb105e37 |
| 7b6ed9a778 |
| 3218fc5a04 |
| 3f94f8ca4c |
| 0842778c2c |
| 1332e1aa68 |
| e872ebc7b3 |
| e68b242243 |
| 9cc2a7260b |
| bcd73012de |
| 1fea2a9421 |
| bb7d68deda |
| 3ab861227d |
| 8f0d98ef9b |
| dbd8bf3d2c |
| 1b6b02e0d2 |
| 2402bae1ff |
| 3dcf8d88b8 |
| dbf9c09a2e |
| 5e9fc2defc |
| bdc3cbceaa |
| a71528fefb |
| 6725742d2c |
| 9a510e2e23 |
| 08a4b0dbf6 |
| 0c9e55a358 |
| 532e57b61c |
| b1f59914d2 |
| 9a54b286c9 |
| 6e4b18a498 |
| b5f8a0452e |
| fab043ef14 |
| 8e0e62f21a |
| 9a1c2e2641 |
| 8cbc6670cc |
| 28ee6a8026 |
| af39e96e3e |
| db6e9c773f |
| 47214121a7 |
| 7b07609fd8 |
| acb4b3f260 |
| e0221aa8ab |
| cba346d753 |
| 0f34cfb1a2 |
| ea846a3284 |
| 63bfac9740 |
| 7284815c21 |
| 80307d108b |
| 722437afe9 |
| 684cf4b5fa |
| c95a7b13a6 |
| 1ce7f21026 |
| 7d7df10493 |
| 8179862e0b |
| 6828f623b4 |
| 2c88a5b2fe |
| a7f08598f3 |
| 83bad65d3a |
| 1f35378a4d |
| 39eab7a6d5 |
| 9dd025d4da |
| bb75ea5020 |
| 8dbd4a2bed |
| 24305cda68 |
| 770dfd147d |
| a9ff9b0e70 |
| 3cc6f2d648 |
| a8f0d7b05b |
| 13f06ca293 |
| c88fa1492e |
| 40657a83f5 |
| 44dd58b461 |
| 47891b17ab |
| f7fbfbf5c4 |
| 0e278ca22b |
| c66fb294c8 |
| 88b7e7ca03 |
| a9b659a36f |
| 90fc6ba3e7 |
| 8ea97aa3fd |
| 7c9f5a65d8 |
| e2d3c4c821 |
| 92578e2853 |
| 3018c18616 |
| 3ac9fa83c1 |
| c5b0398dac |
| 76f23d8a9b |
| 089cee0e1d |
| 982340456d |
| 13cf1f7715 |
| d99af7424c |
| 40ad9c5d2b |
| 9dfc3091b4 |
| e6a4ed04f3 |
| e3aa8d65dc |
| ece0fb83e8 |
| 683830d574 |
| c5108a4abd |
| 40342eb45a |
| adf4b4380e |
| 7371120481 |
| 1064b5009d |
| 850876e6a7 |
| d4083cbdbe |
| 47c5eddf38 |
| f6a6508eff |
| a036618b44 |
| 2429b623fc |
| f4850b9e7a |
| e81ac5f19e |
| 31ccedf136 |
| 502b510ccd |
| 369031f963 |
| a789680db1 |
| 90bda69931 |
| 9647cb3e08 |
| 79c9060909 |
| 20206789e0 |
| 1ddae35277 |
| 75a8c6459a |
| 7fc2430ab1 |
| cf9af0fb5d |
| db6d6293c7 |
| ae25ec2e6b |
| 7521545682 |
| 169e96e851 |
| 893b8a88c8 |
| c60711ab15 |
| 1b00e01030 |
| f0c80905eb |
| b07a118431 |
| 0ae06cd277 |
| ed9165f533 |
| c73113a12e |
| 480b2ca07c |
| c72b914050 |
| 5cf7f01d3f |
| 552a5917c2 |
| 5c14719f14 |
| d2353a189a |
| 4fcd705ae3 |
| 744c17b4c8 |
| e2eca24b33 |
| 36d5ac189f |
| 1a569c7bd7 |
| 6bb53eaae3 |
| 747a9bb944 |
| d2daf334a5 |
| 70737e4e94 |
| 5f49115cac |
| 534cb2bf5b |
| 187c525667 |
| 6032727965 |
| bb3f23b6dc |
| e5485ac5e6 |
| 594a209f83 |
| 9981ce7adb |
| 49ac97c7db |
| bfdf7a2cf2 |
| 54b681460d |
| 2147d16c1f |
| 7c1cb47bd0 |
| 6acfa18d7c |
| f0a675162c |
| 7a4deb6f18 |
| 96842353de |
| 5ce8875ce0 |
| 812819e92f |
| 5cb536643e |
| 4c6b8969d3 |
| 8ccc63752c |
| 1088b69616 |
| 541119dda2 |
| 7400eabc6d |
| c3c429494f |
| 6d20202354 |
| d6297a3192 |
| e2f8d4e0aa |
| 589763e8ec |
| c14c64d534 |
| f7f44995d6 |
| 263737b3fb |
| 0c5f3d72bd |
| ffd886498a |
| 76f5619de7 |
| 35703e7956 |
| 29231d8d14 |
| 396842ae40 |
| 072c753323 |
| 6250342b86 |
| e4b2d869d4 |
| ccca580a4b |
| 84970a8378 |
| 901bde1fd4 |
| 33a4183bfa |
| 0bc6e5bc92 |
| 8323e468da |
| 7912fe4c35 |
| 266e471941 |
| 4e6edd4ffd |
| 7069d173c6 |
| aa51b5f071 |
| da7c9c7dfb |
| ec10346e79 |
| 2481871c10 |
| ac1fd11a42 |
| b1d3ca0206 |
| 5c5491e1e4 |
| 8dedca693e |
| ca0619bbcf |
| d7a2ab52a1 |
| 3b72aafbc6 |
| dfd12cdaac |
| 08d94c7a47 |
| b7b41f1a94 |
| 42109ec4d5 |
| 39ccc4b225 |
| 8acc738b27 |
| 945b3f8fbf |
| a73f218402 |
| eded4c2285 |
| 33036278ac |
| 6163d3b4ec |
| 22046bebc5 |
| c67d4507b6 |
| ea5e18ea11 |
| 1cc479dbf8 |
| b4e7b59e7b |
| 8592ae9641 |
| 1362fc45e0 |
| b34894e4da |
| 30f5ebd6d1 |
| 4292bcac72 |
| 8683258e4a |
| e9ec8cd39c |
| 068a8d117d |
| 83a012de12 |
| f36ae25baf |
| 298cda0617 |
| b9e3fff5d1 |
| ed76e2c962 |
| 77fae7b522 |
| cd71e80eb3 |
| 3f7c73f331 |
| 4845a7f16c |
| 77fb901706 |
| d3e70810af |
| daa4481282 |
| a3735da12a |
| 311c96122e |
| b612426ead |
| e99af346bf |
| e22bc9af8f |
| 89ca293dc1 |
| 194ceace6f |
| a79c6cecdb |
| c5827febf7 |
| 7353a49469 |
.gitignore (vendored): 3 changes
```diff
@@ -2,7 +2,7 @@
 *~
 
 # Databases
-btcd.db
+kaspad.db
 *-shm
 *-wal
 
@@ -38,6 +38,7 @@ _testmain.go
 .vscode
 debug
 debug.test
+__debug_bin
 
 # CI
 version.txt
```
CHANGES: deleted in full (955 lines)

@@ -1,955 +0,0 @@
============================================================================
User visible changes for btcd
  A full-node bitcoin implementation written in Go
============================================================================

Changes in 0.12.0 (Fri Nov 20 2015)
- Protocol and network related changes:
  - Add a new checkpoint at block height 382320 (#555)
  - Implement BIP0065 which includes support for version 4 blocks, a new
    consensus opcode (OP_CHECKLOCKTIMEVERIFY) that enforces transaction
    lock times, and a double-threshold switchover mechanism (#535, #459,
    #455)
  - Implement BIP0111 which provides a new bloom filter service flag and
    hence provides support for protocol version 70011 (#499)
  - Add a new parameter --nopeerbloomfilters to allow disabling bloom
    filter support (#499)
  - Reject non-canonically encoded variable length integers (#507)
  - Add mainnet peer discovery DNS seed (seed.bitcoin.jonasschnelli.ch)
    (#496)
  - Correct reconnect handling for persistent peers (#463, #464)
  - Ignore requests for block headers if not fully synced (#444)
  - Add CLI support for specifying the zone id on IPv6 addresses (#538)
  - Fix a couple of issues where the initial block sync could stall (#518,
    #229, #486)
  - Fix an issue which prevented the --onion option from working as
    intended (#446)
- Transaction relay (memory pool) changes:
  - Require transactions to only include signatures encoded with the
    canonical 'low-s' encoding (#512)
  - Add a new parameter --minrelaytxfee to allow the minimum transaction
    fee in BTC/kB to be overridden (#520)
  - Retain memory pool transactions when they redeem another one that is
    removed when a block is accepted (#539)
  - Do not send reject messages for a transaction if it is valid but
    causes an orphan transaction which depends on it to be determined
    as invalid (#546)
  - Refrain from attempting to add orphans to the memory pool multiple
    times when the transaction they redeem is added (#551)
  - Modify minimum transaction fee calculations to scale based on bytes
    instead of full kilobyte boundaries (#521, #537)
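As an aside, the per-byte fee scaling in the last bullet above comes down to prorating the fee rate by serialized size. The Go sketch below illustrates the idea; `calcMinRequiredFee` is a hypothetical helper written for this note, not btcd's or kaspad's actual mempool code, and it assumes a fee rate given in satoshi per kilobyte.

```go
package main

import "fmt"

// calcMinRequiredFee prorates the minimum relay fee by serialized size in
// bytes rather than rounding up to full-kilobyte boundaries (hypothetical
// helper; not the project's actual function).
func calcMinRequiredFee(serializedSize, minRelayFeePerKB int64) int64 {
	minFee := serializedSize * minRelayFeePerKB / 1000
	if minFee < 0 { // guard against pathological negative inputs
		minFee = 0
	}
	return minFee
}

func main() {
	// A 250-byte transaction at 1000 satoshi/kB owes 250 satoshi,
	// not a full kilobyte's worth.
	fmt.Println(calcMinRequiredFee(250, 1000))
}
```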
- Implement signature cache:
  - Provides a limited memory cache of validated signatures which is a
    huge optimization when verifying blocks for transactions that are
    already in the memory pool (#506)
  - Add a new parameter '--sigcachemaxsize' which allows the size of the
    new cache to be manually changed if desired (#506)
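A minimal sketch of such a bounded signature cache follows, assuming entries are keyed by the signature hash; the types and the eviction policy here are illustrative only and do not mirror btcd's actual sigcache implementation.

```go
package sigcache

import (
	"bytes"
	"sync"
)

// entry pairs a validated signature with the public key that produced it.
type entry struct {
	sig    []byte
	pubKey []byte
}

// SigCache is a concurrency-safe, size-bounded cache of validated signatures.
type SigCache struct {
	mu         sync.RWMutex
	validSigs  map[[32]byte]entry
	maxEntries uint
}

// New returns a cache holding at most maxEntries validated signatures.
func New(maxEntries uint) *SigCache {
	return &SigCache{validSigs: make(map[[32]byte]entry), maxEntries: maxEntries}
}

// Exists reports whether sig over sigHash by pubKey was already validated,
// letting callers skip a comparatively expensive ECDSA verification.
func (c *SigCache) Exists(sigHash [32]byte, sig, pubKey []byte) bool {
	c.mu.RLock()
	defer c.mu.RUnlock()
	e, ok := c.validSigs[sigHash]
	return ok && bytes.Equal(e.sig, sig) && bytes.Equal(e.pubKey, pubKey)
}

// Add records a validated signature, evicting an arbitrary entry when full
// (Go's random map iteration order doubles as cheap random eviction).
func (c *SigCache) Add(sigHash [32]byte, sig, pubKey []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.maxEntries == 0 {
		return
	}
	if uint(len(c.validSigs))+1 > c.maxEntries {
		for k := range c.validSigs {
			delete(c.validSigs, k)
			break
		}
	}
	c.validSigs[sigHash] = entry{sig: sig, pubKey: pubKey}
}
```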
- Mining support changes:
  - Notify getblocktemplate long polling clients when a block is pushed
    via submitblock (#488)
  - Speed up getblocktemplate by making use of the new signature cache
    (#506)
- RPC changes:
  - Implement getmempoolinfo command (#453)
  - Implement getblockheader command (#461)
  - Modify createrawtransaction command to accept a new optional parameter
    'locktime' (#529)
  - Modify listunspent result to include the 'spendable' field (#440)
  - Modify getinfo command to include 'errors' field (#511)
  - Add timestamps to blockconnected and blockdisconnected notifications
    (#450)
  - Several modifications to searchrawtransactions command:
    - Accept a new optional parameter 'vinextra' which causes the results
      to include information about the outputs referenced by a transaction's
      inputs (#485, #487)
    - Skip entries in the mempool too (#495)
    - Accept a new optional parameter 'reverse' to return the results in
      reverse order (most recent to oldest) (#497)
    - Accept a new optional parameter 'filteraddrs' which causes the
      results to only include inputs and outputs which involve the
      provided addresses (#516)
  - Change the notification order to notify clients about mined
    transactions (recvtx, redeemingtx) before the blockconnected
    notification (#449)
  - Update verifymessage RPC to use the standard algorithm so it is
    compatible with other implementations (#515)
  - Improve ping statistics by pinging on an interval (#517)
- Websocket changes:
  - Implement session command which returns a per-session unique id (#500,
    #503)
- btcctl utility changes:
  - Add getmempoolinfo command (#453)
  - Add getblockheader command (#461)
  - Add getwalletinfo command (#471)
- Notable developer-related package changes:
  - Introduce a new peer package which acts as a common base for creating
    and concurrently managing bitcoin network peers (#445)
  - Various cleanup of the new peer package (#528, #531, #524, #534,
    #549)
  - Block heights now consistently use int32 everywhere (#481)
  - The BlockHeader type in the wire package now provides the BtcDecode
    and BtcEncode methods (#467)
  - Update wire package to recognize BIP0064 (getutxo) service bit (#489)
  - Export LockTimeThreshold constant from txscript package (#454)
  - Export MaxDataCarrierSize constant from txscript package (#466)
  - Provide new IsUnspendable function from the txscript package (#478)
  - Export variable length string functions from the wire package (#514)
  - Export DNS Seeds for each network from the chaincfg package (#544)
  - Preliminary work towards separating the memory pool into a separate
    package (#525, #548)
- Misc changes:
  - Various documentation updates (#442, #462, #465, #460, #470, #473,
    #505, #530, #545)
  - Add installation instructions for gentoo (#542)
  - Ensure an error is shown if OS limits can't be set at startup (#498)
  - Tighten the standardness checks for multisig scripts (#526)
  - Test coverage improvement (#468, #494, #527, #543, #550)
  - Several optimizations (#457, #474, #475, #476, #508, #509)
  - Minor code cleanup and refactoring (#472, #479, #482, #519, #540)
- Contributors (alphabetical order):
  - Ben Echols
  - Bruno Clermont
  - danda
  - Daniel Krawisz
  - Dario Nieuwenhuis
  - Dave Collins
  - David Hill
  - Javed Khan
  - Jonathan Gillham
  - Joseph Becher
  - Josh Rickmar
  - Justus Ranvier
  - Mawuli Adzoe
  - Olaoluwa Osuntokun
  - Rune T. Aune

Changes in 0.11.1 (Wed May 27 2015)
- Protocol and network related changes:
  - Use correct sub-command in reject message for rejected transactions
    (#436, #437)
  - Add a new parameter --torisolation which forces new circuits for each
    connection when using tor (#430)
- Transaction relay (memory pool) changes:
  - Reduce the default maximum number of allowed orphan transactions
    to 1000 (#419)
  - Add a new parameter --maxorphantx which allows the maximum number of
    orphan transactions stored in the mempool to be specified (#419)
- RPC changes:
  - Modify listtransactions result to include the 'involveswatchonly' and
    'vout' fields (#427)
  - Update getrawtransaction result to omit the 'confirmations' field
    when it is 0 (#420, #422)
  - Update signrawtransaction result to include errors (#423)
- btcctl utility changes:
  - Add gettxoutproof command (#428)
  - Add verifytxoutproof command (#428)
- Notable developer-related package changes:
  - The btcec package now provides the ability to perform ECDH
    encryption and decryption (#375)
  - The block and header validation in the blockchain package has been
    split to help pave the way toward concurrent downloads (#386)
- Misc changes:
  - Minor peer optimization (#433)
- Contributors (alphabetical order):
  - Dave Collins
  - David Hill
  - Federico Bond
  - Ishbir Singh
  - Josh Rickmar

Changes in 0.11.0 (Wed May 06 2015)
- Protocol and network related changes:
  - **IMPORTANT: Update is required due to the following point**
  - Correct a few corner cases in script handling which could result in
    forking from the network on non-standard transactions (#425)
  - Add a new checkpoint at block height 352940 (#418)
  - Optimized script execution (#395, #400, #404, #409)
  - Fix a case that could lead to stalled syncs (#138, #296)
- Network address manager changes:
  - Implement eclipse attack countermeasures as proposed in
    http://cs-people.bu.edu/heilman/eclipse (#370, #373)
- Optional address indexing changes:
  - Fix an issue where a reorg could cause an orderly shutdown when the
    address index is active (#340, #357)
- Transaction relay (memory pool) changes:
  - Increase maximum allowed space for nulldata transactions to 80 bytes
    (#331)
  - Implement support for the following rules specified by BIP0062:
    - The S value in an ECDSA signature must be at most half the curve
      order (rule 5) (#349; see the low-S sketch after this list)
    - Script execution must result in a single non-zero value on the stack
      (rule 6) (#347)
    - NOTE: All 7 rules of BIP0062 are now implemented
  - Use network adjusted time in finalized transaction checks to improve
    consistency across nodes (#332)
  - Process orphan transactions on acceptance of new transactions (#345)
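The low-S rule (rule 5) referenced above reduces to one comparison against half the secp256k1 group order. The sketch below uses only the standard library rather than btcd's txscript/btcec code; the group order N is a public curve constant.

```go
package main

import (
	"fmt"
	"math/big"
)

// curveN is the secp256k1 group order (a public curve constant).
var curveN, _ = new(big.Int).SetString(
	"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

// halfOrder is N/2; any signature S value above it is "high-S".
var halfOrder = new(big.Int).Rsh(curveN, 1)

// isLowS reports whether s satisfies BIP0062 rule 5 (S <= N/2).
func isLowS(s *big.Int) bool {
	return s.Cmp(halfOrder) <= 0
}

func main() {
	fmt.Println(isLowS(big.NewInt(1)))                           // true
	fmt.Println(isLowS(new(big.Int).Sub(curveN, big.NewInt(1)))) // false: N-1 is high-S
}
```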
- RPC changes:
  - Add support for a limited RPC user which is not allowed admin level
    operations on the server (#363)
  - Implement node command for more unified control over connected peers
    (#79, #341)
  - Implement generate command for regtest/simnet to support
    deterministically mining a specified number of blocks (#362, #407)
  - Update searchrawtransactions to return the matching transactions in
    order (#354)
  - Correct an issue with searchrawtransactions where it could return
    duplicates (#346, #354)
  - Increase precision of 'difficulty' field in getblock result to 8
    (#414, #415)
  - Omit 'nextblockhash' field from getblock result when it is empty
    (#416, #417)
  - Add 'id' and 'timeoffset' fields to getpeerinfo result (#335)
- Websocket changes:
  - Implement new commands stopnotifyspent, stopnotifyreceived,
    stopnotifyblocks, and stopnotifynewtransactions to allow clients to
    cancel notification registrations (#122, #342)
- btcctl utility changes:
  - A single dash can now be used as an argument to cause that argument to
    be read from stdin (#348)
  - Add generate command
- Notable developer-related package changes:
  - The new version 2 btcjson package has now replaced the deprecated
    version 1 package (#368)
  - The btcec package now performs all signing using RFC6979 deterministic
    signatures (#358, #360)
  - The txscript package has been significantly cleaned up and had a few
    API changes (#387, #388, #389, #390, #391, #392, #393, #395, #396,
    #400, #403, #404, #405, #406, #408, #409, #410, #412)
  - A new PkScriptLocs function has been added to the wire package MsgTx
    type which provides callers that deal with scripts optimization
    opportunities (#343)
- Misc changes:
  - Minor wire hashing optimizations (#366, #367)
  - Other minor internal optimizations
- Contributors (alphabetical order):
  - Alex Akselrod
  - Arne Brutschy
  - Chris Jepson
  - Daniel Krawisz
  - Dave Collins
  - David Hill
  - Jimmy Song
  - Jonas Nick
  - Josh Rickmar
  - Olaoluwa Osuntokun
  - Oleg Andreev

Changes in 0.10.0 (Sun Mar 01 2015)
- Protocol and network related changes:
  - Add a new checkpoint at block height 343185
  - Implement BIP0066 which includes support for version 3 blocks, a new
    consensus rule which prevents non-DER encoded signatures, and a
    double-threshold switchover mechanism
  - Rather than announcing all known addresses on getaddr requests which
    can possibly result in multiple messages, randomize the results and
    limit them to the max allowed by a single message (1000 addresses;
    see the sketch after this list)
  - Add more reserved IP spaces to the address manager
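The randomize-and-cap behavior for getaddr replies described two bullets up can be sketched as below. `randomizedSubset` is a hypothetical helper written for this note (the real logic lives in the address manager); only the 1000-address cap comes from the changelog.

```go
package main

import (
	"fmt"
	"math/rand"
)

// maxAddrPerMsg caps how many addresses fit in a single "addr" message.
const maxAddrPerMsg = 1000

// randomizedSubset returns up to maxAddrPerMsg known addresses in random
// order, so a getaddr reply fits in one message and never exposes the
// full address set in a stable order.
func randomizedSubset(known []string) []string {
	shuffled := make([]string, len(known))
	copy(shuffled, known)
	rand.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})
	if len(shuffled) > maxAddrPerMsg {
		shuffled = shuffled[:maxAddrPerMsg]
	}
	return shuffled
}

func main() {
	fmt.Println(randomizedSubset([]string{"a:16111", "b:16111", "c:16111"}))
}
```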
- Transaction relay (memory pool) changes:
  - Make transactions which contain reserved opcodes nonstandard
  - No longer accept or relay free and low-fee transactions that have
    insufficient priority to be mined in the next block
  - Implement support for the following rules specified by BIP0062:
    - ECDSA signature must use strict DER encoding (rule 1)
    - The signature script must only contain push operations (rule 2)
    - All push operations must use the smallest possible encoding (rule 3)
    - All stack values interpreted as a number must be encoded using the
      shortest possible form (rule 4)
    - NOTE: Rule 1 was already enforced, however the entire script now
      evaluates to false rather than only the signature verification as
      required by BIP0062
  - Allow transactions with nulldata transaction outputs to be treated as
    standard
- Mining support changes:
  - Modify the getblocktemplate RPC to generate and return block templates
    for version 3 blocks which are compatible with BIP0066
  - Allow getblocktemplate to serve blocks when the current time is
    less than the minimum allowed time for a generated block template
    (https://github.com/btcsuite/btcd/issues/209)
- Crypto changes:
  - Optimize scalar multiplication by the base point by using a
    pre-computed table which results in approximately a 35% speedup
    (https://github.com/btcsuite/btcec/issues/2)
  - Optimize general scalar multiplication by using the secp256k1
    endomorphism which results in approximately a 17-20% speedup
    (https://github.com/btcsuite/btcec/issues/1)
  - Optimize general scalar multiplication by using non-adjacent form
    which results in approximately an additional 8% speedup
    (https://github.com/btcsuite/btcec/issues/3)
- Implement optional address indexing:
  - Add a new parameter --addrindex which will enable the creation of an
    address index which can be queried to determine all transactions which
    involve a given address
    (https://github.com/btcsuite/btcd/issues/190)
  - Add a new logging subsystem for address index related operations
  - Support new searchrawtransactions RPC
    (https://github.com/btcsuite/btcd/issues/185)
- RPC changes:
  - Require TLS version 1.2 as the minimum version for all TLS connections
  - Provide support for disabling TLS when only listening on localhost
    (https://github.com/btcsuite/btcd/pull/192)
  - Modify help output for all commands to provide much more consistent
    and detailed information
  - Correct case in getrawtransaction which would refuse to serve certain
    transactions with invalid scripts
    (https://github.com/btcsuite/btcd/issues/210)
  - Correct error handling in the getrawtransaction RPC which could lead
    to a crash in rare cases
    (https://github.com/btcsuite/btcd/issues/196)
  - Update getinfo RPC to include the appropriate 'timeoffset' calculated
    from the median network time
  - Modify listreceivedbyaddress result type to include txids field so it
    is compatible
  - Add 'iswatchonly' field to validateaddress result
  - Add 'startingpriority' and 'currentpriority' fields to getrawmempool
    (https://github.com/btcsuite/btcd/issues/178)
  - Don't omit the 'confirmations' field from getrawtransaction when it is
    zero
- Websocket changes:
  - Modify the behavior of the rescan command to automatically register
    for notifications about transactions paying to rescanned addresses
    or spending outputs from the final rescan utxo set when the rescan
    is through the best block in the chain
- btcctl utility changes:
  - Make the list of commands available via the -l option rather than
    dumping the entire list on usage errors
  - Alphabetize and categorize the list of commands by chain and wallet
  - Make the help option only show the help options instead of also
    dumping all of the commands
  - Make the usage syntax much more consistent and correct a few cases of
    misnamed fields
    (https://github.com/btcsuite/btcd/issues/305)
  - Improve usage errors to show the specific parameter number, reason,
    and error code
  - Only show the usage for a specific command when a valid command
    is provided with invalid parameters
  - Add support for a SOCKS5 proxy
  - Modify output for integer fields (such as timestamps) to display
    normally instead of in scientific notation
  - Add invalidateblock command
  - Add reconsiderblock command
  - Add createnewaccount command
  - Add renameaccount command
  - Add searchrawtransactions command
  - Add importaddress command
  - Add importpubkey command
- showblock utility changes:
  - Remove utility in favor of the RPC getblock method
- Notable developer-related package changes:
  - Many of the core packages have been relocated into the btcd repository
    (https://github.com/btcsuite/btcd/issues/214)
  - A new version of the btcjson package that has been completely
    redesigned from the ground up based upon how the project has
    evolved and lessons learned while using it since it was first written
    is now available in the btcjson/v2/btcjson directory
    - This will ultimately replace the current version so anyone making
      use of this package will need to update their code accordingly
  - The btcec package now provides better facilities for working directly
    with its public and private keys without having to mix elements from
    the ecdsa package
  - Update the script builder to ensure all rules specified by BIP0062 are
    adhered to when creating scripts
  - The blockchain package now provides a MedianTimeSource interface and
    concrete implementation for providing time samples from remote peers
    and using that data to calculate an offset against the local time
- Misc changes:
  - Fix a slow memory leak due to tickers not being stopped
    (https://github.com/btcsuite/btcd/issues/189)
  - Fix an issue where a mix of orphans and SPV clients could trigger a
    condition where peers would no longer be served
    (https://github.com/btcsuite/btcd/issues/231)
  - The RPC username and password can now contain symbols which previously
    conflicted with special symbols used in URLs
  - Improve handling of obtaining random nonces to prevent cases where it
    could error when not enough entropy was available
  - Improve handling of home directory creation errors such as in the case
    of unmounted symlinks (https://github.com/btcsuite/btcd/issues/193)
  - Improve the error reporting for rejected transactions to include the
    inputs which are missing and/or being double spent
  - Update sample config file with new options and correct a comment
    regarding the fact the RPC server only listens on localhost by default
    (https://github.com/btcsuite/btcd/issues/218)
  - Update the continuous integration builds to run several tools which
    help keep code quality high
  - Significant amount of internal code cleanup and improvements
  - Other minor internal optimizations
- Code Contributors (alphabetical order):
  - Beldur
  - Ben Holden-Crowther
  - Dave Collins
  - David Evans
  - David Hill
  - Guilherme Salgado
  - Javed Khan
  - Jimmy Song
  - John C. Vernaleo
  - Jonathan Gillham
  - Josh Rickmar
  - Michael Ford
  - Michail Kargakis
  - kac
  - Olaoluwa Osuntokun

Changes in 0.9.0 (Sat Sep 20 2014)
- Protocol and network related changes:
  - Add a new checkpoint at block height 319400
  - Add support for BIP0037 bloom filters
    (https://github.com/conformal/btcd/issues/132)
  - Implement BIP0061 reject handling and hence support for protocol
    version 70002 (https://github.com/conformal/btcd/issues/133)
  - Add testnet DNS seeds for peer discovery (testnet-seed.alexykot.me
    and testnet-seed.bitcoin.schildbach.de)
  - Add mainnet DNS seed for peer discovery (seeds.bitcoin.open-nodes.org)
  - Make multisig transactions with non-null dummy data nonstandard
    (https://github.com/conformal/btcd/issues/131)
  - Make transactions with an excessive number of signature operations
    nonstandard
  - Perform initial DNS lookups concurrently which allows connections
    to be established more quickly
  - Improve the address manager to significantly reduce memory usage and
    add tests
  - Remove orphan transactions when they appear in a mined block
    (https://github.com/conformal/btcd/issues/166)
  - Apply incremental back off on connection retries for persistent peers
    that give invalid replies to mirror the logic used for failed
    connections (https://github.com/conformal/btcd/issues/103)
  - Correct rate-limiting of free and low-fee transactions
- Mining support changes:
  - Implement getblocktemplate RPC with the following support:
    (https://github.com/conformal/btcd/issues/124)
    - BIP0022 Non-Optional Sections
    - BIP0022 Long Polling
    - BIP0023 Basic Pool Extensions
    - BIP0023 Mutation coinbase/append
    - BIP0023 Mutations time, time/increment, and time/decrement
    - BIP0023 Mutation transactions/add
    - BIP0023 Mutations prevblock, coinbase, and generation
    - BIP0023 Block Proposals
  - Implement built-in concurrent CPU miner
    (https://github.com/conformal/btcd/issues/137)
    NOTE: CPU mining on mainnet is pointless. This has been provided
    for testing purposes such as for the new simulation test network
  - Add --generate flag to enable CPU mining
  - Deprecate the --getworkkey flag in favor of --miningaddr which
    specifies which addresses generated blocks will choose from to pay
    the subsidy to
- RPC changes:
  - Implement gettxout command
    (https://github.com/conformal/btcd/issues/141)
  - Implement validateaddress command
  - Implement verifymessage command
  - Mark getunconfirmedbalance RPC as wallet-only
  - Mark getwalletinfo RPC as wallet-only
  - Update getgenerate, setgenerate, gethashespersec, and getmininginfo
    to return the appropriate information about new CPU mining status
  - Modify getpeerinfo pingtime and pingwait field types to float64 so
    they are compatible
  - Improve disconnect handling for normal HTTP clients
  - Make error code returns for invalid hex more consistent
- Websocket changes:
  - Switch to a new more efficient websocket package
    (https://github.com/conformal/btcd/issues/134)
  - Add rescanfinished notification
  - Modify the rescanprogress notification to include block hash as well
    as height (https://github.com/conformal/btcd/issues/151)
- btcctl utility changes:
  - Accept --simnet flag which automatically selects the appropriate port
    and TLS certificates needed to communicate with btcd and btcwallet on
    the simulation test network
  - Fix createrawtransaction command to send amounts denominated in BTC
  - Add estimatefee command
  - Add estimatepriority command
  - Add getmininginfo command
  - Add getnetworkinfo command
  - Add gettxout command
  - Add lockunspent command
  - Add signrawtransaction command
- addblock utility changes:
  - Accept --simnet flag which automatically selects the appropriate port
    and TLS certificates needed to communicate with btcd and btcwallet on
    the simulation test network
- Notable developer-related package changes:
  - Provide a new bloom package in btcutil which allows creating and
    working with BIP0037 bloom filters
  - Provide a new hdkeychain package in btcutil which allows working with
    BIP0032 hierarchical deterministic key chains
  - Introduce a new btcnet package which houses network parameters
  - Provide new simnet network (--simnet) which is useful for private
    simulation testing
  - Enforce low S values in serialized signatures as detailed in BIP0062
  - Return errors from all methods on the btcdb.Db interface
    (https://github.com/conformal/btcdb/issues/5)
  - Allow behavior flags to alter btcchain.ProcessBlock
    (https://github.com/conformal/btcchain/issues/5)
  - Provide a new SerializeSize API for blocks
    (https://github.com/conformal/btcwire/issues/19)
  - Several of the core packages now work with Google App Engine
- Misc changes:
  - Correct an issue where the database could corrupt under certain
    circumstances which would require a new chain download
  - Slightly optimize deserialization
  - Use the correct IP block for he.net
  - Fix an issue where it was possible the block manager could hang on
    shutdown
  - Update sample config file so the comments are on a separate line
    rather than the end of a line so they are not interpreted as settings
    (https://github.com/conformal/btcd/issues/135)
  - Correct an issue where getdata requests were not being properly
    throttled which could lead to larger than necessary memory usage
  - Always show help when given the help flag even when the config file
    contains invalid entries
  - General code cleanup and minor optimizations

Changes in 0.8.0-beta (Sun May 25 2014)
- Btcd is now Beta (https://github.com/conformal/btcd/issues/130)
- Add a new checkpoint at block height 300255
- Protocol and network related changes:
  - Lower the minimum transaction relay fee to 1000 satoshi to match
    recent reference client changes
    (https://github.com/conformal/btcd/issues/100)
  - Raise the maximum signature script size to support standard 15-of-15
    multi-signature pay-to-script-hash transactions with compressed pubkeys
    to remain compatible with the reference client
    (https://github.com/conformal/btcd/issues/128)
  - Reduce max bytes allowed for a standard nulldata transaction to 40 for
    compatibility with the reference client
  - Introduce a new btcnet package which houses all of the network params
    for each network (mainnet, testnet3, regtest) to ultimately enable
    easier addition and tweaking of networks without needing to change
    several packages
  - Fix several script discrepancies found by reference client test data
  - Add new DNS seed for peer discovery (seed.bitnodes.io)
  - Reduce the max known inventory cache from 20000 items to 1000 items
  - Fix an issue where unknown inventory types could lead to a hung peer
  - Implement inventory rebroadcast handler for sendrawtransaction
    (https://github.com/conformal/btcd/issues/99)
  - Update user agent to fully support BIP0014
    (https://github.com/conformal/btcwire/issues/10)
- Implement initial mining support:
  - Add a new logging subsystem for mining related operations
  - Implement infrastructure for creating block templates
  - Provide options to control block template creation settings
  - Support the getwork RPC
  - Allow address identifiers to apply to more than one network since both
    testnet3 and the regression test network unfortunately use the same
    identifier
- RPC changes:
  - Set the content type for HTTP POST RPC connections to application/json
    (https://github.com/conformal/btcd/issues/121)
  - Modified the RPC server startup so it only requires at least one valid
    listen interface
  - Correct an error path where it was possible certain errors would not
    be returned
  - Implement getwork command
    (https://github.com/conformal/btcd/issues/125)
  - Update sendrawtransaction command to reject orphans
  - Update sendrawtransaction command to include the reason a transaction
    was rejected
  - Update getinfo command to populate connection count field
  - Update getinfo command to include relay fee field
    (https://github.com/conformal/btcd/issues/107)
  - Allow transactions submitted with sendrawtransaction to bypass the
    rate limiter
  - Allow the getcurrentnet and getbestblock extensions to be accessed via
    HTTP POST in addition to Websockets
    (https://github.com/conformal/btcd/issues/127)
- Websocket changes:
  - Rework notifications to ensure they are delivered in the order they
    occur
  - Rename notifynewtxs command to notifyreceived (funds received)
  - Rename notifyallnewtxs command to notifynewtransactions
  - Rename alltx notification to txaccepted
  - Rename allverbosetx notification to txacceptedverbose
    (https://github.com/conformal/btcd/issues/98)
  - Add rescan progress notification
  - Add recvtx notification
  - Add redeemingtx notification
  - Modify notifyspent command to accept an array of outpoints
    (https://github.com/conformal/btcd/issues/123)
  - Significantly optimize the rescan command to yield up to a 60x speed
    increase
- btcctl utility changes:
  - Add createencryptedwallet command
  - Add getblockchaininfo command
  - Add importwallet command
  - Add addmultisigaddress command
  - Add setgenerate command
  - Accept --testnet and --wallet flags which automatically select
    the appropriate port and TLS certificates needed to communicate
    with btcd and btcwallet (https://github.com/conformal/btcd/issues/112)
  - Allow path expansion from config file entries
    (https://github.com/conformal/btcd/issues/113)
  - Minor refactor to simplify handling of options
- addblock utility changes:
  - Improve logging by making it consistent with the logging provided by
    btcd (https://github.com/conformal/btcd/issues/90)
- Improve several package APIs for developers:
  - Add new amount type for consistently handling monetary values
  - Add new coin selector API
  - Add new WIF (Wallet Import Format) API
  - Add new crypto types for private keys and signatures
  - Add new API to sign transactions including script merging and hash
    types
  - Expose function to extract all pushed data from a script
    (https://github.com/conformal/btcscript/issues/8)
- Misc changes:
  - Optimize address manager shuffling to do 67% less work on average
  - Resolve a couple of benign data races found by the race detector
    (https://github.com/conformal/btcd/issues/101)
  - Add IP address to all peer related errors to clarify which peer is the
    cause (https://github.com/conformal/btcd/issues/102)
  - Fix a UPNP case issue that prevented the --upnp option from working
    with some UPNP servers
  - Update documentation in the sample config file regarding debug levels
  - Adjust some logging levels to improve debug messages
  - Improve the throughput of query messages to the block manager
  - Several minor optimizations to reduce GC churn and enhance speed
  - Other minor refactoring
  - General code cleanup

Changes in 0.7.0 (Thu Feb 20 2014)
- Fix an issue when parsing scripts which contain a multi-signature script
  which requires zero signatures such as testnet block
  000000001881dccfeda317393c261f76d09e399e15e27d280e5368420f442632
  (https://github.com/conformal/btcscript/issues/7)
- Add check to ensure all transactions accepted to mempool only contain
  canonical data pushes (https://github.com/conformal/btcscript/issues/6)
- Fix an issue causing excessive memory consumption
- Significantly rework and improve the websocket notification system:
  - Each client is now independent so slow clients no longer limit the
    speed of other connected clients
  - Potentially long-running operations such as rescans are now run in
    their own handler and rate-limited to one operation at a time without
    preventing simultaneous requests from the same client for the faster
    requests or notifications
  - A couple of scenarios which could cause shutdown to hang have been
    resolved
  - Update notifynewtx notifications to support all address types instead
    of only pay-to-pubkey-hash
  - Provide a --rpcmaxwebsockets option to allow limiting the number of
    concurrent websocket clients
  - Add a new websocket command notifyallnewtxs to request notifications
    (https://github.com/conformal/btcd/issues/86) (thanks @flammit)
- Improve btcctl utility in the following ways:
  - Add getnetworkhashps command
  - Add gettransaction command (wallet-specific)
  - Add signmessage command (wallet-specific)
  - Update getwork command to accept
- Continue cleanup and work on implementing the RPC API:
  - Implement getnettotals command
    (https://github.com/conformal/btcd/issues/84)
  - Implement networkhashps command
    (https://github.com/conformal/btcd/issues/87)
  - Update getpeerinfo to always include syncnode field even when false
  - Remove help addenda for getpeerinfo now that it supports all fields
  - Close standard RPC connections on auth failure
  - Provide a --rpcmaxclients option to allow limiting the number of
    concurrent RPC clients (https://github.com/conformal/btcd/issues/68)
  - Include IP address in RPC auth failure log messages
- Resolve rather harmless data races found by the race detector
  (https://github.com/conformal/btcd/issues/94)
- Increase block priority size and max standard transaction size to 50k
  and 100k, respectively (https://github.com/conformal/btcd/issues/71)
- Add rate limiting of free transactions to the memory pool to prevent
  penny flooding (https://github.com/conformal/btcd/issues/40)
- Provide a --logdir option (https://github.com/conformal/btcd/issues/95)
- Change the default log file path to include the network
- Add a new ScriptBuilder interface to btcscript to support creation of
  custom scripts (https://github.com/conformal/btcscript/issues/5)
- General code cleanup

Changes in 0.6.0 (Tue Feb 04 2014)
- Fix an issue when parsing scripts which contain invalid signatures that
  caused a chain fork on block
  0000000000000001e4241fd0b3469a713f41c5682605451c05d3033288fb2244
- Correct an issue which could lead to an error in removeBlockNode
  (https://github.com/conformal/btcchain/issues/4)
- Improve addblock utility as follows:
  - Check imported blocks against all chain rules and checkpoints
  - Skip blocks which are already known so you can stop and restart the
    import or start the import after you have already downloaded a portion
    of the chain
  - Correct an issue where the utility did not shut down cleanly after
    processing all blocks
  - Add error on attempt to import orphan blocks
  - Improve error handling and reporting
  - Display statistics after input file has been fully processed
- Rework, optimize, and improve headers-first mode:
  - Resuming the chain sync from any point before the final checkpoint
    will now use headers-first mode
    (https://github.com/conformal/btcd/issues/69)
  - Verify all checkpoints as opposed to only the final one
  - Reduce and bound memory usage
  - Roll back to the last known good point when a header does not match a
    checkpoint
  - Log information about what is happening with headers
- Improve btcctl utility in the following ways:
  - Add getaddednodeinfo command
  - Add getnettotals command
  - Add getblocktemplate command (wallet-specific)
  - Add getwork command (wallet-specific)
  - Add getnewaddress command (wallet-specific)
  - Add walletpassphrasechange command (wallet-specific)
  - Add walletlock command (wallet-specific)
  - Add sendfrom command (wallet-specific)
  - Add sendmany command (wallet-specific)
  - Add settxfee command (wallet-specific)
  - Add listsinceblock command (wallet-specific)
  - Add listaccounts command (wallet-specific)
  - Add keypoolrefill command (wallet-specific)
  - Add getreceivedbyaccount command (wallet-specific)
  - Add getrawchangeaddress command (wallet-specific)
  - Add gettxoutsetinfo command (wallet-specific)
  - Add listaddressgroupings command (wallet-specific)
  - Add listlockunspent command (wallet-specific)
  - Add listlock command (wallet-specific)
  - Add listreceivedbyaccount command (wallet-specific)
  - Add validateaddress command (wallet-specific)
  - Add verifymessage command (wallet-specific)
  - Add sendtoaddress command (wallet-specific)
- Continue cleanup and work on implementing the RPC API:
  - Implement submitblock command
    (https://github.com/conformal/btcd/issues/61)
  - Implement help command
  - Implement ping command
  - Implement getaddednodeinfo command
    (https://github.com/conformal/btcd/issues/78)
  - Implement getinfo command
  - Update getpeerinfo to support bytesrecv and bytessent
    (https://github.com/conformal/btcd/issues/83)
- Improve and correct several RPC server and websocket areas:
  - Change the connection endpoint for websockets from /wallet to /ws
    (https://github.com/conformal/btcd/issues/80)
  - Implement an alternative authentication for websockets so clients
    such as javascript from browsers that don't support setting HTTP
    headers can authenticate (https://github.com/conformal/btcd/issues/77)
  - Add an authentication deadline for RPC connections
    (https://github.com/conformal/btcd/issues/68)
  - Use standard authentication failure responses for RPC connections
  - Make automatically generated certificate more standard so it works
    from clients such as node.js and Firefox
  - Correct some minor issues which could prevent the RPC server from
    shutting down in an orderly fashion
  - Make all websocket notifications require registration
  - Change the data sent over websockets to text since it is JSON-RPC
  - Allow connections that do not have an Origin header set
  - Expose and track the number of bytes read and written per peer
    (https://github.com/conformal/btcwire/issues/6)
  - Correct an issue with sendrawtransaction when invoked via websockets
    which prevented a minedtx notification from being added
  - Rescan operations issued from remote wallets are now stopped when
    the wallet disconnects mid-operation
    (https://github.com/conformal/btcd/issues/66)
- Several optimizations related to fetching block information from the
  database
- General code cleanup

Changes in 0.5.0 (Mon Jan 13 2014)
- Optimize initial block download by introducing a new mode which
  downloads the block headers first (up to the final checkpoint)
- Improve peer handling to remove the potential for slow peers to cause
  sluggishness amongst all peers
  (https://github.com/conformal/btcd/issues/63)
- Fix an issue where the initial block sync could stall when the sync peer
  disconnects (https://github.com/conformal/btcd/issues/62)
- Correct an issue where --externalip was doing a DNS lookup on the full
  host:port instead of just the host portion
  (https://github.com/conformal/btcd/issues/38)
- Fix an issue which could lead to a panic on chain switches
  (https://github.com/conformal/btcd/issues/70)
- Improve btcctl utility in the following ways:
  - Show getdifficulty output as floating point to 6 digits of precision
  - Show all JSON object replies formatted as standard JSON
  - Allow btcctl getblock to accept optional params
  - Add getaccount command (wallet-specific)
  - Add getaccountaddress command (wallet-specific)
  - Add sendrawtransaction command
- Continue cleanup and work on implementing RPC API calls
  - Update getrawmempool to support new optional verbose flag
  - Update getrawtransaction to match the reference client
  - Update getblock to support new optional verbose flag
  - Update raw transactions to fully match the reference client including
    support for all transaction types and address types
  - Correct getrawmempool fee field to return BTC instead of Satoshi
  - Correct getpeerinfo service flag to return 8 digit string so it
    matches the reference client
  - Correct verifychain to return a boolean
  - Implement decoderawtransaction command
  - Implement createrawtransaction command
  - Implement decodescript command
  - Implement gethashespersec command
  - Allow RPC handler overrides when invoked via a websocket versus
    legacy connection
- Add new DNS seed for peer discovery
- Display user agent on new valid peer log message
  (https://github.com/conformal/btcd/issues/64)
- Notify wallet when new transactions that pay to registered addresses
  show up in the mempool before being mined into a block
- Support a tor-specific proxy in addition to a normal proxy
  (https://github.com/conformal/btcd/issues/47)
- Remove deprecated sqlite3 imports from utilities
- Remove leftover profile write from addblock utility
- Quite a bit of code cleanup and refactoring to improve maintainability

Changes in 0.4.0 (Thu Dec 12 2013)
- Allow listen interfaces to be specified via --listen instead of only the
  port (https://github.com/conformal/btcd/issues/33)
- Allow listen interfaces for the RPC server to be specified via
  --rpclisten instead of only the port
  (https://github.com/conformal/btcd/issues/34)
- Only disable listening when --connect or --proxy are used when no
  --listen interfaces are specified
  (https://github.com/conformal/btcd/issues/10)
- Add several new standard transaction checks to transaction memory pool:
  - Support nulldata scripts as standard
  - Only allow a max of one nulldata output per transaction
  - Enforce a maximum of 3 public keys in multi-signature transactions
  - The number of signatures in multi-signature transactions must not
    exceed the number of public keys
  - The number of inputs to a signature script must match the expected
    number of inputs for the script type
  - The number of inputs pushed onto the stack by a redeeming signature
    script must match the number of inputs consumed by the referenced
    public key script
- When a block is connected, remove any transactions from the memory pool
  which are now double spends as a result of the newly connected
  transactions
- Don't relay transactions resurrected during a chain switch since
  other peers will also be switching chains and therefore already know
  about them
- Clean up a few cases where rejected transactions showed as an error
  rather than as a rejected transaction
- Ignore the default configuration file when --regtest (regression test
  mode) is specified
- Implement TLS support for RPC including automatic certificate generation
- Support HTTP authentication headers for web sockets
- Update address manager to recognize and properly work with Tor
  addresses (https://github.com/conformal/btcd/issues/36) and
  (https://github.com/conformal/btcd/issues/37)
- Improve btcctl utility in the following ways:
  - Add the ability to specify a configuration file
  - Add a default entry for the RPC cert to point to the location
    it will likely be in the btcd home directory
  - Implement --version flag
  - Provide a --notls option to support non-TLS configurations
- Fix a couple of minor races found by the Go race detector
- Improve logging
  - Allow logging level to be specified on a per subsystem basis
    (https://github.com/conformal/btcd/issues/48)
  - Allow logging levels to be dynamically changed via RPC
    (https://github.com/conformal/btcd/issues/15)
  - Implement a rolling log file with a max of 10MB per file and a
    rotation size of 3 which results in a max logging size of 30 MB
- Correct a minor issue with the rescanning websocket call
  (https://github.com/conformal/btcd/issues/54)
- Fix a race with pushing address messages that could lead to a panic
  (https://github.com/conformal/btcd/issues/58)
- Improve which external IP address is reported to peers based on which
  interface they are connected through
  (https://github.com/conformal/btcd/issues/35)
- Add --externalip option to allow an external IP address to be specified
  for cases such as tor hidden services or advanced network configurations
  (https://github.com/conformal/btcd/issues/38)
- Add --upnp option to support automatic port mapping via UPnP
  (https://github.com/conformal/btcd/issues/51)
- Update Ctrl+C interrupt handler to properly sync address manager and
  remove the UPnP port mapping (if needed)
- Continue cleanup and work on implementing RPC API calls
  - Add importprivkey (import private key) command to btcctl
  - Update getrawtransaction to provide addresses properly, support
    new verbose param, and match the reference implementation with the
    exception of MULTISIG (thanks @flammit)
  - Update getblock with new verbose flag (thanks @flammit)
  - Add listtransactions command to btcctl
  - Add getbalance command to btcctl
- Add basic support for btcd to run as a native Windows service
  (https://github.com/conformal/btcd/issues/42)
- Package addblock utility with Windows MSIs
- Add support for TravisCI (continuous build integration)
- Clean up some documentation and usage
- Several other minor bug fixes and general code cleanup

Changes in 0.3.3 (Wed Nov 13 2013)
- Significantly improve initial block chain download speed
  (https://github.com/conformal/btcd/issues/20)
- Add a new checkpoint at block height 267300
- Optimize most recently used inventory handling
  (https://github.com/conformal/btcd/issues/21)
- Optimize duplicate transaction input check
  (https://github.com/conformal/btcchain/issues/2)
- Optimize transaction hashing
  (https://github.com/conformal/btcd/issues/25)
- Rework and optimize wallet listener notifications
  (https://github.com/conformal/btcd/issues/22)
- Optimize serialization and deserialization
  (https://github.com/conformal/btcd/issues/27)
- Add support for minimum transaction fee to memory pool acceptance
  (https://github.com/conformal/btcd/issues/29)
- Improve leveldb database performance by removing explicit GC call
- Fix an issue where Ctrl+C was not always finishing orderly database
  shutdown
- Fix an issue in the script handling for OP_CHECKSIG
- Impose max limits on all variable length protocol entries to prevent
  abuse from malicious peers
- Enforce DER signatures for transactions allowed into the memory pool
- Separate the debug profile http server from the RPC server
- Rework of the RPC code to improve performance and make the code cleaner
- The getrawtransaction RPC call now properly checks the memory pool
  before consulting the db (https://github.com/conformal/btcd/issues/26)
- Add support for the following RPC calls: getpeerinfo, getconnectedcount,
  addnode, verifychain
  (https://github.com/conformal/btcd/issues/13)
  (https://github.com/conformal/btcd/issues/17)
- Implement rescan websocket extension to allow wallet rescans
- Use correct paths for application data storage for all supported
  operating systems (https://github.com/conformal/btcd/issues/30)
- Add a default redirect to the http profiling page when accessing the
  http profile server
- Add a new --cpuprofile option which can be used to generate CPU
  profiling data on platforms that support it
- Several other minor performance optimizations
- Other minor bug fixes and general code cleanup

Changes in 0.3.2 (Tue Oct 22 2013)
- Fix an issue that could cause the download of the block chain to stall
  (https://github.com/conformal/btcd/issues/12)
- Remove deprecated sqlite as an available database backend
- Close sqlite compile issue as sqlite has now been removed
  (https://github.com/conformal/btcd/issues/11)
- Change default RPC ports to 8334 (mainnet) and 18334 (testnet)
- Continue cleanup and work on implementing RPC API calls
  - Add support for the following RPC calls: getrawmempool,
    getbestblockhash, decoderawtransaction, getdifficulty,
    getconnectioncount, getpeerinfo, and addnode
- Improve the btcctl utility that is used to issue JSON-RPC commands
- Fix an issue preventing btcd from cleanly shutting down with the RPC
  stop command
- Add a number of database interface tests to ensure backends implement
  the expected interface
- Expose some additional information from btcscript to be used for
  identifying "standard" transactions
- Add support for plan9 - thanks @mischief
  (https://github.com/conformal/btcd/pull/19)
- Other minor bug fixes and general code cleanup

Changes in 0.3.1-alpha (Tue Oct 15 2013)
- Change default database to leveldb
  NOTE: This does mean you will have to redownload the block chain. Since we
  are still in alpha, we didn't feel writing a converter was worth the time as
  it would take away from more important issues at this stage
- Add a warning if there are multiple block chain databases of different types
- Fix issue with unexpected EOF in leveldb -- https://github.com/conformal/btcd/issues/18
- Fix issue preventing block 21066 on testnet -- https://github.com/conformal/btcchain/issues/1
- Fix issue preventing block 96464 on testnet -- https://github.com/conformal/btcscript/issues/1
- Optimize transaction lookups
- Correct a few cases of list removal that could result in improper cleanup
  of no longer needed orphans
- Add functionality to increase ulimits on non-Windows platforms
- Add support for mempool command which allows remote peers to query the
|
||||
transaction memory pool via the bitcoin protocol
|
||||
- Clean up logging a bit
|
||||
- Add a flag to disable checkpoints for developers
|
||||
- Add a lot of useful debug logging such as message summaries
|
||||
- Other minor bug fixes and general code cleanup
|
||||
|
||||
Initial Release 0.3.0-alpha (Sat Oct 05 2013):
|
||||
- Initial release
|
||||
Gopkg.lock (generated, 105 changed lines)
@@ -1,105 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  name = "bou.ke/monkey"
  packages = ["."]
  revision = "bdf6dea004c6fd1cdf4b25da8ad45a606c09409a"
  version = "v1.0.1"

[[projects]]
  name = "github.com/aead/siphash"
  packages = ["."]
  revision = "83563a290f60225eb120d724600b9690c3fb536f"
  version = "v1.0.1"

[[projects]]
  branch = "master"
  name = "github.com/btcsuite/btclog"
  packages = ["."]
  revision = "84c8d2346e9fc8c7b947e243b9c24e6df9fd206a"

[[projects]]
  branch = "master"
  name = "github.com/btcsuite/go-socks"
  packages = ["socks"]
  revision = "4720035b7bfd2a9bb130b1c184f8bbe41b6f0d0f"

[[projects]]
  name = "github.com/btcsuite/goleveldb"
  packages = ["leveldb","leveldb/cache","leveldb/comparer","leveldb/errors","leveldb/filter","leveldb/iterator","leveldb/journal","leveldb/memdb","leveldb/opt","leveldb/storage","leveldb/table","leveldb/util"]
  revision = "3fd0373267b6461dbefe91cef614278064d05465"
  version = "v1.0.0"

[[projects]]
  name = "github.com/btcsuite/snappy-go"
  packages = ["."]
  revision = "b3db38edf0a9a11a115eb6b022d8c946024a9ac0"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  name = "github.com/btcsuite/websocket"
  packages = ["."]
  revision = "31079b6807923eb23992c421b114992b95131b55"

[[projects]]
  name = "github.com/btcsuite/winsvc"
  packages = ["eventlog","mgr","registry","svc","winapi"]
  revision = "f8fb11f83f7e860e3769a08e6811d1b399a43722"
  version = "v1.0.0"

[[projects]]
  name = "github.com/davecgh/go-spew"
  packages = ["spew"]
  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
  version = "v1.1.1"

[[projects]]
  name = "github.com/jessevdk/go-flags"
  packages = ["."]
  revision = "c6ca198ec95c841fdb89fc0de7496fed11ab854e"
  version = "v1.4.0"

[[projects]]
  name = "github.com/jrick/logrotate"
  packages = ["rotator"]
  revision = "a93b200c26cbae3bb09dd0dc2c7c7fe1468a034a"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  name = "github.com/kkdai/bstream"
  packages = ["."]
  revision = "b3251f7901ec4dd4ec66b3210e8f4bd5c0f1c5a3"

[[projects]]
  name = "github.com/miekg/dns"
  packages = ["."]
  revision = "cc8cd02140663157ce797c6650488d6c8563f31f"
  version = "v1.1.6"

[[projects]]
  branch = "master"
  name = "golang.org/x/crypto"
  packages = ["ed25519","ed25519/internal/edwards25519","ripemd160"]
  revision = "c2843e01d9a2bc60bb26ad24e09734fdc2d9ec58"

[[projects]]
  branch = "master"
  name = "golang.org/x/net"
  packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6"]
  revision = "d8887717615a059821345a5c23649351b52a1c0b"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = ["unix"]
  revision = "fead79001313d15903fb4605b4a1b781532cd93e"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "00392a00928f96fc94e2c8c65ce3a98cc6f5e2f93dda64d3c4502f2f38026e96"
  solver-name = "gps-cdcl"
  solver-version = 1

Gopkg.toml (78 changed lines)
@@ -1,78 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"


[[constraint]]
  name = "bou.ke/monkey"
  version = "1.0.1"

[[constraint]]
  name = "github.com/aead/siphash"
  version = "1.0.1"

[[constraint]]
  branch = "master"
  name = "github.com/btcsuite/btclog"

[[constraint]]
  branch = "master"
  name = "github.com/btcsuite/go-socks"

[[constraint]]
  name = "github.com/btcsuite/goleveldb"
  version = "1.0.0"

[[constraint]]
  branch = "master"
  name = "github.com/btcsuite/websocket"

[[constraint]]
  name = "github.com/btcsuite/winsvc"
  version = "1.0.0"

[[constraint]]
  name = "github.com/davecgh/go-spew"
  version = "1.1.1"

[[constraint]]
  name = "github.com/jessevdk/go-flags"
  version = "1.4.0"

[[constraint]]
  name = "github.com/jrick/logrotate"
  version = "1.0.0"

[[constraint]]
  branch = "master"
  name = "github.com/kkdai/bstream"

[[constraint]]
  name = "github.com/miekg/dns"
  version = "1.1.6"

[[constraint]]
  branch = "master"
  name = "golang.org/x/crypto"

[prune]
  go-tests = true
  unused-packages = true

Jenkinsfile (vendored, 10 changed lines)
@@ -1,10 +0,0 @@
node {
    stage 'Checkout'
    checkout scm

    stage 'Version'
    sh './deploy.sh version'

    stage 'Build'
    sh "./deploy.sh build"
}

LICENSE (3 changed lines)
@@ -1,8 +1,9 @@
ISC License

Copyright (c) 2018-2019 DAGLabs
Copyright (c) 2018-2019 The kaspanet developers
Copyright (c) 2013-2018 The btcsuite developers
Copyright (c) 2015-2016 The Decred developers
Copyright (c) 2013-2014 Conformal Systems LLC.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above

||||
125
README.md
125
README.md
@@ -1,49 +1,24 @@
btcd

Kaspad
====
Warning: This is pre-alpha software. There's no guarantee anything works.
====

[](https://travis-ci.org/btcsuite/btcd)
[](http://copyfree.org)
[](http://godoc.org/github.com/daglabs/btcd)
[](https://choosealicense.com/licenses/isc/)
[](http://godoc.org/github.com/kaspanet/kaspad)

btcd is an alternative full node bitcoin implementation written in Go (golang).
Kaspad is the reference full node Kaspa implementation written in Go (golang).

This project is currently under active development and is in a Beta state. It
is extremely stable and has been in production use since October 2013.

It properly downloads, validates, and serves the block chain using the exact
rules (including consensus bugs) for block acceptance as Bitcoin Core. We have
taken great care to avoid btcd causing a fork to the block chain. It includes a
full block validation testing framework which contains all of the 'official'
block acceptance tests (and some additional ones) that is run on every pull
request to help ensure it properly follows consensus. Also, it passes all of
the JSON test data in the Bitcoin Core code.

It also properly relays newly mined blocks, maintains a transaction pool, and
relays individual transactions that have not yet made it into a block. It
ensures all individual transactions admitted to the pool follow the rules
required by the block chain and also includes more strict checks which filter
transactions based on miner requirements ("standard" transactions).

One key difference between btcd and Bitcoin Core is that btcd does *NOT* include
wallet functionality and this was a very intentional design decision. See the
blog entry [here](https://blog.conformal.com/btcd-not-your-moms-bitcoin-daemon)
for more details. This means you can't actually make or receive payments
directly with btcd. That functionality is provided by the
[btcwallet](https://github.com/btcsuite/btcwallet) and
[Paymetheus](https://github.com/btcsuite/Paymetheus) (Windows-only) projects
which are both under active development.
This project is currently under active development and is in a pre-Alpha state.
Some things still don't work and APIs are far from finalized. The code is provided for reference only.

## Requirements

[Go](http://golang.org) 1.8 or newer.
Latest version of [Go](http://golang.org) (currently 1.13).

## Installation

#### Windows - MSI Available

https://github.com/daglabs/btcd/releases

#### Linux/BSD/MacOSX/POSIX - Build from Source
#### Build from Source

- Install Go according to the installation instructions here:
  http://golang.org/doc/install
@@ -55,92 +30,50 @@ $ go version
$ go env GOROOT GOPATH
```

NOTE: The `GOROOT` and `GOPATH` above must not be the same path.  It is
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
recommended that `GOPATH` is set to a directory in your home directory such as
`~/goprojects` to avoid write permission issues. It is also recommended to add
`~/dev/go` to avoid write permission issues. It is also recommended to add
`$GOPATH/bin` to your `PATH` at this point.

- Run the following commands to obtain btcd, all dependencies, and install it:
- Run the following commands to obtain and install kaspad including all dependencies:

```bash
$ # Install dep: https://golang.github.io/dep/docs/installation.html
$ git clone https://github.com/daglabs/btcd $GOPATH/src/github.com/daglabs/btcd
$ cd $GOPATH/src/github.com/daglabs/btcd
$ dep ensure
$ git clone https://github.com/kaspanet/kaspad $GOPATH/src/github.com/kaspanet/kaspad
$ cd $GOPATH/src/github.com/kaspanet/kaspad
$ ./test.sh
$ go install . ./cmd/...
```
`./test.sh` tests can be skipped, but some things might not run correctly on your system if tests fail.

- btcd (and utilities) will now be installed in ```$GOPATH/bin```. If you did
- Kaspad (and utilities) should now be installed in `$GOPATH/bin`. If you did
  not already add the bin directory to your system path during Go installation,
  we recommend you do so now.
  you are encouraged to do so now.

## Updating

#### Windows

Install a newer MSI

#### Linux/BSD/MacOSX/POSIX - Build from Source

- Run the following commands to update btcd, all dependencies, and install it:

```bash
$ cd $GOPATH/src/github.com/daglabs/btcd
$ git pull && dep ensure
$ go install . ./cmd/...
```

## Getting Started

btcd has several configuration options available to tweak how it runs, but all
of the basic operations described in the intro section work with zero
configuration.

#### Windows (Installed from MSI)

Launch btcd from your Start menu.
Kaspad has several configuration options available to tweak how it runs, but all
of the basic operations work with zero configuration.

#### Linux/BSD/POSIX/Source

```bash
$ ./btcd
$ ./kaspad
```

## IRC

- irc.freenode.net
- channel #btcd
- [webchat](https://webchat.freenode.net/?channels=btcd)
## Discord
Join our discord server using the following link: https://discord.gg/WmGhhzk

## Issue Tracker

The [integrated github issue tracker](https://github.com/daglabs/btcd/issues)
The [integrated github issue tracker](https://github.com/kaspanet/kaspad/issues)
is used for this project.

## Documentation

The documentation is a work-in-progress. It is located in the [docs](https://github.com/daglabs/btcd/tree/master/docs) folder.

## GPG Verification Key

All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:

- Download the Conformal public key:
  https://raw.githubusercontent.com/btcsuite/btcd/master/release/GIT-GPG-KEY-conformal.txt

- Import the public key into your GPG keyring:
```bash
gpg --import GIT-GPG-KEY-conformal.txt
```

- Verify the release tag with the following command where `TAG_NAME` is a
  placeholder for the specific tag:
```bash
git tag -v TAG_NAME
```
The documentation is a work-in-progress. It is located in the [docs](https://github.com/kaspanet/kaspad/tree/master/docs) folder.

## License

btcd is licensed under the [copyfree](http://copyfree.org) ISC License.
Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).

@@ -7,32 +7,30 @@ package addrmgr
import (
    "container/list"
    crand "crypto/rand" // for seeding
    "encoding/base32"
    "encoding/binary"
    "encoding/json"
    "fmt"
    "github.com/pkg/errors"
    "io"
    "math/rand"
    "net"
    "os"
    "path/filepath"
    "strconv"
    "strings"
    "sync"
    "sync/atomic"
    "time"

    "github.com/daglabs/btcd/util/subnetworkid"
    "github.com/kaspanet/kaspad/util/subnetworkid"

    "github.com/daglabs/btcd/dagconfig/daghash"
    "github.com/daglabs/btcd/wire"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/wire"
)

type newBucket [newBucketCount]map[string]*KnownAddress
type triedBucket [triedBucketCount]*list.List

// AddrManager provides a concurrency safe address manager for caching potential
// peers on the bitcoin network.
// peers on the Kaspa network.
type AddrManager struct {
    mtx sync.Mutex
    peersFile string
@@ -160,6 +158,10 @@ const (
    // will consider evicting an address.
    minBadDays = 7

    // getAddrMin is the least addresses that we will send in response
    // to a getAddr. If we have less than this amount, we send everything.
    getAddrMin = 50

    // getAddrMax is the most addresses that we will send in response
    // to a getAddr (in practise the most addresses we will return from a
    // call to AddressCache()).
@@ -304,9 +306,8 @@ func (a *AddrManager) updateAddrTried(bucket int, ka *KnownAddress) {
func (a *AddrManager) expireNew(bucket *newBucket, idx int, decrNewCounter func()) {
    // First see if there are any entries that are so bad we can just throw
    // them away. otherwise we throw away the oldest entry in the cache.
    // Bitcoind here chooses four random and just throws the oldest of
    // those away, but we keep track of oldest in the initial traversal and
    // use that information instead.
    // We keep track of oldest in the initial traversal and use that
    // information instead.
    var oldest *KnownAddress
    for k, v := range bucket[idx] {
        if v.isBad() {
@@ -352,8 +353,7 @@ func (a *AddrManager) expireNewFullNodes(bucket int) {
}

// pickTried selects an address from the tried bucket to be evicted.
// We just choose the eldest. Bitcoind selects 4 random entries and throws away
// the older of them.
// We just choose the eldest.
func (a *AddrManager) pickTried(subnetworkID *subnetworkid.SubnetworkID, bucket int) *list.Element {
    var oldest *KnownAddress
    var oldestElem *list.Element
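
The comments above describe the same eviction policy in both `expireNew` and `pickTried`: discard an entry that has gone bad if one exists, otherwise discard the oldest, which is found in a single traversal. A minimal standalone sketch of that oldest-first scan — `Entry` and its fields are hypothetical stand-ins for the package's `KnownAddress` bookkeeping:

```go
package main

import "time"

type Entry struct {
	lastSeen time.Time
	bad      bool
}

// evictKey picks a key to evict: the first bad entry wins; otherwise we
// remember the entry with the oldest lastSeen during the traversal.
func evictKey(bucket map[string]*Entry) string {
	var oldestKey string
	var oldest *Entry
	for k, v := range bucket {
		if v.bad {
			return k
		}
		if oldest == nil || v.lastSeen.Before(oldest.lastSeen) {
			oldestKey, oldest = k, v
		}
	}
	return oldestKey
}

func main() {
	bucket := map[string]*Entry{
		"1.2.3.4:16111": {lastSeen: time.Now().Add(-time.Hour)},
		"5.6.7.8:16111": {lastSeen: time.Now()},
	}
	_ = evictKey(bucket) // "1.2.3.4:16111" -- the older entry
}
```
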
@@ -375,7 +375,6 @@ func (a *AddrManager) pickTried(subnetworkID *subnetworkid.SubnetworkID, bucket
}

func (a *AddrManager) getNewBucket(netAddr, srcAddr *wire.NetAddress) int {
    // bitcoind:
    // doublesha256(key + sourcegroup + int64(doublesha256(key + group + sourcegroup))%bucket_per_source_group) % num_new_buckets

    data1 := []byte{}
@@ -397,7 +396,6 @@ func (a *AddrManager) getNewBucket(netAddr, srcAddr *wire.NetAddress) int {
}

func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int {
    // bitcoind hashes this as:
    // doublesha256(key + group + truncate_to_64bits(doublesha256(key)) % buckets_per_group) % num_buckets
    data1 := []byte{}
    data1 = append(data1, a.key[:]...)
@@ -416,7 +414,7 @@ func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int {
    return int(binary.LittleEndian.Uint64(hash2) % triedBucketCount)
}

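The hunks above only show fragments of the bucket-selection hashing, but the quoted bitcoind formula can be followed end to end. A self-contained sketch of the tried-bucket computation, patterned on the visible fragments (`doubleSha256` is a local stand-in and the constants are made up; the real ones are unexported in this package):

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// doubleSha256 stands in for the double-SHA256 helper the package uses.
func doubleSha256(b []byte) []byte {
	first := sha256.Sum256(b)
	second := sha256.Sum256(first[:])
	return second[:]
}

// Made-up values; the real constants live in the addrmgr package.
const (
	triedBucketCount     = 64
	triedBucketsPerGroup = 4
)

// triedBucket sketches the quoted formula: an inner double-SHA256 is
// truncated to 64 bits and reduced modulo buckets-per-group, then fed
// into an outer double-SHA256 that selects the final bucket.
func triedBucket(key [32]byte, group string) int {
	data1 := append([]byte{}, key[:]...)
	data1 = append(data1, group...)
	hash64 := binary.LittleEndian.Uint64(doubleSha256(data1)) % triedBucketsPerGroup

	var hashbuf [8]byte
	binary.LittleEndian.PutUint64(hashbuf[:], hash64)
	data2 := append([]byte{}, key[:]...)
	data2 = append(data2, group...)
	data2 = append(data2, hashbuf[:]...)
	return int(binary.LittleEndian.Uint64(doubleSha256(data2)) % triedBucketCount)
}

func main() {
	var key [32]byte // the manager fills this from a good random source
	fmt.Println(triedBucket(key, "173.194.0.0"))
}
```

Keying the hash on a random per-node secret plus the network group means a remote peer cannot predict (or force) which bucket its addresses land in, which limits bucket-flooding attacks.
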
// addressHandler is the main handler for the address manager.  It must be run
// addressHandler is the main handler for the address manager. It must be run
// as a goroutine.
func (a *AddrManager) addressHandler() {
    dumpAddressTicker := time.NewTicker(dumpAddressInterval)
@@ -532,7 +530,7 @@ func (a *AddrManager) savePeers() {
    }
}

// loadPeers loads the known address from the saved file.  If empty, missing, or
// loadPeers loads the known address from the saved file. If empty, missing, or
// malformed file, just don't load anything and start fresh
func (a *AddrManager) loadPeers() {
    a.mtx.Lock()
@@ -560,7 +558,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
    }
    r, err := os.Open(filePath)
    if err != nil {
        return fmt.Errorf("%s error opening file: %s", filePath, err)
        return errors.Errorf("%s error opening file: %s", filePath, err)
    }
    defer r.Close()

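A change that recurs throughout this file is swapping `fmt.Errorf` for `errors.Errorf` from github.com/pkg/errors. Both produce the same message; the pkg/errors version also records a stack trace at the point the error is created, which `%+v` reveals. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	plain := fmt.Errorf("%s error opening file: %s", "peers.json", "permission denied")
	traced := errors.Errorf("%s error opening file: %s", "peers.json", "permission denied")

	fmt.Printf("%v\n", plain)   // message only
	fmt.Printf("%v\n", traced)  // same message...
	fmt.Printf("%+v\n", traced) // ...plus the stack recorded at creation
}
```
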
@@ -568,11 +566,11 @@ func (a *AddrManager) deserializePeers(filePath string) error {
    dec := json.NewDecoder(r)
    err = dec.Decode(&sam)
    if err != nil {
        return fmt.Errorf("error reading %s: %s", filePath, err)
        return errors.Errorf("error reading %s: %s", filePath, err)
    }

    if sam.Version != serialisationVersion {
        return fmt.Errorf("unknown version %d in serialized "+
        return errors.Errorf("unknown version %d in serialized "+
            "addrmanager", sam.Version)
    }
    copy(a.key[:], sam.Key[:])
@@ -581,18 +579,18 @@ func (a *AddrManager) deserializePeers(filePath string) error {
        ka := new(KnownAddress)
        ka.na, err = a.DeserializeNetAddress(v.Addr)
        if err != nil {
            return fmt.Errorf("failed to deserialize netaddress "+
            return errors.Errorf("failed to deserialize netaddress "+
                "%s: %s", v.Addr, err)
        }
        ka.srcAddr, err = a.DeserializeNetAddress(v.Src)
        if err != nil {
            return fmt.Errorf("failed to deserialize netaddress "+
            return errors.Errorf("failed to deserialize netaddress "+
                "%s: %s", v.Src, err)
        }
        if v.SubnetworkID != "" {
            ka.subnetworkID, err = subnetworkid.NewFromStr(v.SubnetworkID)
            if err != nil {
                return fmt.Errorf("failed to deserialize subnetwork id "+
                return errors.Errorf("failed to deserialize subnetwork id "+
                    "%s: %s", v.SubnetworkID, err)
            }
        }
@@ -611,7 +609,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
        for _, val := range subnetworkNewBucket {
            ka, ok := a.addrIndex[val]
            if !ok {
                return fmt.Errorf("newbucket contains %s but "+
                return errors.Errorf("newbucket contains %s but "+
                    "none in address list", val)
            }

@@ -628,7 +626,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
        for _, val := range newBucket {
            ka, ok := a.addrIndex[val]
            if !ok {
                return fmt.Errorf("full nodes newbucket contains %s but "+
                return errors.Errorf("full nodes newbucket contains %s but "+
                    "none in address list", val)
            }

@@ -649,7 +647,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
        for _, val := range subnetworkTriedBucket {
            ka, ok := a.addrIndex[val]
            if !ok {
                return fmt.Errorf("Tried bucket contains %s but "+
                return errors.Errorf("Tried bucket contains %s but "+
                    "none in address list", val)
            }

@@ -664,7 +662,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
        for _, val := range triedBucket {
            ka, ok := a.addrIndex[val]
            if !ok {
                return fmt.Errorf("Full nodes tried bucket contains %s but "+
                return errors.Errorf("Full nodes tried bucket contains %s but "+
                    "none in address list", val)
            }

@@ -677,12 +675,12 @@ func (a *AddrManager) deserializePeers(filePath string) error {
    // Sanity checking.
    for k, v := range a.addrIndex {
        if v.refs == 0 && !v.tried {
            return fmt.Errorf("address %s after serialisation "+
            return errors.Errorf("address %s after serialisation "+
                "with no references", k)
        }

        if v.refs > 0 && v.tried {
            return fmt.Errorf("address %s after serialisation "+
            return errors.Errorf("address %s after serialisation "+
                "which is both new and tried!", k)
        }
    }
@@ -719,7 +717,7 @@ func (a *AddrManager) Start() {

    // Start the address ticker to save addresses periodically.
    a.wg.Add(1)
    go a.addressHandler()
    spawn(a.addressHandler)
}

// Stop gracefully shuts down the address manager by stopping the main handler.
@@ -736,8 +734,8 @@ func (a *AddrManager) Stop() error {
    return nil
}

// AddAddresses adds new addresses to the address manager.  It enforces a max
// number of addresses and silently ignores duplicate addresses.  It is
// AddAddresses adds new addresses to the address manager. It enforces a max
// number of addresses and silently ignores duplicate addresses. It is
// safe for concurrent access.
func (a *AddrManager) AddAddresses(addrs []*wire.NetAddress, srcAddr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) {
    a.mtx.Lock()
@@ -748,8 +746,8 @@ func (a *AddrManager) AddAddresses(addrs []*wire.NetAddress, srcAddr *wire.NetAd
    }
}

// AddAddress adds a new address to the address manager.  It enforces a max
// number of addresses and silently ignores duplicate addresses.  It is
// AddAddress adds a new address to the address manager. It enforces a max
// number of addresses and silently ignores duplicate addresses. It is
// safe for concurrent access.
func (a *AddrManager) AddAddress(addr, srcAddr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) {
    a.mtx.Lock()
@@ -769,11 +767,11 @@ func (a *AddrManager) AddAddressByIP(addrIP string, subnetworkID *subnetworkid.S
    // Put it in wire.Netaddress
    ip := net.ParseIP(addr)
    if ip == nil {
        return fmt.Errorf("invalid ip address %s", addr)
        return errors.Errorf("invalid ip address %s", addr)
    }
    port, err := strconv.ParseUint(portStr, 10, 0)
    if err != nil {
        return fmt.Errorf("invalid port %s: %s", portStr, err)
        return errors.Errorf("invalid port %s: %s", portStr, err)
    }
    na := wire.NewNetAddressIPPort(ip, uint16(port), 0)
    a.AddAddress(na, na, subnetworkID) // XXX use correct src address
@@ -822,7 +820,7 @@ func (a *AddrManager) NeedMoreAddresses() bool {
    return allAddrs < needAddressThreshold
}

// AddressCache returns the current address cache.  It must be treated as
// AddressCache returns the current address cache. It must be treated as
// read-only (but since it is a copy now, this is not as dangerous).
func (a *AddrManager) AddressCache(includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) []*wire.NetAddress {
    a.mtx.Lock()
@@ -844,6 +842,12 @@ func (a *AddrManager) AddressCache(includeAllSubnetworks bool, subnetworkID *sub
    if numAddresses > getAddrMax {
        numAddresses = getAddrMax
    }
    if len(allAddr) < getAddrMin {
        numAddresses = len(allAddr)
    }
    if len(allAddr) > getAddrMin && numAddresses < getAddrMin {
        numAddresses = getAddrMin
    }

    // Fisher-Yates shuffle the array. We only need to do the first
    // `numAddresses' since we are throwing the rest.
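
The added clamping above keeps `numAddresses` between `getAddrMin` and `getAddrMax`, and the comment notes that only a partial Fisher-Yates shuffle is required since the tail is discarded. A standalone sketch of that partial shuffle over plain strings:

```go
package main

import (
	"fmt"
	"math/rand"
)

// pickRandom shuffles only the first n positions of addrs -- each step
// swaps position i with a uniformly chosen element of the untouched
// tail [i, len) -- and returns those n. Finishing the shuffle would be
// wasted work because the rest is thrown away.
func pickRandom(addrs []string, n int) []string {
	if n > len(addrs) {
		n = len(addrs)
	}
	for i := 0; i < n; i++ {
		j := i + rand.Intn(len(addrs)-i)
		addrs[i], addrs[j] = addrs[j], addrs[i]
	}
	return addrs[:n]
}

func main() {
	addrs := []string{"a", "b", "c", "d", "e"}
	fmt.Println(pickRandom(addrs, 2)) // two uniformly chosen entries
}
```
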
@@ -860,7 +864,6 @@ func (a *AddrManager) AddressCache(includeAllSubnetworks bool, subnetworkID *sub
// reset resets the address manager by reinitialising the random source
// and allocating fresh empty bucket storage.
func (a *AddrManager) reset() {

    a.addrIndex = make(map[string]*KnownAddress)

    // fill key with bytes from a good random source.
@@ -881,30 +884,17 @@ func (a *AddrManager) reset() {
    a.nTriedFullNodes = 0
}

// HostToNetAddress returns a netaddress given a host address.  If the address
// is a Tor .onion address this will be taken care of.  Else if the host is
// not an IP address it will be resolved (via Tor if required).
// HostToNetAddress returns a netaddress given a host address. If
// the host is not an IP address it will be resolved.
func (a *AddrManager) HostToNetAddress(host string, port uint16, services wire.ServiceFlag) (*wire.NetAddress, error) {
    // Tor address is 16 char base32 + ".onion"
    var ip net.IP
    if len(host) == 22 && host[16:] == ".onion" {
        // go base32 encoding uses capitals (as does the rfc
        // but Tor and bitcoind tend to user lowercase, so we switch
        // case here.
        data, err := base32.StdEncoding.DecodeString(
            strings.ToUpper(host[:16]))
        if err != nil {
            return nil, err
        }
        prefix := []byte{0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43}
        ip = net.IP(append(prefix, data...))
    } else if ip = net.ParseIP(host); ip == nil {
    ip := net.ParseIP(host)
    if ip == nil {
        ips, err := a.lookupFunc(host)
        if err != nil {
            return nil, err
        }
        if len(ips) == 0 {
            return nil, fmt.Errorf("no addresses found for %s", host)
            return nil, errors.Errorf("no addresses found for %s", host)
        }
        ip = ips[0]
    }
@@ -912,28 +902,15 @@ func (a *AddrManager) HostToNetAddress(host string, port uint16, services wire.S
    return wire.NewNetAddressIPPort(ip, port, services), nil
}

// ipString returns a string for the ip from the provided NetAddress. If the
// ip is in the range used for Tor addresses then it will be transformed into
// the relevant .onion address.
func ipString(na *wire.NetAddress) string {
    if IsOnionCatTor(na) {
        // We know now that na.IP is long enough.
        base32 := base32.StdEncoding.EncodeToString(na.IP[6:])
        return strings.ToLower(base32) + ".onion"
    }

    return na.IP.String()
}

// NetAddressKey returns a string key in the form of ip:port for IPv4 addresses
// or [ip]:port for IPv6 addresses.
func NetAddressKey(na *wire.NetAddress) string {
    port := strconv.FormatUint(uint64(na.Port), 10)

    return net.JoinHostPort(ipString(na), port)
    return net.JoinHostPort(na.IP.String(), port)
}

// GetAddress returns a single address that should be routable.  It picks a
// GetAddress returns a single address that should be routable. It picks a
// random one from the possible addresses with preference given to ones that
// have not been used recently and should not pick 'close' addresses
// consecutively.
@@ -942,15 +919,18 @@ func (a *AddrManager) GetAddress() *KnownAddress {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    var knownAddress *KnownAddress
    if a.localSubnetworkID == nil {
        return a.getAddress(&a.addrTriedFullNodes, a.nTriedFullNodes,
        knownAddress = a.getAddress(&a.addrTriedFullNodes, a.nTriedFullNodes,
            &a.addrNewFullNodes, a.nNewFullNodes)
    } else {
        subnetworkID := *a.localSubnetworkID
        knownAddress = a.getAddress(a.addrTried[subnetworkID], a.nTried[subnetworkID],
            a.addrNew[subnetworkID], a.nNew[subnetworkID])
    }

    subnetworkID := *a.localSubnetworkID
    return knownAddress

    return a.getAddress(a.addrTried[subnetworkID], a.nTried[subnetworkID],
        a.addrNew[subnetworkID], a.nNew[subnetworkID])
}

// see GetAddress for details
@@ -1036,7 +1016,7 @@ func (a *AddrManager) Attempt(addr *wire.NetAddress) {
}

// Connected Marks the given address as currently connected and working at the
// current time.  The address must already be known to AddrManager else it will
// current time. The address must already be known to AddrManager else it will
// be ignored.
func (a *AddrManager) Connected(addr *wire.NetAddress) {
    a.mtx.Lock()
@@ -1058,8 +1038,8 @@ func (a *AddrManager) Connected(addr *wire.NetAddress) {
    }
}

// Good marks the given address as good.  To be called after a successful
// connection and version exchange.  If the address is unknown to the address
// Good marks the given address as good. To be called after a successful
// connection and version exchange. If the address is unknown to the address
// manager it will be ignored.
func (a *AddrManager) Good(addr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) {
    a.mtx.Lock()
@@ -1224,7 +1204,7 @@ func (a *AddrManager) Good(addr *wire.NetAddress, subnetworkID *subnetworkid.Sub
// with the given priority.
func (a *AddrManager) AddLocalAddress(na *wire.NetAddress, priority AddressPriority) error {
    if !IsRoutable(na) {
        return fmt.Errorf("address %s is not routable", na.IP)
        return errors.Errorf("address %s is not routable", na.IP)
    }

    a.lamtx.Lock()
@@ -1262,18 +1242,6 @@ func getReachabilityFrom(localAddr, remoteAddr *wire.NetAddress) int {
        return Unreachable
    }

    if IsOnionCatTor(remoteAddr) {
        if IsOnionCatTor(localAddr) {
            return Private
        }

        if IsRoutable(localAddr) && IsIPv4(localAddr) {
            return Ipv4
        }

        return Default
    }

    if IsRFC4380(remoteAddr) {
        if !IsRoutable(localAddr) {
            return Default
@@ -1351,7 +1319,7 @@ func (a *AddrManager) GetBestLocalAddress(remoteAddr *wire.NetAddress) *wire.Net

    // Send something unroutable if nothing suitable.
    var ip net.IP
    if !IsIPv4(remoteAddr) && !IsOnionCatTor(remoteAddr) {
    if !IsIPv4(remoteAddr) {
        ip = net.IPv6zero
    } else {
        ip = net.IPv4zero
@@ -1363,7 +1331,7 @@ func (a *AddrManager) GetBestLocalAddress(remoteAddr *wire.NetAddress) *wire.Net
    return bestAddress
}

// New returns a new bitcoin address manager.
// New returns a new Kaspa address manager.
// Use Start to begin processing asynchronous address updates.
func New(dataDir string, lookupFunc func(string) ([]net.IP, error), subnetworkID *subnetworkid.SubnetworkID) *AddrManager {
    am := AddrManager{

@@ -5,16 +5,18 @@
package addrmgr

import (
    "errors"
    "fmt"
    "github.com/kaspanet/kaspad/config"
    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/kaspanet/kaspad/util/subnetworkid"
    "net"
    "reflect"
    "testing"
    "time"

    "github.com/daglabs/btcd/util/subnetworkid"
    "github.com/pkg/errors"

    "github.com/daglabs/btcd/wire"
    "github.com/kaspanet/kaspad/wire"
)

// naTest is used to describe a test to be performed against the NetAddressKey
@@ -35,59 +37,59 @@ var someIP = "173.194.115.66"
func addNaTests() {
    // IPv4
    // Localhost
    addNaTest("127.0.0.1", 8333, "127.0.0.1:8333")
    addNaTest("127.0.0.1", 8334, "127.0.0.1:8334")
    addNaTest("127.0.0.1", 16111, "127.0.0.1:16111")
    addNaTest("127.0.0.1", 16110, "127.0.0.1:16110")

    // Class A
    addNaTest("1.0.0.1", 8333, "1.0.0.1:8333")
    addNaTest("2.2.2.2", 8334, "2.2.2.2:8334")
    addNaTest("1.0.0.1", 16111, "1.0.0.1:16111")
    addNaTest("2.2.2.2", 16110, "2.2.2.2:16110")
    addNaTest("27.253.252.251", 8335, "27.253.252.251:8335")
    addNaTest("123.3.2.1", 8336, "123.3.2.1:8336")

    // Private Class A
    addNaTest("10.0.0.1", 8333, "10.0.0.1:8333")
    addNaTest("10.1.1.1", 8334, "10.1.1.1:8334")
    addNaTest("10.0.0.1", 16111, "10.0.0.1:16111")
    addNaTest("10.1.1.1", 16110, "10.1.1.1:16110")
    addNaTest("10.2.2.2", 8335, "10.2.2.2:8335")
    addNaTest("10.10.10.10", 8336, "10.10.10.10:8336")

    // Class B
    addNaTest("128.0.0.1", 8333, "128.0.0.1:8333")
    addNaTest("129.1.1.1", 8334, "129.1.1.1:8334")
    addNaTest("128.0.0.1", 16111, "128.0.0.1:16111")
    addNaTest("129.1.1.1", 16110, "129.1.1.1:16110")
    addNaTest("180.2.2.2", 8335, "180.2.2.2:8335")
    addNaTest("191.10.10.10", 8336, "191.10.10.10:8336")

    // Private Class B
    addNaTest("172.16.0.1", 8333, "172.16.0.1:8333")
    addNaTest("172.16.1.1", 8334, "172.16.1.1:8334")
    addNaTest("172.16.0.1", 16111, "172.16.0.1:16111")
    addNaTest("172.16.1.1", 16110, "172.16.1.1:16110")
    addNaTest("172.16.2.2", 8335, "172.16.2.2:8335")
    addNaTest("172.16.172.172", 8336, "172.16.172.172:8336")

    // Class C
    addNaTest("193.0.0.1", 8333, "193.0.0.1:8333")
    addNaTest("200.1.1.1", 8334, "200.1.1.1:8334")
    addNaTest("193.0.0.1", 16111, "193.0.0.1:16111")
    addNaTest("200.1.1.1", 16110, "200.1.1.1:16110")
    addNaTest("205.2.2.2", 8335, "205.2.2.2:8335")
    addNaTest("223.10.10.10", 8336, "223.10.10.10:8336")

    // Private Class C
    addNaTest("192.168.0.1", 8333, "192.168.0.1:8333")
    addNaTest("192.168.1.1", 8334, "192.168.1.1:8334")
    addNaTest("192.168.0.1", 16111, "192.168.0.1:16111")
    addNaTest("192.168.1.1", 16110, "192.168.1.1:16110")
    addNaTest("192.168.2.2", 8335, "192.168.2.2:8335")
    addNaTest("192.168.192.192", 8336, "192.168.192.192:8336")

    // IPv6
    // Localhost
    addNaTest("::1", 8333, "[::1]:8333")
    addNaTest("fe80::1", 8334, "[fe80::1]:8334")
    addNaTest("::1", 16111, "[::1]:16111")
    addNaTest("fe80::1", 16110, "[fe80::1]:16110")

    // Link-local
    addNaTest("fe80::1:1", 8333, "[fe80::1:1]:8333")
    addNaTest("fe91::2:2", 8334, "[fe91::2:2]:8334")
    addNaTest("fe80::1:1", 16111, "[fe80::1:1]:16111")
    addNaTest("fe91::2:2", 16110, "[fe91::2:2]:16110")
    addNaTest("fea2::3:3", 8335, "[fea2::3:3]:8335")
    addNaTest("feb3::4:4", 8336, "[feb3::4:4]:8336")

    // Site-local
    addNaTest("fec0::1:1", 8333, "[fec0::1:1]:8333")
    addNaTest("fed1::2:2", 8334, "[fed1::2:2]:8334")
    addNaTest("fec0::1:1", 16111, "[fec0::1:1]:16111")
    addNaTest("fed1::2:2", 16110, "[fed1::2:2]:16110")
    addNaTest("fee2::3:3", 8335, "[fee2::3:3]:8335")
    addNaTest("fef3::4:4", 8336, "[fef3::4:4]:8336")
}
@@ -113,14 +115,23 @@ func TestStartStop(t *testing.T) {
}

func TestAddAddressByIP(t *testing.T) {
    fmtErr := fmt.Errorf("")
    originalActiveCfg := config.ActiveConfig()
    config.SetActiveConfig(&config.Config{
        Flags: &config.Flags{
            NetworkFlags: config.NetworkFlags{
                ActiveNetParams: &dagconfig.SimnetParams},
        },
    })
    defer config.SetActiveConfig(originalActiveCfg)

    fmtErr := errors.Errorf("")
    addrErr := &net.AddrError{}
    var tests = []struct {
        addrIP string
        err    error
    }{
        {
            someIP + ":8333",
            someIP + ":16111",
            nil,
        },
        {
@@ -141,15 +152,15 @@ func TestAddAddressByIP(t *testing.T) {
    for i, test := range tests {
        err := amgr.AddAddressByIP(test.addrIP, nil)
        if test.err != nil && err == nil {
            t.Errorf("TestGood test %d failed expected an error and got none", i)
            t.Errorf("TestAddAddressByIP test %d failed expected an error and got none", i)
            continue
        }
        if test.err == nil && err != nil {
            t.Errorf("TestGood test %d failed expected no error and got one", i)
            t.Errorf("TestAddAddressByIP test %d failed expected no error and got one", i)
            continue
        }
        if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
            t.Errorf("TestGood test %d failed got %v, want %v", i,
            t.Errorf("TestAddAddressByIP test %d failed got %v, want %v", i,
                reflect.TypeOf(err), reflect.TypeOf(test.err))
            continue
        }
@@ -157,6 +168,15 @@ func TestAddAddressByIP(t *testing.T) {
}

func TestAddLocalAddress(t *testing.T) {
    originalActiveCfg := config.ActiveConfig()
    config.SetActiveConfig(&config.Config{
        Flags: &config.Flags{
            NetworkFlags: config.NetworkFlags{
                ActiveNetParams: &dagconfig.SimnetParams},
        },
    })
    defer config.SetActiveConfig(originalActiveCfg)

    var tests = []struct {
        address  wire.NetAddress
        priority AddressPriority
@@ -210,6 +230,15 @@ func TestAddLocalAddress(t *testing.T) {
}

func TestAttempt(t *testing.T) {
    originalActiveCfg := config.ActiveConfig()
    config.SetActiveConfig(&config.Config{
        Flags: &config.Flags{
            NetworkFlags: config.NetworkFlags{
                ActiveNetParams: &dagconfig.SimnetParams},
        },
    })
    defer config.SetActiveConfig(originalActiveCfg)

    n := New("testattempt", lookupFunc, nil)

    // Add a new address and get it
@@ -232,6 +261,15 @@ func TestAttempt(t *testing.T) {
}

func TestConnected(t *testing.T) {
    originalActiveCfg := config.ActiveConfig()
    config.SetActiveConfig(&config.Config{
        Flags: &config.Flags{
            NetworkFlags: config.NetworkFlags{
                ActiveNetParams: &dagconfig.SimnetParams},
        },
    })
    defer config.SetActiveConfig(originalActiveCfg)

    n := New("testconnected", lookupFunc, nil)

    // Add a new address and get it
@@ -252,6 +290,15 @@ func TestConnected(t *testing.T) {
}

func TestNeedMoreAddresses(t *testing.T) {
    originalActiveCfg := config.ActiveConfig()
    config.SetActiveConfig(&config.Config{
        Flags: &config.Flags{
            NetworkFlags: config.NetworkFlags{
                ActiveNetParams: &dagconfig.SimnetParams},
        },
    })
    defer config.SetActiveConfig(originalActiveCfg)

    n := New("testneedmoreaddresses", lookupFunc, nil)
    addrsToAdd := 1500
    b := n.NeedMoreAddresses()
@@ -284,6 +331,15 @@ func TestNeedMoreAddresses(t *testing.T) {
}

func TestGood(t *testing.T) {
    originalActiveCfg := config.ActiveConfig()
    config.SetActiveConfig(&config.Config{
        Flags: &config.Flags{
            NetworkFlags: config.NetworkFlags{
                ActiveNetParams: &dagconfig.SimnetParams},
        },
    })
    defer config.SetActiveConfig(originalActiveCfg)

    n := New("testgood", lookupFunc, nil)
    addrsToAdd := 64 * 64
    addrs := make([]*wire.NetAddress, addrsToAdd)
@@ -331,6 +387,15 @@ func TestGood(t *testing.T) {
}

func TestGoodChangeSubnetworkID(t *testing.T) {
    originalActiveCfg := config.ActiveConfig()
    config.SetActiveConfig(&config.Config{
        Flags: &config.Flags{
            NetworkFlags: config.NetworkFlags{
                ActiveNetParams: &dagconfig.SimnetParams},
        },
    })
    defer config.SetActiveConfig(originalActiveCfg)

    n := New("test_good_change_subnetwork_id", lookupFunc, nil)
    addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
    addrKey := NetAddressKey(addr)
@@ -400,6 +465,15 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
}

func TestGetAddress(t *testing.T) {
    originalActiveCfg := config.ActiveConfig()
    config.SetActiveConfig(&config.Config{
        Flags: &config.Flags{
            NetworkFlags: config.NetworkFlags{
                ActiveNetParams: &dagconfig.SimnetParams},
        },
    })
    defer config.SetActiveConfig(originalActiveCfg)

    localSubnetworkID := &subnetworkid.SubnetworkID{0xff}
    n := New("testgetaddress", lookupFunc, localSubnetworkID)

@@ -417,6 +491,7 @@ func TestGetAddress(t *testing.T) {
    if ka == nil {
        t.Fatalf("Did not get an address where there is one in the pool")
    }
    n.Attempt(ka.NetAddress())

    // Checks that we don't get it if we find that it has other subnetwork ID than expected.
    actualSubnetworkID := &subnetworkid.SubnetworkID{0xfe}
@@ -449,6 +524,7 @@ func TestGetAddress(t *testing.T) {
    if !ka.SubnetworkID().IsEqual(localSubnetworkID) {
        t.Errorf("Wrong Subnetwork ID: got %v, want %v", *ka.SubnetworkID(), localSubnetworkID)
    }
    n.Attempt(ka.NetAddress())

    // Mark this as a good address and get it
    n.Good(ka.NetAddress(), localSubnetworkID)
@@ -470,6 +546,15 @@ func TestGetAddress(t *testing.T) {
}

func TestGetBestLocalAddress(t *testing.T) {
    originalActiveCfg := config.ActiveConfig()
    config.SetActiveConfig(&config.Config{
        Flags: &config.Flags{
            NetworkFlags: config.NetworkFlags{
                ActiveNetParams: &dagconfig.SimnetParams},
        },
    })
    defer config.SetActiveConfig(originalActiveCfg)

    localAddrs := []wire.NetAddress{
        {IP: net.ParseIP("192.168.0.100")},
        {IP: net.ParseIP("::1")},
@@ -562,7 +647,6 @@ func TestGetBestLocalAddress(t *testing.T) {
    // Add a Tor generated IP address
    localAddr = wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")}
    amgr.AddLocalAddress(&localAddr, ManualPrio)

    // Test against want3
    for x, test := range tests {
        got := amgr.GetBestLocalAddress(&test.remoteAddr)

@@ -1,17 +0,0 @@
#!/bin/sh

# This script uses gocov to generate a test coverage report.
# The gocov tool may be obtained with the following command:
#   go get github.com/axw/gocov/gocov
#
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.

# Check for gocov.
type gocov >/dev/null 2>&1
if [ $? -ne 0 ]; then
    echo >&2 "This script requires the gocov tool."
    echo >&2 "You may obtain it with the following command:"
    echo >&2 "go get github.com/axw/gocov/gocov"
    exit 1
fi
gocov test | gocov report
@@ -1,38 +1,34 @@
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package addrmgr implements a concurrency safe Bitcoin address manager.
Package addrmgr implements a concurrency safe Kaspa address manager.

Address Manager Overview

In order to maintain the peer-to-peer Bitcoin network, there needs to be a source
of addresses to connect to as nodes come and go.  The Bitcoin protocol provides
In order to maintain the peer-to-peer Kaspa network, there needs to be a source
of addresses to connect to as nodes come and go. The Kaspa protocol provides
the getaddr and addr messages to allow peers to communicate known addresses with
each other.  However, there needs to be a mechanism to store those results and
select peers from them.  It is also important to note that remote peers can't
each other. However, there needs to be a mechanism to store those results and
select peers from them. It is also important to note that remote peers can't
be trusted to send valid peers nor attempt to provide you with only peers they
control with malicious intent.

With that in mind, this package provides a concurrency safe address manager for
caching and selecting peers in a non-deterministic manner.  The general idea is
caching and selecting peers in a non-deterministic manner. The general idea is
the caller adds addresses to the address manager and notifies it when addresses
are connected, known good, and attempted.  The caller also requests addresses as
are connected, known good, and attempted. The caller also requests addresses as
it needs them.

The address manager internally segregates the addresses into groups and
non-deterministically selects groups in a cryptographically random manner.  This
non-deterministically selects groups in a cryptographically random manner. This
reduces the chances multiple addresses from the same nets are selected, which
generally helps provide greater peer diversity, and perhaps more importantly,
drastically reduces the chances an attacker is able to coerce your peer into
only connecting to nodes they control.

The address manager also understands routability and Tor addresses and tries
hard to only return routable addresses.  In addition, it uses the information
provided by the caller about connected, known good, and attempted addresses to
periodically purge peers which no longer appear to be good peers as well as
bias the selection toward known good peers.  The general idea is to make a best
effort at only providing usable addresses.
The address manager also understands routability and tries hard to only return
routable addresses. In addition, it uses the information provided by the caller
about connected, known good, and attempted addresses to periodically purge
peers which no longer appear to be good peers as well as bias the selection
toward known good peers. The general idea is to make a best effort at only
providing usable addresses.
*/
package addrmgr

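The package comment above spells out the intended call pattern: the caller adds addresses, reports connected/good/attempted events, and requests addresses as needed. A minimal usage sketch against the API visible in this compare (the data directory and the sample IP are illustrative only):

```go
package main

import (
	"net"

	"github.com/kaspanet/kaspad/addrmgr"
	"github.com/kaspanet/kaspad/wire"
)

func main() {
	// New takes a data directory, a DNS lookup function, and an optional
	// local subnetwork ID (nil for a full node); net.LookupIP matches the
	// expected func(string) ([]net.IP, error) signature.
	amgr := addrmgr.New("/tmp/amgr-example", net.LookupIP, nil)
	amgr.Start()
	defer amgr.Stop()

	// Feed in a peer address, then report its lifecycle back.
	na := wire.NewNetAddressIPPort(net.ParseIP("203.0.113.1"), 16111, wire.SFNodeNetwork)
	amgr.AddAddress(na, na, nil)

	if ka := amgr.GetAddress(); ka != nil {
		amgr.Attempt(ka.NetAddress())   // we tried to connect
		amgr.Connected(ka.NetAddress()) // the connection succeeded
		amgr.Good(ka.NetAddress(), nil) // version exchange completed
	}
}
```
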
@@ -7,7 +7,7 @@ package addrmgr
import (
    "time"

    "github.com/daglabs/btcd/wire"
    "github.com/kaspanet/kaspad/wire"
)

func TstKnownAddressIsBad(ka *KnownAddress) bool {

@@ -7,9 +7,9 @@ package addrmgr
import (
    "time"

    "github.com/daglabs/btcd/util/subnetworkid"
    "github.com/kaspanet/kaspad/util/subnetworkid"

    "github.com/daglabs/btcd/wire"
    "github.com/kaspanet/kaspad/wire"
)

// KnownAddress tracks information about a known network address that is used
@@ -41,7 +41,7 @@ func (ka *KnownAddress) LastAttempt() time.Time {
    return ka.lastattempt
}

// chance returns the selection probability for a known address.  The priority
// chance returns the selection probability for a known address. The priority
// depends upon how recently the address has been seen, how recently it was last
// attempted and how often attempts to connect to it have failed.
func (ka *KnownAddress) chance() float64 {

@@ -9,8 +9,8 @@ import (
    "testing"
    "time"

    "github.com/daglabs/btcd/addrmgr"
    "github.com/daglabs/btcd/wire"
    "github.com/kaspanet/kaspad/addrmgr"
    "github.com/kaspanet/kaspad/wire"
)

func TestChance(t *testing.T) {

@@ -5,15 +5,9 @@
package addrmgr

import (
    "github.com/btcsuite/btclog"
    "github.com/daglabs/btcd/logger"
    "github.com/kaspanet/kaspad/logger"
    "github.com/kaspanet/kaspad/util/panics"
)

// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger

func init() {
    log, _ = logger.Get(logger.SubsystemTags.ADXR)
}
var log, _ = logger.Get(logger.SubsystemTags.ADXR)
var spawn = panics.GoroutineWrapperFunc(log)

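The init-function logger setup above collapses into package-level variables, and the new `spawn` helper built from `panics.GoroutineWrapperFunc` replaces bare `go` statements elsewhere in this compare (see the `Start` hunk earlier, where `go a.addressHandler()` became `spawn(a.addressHandler)`). The kaspad helper itself is not shown in this compare; a hypothetical sketch of what such a panic-guarding wrapper generally looks like:

```go
package main

import "log"

// goroutineWrapperFunc is a made-up stand-in, not kaspad's implementation:
// it returns a spawn function whose goroutines recover and log panics
// instead of silently taking the process down.
func goroutineWrapperFunc(logf func(format string, v ...interface{})) func(func()) {
	return func(f func()) {
		go func() {
			defer func() {
				if r := recover(); r != nil {
					logf("goroutine panicked: %v", r)
				}
			}()
			f()
		}()
	}
}

func main() {
	spawn := goroutineWrapperFunc(log.Printf)
	done := make(chan struct{})
	spawn(func() {
		defer close(done)
		// work that must not kill the process on panic
	})
	<-done
}
```
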
@@ -5,11 +5,11 @@
|
||||
package addrmgr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/daglabs/btcd/config"
|
||||
"net"
|
||||
|
||||
"github.com/daglabs/btcd/wire"
|
||||
"github.com/kaspanet/kaspad/config"
|
||||
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -72,19 +72,6 @@ var (
|
||||
// rfc6598Net specifies the IPv4 block as defined by RFC6598 (100.64.0.0/10)
|
||||
rfc6598Net = ipNet("100.64.0.0", 10, 32)
|
||||
|
||||
// onionCatNet defines the IPv6 address block used to support Tor.
|
||||
// bitcoind encodes a .onion address as a 16 byte number by decoding the
|
||||
// address prior to the .onion (i.e. the key hash) base32 into a ten
|
||||
// byte number. It then stores the first 6 bytes of the address as
|
||||
// 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43.
|
||||
//
|
||||
// This is the same range used by OnionCat, which is part part of the
|
||||
// RFC4193 unique local IPv6 range.
|
||||
//
|
||||
// In summary the format is:
|
||||
// { magic 6 bytes, 10 bytes base32 decode of key hash }
|
||||
onionCatNet = ipNet("fd87:d87e:eb43::", 48, 128)
|
||||
|
||||
// zero4Net defines the IPv4 address block for address staring with 0
|
||||
// (0.0.0.0/8).
|
||||
zero4Net = ipNet("0.0.0.0", 8, 32)
|
||||
@@ -110,14 +97,6 @@ func IsLocal(na *wire.NetAddress) bool {
|
||||
return na.IP.IsLoopback() || zero4Net.Contains(na.IP)
|
||||
}
|
||||
|
||||
// IsOnionCatTor returns whether or not the passed address is in the IPv6 range
|
||||
// used by bitcoin to support Tor (fd87:d87e:eb43::/48). Note that this range
|
||||
// is the same range used by OnionCat, which is part of the RFC4193 unique local
|
||||
// IPv6 range.
|
||||
func IsOnionCatTor(na *wire.NetAddress) bool {
|
||||
return onionCatNet.Contains(na.IP)
|
||||
}
|
||||
|
||||
// IsRFC1918 returns whether or not the passed address is part of the IPv4
|
||||
// private network address space as defined by RFC1918 (10.0.0.0/8,
|
||||
// 172.16.0.0/12, or 192.168.0.0/16).
|
||||
@@ -209,7 +188,7 @@ func IsRFC6598(na *wire.NetAddress) bool {
|
||||
return rfc6598Net.Contains(na.IP)
|
||||
}
|
||||
|
||||
// IsValid returns whether or not the passed address is valid. The address is
// considered invalid under the following circumstances:
// IPv4: It is either a zero or all bits set address.
// IPv6: It is either a zero or RFC3849 documentation address.
@@ -221,23 +200,22 @@ func IsValid(na *wire.NetAddress) bool {
}

// IsRoutable returns whether or not the passed address is routable over
// the public internet. This is true as long as the address is valid and is not
// in any reserved ranges.
func IsRoutable(na *wire.NetAddress) bool {
	if config.ActiveNetParams().AcceptUnroutable {
		return true
	if config.ActiveConfig().NetParams().AcceptUnroutable {
		return !IsLocal(na)
	}

	return IsValid(na) && !(IsRFC1918(na) || IsRFC2544(na) ||
		IsRFC3927(na) || IsRFC4862(na) || IsRFC3849(na) ||
		IsRFC4843(na) || IsRFC5737(na) || IsRFC6598(na) ||
		IsLocal(na) || (IsRFC4193(na) && !IsOnionCatTor(na)))
		IsLocal(na) || (IsRFC4193(na)))
}

// GroupKey returns a string representing the network group an address is part
// of. This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
// "local" for a local address, the string "tor:key" where key is the /4 of the
// onion address for Tor address, and the string "unroutable" for an unroutable
// "local" for a local address, and the string "unroutable" for an unroutable
// address.
func GroupKey(na *wire.NetAddress) string {
	if IsLocal(na) {
@@ -269,14 +247,10 @@ func GroupKey(na *wire.NetAddress) string {
		}
		return ip.Mask(net.CIDRMask(16, 32)).String()
	}
	if IsOnionCatTor(na) {
		// group is keyed off the first 4 bits of the actual onion key.
		return fmt.Sprintf("tor:%d", na.IP[6]&((1<<4)-1))
	}

	// OK, so now we know ourselves to be an IPv6 address.
	// bitcoind uses /32 for everything, except for Hurricane Electric's
	// (he.net) IP range, which it uses /36 for.
	// We use /32 for everything, except for Hurricane Electric's
	// (he.net) IP range, which we use /36 for.
	bits := 32
	if heNet.Contains(na.IP) {
		bits = 36

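To make the behavior change concrete, here is a hedged usage sketch, not part of the diff: it mirrors the test setup shown below to install an active config, then groups one routable IPv4 address and one OnionCat address, which this change now buckets as unroutable. The example addresses are arbitrary; the API names are taken from the diff itself.

```go
package main

import (
	"fmt"
	"net"

	"github.com/kaspanet/kaspad/addrmgr"
	"github.com/kaspanet/kaspad/config"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/wire"
)

func main() {
	// GroupKey reaches IsRoutable, which consults the active config,
	// so a config must be set first (mirroring the tests in this diff).
	config.SetActiveConfig(&config.Config{
		Flags: &config.Flags{
			NetworkFlags: config.NetworkFlags{
				ActiveNetParams: &dagconfig.SimnetParams},
		},
	})

	ipv4 := wire.NewNetAddressIPPort(net.ParseIP("173.194.115.53"), 16111, wire.SFNodeNetwork)
	fmt.Println(addrmgr.GroupKey(ipv4)) // IPv4 groups by /16: "173.194.0.0"

	onion := wire.NewNetAddressIPPort(net.ParseIP("fd87:d87e:eb43:1234::5678"), 16111, wire.SFNodeNetwork)
	fmt.Println(addrmgr.GroupKey(onion)) // "unroutable" now that the Tor case is gone
}
```
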
@@ -5,16 +5,27 @@
package addrmgr_test

import (
	"github.com/kaspanet/kaspad/config"
	"github.com/kaspanet/kaspad/dagconfig"
	"net"
	"testing"

	"github.com/daglabs/btcd/addrmgr"
	"github.com/daglabs/btcd/wire"
	"github.com/kaspanet/kaspad/addrmgr"
	"github.com/kaspanet/kaspad/wire"
)

// TestIPTypes ensures the various functions which determine the type of an IP
// address based on RFCs work as intended.
func TestIPTypes(t *testing.T) {
	originalActiveCfg := config.ActiveConfig()
	config.SetActiveConfig(&config.Config{
		Flags: &config.Flags{
			NetworkFlags: config.NetworkFlags{
				ActiveNetParams: &dagconfig.SimnetParams},
		},
	})
	defer config.SetActiveConfig(originalActiveCfg)

	type ipTest struct {
		in wire.NetAddress
		rfc1918 bool
@@ -39,7 +50,7 @@ func TestIPTypes(t *testing.T) {
	rfc4193, rfc4380, rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598,
	local, valid, routable bool) ipTest {
	nip := net.ParseIP(ip)
	na := *wire.NewNetAddressIPPort(nip, 8333, wire.SFNodeNetwork)
	na := *wire.NewNetAddressIPPort(nip, 16111, wire.SFNodeNetwork)
	test := ipTest{na, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964, rfc4193, rfc4380,
		rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598, local, valid, routable}
	return test
@@ -145,6 +156,15 @@ func TestIPTypes(t *testing.T) {
// TestGroupKey tests the GroupKey function to ensure it properly groups various
// IP addresses.
func TestGroupKey(t *testing.T) {
	originalActiveCfg := config.ActiveConfig()
	config.SetActiveConfig(&config.Config{
		Flags: &config.Flags{
			NetworkFlags: config.NetworkFlags{
				ActiveNetParams: &dagconfig.SimnetParams},
		},
	})
	defer config.SetActiveConfig(originalActiveCfg)

	tests := []struct {
		name string
		ip string
@@ -179,9 +199,9 @@ func TestGroupKey(t *testing.T) {
		{name: "ipv6 rfc6145 translated ipv4", ip: "::ffff:0:0c01:0203", expected: "12.1.0.0"},

		// Tor.
		{name: "ipv6 tor onioncat", ip: "fd87:d87e:eb43:1234::5678", expected: "tor:2"},
		{name: "ipv6 tor onioncat 2", ip: "fd87:d87e:eb43:1245::6789", expected: "tor:2"},
		{name: "ipv6 tor onioncat 3", ip: "fd87:d87e:eb43:1345::6789", expected: "tor:3"},
		{name: "ipv6 tor onioncat", ip: "fd87:d87e:eb43:1234::5678", expected: "unroutable"},
		{name: "ipv6 tor onioncat 2", ip: "fd87:d87e:eb43:1245::6789", expected: "unroutable"},
		{name: "ipv6 tor onioncat 3", ip: "fd87:d87e:eb43:1345::6789", expected: "unroutable"},

		// IPv6 normal.
		{name: "ipv6 normal", ip: "2602:100::1", expected: "2602:100::"},

@@ -1,62 +0,0 @@

github.com/conformal/btcd/addrmgr/network.go GroupKey 100.00% (23/23)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.reset 100.00% (6/6)
github.com/conformal/btcd/addrmgr/network.go IsRFC5737 100.00% (4/4)
github.com/conformal/btcd/addrmgr/network.go IsRFC1918 100.00% (4/4)
github.com/conformal/btcd/addrmgr/addrmanager.go New 100.00% (3/3)
github.com/conformal/btcd/addrmgr/addrmanager.go NetAddressKey 100.00% (2/2)
github.com/conformal/btcd/addrmgr/network.go IsRFC4862 100.00% (1/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.numAddresses 100.00% (1/1)
github.com/conformal/btcd/addrmgr/log.go init 100.00% (1/1)
github.com/conformal/btcd/addrmgr/log.go DisableLog 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go ipNet 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsIPv4 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsLocal 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsOnionCatTor 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC2544 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3849 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3927 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3964 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4193 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4380 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4843 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6052 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6145 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6598 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsValid 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRoutable 100.00% (1/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetBestLocalAddress 94.74% (18/19)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddLocalAddress 90.91% (10/11)
github.com/conformal/btcd/addrmgr/addrmanager.go getReachabilityFrom 51.52% (17/33)
github.com/conformal/btcd/addrmgr/addrmanager.go ipString 50.00% (2/4)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetAddress 9.30% (4/43)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.deserializePeers 0.00% (0/50)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Good 0.00% (0/44)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.savePeers 0.00% (0/39)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.updateAddress 0.00% (0/30)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.expireNew 0.00% (0/22)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddressCache 0.00% (0/16)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.HostToNetAddress 0.00% (0/15)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getNewBucket 0.00% (0/15)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddressByIP 0.00% (0/14)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getTriedBucket 0.00% (0/14)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.chance 0.00% (0/13)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.loadPeers 0.00% (0/11)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.isBad 0.00% (0/11)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Connected 0.00% (0/10)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.addressHandler 0.00% (0/9)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.pickTried 0.00% (0/8)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.DeserializeNetAddress 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Stop 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Attempt 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Start 0.00% (0/6)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddresses 0.00% (0/4)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NeedMoreAddresses 0.00% (0/3)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NumAddresses 0.00% (0/3)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddress 0.00% (0/3)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.LastAttempt 0.00% (0/1)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.NetAddress 0.00% (0/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.find 0.00% (0/1)
github.com/conformal/btcd/addrmgr/log.go UseLogger 0.00% (0/1)
github.com/conformal/btcd/addrmgr --------------------------------- 21.04% (113/537)

@@ -1,33 +1,18 @@
blockchain
==========

[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/daglabs/btcd/blockchain)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/blockchain)

Package blockchain implements bitcoin block handling and chain selection rules.
The test coverage is currently only around 60%, but will be increasing over
time. See `test_coverage.txt` for the gocov coverage report. Alternatively, if
you are running a POSIX OS, you can run the `cov_report.sh` script for a
real-time report. Package blockchain is licensed under the liberal ISC license.
Package blockdag implements Kaspa block handling, organization of the blockDAG,
block sorting and UTXO-set maintenance.
The test coverage is currently only around 75%, but will be increasing over
time.

There is an associated blog post about the release of this package
[here](https://blog.conformal.com/btcchain-the-bitcoin-chain-package-from-bctd/).
## Kaspad BlockDAG Processing Overview

This package has intentionally been designed so it can be used as a standalone
package for any projects needing to handle processing of blocks into the bitcoin
block chain.

## Installation and Updating

```bash
$ go get -u github.com/daglabs/btcd/blockchain
```

## Bitcoin Chain Processing Overview

Before a block is allowed into the block chain, it must go through an intensive
series of validation rules. The following list serves as a general outline of
Before a block is allowed into the block DAG, it must go through an intensive
those rules to provide some intuition into what is going on under the hood, but
is by no means exhaustive:

@@ -35,69 +20,22 @@ is by no means exhaustive:
- Perform a series of sanity checks on the block and its transactions such as
  verifying proof of work, timestamps, number and character of transactions,
  transaction amounts, script complexity, and merkle root calculations
- Compare the block against predetermined checkpoints for expected timestamps
  and difficulty based on elapsed time since the checkpoint
- Save the most recent orphan blocks for a limited time in case their parent
  blocks become available
- Stop processing if the block is an orphan as the rest of the processing
  depends on the block's position within the block chain
  blocks become available.
- Save blocks from the future for delayed processing
- Stop processing if the block is an orphan or delayed as the rest of the
  processing depends on the block's position within the block chain
- Make sure the block does not violate finality rules
- Perform a series of more thorough checks that depend on the block's position
  within the block chain such as verifying block difficulties adhere to
  within the blockDAG such as verifying block difficulties adhere to
  difficulty retarget rules, timestamps are after the median of the last
  several blocks, all transactions are finalized, checkpoint blocks match, and
  block versions are in line with the previous blocks
- Determine how the block fits into the chain and perform different actions
  accordingly in order to ensure any side chains which have higher difficulty
  than the main chain become the new main chain
- When a block is being connected to the main chain (either through
  reorganization of a side chain to the main chain or just extending the
  main chain), perform further checks on the block's transactions such as
  verifying transaction duplicates, script complexity for the combination of
  connected scripts, coinbase maturity, double spends, and connected
  transaction values
- Determine how the block fits into the DAG and perform different actions
  accordingly
- Run the transaction scripts to verify the spender is allowed to spend the
  coins
- Run GhostDAG to fit the block in a canonical sorting
- Build the block's UTXO Set, as well as update the global UTXO Set accordingly
- Insert the block into the block database

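A hedged sketch of driving this pipeline, using the `DAGSetup` test helper and the three-value `ProcessBlock` signature that appear later in this diff; the `block` variable is assumed to come from the caller, and the helper names are taken from the diff rather than from a stable public API:

```go
// Minimal sketch: set up a DAG instance and feed it one block.
dag, teardownFunc, err := blockdag.DAGSetup("example", blockdag.Config{
	DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
	panic(err)
}
defer teardownFunc()

isOrphan, isDelayed, err := dag.ProcessBlock(block, blockdag.BFNone)
if err != nil {
	// the block violated a consensus rule
}
_ = isOrphan  // true: parents unknown, block stashed until they arrive
_ = isDelayed // true: timestamp too far in the future, processing deferred
```
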
## Examples

* [ProcessBlock Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-BlockChain-ProcessBlock)
  Demonstrates how to create a new chain instance and use ProcessBlock to
  attempt to add a block to the chain. This example intentionally
  attempts to insert a duplicate genesis block to illustrate how an invalid
  block is handled.

* [CompactToBig Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-CompactToBig)
  Demonstrates how to convert the compact "bits" in a block header which
  represent the target difficulty to a big integer and display it using the
  typical hex notation.

* [BigToCompact Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-BigToCompact)
  Demonstrates how to convert a target difficulty into the
  compact "bits" in a block header which represent that target difficulty.

## GPG Verification Key

All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:

- Download the public key from the Conformal website at
  https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt

- Import the public key into your GPG keyring:
  ```bash
  gpg --import GIT-GPG-KEY-conformal.txt
  ```

- Verify the release tag with the following command where `TAG_NAME` is a
  placeholder for the specific tag:
  ```bash
  git tag -v TAG_NAME
  ```

## License

Package blockchain is licensed under the [copyfree](http://copyfree.org) ISC
License.

@@ -6,81 +6,105 @@ package blockdag

import (
	"fmt"

	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util"
	"github.com/pkg/errors"
)

func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error {
	blockHeader := &block.MsgBlock().Header
	newNode, _ := dag.newBlockNode(blockHeader, newBlockSet())
	newNode.status = statusInvalidAncestor
	dag.index.AddNode(newNode)
	return dag.index.flushToDB()
}

// maybeAcceptBlock potentially accepts a block into the block DAG. It
// performs several validation checks which depend on its position within
// the block DAG before adding it. The block is expected to have already
// gone through ProcessBlock before calling this function with it.
//
// The flags are also passed to checkBlockContext and connectToDAG. See
// their documentation for how the flags modify their behavior.
//
// This function MUST be called with the dagLock held (for writes).
func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error {
	// The height of this block is one more than the referenced previous
	// block.
	parents, err := lookupParentNodes(block, dag)
	if err != nil {
		var ruleErr RuleError
		if ok := errors.As(err, &ruleErr); ok && ruleErr.ErrorCode == ErrInvalidAncestorBlock {
			err := dag.addNodeToIndexWithInvalidAncestor(block)
			if err != nil {
				return err
			}
		}
		return err
	}

	bluestParent := parents.bluest()
	blockHeight := int32(0)
	if !block.IsGenesis() {
		blockHeight = parents.maxHeight() + 1
	}
	block.SetHeight(blockHeight)

	// The block must pass all of the validation rules which depend on the
	// position of the block within the block DAG.
	err = dag.checkBlockContext(block, parents, bluestParent, flags)
	if err != nil {
		return err
	}

	// Insert the block into the database if it's not already there. Even
	// though it is possible the block will ultimately fail to connect, it
	// has already passed all proof-of-work and validity tests which means
	// it would be prohibitively expensive for an attacker to fill up the
	// disk with a bunch of blocks that fail to connect. This is necessary
	// since it allows block download to be decoupled from the much more
	// expensive connection logic. It also has some other nice properties
	// such as making blocks that never become part of the DAG or
	// blocks that fail to connect available for further analysis.
	err = dag.db.Update(func(dbTx database.Tx) error {
		return dbStoreBlock(dbTx, block)
	})
	err = dag.checkBlockContext(block, parents, flags)
	if err != nil {
		return err
	}

	// Create a new block node for the block and add it to the node index.
	blockHeader := &block.MsgBlock().Header
	newNode := newBlockNode(blockHeader, parents, dag.dagParams.K)
	newNode, selectedParentAnticone := dag.newBlockNode(&block.MsgBlock().Header, parents)
	newNode.status = statusDataStored

	dag.index.AddNode(newNode)
	err = dag.index.flushToDB()

	// Insert the block into the database if it's not already there. Even
	// though it is possible the block will ultimately fail to connect, it
	// has already passed all proof-of-work and validity tests which means
	// it would be prohibitively expensive for an attacker to fill up the
	// disk with a bunch of blocks that fail to connect. This is necessary
	// since it allows block download to be decoupled from the much more
	// expensive connection logic. It also has some other nice properties
	// such as making blocks that never become part of the DAG or
	// blocks that fail to connect available for further analysis.
	err = dag.db.Update(func(dbTx database.Tx) error {
		err := dbStoreBlock(dbTx, block)
		if err != nil {
			return err
		}
		return dag.index.flushToDBWithTx(dbTx)
	})
	if err != nil {
		return err
	}

	// Make sure that all the block's transactions are finalized
	fastAdd := flags&BFFastAdd == BFFastAdd
	bluestParent := parents.bluest()
	if !fastAdd {
		if err := dag.validateAllTxsFinalized(block, newNode, bluestParent); err != nil {
			return err
		}
	}

	block.SetBlueScore(newNode.blueScore)

	// Connect the passed block to the DAG. This also handles validation of the
	// transaction scripts.
	err = dag.addBlock(newNode, parents, block, flags)
	chainUpdates, err := dag.addBlock(newNode, block, selectedParentAnticone, flags)
	if err != nil {
		return err
	}

	// Notify the caller that the new block was accepted into the block
	// DAG. The caller would typically want to react by relaying the
	// inventory to other peers.
	dag.dagLock.Unlock()
	dag.sendNotification(NTBlockAdded, block)
	dag.sendNotification(NTBlockAdded, &BlockAddedNotificationData{
		Block: block,
		WasUnorphaned: flags&BFWasUnorphaned != 0,
	})
	if len(chainUpdates.addedChainBlockHashes) > 0 {
		dag.sendNotification(NTChainChanged, &ChainChangedNotificationData{
			RemovedChainBlockHashes: chainUpdates.removedChainBlockHashes,
			AddedChainBlockHashes: chainUpdates.addedChainBlockHashes,
		})
	}
	dag.dagLock.Lock()

	return nil
@@ -90,14 +114,14 @@ func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error)
	header := block.MsgBlock().Header
	parentHashes := header.ParentHashes

	nodes := newSet()
	nodes := newBlockSet()
	for _, parentHash := range parentHashes {
		node := blockDAG.index.LookupNode(parentHash)
		if node == nil {
			str := fmt.Sprintf("parent block %s is unknown", parentHashes)
			str := fmt.Sprintf("parent block %s is unknown", parentHash)
			return nil, ruleError(ErrParentBlockUnknown, str)
		} else if blockDAG.index.NodeStatus(node).KnownInvalid() {
			str := fmt.Sprintf("parent block %s is known to be invalid", parentHashes)
			str := fmt.Sprintf("parent block %s is known to be invalid", parentHash)
			return nil, ruleError(ErrInvalidAncestorBlock, str)
		}

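Several hunks in this file and the test below replace direct type assertions on RuleError with errors.As. A short in-package sketch of the pattern, for reference (the branch bodies are placeholders):

```go
// Sketch: inspecting a possibly wrapped consensus error with errors.As
// (github.com/pkg/errors delegates As to the standard library).
var ruleErr RuleError
if errors.As(err, &ruleErr) {
	if ruleErr.ErrorCode == ErrParentBlockUnknown {
		// react to the specific rule violation, e.g. request the parents
	}
} else {
	// not a RuleError: database failures and other unexpected errors
}
```
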
@@ -2,30 +2,27 @@ package blockdag

import (
	"errors"
	"strings"
	"path/filepath"
	"testing"

	"bou.ke/monkey"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
	"github.com/kaspanet/kaspad/dagconfig"
)

func TestMaybeAcceptBlockErrors(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
		DAGParams: &dagconfig.SimNetParams,
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
		t.Fatalf("TestMaybeAcceptBlockErrors: Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc()

	dag.TestSetBlockRewardMaturity(1)
	dag.TestSetCoinbaseMaturity(0)

	// Test rejecting the block if its parents are missing
	orphanBlockFile := "blk_3B.dat"
	loadedBlocks, err := loadBlocks(orphanBlockFile)
	loadedBlocks, err := LoadBlocks(filepath.Join("testdata/", orphanBlockFile))
	if err != nil {
		t.Fatalf("TestMaybeAcceptBlockErrors: "+
			"Error loading file '%s': %s\n", orphanBlockFile, err)
@@ -37,8 +34,8 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
			"Expected: %s, got: <nil>", ErrParentBlockUnknown)
	}
	ruleErr, ok := err.(RuleError)
	if !ok {
	var ruleErr RuleError
	if ok := errors.As(err, &ruleErr); !ok {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
			"Expected RuleError but got %s", err)
	} else if ruleErr.ErrorCode != ErrParentBlockUnknown {
@@ -48,7 +45,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {

	// Test rejecting the block if its parents are invalid
	blocksFile := "blk_0_to_4.dat"
	blocks, err := loadBlocks(blocksFile)
	blocks, err := LoadBlocks(filepath.Join("testdata/", blocksFile))
	if err != nil {
		t.Fatalf("TestMaybeAcceptBlockErrors: "+
			"Error loading file '%s': %s\n", blocksFile, err)
@@ -56,10 +53,13 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {

	// Add a valid block and mark it as invalid
	block1 := blocks[1]
	isOrphan, err := dag.ProcessBlock(block1, BFNone)
	isOrphan, isDelayed, err := dag.ProcessBlock(block1, BFNone)
	if err != nil {
		t.Fatalf("TestMaybeAcceptBlockErrors: Valid block unexpectedly returned an error: %s", err)
	}
	if isDelayed {
		t.Fatalf("TestMaybeAcceptBlockErrors: block 1 is too far in the future")
	}
	if isOrphan {
		t.Fatalf("TestMaybeAcceptBlockErrors: incorrectly returned block 1 is an orphan")
	}
@@ -72,8 +72,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
			"Expected: %s, got: <nil>", ErrInvalidAncestorBlock)
	}
	ruleErr, ok = err.(RuleError)
	if !ok {
	if ok := errors.As(err, &ruleErr); !ok {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
			"Expected RuleError but got %s", err)
	} else if ruleErr.ErrorCode != ErrInvalidAncestorBlock {
@@ -92,8 +91,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
			"Expected: %s, got: <nil>", ErrUnexpectedDifficulty)
	}
	ruleErr, ok = err.(RuleError)
	if !ok {
	if ok := errors.As(err, &ruleErr); !ok {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
			"Expected RuleError but got %s", err)
	} else if ruleErr.ErrorCode != ErrUnexpectedDifficulty {
@@ -103,37 +101,4 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {

	// Set block2's bits back to valid for next tests
	block2.MsgBlock().Header.Bits = originalBits

	// Test rejecting the node due to database error
	databaseErrorMessage := "database error"
	guard := monkey.Patch(dbStoreBlock, func(dbTx database.Tx, block *util.Block) error {
		return errors.New(databaseErrorMessage)
	})
	defer guard.Unpatch()
	err = dag.maybeAcceptBlock(block2, BFNone)
	if err == nil {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+
			"Expected: %s, got: <nil>", databaseErrorMessage)
	}
	if !strings.Contains(err.Error(), databaseErrorMessage) {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+
			"Unexpected error. Want: %s, got: %s", databaseErrorMessage, err)
	}
	guard.Unpatch()

	// Test rejecting the node due to index error
	indexErrorMessage := "index error"
	guard = monkey.Patch((*blockIndex).flushToDB, func(_ *blockIndex) error {
		return errors.New(indexErrorMessage)
	})
	defer guard.Unpatch()
	err = dag.maybeAcceptBlock(block2, BFNone)
	if err == nil {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
			"Expected %s, got: <nil>", indexErrorMessage)
	}
	if !strings.Contains(err.Error(), indexErrorMessage) {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
			"Unexpected error. Want: %s, got: %s", indexErrorMessage, err)
	}
}

@@ -2,8 +2,6 @@ package blockdag

import (
	"container/heap"

	"github.com/daglabs/btcd/dagconfig/daghash"
)

// baseHeap is an implementation for heap.Interface that sorts blocks by their height
@@ -28,61 +26,53 @@ func (h *baseHeap) Pop() interface{} {
type upHeap struct{ baseHeap }

func (h upHeap) Less(i, j int) bool {
	if h.baseHeap[i].height == h.baseHeap[j].height {
		return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) < 0
	}

	return h.baseHeap[i].height < h.baseHeap[j].height
	return h.baseHeap[i].less(h.baseHeap[j])
}

// downHeap extends baseHeap to include Less operation that traverses from top to bottom
type downHeap struct{ baseHeap }

func (h downHeap) Less(i, j int) bool {
	if h.baseHeap[i].height == h.baseHeap[j].height {
		return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) > 0
	}

	return h.baseHeap[i].height > h.baseHeap[j].height
	return !h.baseHeap[i].less(h.baseHeap[j])
}

// BlockHeap represents a mutable heap of Blocks, sorted by their height
type BlockHeap struct {
// blockHeap represents a mutable heap of Blocks, sorted by their height
type blockHeap struct {
	impl heap.Interface
}

// NewDownHeap initializes and returns a new BlockHeap
func NewDownHeap() BlockHeap {
	h := BlockHeap{impl: &downHeap{}}
// newDownHeap initializes and returns a new blockHeap
func newDownHeap() blockHeap {
	h := blockHeap{impl: &downHeap{}}
	heap.Init(h.impl)
	return h
}

// NewUpHeap initializes and returns a new BlockHeap
func NewUpHeap() BlockHeap {
	h := BlockHeap{impl: &upHeap{}}
// newUpHeap initializes and returns a new blockHeap
func newUpHeap() blockHeap {
	h := blockHeap{impl: &upHeap{}}
	heap.Init(h.impl)
	return h
}

// pop removes the block with lowest height from this heap and returns it
func (bh BlockHeap) pop() *blockNode {
func (bh blockHeap) pop() *blockNode {
	return heap.Pop(bh.impl).(*blockNode)
}

// Push pushes the block onto the heap
func (bh BlockHeap) Push(block *blockNode) {
func (bh blockHeap) Push(block *blockNode) {
	heap.Push(bh.impl, block)
}

// pushSet pushes a blockset to the heap.
func (bh BlockHeap) pushSet(bs blockSet) {
	for _, block := range bs {
func (bh blockHeap) pushSet(bs blockSet) {
	for block := range bs {
		heap.Push(bh.impl, block)
	}
}

// Len returns the length of this heap
func (bh BlockHeap) Len() int {
func (bh blockHeap) Len() int {
	return bh.impl.Len()
}

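For orientation, a hedged in-package sketch of the renamed heap API; `someBlockSet` and `process` are hypothetical placeholders. Note that the new Less implementations delegate to `blockNode.less`, which (per the blocknode.go hunk later in this diff) compares blue scores with hashes as a tie-breaker:

```go
// Sketch: drain blocks in ascending order of blueScore (ties broken by
// hash via blockNode.less); newDownHeap would yield descending order.
h := newUpHeap()
h.pushSet(someBlockSet) // push an entire blockSet at once
for h.Len() > 0 {
	node := h.pop() // lowest remaining block first
	process(node)
}
```
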
@@ -3,19 +3,28 @@ package blockdag
import (
	"testing"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/util/daghash"
)

// TestBlockHeap tests pushing, popping, and determining the length of the heap.
func TestBlockHeap(t *testing.T) {
	block0Header := dagconfig.MainNetParams.GenesisBlock.Header
	block0 := newBlockNode(&block0Header, newSet(), dagconfig.MainNetParams.K)
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestBlockHeap", Config{
		DAGParams: &dagconfig.MainnetParams,
	})
	if err != nil {
		t.Fatalf("TestBlockHeap: Failed to setup DAG instance: %s", err)
	}
	defer teardownFunc()

	block0Header := dagconfig.MainnetParams.GenesisBlock.Header
	block0, _ := dag.newBlockNode(&block0Header, newBlockSet())

	block100000Header := Block100000.Header
	block100000 := newBlockNode(&block100000Header, setFromSlice(block0), dagconfig.MainNetParams.K)
	block100000, _ := dag.newBlockNode(&block100000Header, blockSetFromSlice(block0))

	block0smallHash := newBlockNode(&block0Header, newSet(), dagconfig.MainNetParams.K)
	block0smallHash, _ := dag.newBlockNode(&block0Header, newBlockSet())
	block0smallHash.hash = &daghash.Hash{}

	tests := []struct {
@@ -81,7 +90,7 @@ func TestBlockHeap(t *testing.T) {
	}

	for _, test := range tests {
		dHeap := NewDownHeap()
		dHeap := newDownHeap()
		for _, block := range test.toPush {
			dHeap.Push(block)
		}
@@ -99,7 +108,7 @@ func TestBlockHeap(t *testing.T) {
				"Expected: %v, got: %v", test.name, test.expectedPopDown, poppedBlock)
		}

		uHeap := NewUpHeap()
		uHeap := newUpHeap()
		for _, block := range test.toPush {
			uHeap.Push(block)
		}

136
blockdag/blockidhash.go
Normal file
@@ -0,0 +1,136 @@
package blockdag

import (
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/pkg/errors"
)

var (
	// idByHashIndexBucketName is the name of the db bucket used to house
	// the block hash -> block id index.
	idByHashIndexBucketName = []byte("idbyhashidx")

	// hashByIDIndexBucketName is the name of the db bucket used to house
	// the block id -> block hash index.
	hashByIDIndexBucketName = []byte("hashbyididx")

	currentBlockIDKey = []byte("currentblockid")
)

// -----------------------------------------------------------------------------
// This is a mapping between block hashes and unique IDs. The ID
// is simply a sequentially incremented uint64 that is used instead of block hash
// for the indexers. This is useful because it is only 8 bytes versus 32 bytes
// hashes and thus saves a ton of space when a block is referenced in an index.
// It consists of three buckets: the first bucket maps the hash of each
// block to the unique ID and the second maps that ID back to the block hash.
// The third bucket contains the last received block ID, and is used
// when starting the node to check that the enabled indexes are up to date
// with the latest received block, and if not, initiate recovery process.
//
// The serialized format for keys and values in the block hash to ID bucket is:
//   <hash> = <ID>
//
//   Field   Type           Size
//   hash    daghash.Hash   32 bytes
//   ID      uint64         8 bytes
//   -----
//   Total: 40 bytes
//
// The serialized format for keys and values in the ID to block hash bucket is:
//   <ID> = <hash>
//
//   Field   Type           Size
//   ID      uint64         8 bytes
//   hash    daghash.Hash   32 bytes
//   -----
//   Total: 40 bytes
//
// -----------------------------------------------------------------------------

const blockIDSize = 8 // 8 bytes for block ID

// DBFetchBlockIDByHash uses an existing database transaction to retrieve the
// block id for the provided hash from the index.
func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error) {
	hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
	serializedID := hashIndex.Get(hash[:])
	if serializedID == nil {
		return 0, errors.Errorf("no entry in the block ID index for block with hash %s", hash)
	}

	return DeserializeBlockID(serializedID), nil
}

// DBFetchBlockHashBySerializedID uses an existing database transaction to
// retrieve the hash for the provided serialized block id from the index.
func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
	idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
	hashBytes := idIndex.Get(serializedID)
	if hashBytes == nil {
		return nil, errors.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
	}

	var hash daghash.Hash
	copy(hash[:], hashBytes)
	return &hash, nil
}

// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
// the index entries for the hash to id and id to hash mappings for the provided
// values.
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, serializedID []byte) error {
	// Add the block hash to ID mapping to the index.
	meta := dbTx.Metadata()
	hashIndex := meta.Bucket(idByHashIndexBucketName)
	if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
		return err
	}

	// Add the block ID to hash mapping to the index.
	idIndex := meta.Bucket(hashByIDIndexBucketName)
	return idIndex.Put(serializedID[:], hash[:])
}

// DBFetchCurrentBlockID returns the last known block ID.
func DBFetchCurrentBlockID(dbTx database.Tx) uint64 {
	serializedID := dbTx.Metadata().Get(currentBlockIDKey)
	if serializedID == nil {
		return 0
	}
	return DeserializeBlockID(serializedID)
}

// DeserializeBlockID returns a deserialized block id
func DeserializeBlockID(serializedID []byte) uint64 {
	return byteOrder.Uint64(serializedID)
}

// SerializeBlockID returns a serialized block id
func SerializeBlockID(blockID uint64) []byte {
	serializedBlockID := make([]byte, blockIDSize)
	byteOrder.PutUint64(serializedBlockID, blockID)
	return serializedBlockID
}

// DBFetchBlockHashByID uses an existing database transaction to retrieve the
// hash for the provided block id from the index.
func DBFetchBlockHashByID(dbTx database.Tx, id uint64) (*daghash.Hash, error) {
	return DBFetchBlockHashBySerializedID(dbTx, SerializeBlockID(id))
}

func createBlockID(dbTx database.Tx, blockHash *daghash.Hash) (uint64, error) {
	currentBlockID := DBFetchCurrentBlockID(dbTx)
	newBlockID := currentBlockID + 1
	serializedNewBlockID := SerializeBlockID(newBlockID)
	err := dbTx.Metadata().Put(currentBlockIDKey, serializedNewBlockID)
	if err != nil {
		return 0, err
	}
	err = dbPutBlockIDIndexEntry(dbTx, blockHash, serializedNewBlockID)
	if err != nil {
		return 0, err
	}
	return newBlockID, nil
}

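A hedged sketch of the 8-byte round-trip these helpers provide; `byteOrder` is whatever endianness the package defines elsewhere, so the exact byte layout is not asserted here:

```go
// In-package sketch: IDs are fixed-width 8-byte values, so the pair
// must round-trip exactly regardless of the chosen byte order.
serialized := SerializeBlockID(42) // len(serialized) == blockIDSize == 8
id := DeserializeBlockID(serialized)
// id == 42
```
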
@@ -7,16 +7,13 @@ package blockdag
import (
	"sync"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util/daghash"
)

// blockIndex provides facilities for keeping track of an in-memory index of the
// block chain. Although the name block chain suggests a single chain of
// blocks, it is actually a tree-shaped structure where any node can have
// multiple children. However, there can only be one active branch which does
// indeed form a chain from the tip all the way back to the genesis block.
// block DAG.
type blockIndex struct {
	// The following fields are set when the instance is created and can't
	// be changed afterwards, so there is no need to protect them with a
@@ -29,7 +26,7 @@ type blockIndex struct {
	dirty map[*blockNode]struct{}
}

// newBlockIndex returns a new empty instance of a block index. The index will
// be dynamically populated as block nodes are loaded from the database and
// manually added.
func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
@@ -46,19 +43,19 @@ func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
// This function is safe for concurrent access.
func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool {
	bi.RLock()
	defer bi.RUnlock()
	_, hasBlock := bi.index[*hash]
	bi.RUnlock()
	return hasBlock
}

// LookupNode returns the block node identified by the provided hash. It will
// return nil if there is no entry for the hash.
//
// This function is safe for concurrent access.
func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode {
	bi.RLock()
	defer bi.RUnlock()
	node := bi.index[*hash]
	bi.RUnlock()
	return node
}

@@ -68,9 +65,9 @@ func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode {
// This function is safe for concurrent access.
func (bi *blockIndex) AddNode(node *blockNode) {
	bi.Lock()
	defer bi.Unlock()
	bi.addNode(node)
	bi.dirty[node] = struct{}{}
	bi.Unlock()
}

// addNode adds the provided node to the block index, but does not mark it as
@@ -86,8 +83,8 @@ func (bi *blockIndex) addNode(node *blockNode) {
// This function is safe for concurrent access.
func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
	bi.RLock()
	defer bi.RUnlock()
	status := node.status
	bi.RUnlock()
	return status
}

@@ -98,9 +95,9 @@ func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
// This function is safe for concurrent access.
func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
	bi.Lock()
	defer bi.Unlock()
	node.status |= flags
	bi.dirty[node] = struct{}{}
	bi.Unlock()
}

// UnsetStatusFlags flips the provided status flags on the block node to off,
@@ -109,35 +106,37 @@ func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
// This function is safe for concurrent access.
func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
	bi.Lock()
	defer bi.Unlock()
	node.status &^= flags
	bi.dirty[node] = struct{}{}
	bi.Unlock()
}

// flushToDB writes all dirty block nodes to the database. If all writes
// succeed, this clears the dirty set.
func (bi *blockIndex) flushToDB() error {
	bi.Lock()
	if len(bi.dirty) == 0 {
		bi.Unlock()
		return nil
	}

	err := bi.db.Update(func(dbTx database.Tx) error {
		for node := range bi.dirty {
			err := dbStoreBlockNode(dbTx, node)
			if err != nil {
				return err
			}
		}
		return nil
	return bi.db.Update(func(dbTx database.Tx) error {
		return bi.flushToDBWithTx(dbTx)
	})
}

	// If write was successful, clear the dirty set.
	if err == nil {
		bi.dirty = make(map[*blockNode]struct{})
// flushToDBWithTx writes all dirty block nodes to the database. If all
// writes succeed, this clears the dirty set.
func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
	bi.Lock()
	defer bi.Unlock()
	if len(bi.dirty) == 0 {
		return nil
	}

	bi.Unlock()
	return err
	for node := range bi.dirty {
		err := dbStoreBlockNode(dbTx, node)
		if err != nil {
			return err
		}
	}
	return nil
}

func (bi *blockIndex) clearDirtyEntries() {
	bi.dirty = make(map[*blockNode]struct{})
}

@@ -1,58 +1,27 @@
package blockdag

import (
	"errors"
	"strings"
	"testing"
	"time"

	"bou.ke/monkey"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/database"
	"github.com/kaspanet/kaspad/dagconfig"
)

func TestAncestorErrors(t *testing.T) {
	node := newTestNode(newSet(), int32(0x10000000), 0, time.Unix(0, 0), dagconfig.MainNetParams.K)
	node.height = 2
	// Create a new database and DAG instance to run tests against.
	params := dagconfig.SimnetParams
	dag, teardownFunc, err := DAGSetup("TestAncestorErrors", Config{
		DAGParams: &params,
	})
	if err != nil {
		t.Fatalf("TestAncestorErrors: Failed to setup DAG instance: %s", err)
	}
	defer teardownFunc()

	node := newTestNode(dag, newBlockSet(), int32(0x10000000), 0, time.Unix(0, 0))
	node.blueScore = 2
	ancestor := node.SelectedAncestor(3)
	if ancestor != nil {
		t.Errorf("TestAncestorErrors: Ancestor() unexpectedly returned a node. Expected: <nil>")
	}
}

func TestFlushToDBErrors(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestFlushToDBErrors", Config{
		DAGParams: &dagconfig.SimNetParams,
	})
	if err != nil {
		t.Fatalf("TestFlushToDBErrors: Failed to setup DAG instance: %s", err)
	}
	defer teardownFunc()

	// Call flushToDB without anything to flush. This should succeed
	err = dag.index.flushToDB()
	if err != nil {
		t.Errorf("TestFlushToDBErrors: flushToDB without anything to flush: "+
			"Unexpected flushToDB error: %s", err)
	}

	// Mark the genesis block as dirty
	dag.index.SetStatusFlags(dag.genesis, statusValid)

	// Test flushToDB failure due to database error
	databaseErrorMessage := "database error"
	guard := monkey.Patch(dbStoreBlockNode, func(_ database.Tx, _ *blockNode) error {
		return errors.New(databaseErrorMessage)
	})
	defer guard.Unpatch()
	err = dag.index.flushToDB()
	if err == nil {
		t.Errorf("TestFlushToDBErrors: flushToDB failure due to database error: "+
			"Expected: %s, got: <nil>", databaseErrorMessage)
	}
	if !strings.Contains(err.Error(), databaseErrorMessage) {
		t.Errorf("TestFlushToDBErrors: flushToDB failure due to database error: "+
			"Unexpected flushToDB error. Expected: %s, got: %s", databaseErrorMessage, err)
	}
}

102
blockdag/blocklocator.go
Normal file
@@ -0,0 +1,102 @@
package blockdag

import (
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/pkg/errors"
)

// BlockLocator is used to help locate a specific block. The algorithm for
// building the block locator is to add block hashes in reverse order on the
// block's selected parent chain until the desired stop block is reached.
// In order to keep the list of locator hashes to a reasonable number of entries,
// the step between each entry is doubled each loop iteration to exponentially
// decrease the number of hashes as a function of the distance from the block
// being located.
//
// For example, assume a selected parent chain with IDs as depicted below, and the
// stop block is genesis:
//	genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
//
// The block locator for block 17 would be the hashes of blocks:
//	[17 16 14 11 7 2 genesis]
type BlockLocator []*daghash.Hash

// BlockLocatorFromHashes returns a block locator from high and low hash.
// See BlockLocator for details on the algorithm used to create a block locator.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockLocatorFromHashes(highHash, lowHash *daghash.Hash) (BlockLocator, error) {
	dag.dagLock.RLock()
	defer dag.dagLock.RUnlock()

	highNode := dag.index.LookupNode(highHash)
	lowNode := dag.index.LookupNode(lowHash)

	return dag.blockLocator(highNode, lowNode)
}

// blockLocator returns a block locator for the passed high and low nodes.
// See the BlockLocator type comments for more details.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) blockLocator(highNode, lowNode *blockNode) (BlockLocator, error) {
	// We use the selected parent of the high node, so the
	// block locator won't contain the high node.
	highNode = highNode.selectedParent

	node := highNode
	step := uint64(1)
	locator := make(BlockLocator, 0)
	for node != nil {
		locator = append(locator, node.hash)

		// Nothing more to add once the low node has been added.
		if node.blueScore <= lowNode.blueScore {
			if node != lowNode {
				return nil, errors.Errorf("highNode and lowNode are " +
					"not in the same selected parent chain.")
			}
			break
		}

		// Calculate blueScore of previous node to include ensuring the
		// final node is lowNode.
		nextBlueScore := node.blueScore - step
		if nextBlueScore < lowNode.blueScore {
			nextBlueScore = lowNode.blueScore
		}

		// walk backwards through the nodes to the correct ancestor.
		node = node.SelectedAncestor(nextBlueScore)

		// Double the distance between included hashes.
		step *= 2
	}

	return locator, nil
}

// FindNextLocatorBoundaries returns the lowest unknown block locator hash
// and the highest known block locator hash. This is used to create the
// next block locator to find the highest shared known chain block with the
// sync peer.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (highHash, lowHash *daghash.Hash) {
	// Find the most recent locator block hash in the DAG. In the case none of
	// the hashes in the locator are in the DAG, fall back to the genesis block.
	lowNode := dag.genesis
	nextBlockLocatorIndex := int64(len(locator) - 1)
	for i, hash := range locator {
		node := dag.index.LookupNode(hash)
		if node != nil {
			lowNode = node
			nextBlockLocatorIndex = int64(i) - 1
			break
		}
	}
	if nextBlockLocatorIndex < 0 {
		return nil, lowNode.hash
	}
	return locator[nextBlockLocatorIndex], lowNode.hash
}

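To see the exponential step-down in isolation, here is a standalone sketch that walks blue scores down from 17 to 0 the way the blockLocator loop does. It is a simplification that treats blue scores as consecutive heights and SelectedAncestor as exact arithmetic, so it illustrates the doubling mechanism rather than reproducing the doc comment's exact example:

```go
// Toy walk of the locator stepping: each hop doubles, so the locator
// stays logarithmic in the distance back to the low block.
scores := []uint64{}
step := uint64(1)
for score := uint64(17); ; step *= 2 {
	scores = append(scores, score)
	if score == 0 {
		break // the low block (genesis here) is always included last
	}
	if score < step {
		score = 0 // clamp, ensuring the final entry is the low block
	} else {
		score -= step
	}
}
// scores == [17 16 14 10 2 0]
```
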
@@ -6,13 +6,13 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sort"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/pkg/errors"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"github.com/daglabs/btcd/dagconfig/daghash"
|
||||
"github.com/daglabs/btcd/util"
|
||||
"github.com/daglabs/btcd/wire"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// blockStatus is a bit field representing the validation state of the block.
|
||||
@@ -31,11 +31,6 @@ const (
|
||||
// statusInvalidAncestor indicates that one of the block's ancestors has
|
||||
// has failed validation, thus the block is also invalid.
|
||||
statusInvalidAncestor
|
||||
|
||||
// statusNone indicates that the block has no validation state flags set.
|
||||
//
|
||||
// NOTE: This must be defined last in order to avoid influencing iota.
|
||||
statusNone blockStatus = 0
|
||||
)
|
||||
|
||||
// KnownValid returns whether the block is known to be valid. This will return
|
||||
@@ -57,8 +52,8 @@ func (status blockStatus) KnownInvalid() bool {
|
||||
type blockNode struct {
|
||||
// NOTE: Additions, deletions, or modifications to the order of the
|
||||
// definitions in this struct should not be changed without considering
|
||||
// how it affects alignment on 64-bit platforms. The current order is
|
||||
// specifically crafted to result in minimal padding. There will be
|
||||
// how it affects alignment on 64-bit platforms. The current order is
|
||||
// specifically crafted to result in minimal padding. There will be
|
||||
// hundreds of thousands of these in memory, so a few extra bytes of
|
||||
// padding adds up.
|
||||
|
||||
@@ -78,173 +73,143 @@ type blockNode struct {
|
||||
// blueScore is the count of all the blue blocks in this block's past
|
||||
blueScore uint64
|
||||
|
||||
// diff is the UTXO representation of the block
|
||||
// A block's UTXO is reconstituted by applying diffWith on every block in the chain of diffChildren
|
||||
// from the virtual block down to the block. See diffChild
|
||||
diff *UTXODiff
|
||||
|
||||
// diffChild is the child that diff will be built from. See diff
|
||||
diffChild *blockNode
|
||||
// bluesAnticoneSizes is a map holding the set of blues affected by this block and their
|
||||
// modified blue anticone size.
|
||||
bluesAnticoneSizes map[*blockNode]dagconfig.KType
|
||||
|
||||
// hash is the double sha 256 of the block.
|
||||
hash *daghash.Hash
|
||||
|
||||
// workSum is the total amount of work in the DAG up to and including
|
||||
// this node.
|
||||
workSum *big.Int
|
||||
|
||||
// height is the position in the block DAG.
|
||||
height int32
|
||||
|
||||
// chainHeight is the number of hops you need to go down the selected parent chain in order to get to the genesis block.
|
||||
chainHeight uint32
|
||||
|
||||
// Some fields from block headers to aid in best chain selection and
|
||||
// reconstructing headers from memory. These must be treated as
|
||||
// immutable and are intentionally ordered to avoid padding on 64-bit
|
||||
// platforms.
|
||||
version int32
|
||||
bits uint32
|
||||
nonce uint64
|
||||
timestamp int64
|
||||
hashMerkleRoot *daghash.Hash
|
||||
idMerkleRoot *daghash.Hash
|
||||
// Some fields from block headers to aid in reconstructing headers
|
||||
// from memory. These must be treated as immutable and are intentionally
|
||||
// ordered to avoid padding on 64-bit platforms.
|
||||
version int32
|
||||
bits uint32
|
||||
nonce uint64
|
||||
timestamp int64
|
||||
hashMerkleRoot *daghash.Hash
|
||||
acceptedIDMerkleRoot *daghash.Hash
|
||||
utxoCommitment *daghash.Hash
|
||||
|
||||
// status is a bitfield representing the validation state of the block. The
|
||||
// status field, unlike the other fields, may be written to and so should
|
||||
// only be accessed using the concurrent-safe NodeStatus method on
|
||||
// blockIndex once the node has been added to the global index.
|
||||
status blockStatus
|
||||
|
||||
// isFinalized determines whether the node is below the finality point.
|
||||
isFinalized bool
|
||||
}
// initBlockNode initializes a block node from the given header and parent nodes,
// calculating the height and workSum from the respective fields on the first parent.
// This function is NOT safe for concurrent access. It must only be called when
// initially creating a node.
func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) {
    *node = blockNode{
        parents:   parents,
        children:  make(blockSet),
        workSum:   big.NewInt(0),
        timestamp: time.Now().Unix(),
// newBlockNode returns a new block node for the given block header and parents, and the
// anticone of its selected parent (parent with highest blue score).
// selectedParentAnticone is used to update reachability data we store for future reachability queries.
// This function is NOT safe for concurrent access.
func (dag *BlockDAG) newBlockNode(blockHeader *wire.BlockHeader, parents blockSet) (node *blockNode, selectedParentAnticone []*blockNode) {
    node = &blockNode{
        parents:            parents,
        children:           make(blockSet),
        blueScore:          math.MaxUint64, // Initialized to the max value to avoid collisions with the genesis block
        timestamp:          dag.AdjustedTime().Unix(),
        bluesAnticoneSizes: make(map[*blockNode]dagconfig.KType),
    }

    // blockHeader is nil only for the virtual block
    if blockHeader != nil {
        node.hash = blockHeader.BlockHash()
        node.workSum = util.CalcWork(blockHeader.Bits)
        node.version = blockHeader.Version
        node.bits = blockHeader.Bits
        node.nonce = blockHeader.Nonce
        node.timestamp = blockHeader.Timestamp.Unix()
        node.hashMerkleRoot = blockHeader.HashMerkleRoot
        node.idMerkleRoot = blockHeader.IDMerkleRoot
        node.acceptedIDMerkleRoot = blockHeader.AcceptedIDMerkleRoot
        node.utxoCommitment = blockHeader.UTXOCommitment
    } else {
        node.hash = &daghash.ZeroHash
    }

    if len(parents) > 0 {
        node.blues, node.selectedParent, node.blueScore = phantom(node, phantomK)
        node.height = calculateNodeHeight(node)
        node.chainHeight = calculateChainHeight(node)
        node.workSum = node.workSum.Add(node.selectedParent.workSum, node.workSum)
    if len(parents) == 0 {
        // The genesis block is defined to have a blueScore of 0
        node.blueScore = 0
        return node, nil
    }
    }

func calculateNodeHeight(node *blockNode) int32 {
    return node.parents.maxHeight() + 1
}

func calculateChainHeight(node *blockNode) uint32 {
    if node.isGenesis() {
        return 0
    selectedParentAnticone, err := dag.ghostdag(node)
    if err != nil {
        panic(errors.Wrap(err, "unexpected error in GHOSTDAG"))
    }
    return node.selectedParent.chainHeight + 1
}

// newBlockNode returns a new block node for the given block header and parent
// nodes, calculating the height and workSum from the respective fields on the
// parent. This function is NOT safe for concurrent access.
func newBlockNode(blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) *blockNode {
    var node blockNode
    initBlockNode(&node, blockHeader, parents, phantomK)
    return &node
    return node, selectedParentAnticone
}

// updateParentsChildren updates the node's parents to point to new node
func (node *blockNode) updateParentsChildren() {
    for _, parent := range node.parents {
    for parent := range node.parents {
        parent.children.add(node)
    }
}

func (node *blockNode) less(other *blockNode) bool {
    if node.blueScore == other.blueScore {
        return daghash.Less(node.hash, other.hash)
    }

    return node.blueScore < other.blueScore
}
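less gives blockNodes a strict total order: blue score first, hash as the tie-breaker, so ordering is deterministic even between blocks with equal scores. A hedged usage sketch (the helper name is hypothetical, not from the repository):

// sortNodes orders a slice by the comparator above: ascending blue score,
// ties broken by hash. Because less is a strict total order on distinct
// nodes, the resulting order is fully deterministic.
func sortNodes(nodes []*blockNode) {
    sort.Slice(nodes, func(i, j int) bool {
        return nodes[i].less(nodes[j])
    })
}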
// Header constructs a block header from the node and returns it.
//
// This function is safe for concurrent access.
func (node *blockNode) Header() *wire.BlockHeader {
    // No lock is needed because all accessed fields are immutable.
    return &wire.BlockHeader{
        Version:        node.version,
        ParentHashes:   node.ParentHashes(),
        HashMerkleRoot: node.hashMerkleRoot,
        IDMerkleRoot:   node.idMerkleRoot,
        Timestamp:      time.Unix(node.timestamp, 0),
        Bits:           node.bits,
        Nonce:          node.nonce,
        Version:              node.version,
        ParentHashes:         node.ParentHashes(),
        HashMerkleRoot:       node.hashMerkleRoot,
        AcceptedIDMerkleRoot: node.acceptedIDMerkleRoot,
        UTXOCommitment:       node.utxoCommitment,
        Timestamp:            time.Unix(node.timestamp, 0),
        Bits:                 node.bits,
        Nonce:                node.nonce,
    }
}

// SelectedAncestor returns the ancestor block node at the provided height by following
// the selected chain backwards from this node. The returned block will be nil when a
// height is requested that is after the height of the passed node or is less than zero.
// SelectedAncestor returns the ancestor block node at the provided blue score by following
// the selected-parents chain backwards from this node. The returned block will be nil when a
// blue score is requested that is higher than the blue score of the passed node.
//
// This function is safe for concurrent access.
func (node *blockNode) SelectedAncestor(height int32) *blockNode {
    if height < 0 || height > node.height {
func (node *blockNode) SelectedAncestor(blueScore uint64) *blockNode {
    if blueScore > node.blueScore {
        return nil
    }

    n := node
    for ; n != nil && n.height != height; n = n.selectedParent {
        // Intentionally left blank
    for n != nil && n.blueScore > blueScore {
        n = n.selectedParent
    }

    return n
}

// RelativeAncestor returns the ancestor block node a relative 'distance' blocks
// before this node. This is equivalent to calling Ancestor with the node's
// height minus provided distance.
// RelativeAncestor returns the ancestor block node a relative 'distance' of
// blue blocks before this node. This is equivalent to calling Ancestor with
// the node's blue score minus provided distance.
//
// This function is safe for concurrent access.
func (node *blockNode) RelativeAncestor(distance int32) *blockNode {
    return node.SelectedAncestor(node.height - distance)
func (node *blockNode) RelativeAncestor(distance uint64) *blockNode {
    return node.SelectedAncestor(node.blueScore - distance)
}
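Note that the new signatures take uint64 blue scores, so node.blueScore - distance would wrap around whenever distance exceeds the node's blue score. A cautious caller-side sketch (a hypothetical helper, not from the repository):

// relativeAncestorSafe guards the uint64 subtraction before delegating to
// RelativeAncestor. Without the guard, an oversized distance wraps around
// to a huge blue score, which SelectedAncestor rejects with nil anyway,
// but stating the intent up front is clearer.
func relativeAncestorSafe(node *blockNode, distance uint64) *blockNode {
    if distance > node.blueScore {
        return nil
    }
    return node.RelativeAncestor(distance)
}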
// PastMedianTime returns the median time of the previous few blocks
// CalcPastMedianTime returns the median time of the previous few blocks
// prior to, and including, the block node.
//
// This function is safe for concurrent access.
func (node *blockNode) PastMedianTime() time.Time {
    // Create a slice of the previous few block timestamps used to calculate
    // the median per the number defined by the constant medianTimeBlocks.
    // If there aren't enough blocks yet - pad remaining with genesis block's timestamp.
    timestamps := make([]int64, medianTimeBlocks)
    iterNode := node
    for i := 0; i < medianTimeBlocks; i++ {
        timestamps[i] = iterNode.timestamp

        if !iterNode.isGenesis() {
            iterNode = iterNode.selectedParent
        }
func (node *blockNode) PastMedianTime(dag *BlockDAG) time.Time {
    window := blueBlockWindow(node, 2*dag.TimestampDeviationTolerance-1)
    medianTimestamp, err := window.medianTimestamp()
    if err != nil {
        panic(fmt.Sprintf("blueBlockWindow: %s", err))
    }

    sort.Sort(timeSorter(timestamps))

    // Note: This works when medianTimeBlockCount is an odd number.
    // If it is to be changed to an even number - must take average of two middle values
    // Since medianTimeBlockCount is a constant, we can skip the odd/even check
    medianTimestamp := timestamps[medianTimeBlocks/2]
    return time.Unix(medianTimestamp, 0)
}

@@ -257,11 +222,11 @@ func (node *blockNode) isGenesis() bool {
    return len(node.parents) == 0
}

func (node *blockNode) finalityScore() uint64 {
    return node.blueScore / FinalityInterval
func (node *blockNode) finalityScore(dag *BlockDAG) uint64 {
    return node.blueScore / uint64(dag.dagParams.FinalityInterval)
}

// String returns a string that contains the block hash and height.
// String returns a string that contains the block hash.
func (node blockNode) String() string {
    return fmt.Sprintf("%s (%d)", node.hash, node.height)
    return node.hash.String()
}
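finalityScore buckets blocks into fixed-width finality windows by integer division, so two blocks fall into the same window exactly when their quotients agree. A small illustration with an assumed interval of 100 (illustrative only; the real value comes from dagParams.FinalityInterval):

const exampleFinalityInterval = 100 // hypothetical value for illustration

// sameFinalityWindow reports whether two blue scores land in the same
// finality window: scores 0..99 map to window 0, 100..199 to window 1,
// and so on.
func sameFinalityWindow(blueScoreA, blueScoreB uint64) bool {
    return blueScoreA/exampleFinalityInterval == blueScoreB/exampleFinalityInterval
}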
@@ -1,86 +1,41 @@
package blockdag

import (
    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/kaspanet/kaspad/util/daghash"
    "testing"
)

func TestChainHeight(t *testing.T) {
    phantomK := uint32(2)
    buildNode := buildNodeGenerator(phantomK, true)
// This test is to ensure the size BlueAnticoneSizesSize is serialized to the size of KType.
// We verify that by serializing and deserializing the block while making sure that we stay within the expected range.
func TestBlueAnticoneSizesSize(t *testing.T) {
    dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", Config{
        DAGParams: &dagconfig.SimnetParams,
    })
    if err != nil {
        t.Fatalf("TestBlueAnticoneSizesSize: Failed to setup DAG instance: %s", err)
    }
    defer teardownFunc()

    node0 := buildNode(setFromSlice())
    node1 := buildNode(setFromSlice(node0))
    node2 := buildNode(setFromSlice(node0))
    node3 := buildNode(setFromSlice(node0))
    node4 := buildNode(setFromSlice(node1, node2, node3))
    node5 := buildNode(setFromSlice(node1, node2, node3))
    node6 := buildNode(setFromSlice(node1, node2, node3))
    node7 := buildNode(setFromSlice(node0))
    node8 := buildNode(setFromSlice(node7))
    node9 := buildNode(setFromSlice(node8))
    node10 := buildNode(setFromSlice(node9, node6))
    k := dagconfig.KType(0)
    k--

    // Because nodes 7 & 8 were mined secretly, node10's selected
    // parent will be node6, although node9 is higher. So in this
    // case, node10.height and node10.chainHeight will be different

    tests := []struct {
        node                *blockNode
        expectedChainHeight uint32
    }{
        {
            node:                node0,
            expectedChainHeight: 0,
        },
        {
            node:                node1,
            expectedChainHeight: 1,
        },
        {
            node:                node2,
            expectedChainHeight: 1,
        },
        {
            node:                node3,
            expectedChainHeight: 1,
        },
        {
            node:                node4,
            expectedChainHeight: 2,
        },
        {
            node:                node5,
            expectedChainHeight: 2,
        },
        {
            node:                node6,
            expectedChainHeight: 2,
        },
        {
            node:                node7,
            expectedChainHeight: 1,
        },
        {
            node:                node8,
            expectedChainHeight: 2,
        },
        {
            node:                node9,
            expectedChainHeight: 3,
        },
        {
            node:                node10,
            expectedChainHeight: 3,
        },
    if k < dagconfig.KType(0) {
        t.Fatalf("KType must be unsigned")
    }

    for _, test := range tests {
        if test.node.chainHeight != test.expectedChainHeight {
            t.Errorf("block %v expected chain height %v but got %v", test.node, test.expectedChainHeight, test.node.chainHeight)
        }
        if calculateChainHeight(test.node) != test.expectedChainHeight {
            t.Errorf("block %v expected calculated chain height %v but got %v", test.node, test.expectedChainHeight, test.node.chainHeight)
        }
    blockHeader := dagconfig.SimnetParams.GenesisBlock.Header
    node, _ := dag.newBlockNode(&blockHeader, newBlockSet())
    fakeBlue := &blockNode{hash: &daghash.Hash{1}}
    dag.index.AddNode(fakeBlue)
    // Setting maxKType to maximum value of KType.
    // As we verify above that KType is unsigned we can be sure that maxKType is indeed the maximum value of KType.
    maxKType := ^dagconfig.KType(0)
    node.bluesAnticoneSizes[fakeBlue] = maxKType
    serializedNode, _ := serializeBlockNode(node)
    deserializedNode, _ := dag.deserializeBlockNode(serializedNode)
    if deserializedNode.bluesAnticoneSizes[fakeBlue] != maxKType {
        t.Fatalf("TestBlueAnticoneSizesSize: BlueAnticoneSize should not change when deserializing. Expected: %v but got %v",
            maxKType, deserializedNode.bluesAnticoneSizes[fakeBlue])
    }

}
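The maxKType trick above works for any unsigned integer type: ^T(0) flips every bit of zero, producing the all-ones pattern, which is the maximum value of T regardless of its width. A standalone illustration:

package main

import "fmt"

func main() {
    // ^x is bitwise complement in Go, so ^T(0) is all ones: the maximum
    // value of an unsigned type T. This is the same trick the test uses
    // to compute maxKType without hard-coding KType's width.
    fmt.Println(^uint8(0))  // 255
    fmt.Println(^uint16(0)) // 65535
    fmt.Println(^uint32(0)) // 4294967295
}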
@@ -3,96 +3,72 @@ package blockdag
import (
    "strings"

    "github.com/daglabs/btcd/dagconfig/daghash"
    "github.com/kaspanet/kaspad/util/daghash"
)

// blockSet implements a basic unsorted set of blocks
type blockSet map[daghash.Hash]*blockNode
type blockSet map[*blockNode]struct{}

// newSet creates a new, empty BlockSet
func newSet() blockSet {
    return map[daghash.Hash]*blockNode{}
// newBlockSet creates a new, empty BlockSet
func newBlockSet() blockSet {
    return map[*blockNode]struct{}{}
}

// setFromSlice converts a slice of blocks into an unordered set represented as map
func setFromSlice(blocks ...*blockNode) blockSet {
    set := newSet()
    for _, block := range blocks {
        set.add(block)
// blockSetFromSlice converts a slice of blockNodes into an unordered set represented as map
func blockSetFromSlice(nodes ...*blockNode) blockSet {
    set := newBlockSet()
    for _, node := range nodes {
        set.add(node)
    }
    return set
}

// maxHeight returns the height of the highest block in the block set
func (bs blockSet) maxHeight() int32 {
    var maxHeight int32
    for _, node := range bs {
        if maxHeight < node.height {
            maxHeight = node.height
        }
    }
    return maxHeight
// add adds a blockNode to this BlockSet
func (bs blockSet) add(node *blockNode) {
    bs[node] = struct{}{}
}

func (bs blockSet) highest() *blockNode {
    var highest *blockNode
    for _, node := range bs {
        if highest == nil ||
            highest.height < node.height ||
            (highest.height == node.height && daghash.Less(node.hash, highest.hash)) {

            highest = node
        }
    }
    return highest
}

// add adds a block to this BlockSet
func (bs blockSet) add(block *blockNode) {
    bs[*block.hash] = block
}

// remove removes a block from this BlockSet, if exists
// Does nothing if this set does not contain the block
func (bs blockSet) remove(block *blockNode) {
    delete(bs, *block.hash)
// remove removes a blockNode from this BlockSet, if exists
// Does nothing if this set does not contain the blockNode
func (bs blockSet) remove(node *blockNode) {
    delete(bs, node)
}

// clone clones this block set
func (bs blockSet) clone() blockSet {
    clone := newSet()
    for _, block := range bs {
        clone.add(block)
    clone := newBlockSet()
    for node := range bs {
        clone.add(node)
    }
    return clone
}
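The new blockSet keys the map by *blockNode and stores empty structs, so membership is identity-based and values cost no memory; this is also why the loops change from "for _, block := range bs" to "for node := range bs". A minimal restatement of the idiom in isolation:

// nodeSet shows the empty-struct set idiom adopted above: struct{} values
// occupy zero bytes, and keying by pointer makes add, remove, and contains
// single map operations with identity (pointer) semantics. Iteration reads
// the keys directly, since the values carry no information.
type nodeSet map[*blockNode]struct{}

func (s nodeSet) add(n *blockNode)           { s[n] = struct{}{} }
func (s nodeSet) remove(n *blockNode)        { delete(s, n) }
func (s nodeSet) contains(n *blockNode) bool { _, ok := s[n]; return ok }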
// subtract returns the difference between the BlockSet and another BlockSet
func (bs blockSet) subtract(other blockSet) blockSet {
    diff := newSet()
    for _, block := range bs {
        if !other.contains(block) {
            diff.add(block)
    diff := newBlockSet()
    for node := range bs {
        if !other.contains(node) {
            diff.add(node)
        }
    }
    return diff
}

// addSet adds all blocks in other set to this set
// addSet adds all blockNodes in other set to this set
func (bs blockSet) addSet(other blockSet) {
    for _, block := range other {
        bs.add(block)
    for node := range other {
        bs.add(node)
    }
}

// addSlice adds provided slice to this set
func (bs blockSet) addSlice(slice []*blockNode) {
    for _, block := range slice {
        bs.add(block)
    for _, node := range slice {
        bs.add(node)
    }
}

// union returns a BlockSet that contains all blocks included in this set,
// union returns a BlockSet that contains all blockNodes included in this set,
// the other set, or both
func (bs blockSet) union(other blockSet) blockSet {
    union := bs.clone()
@@ -102,39 +78,16 @@ func (bs blockSet) union(other blockSet) blockSet {
    return union
}

// contains returns true iff this set contains block
func (bs blockSet) contains(block *blockNode) bool {
    _, ok := bs[*block.hash]
// contains returns true iff this set contains node
func (bs blockSet) contains(node *blockNode) bool {
    _, ok := bs[node]
    return ok
}

// containsHash returns true iff this set contains a block hash
func (bs blockSet) containsHash(hash *daghash.Hash) bool {
    _, ok := bs[*hash]
    return ok
}

// hashesEqual returns true if the given hashes are equal to the hashes
// of the blocks in this set.
// NOTE: The given hash slice must not contain duplicates.
func (bs blockSet) hashesEqual(hashes []*daghash.Hash) bool {
    if len(hashes) != len(bs) {
        return false
    }

    for _, hash := range hashes {
        if _, wasFound := bs[*hash]; !wasFound {
            return false
        }
    }

    return true
}

// hashes returns the hashes of the blocks in this set.
// hashes returns the hashes of the blockNodes in this set.
func (bs blockSet) hashes() []*daghash.Hash {
    hashes := make([]*daghash.Hash, 0, len(bs))
    for _, node := range bs {
    for node := range bs {
        hashes = append(hashes, node.hash)
    }
    daghash.Sort(hashes)
@@ -143,27 +96,16 @@ func (bs blockSet) hashes() []*daghash.Hash {

func (bs blockSet) String() string {
    nodeStrs := make([]string, 0, len(bs))
    for _, node := range bs {
    for node := range bs {
        nodeStrs = append(nodeStrs, node.String())
    }
    return strings.Join(nodeStrs, ",")
}

// anyChildInSet returns true iff any child of block is contained within this set
func (bs blockSet) anyChildInSet(block *blockNode) bool {
    for _, child := range block.children {
        if bs.contains(child) {
            return true
        }
    }

    return false
}

func (bs blockSet) bluest() *blockNode {
    var bluestNode *blockNode
    var maxScore uint64
    for _, node := range bs {
    for node := range bs {
        if bluestNode == nil ||
            node.blueScore > maxScore ||
            (node.blueScore == maxScore && daghash.Less(node.hash, bluestNode.hash)) {

@@ -4,11 +4,11 @@ import (
    "reflect"
    "testing"

    "github.com/daglabs/btcd/dagconfig/daghash"
    "github.com/kaspanet/kaspad/util/daghash"
)

func TestHashes(t *testing.T) {
    bs := setFromSlice(
    bs := blockSetFromSlice(
        &blockNode{
            hash: &daghash.Hash{3},
        },
@@ -35,47 +35,6 @@ func TestHashes(t *testing.T) {
        t.Errorf("TestHashes: hashes order is %s but expected %s", hashes, expected)
    }
}
func TestBlockSetHighest(t *testing.T) {
    node1 := &blockNode{hash: &daghash.Hash{10}, height: 1}
    node2a := &blockNode{hash: &daghash.Hash{20}, height: 2}
    node2b := &blockNode{hash: &daghash.Hash{21}, height: 2}
    node3 := &blockNode{hash: &daghash.Hash{30}, height: 3}

    tests := []struct {
        name            string
        set             blockSet
        expectedHighest *blockNode
    }{
        {
            name:            "empty set",
            set:             setFromSlice(),
            expectedHighest: nil,
        },
        {
            name:            "set with one member",
            set:             setFromSlice(node1),
            expectedHighest: node1,
        },
        {
            name:            "same-height highest members in set",
            set:             setFromSlice(node2b, node1, node2a),
            expectedHighest: node2a,
        },
        {
            name:            "typical set",
            set:             setFromSlice(node2b, node3, node1, node2a),
            expectedHighest: node3,
        },
    }

    for _, test := range tests {
        highest := test.set.highest()
        if highest != test.expectedHighest {
            t.Errorf("blockSet.highest: unexpected value in test '%s'. "+
                "Expected: %v, got: %v", test.name, test.expectedHighest, highest)
        }
    }
}

func TestBlockSetSubtract(t *testing.T) {
    node1 := &blockNode{hash: &daghash.Hash{10}}
@@ -90,33 +49,33 @@ func TestBlockSetSubtract(t *testing.T) {
    }{
        {
            name:           "both sets empty",
            setA:           setFromSlice(),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(),
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(),
        },
        {
            name:           "subtract an empty set",
            setA:           setFromSlice(node1),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(node1),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "subtract from empty set",
            setA:           setFromSlice(),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(),
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(),
        },
        {
            name:           "subtract unrelated set",
            setA:           setFromSlice(node1),
            setB:           setFromSlice(node2),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(node1),
            setB:           blockSetFromSlice(node2),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "typical case",
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node2, node3),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node2, node3),
            expectedResult: blockSetFromSlice(node1),
        },
    }

@@ -142,33 +101,33 @@ func TestBlockSetAddSet(t *testing.T) {
    }{
        {
            name:           "both sets empty",
            setA:           setFromSlice(),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(),
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(),
        },
        {
            name:           "add an empty set",
            setA:           setFromSlice(node1),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(node1),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "add to empty set",
            setA:           setFromSlice(),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "add already added member",
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(node1, node2),
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(node1, node2),
        },
        {
            name:           "typical case",
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node2, node3),
            expectedResult: setFromSlice(node1, node2, node3),
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node2, node3),
            expectedResult: blockSetFromSlice(node1, node2, node3),
        },
    }

@@ -194,33 +153,33 @@ func TestBlockSetAddSlice(t *testing.T) {
    }{
        {
            name:           "add empty slice to empty set",
            set:            setFromSlice(),
            set:            blockSetFromSlice(),
            slice:          []*blockNode{},
            expectedResult: setFromSlice(),
            expectedResult: blockSetFromSlice(),
        },
        {
            name:           "add an empty slice",
            set:            setFromSlice(node1),
            set:            blockSetFromSlice(node1),
            slice:          []*blockNode{},
            expectedResult: setFromSlice(node1),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "add to empty set",
            set:            setFromSlice(),
            set:            blockSetFromSlice(),
            slice:          []*blockNode{node1},
            expectedResult: setFromSlice(node1),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "add already added member",
            set:            setFromSlice(node1, node2),
            set:            blockSetFromSlice(node1, node2),
            slice:          []*blockNode{node1},
            expectedResult: setFromSlice(node1, node2),
            expectedResult: blockSetFromSlice(node1, node2),
        },
        {
            name:           "typical case",
            set:            setFromSlice(node1, node2),
            set:            blockSetFromSlice(node1, node2),
            slice:          []*blockNode{node2, node3},
            expectedResult: setFromSlice(node1, node2, node3),
            expectedResult: blockSetFromSlice(node1, node2, node3),
        },
    }

@@ -246,33 +205,33 @@ func TestBlockSetUnion(t *testing.T) {
    }{
        {
            name:           "both sets empty",
            setA:           setFromSlice(),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(),
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(),
        },
        {
            name:           "union against an empty set",
            setA:           setFromSlice(node1),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(node1),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "union from an empty set",
            setA:           setFromSlice(),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "union with subset",
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(node1, node2),
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(node1, node2),
        },
        {
            name:           "typical case",
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node2, node3),
            expectedResult: setFromSlice(node1, node2, node3),
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node2, node3),
            expectedResult: blockSetFromSlice(node1, node2, node3),
        },
    }

@@ -284,54 +243,3 @@ func TestBlockSetUnion(t *testing.T) {
        }
    }
}

func TestBlockSetHashesEqual(t *testing.T) {
    node1 := &blockNode{hash: &daghash.Hash{10}}
    node2 := &blockNode{hash: &daghash.Hash{20}}

    tests := []struct {
        name           string
        set            blockSet
        hashes         []*daghash.Hash
        expectedResult bool
    }{
        {
            name:           "empty set, no hashes",
            set:            setFromSlice(),
            hashes:         []*daghash.Hash{},
            expectedResult: true,
        },
        {
            name:           "empty set, one hash",
            set:            setFromSlice(),
            hashes:         []*daghash.Hash{node1.hash},
            expectedResult: false,
        },
        {
            name:           "set and hashes of different length",
            set:            setFromSlice(node1, node2),
            hashes:         []*daghash.Hash{node1.hash},
            expectedResult: false,
        },
        {
            name:           "set equal to hashes",
            set:            setFromSlice(node1, node2),
            hashes:         []*daghash.Hash{node1.hash, node2.hash},
            expectedResult: true,
        },
        {
            name:           "set equal to hashes, different order",
            set:            setFromSlice(node1, node2),
            hashes:         []*daghash.Hash{node2.hash, node1.hash},
            expectedResult: true,
        },
    }

    for _, test := range tests {
        result := test.set.hashesEqual(test.hashes)
        if result != test.expectedResult {
            t.Errorf("blockSet.hashesEqual: unexpected result in test '%s'. "+
                "Expected: %t, got: %t", test.name, test.expectedResult, result)
        }
    }
}
blockdag/blockwindow.go (new file, 75 lines)
@@ -0,0 +1,75 @@
package blockdag

import (
    "github.com/kaspanet/kaspad/util"
    "github.com/pkg/errors"
    "math"
    "math/big"
    "sort"
)

type blockWindow []*blockNode

// blueBlockWindow returns a blockWindow of the given size that contains the
// blues in the past of startingNode, sorted by GHOSTDAG order.
// If the number of blues in the past of startingNode is less than windowSize,
// the window will be padded by genesis blocks to achieve a size of windowSize.
func blueBlockWindow(startingNode *blockNode, windowSize uint64) blockWindow {
    window := make(blockWindow, 0, windowSize)
    currentNode := startingNode
    for uint64(len(window)) < windowSize && currentNode.selectedParent != nil {
        if currentNode.selectedParent != nil {
            for _, blue := range currentNode.blues {
                window = append(window, blue)
                if uint64(len(window)) == windowSize {
                    break
                }
            }
            currentNode = currentNode.selectedParent
        }
    }

    if uint64(len(window)) < windowSize {
        genesis := currentNode
        for uint64(len(window)) < windowSize {
            window = append(window, genesis)
        }
    }

    return window
}
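blueBlockWindow walks down the selected-parent chain, appending each node's blues until the window is full, then pads with the genesis node it bottomed out on. A hedged usage sketch (pastMedianOf is a hypothetical helper; the window size is arbitrary):

// pastMedianOf builds a fixed-size blue-block window behind tip and
// reduces it to a median time. The genesis padding above guarantees the
// window has exactly windowSize entries, so medianTimestamp can only
// fail when windowSize is zero.
func pastMedianOf(tip *blockNode, windowSize uint64) time.Time {
    window := blueBlockWindow(tip, windowSize)
    median, err := window.medianTimestamp()
    if err != nil {
        panic(err) // unreachable for windowSize > 0
    }
    return time.Unix(median, 0)
}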
func (window blockWindow) minMaxTimestamps() (min, max int64) {
    min = math.MaxInt64
    max = 0
    for _, node := range window {
        if node.timestamp < min {
            min = node.timestamp
        }
        if node.timestamp > max {
            max = node.timestamp
        }
    }
    return
}

func (window blockWindow) averageTarget() *big.Int {
    averageTarget := big.NewInt(0)
    for _, node := range window {
        target := util.CompactToBig(node.bits)
        averageTarget.Add(averageTarget, target)
    }
    return averageTarget.Div(averageTarget, big.NewInt(int64(len(window))))
}

func (window blockWindow) medianTimestamp() (int64, error) {
    if len(window) == 0 {
        return 0, errors.New("Cannot calculate median timestamp for an empty block window")
    }
    timestamps := make([]int64, len(window))
    for i, node := range window {
        timestamps[i] = node.timestamp
    }
    sort.Sort(timeSorter(timestamps))
    return timestamps[len(timestamps)/2], nil
}
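averageTarget expands each compact "bits" value to a full big.Int target and integer-averages the results; the flooring introduced by Div is negligible because real targets are astronomically larger than the window length. The arithmetic in isolation, with small stand-in numbers:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    // Stand-ins for expanded difficulty targets; real targets are 256-bit
    // values produced by util.CompactToBig.
    targets := []*big.Int{big.NewInt(900), big.NewInt(1000), big.NewInt(1100)}
    sum := big.NewInt(0)
    for _, target := range targets {
        sum.Add(sum, target)
    }
    average := sum.Div(sum, big.NewInt(int64(len(targets))))
    fmt.Println(average) // 1000
}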
blockdag/blockwindow_test.go (new file, 157 lines)
@@ -0,0 +1,157 @@
package blockdag

import (
    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/kaspanet/kaspad/util"
    "github.com/pkg/errors"
    "reflect"
    "testing"
    "time"
)

func TestBlueBlockWindow(t *testing.T) {
    params := dagconfig.SimnetParams
    params.K = 1
    dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", Config{
        DAGParams: &params,
    })
    if err != nil {
        t.Fatalf("Failed to setup dag instance: %v", err)
    }
    defer teardownFunc()

    resetExtraNonceForTest()

    windowSize := uint64(10)
    genesisNode := dag.genesis
    blockTime := genesisNode.Header().Timestamp
    blockByIDMap := make(map[string]*blockNode)
    idByBlockMap := make(map[*blockNode]string)
    blockByIDMap["A"] = genesisNode
    idByBlockMap[genesisNode] = "A"

    blocksData := []*struct {
        parents                          []string
        id                               string //id is a virtual entity that is used only for tests so we can define relations between blocks without knowing their hash
        expectedWindowWithGenesisPadding []string
    }{
        {
            parents:                          []string{"A"},
            id:                               "B",
            expectedWindowWithGenesisPadding: []string{"A", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents:                          []string{"B"},
            id:                               "C",
            expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents:                          []string{"B"},
            id:                               "D",
            expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents:                          []string{"C", "D"},
            id:                               "E",
            expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents:                          []string{"C", "D"},
            id:                               "F",
            expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents:                          []string{"A"},
            id:                               "G",
            expectedWindowWithGenesisPadding: []string{"A", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents:                          []string{"G"},
            id:                               "H",
            expectedWindowWithGenesisPadding: []string{"G", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents:                          []string{"H", "F"},
            id:                               "I",
            expectedWindowWithGenesisPadding: []string{"F", "C", "D", "B", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents:                          []string{"I"},
            id:                               "J",
            expectedWindowWithGenesisPadding: []string{"I", "F", "C", "D", "B", "A", "A", "A", "A", "A"},
        },
        {
            parents:                          []string{"J"},
            id:                               "K",
            expectedWindowWithGenesisPadding: []string{"J", "I", "F", "C", "D", "B", "A", "A", "A", "A"},
        },
        {
            parents:                          []string{"K"},
            id:                               "L",
            expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "C", "D", "B", "A", "A", "A"},
        },
        {
            parents:                          []string{"L"},
            id:                               "M",
            expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "C", "D", "B", "A", "A"},
        },
        {
            parents:                          []string{"M"},
            id:                               "N",
            expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "C", "D", "B", "A"},
        },
        {
            parents:                          []string{"N"},
            id:                               "O",
            expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "B"},
        },
    }

    for _, blockData := range blocksData {
        blockTime = blockTime.Add(time.Second)
        parents := blockSet{}
        for _, parentID := range blockData.parents {
            parent := blockByIDMap[parentID]
            parents.add(parent)
        }

        block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
        if err != nil {
            t.Fatalf("block %v got unexpected error from PrepareBlockForTest: %v", blockData.id, err)
        }

        utilBlock := util.NewBlock(block)
        isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
        if err != nil {
            t.Fatalf("dag.ProcessBlock got unexpected error for block %v: %v", blockData.id, err)
        }
        if isDelayed {
            t.Fatalf("block %s "+
                "is too far in the future", blockData.id)
        }
        if isOrphan {
            t.Fatalf("block %v was unexpectedly orphan", blockData.id)
        }

        node := dag.index.LookupNode(utilBlock.Hash())

        blockByIDMap[blockData.id] = node
        idByBlockMap[node] = blockData.id

        window := blueBlockWindow(node, windowSize)
        if err := checkWindowIDs(window, blockData.expectedWindowWithGenesisPadding, idByBlockMap); err != nil {
            t.Errorf("Unexpected values for window for block %s: %s", blockData.id, err)
        }
    }
}

func checkWindowIDs(window []*blockNode, expectedIDs []string, idByBlockMap map[*blockNode]string) error {
    ids := make([]string, len(window))
    for i, node := range window {
        ids[i] = idByBlockMap[node]
    }
    if !reflect.DeepEqual(ids, expectedIDs) {
        return errors.Errorf("window expected to have blocks %s but got %s", expectedIDs, ids)
    }
    return nil
}
@@ -1,270 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
    "fmt"
    "time"

    "github.com/daglabs/btcd/dagconfig"
    "github.com/daglabs/btcd/dagconfig/daghash"
    "github.com/daglabs/btcd/txscript"
    "github.com/daglabs/btcd/util"
)

// CheckpointConfirmations is the number of blocks before the end of the current
// best block chain that a good checkpoint candidate must be.
const CheckpointConfirmations = 2016

// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
    hash, _ := daghash.NewHashFromStr(hexStr)
    return hash
}

// newTxIDFromStr converts the passed big-endian hex string into a
// daghash.TxID. It only differs from the one available in daghash in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, IDs.
func newTxIDFromStr(hexStr string) *daghash.TxID {
    txID, _ := daghash.NewTxIDFromStr(hexStr)
    return txID
}

// Checkpoints returns a slice of checkpoints (regardless of whether they are
// already known). When there are no checkpoints for the chain, it will return
// nil.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) Checkpoints() []dagconfig.Checkpoint {
    return dag.checkpoints
}

// HasCheckpoints returns whether this BlockDAG has checkpoints defined.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) HasCheckpoints() bool {
    return len(dag.checkpoints) > 0
}

// LatestCheckpoint returns the most recent checkpoint (regardless of whether it
// is already known). When there are no defined checkpoints for the active chain
// instance, it will return nil.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) LatestCheckpoint() *dagconfig.Checkpoint {
    if !dag.HasCheckpoints() {
        return nil
    }
    return &dag.checkpoints[len(dag.checkpoints)-1]
}

// verifyCheckpoint returns whether the passed block height and hash combination
// match the checkpoint data. It also returns true if there is no checkpoint
// data for the passed block height.
func (dag *BlockDAG) verifyCheckpoint(height int32, hash *daghash.Hash) bool {
    if !dag.HasCheckpoints() {
        return true
    }

    // Nothing to check if there is no checkpoint data for the block height.
    checkpoint, exists := dag.checkpointsByHeight[height]
    if !exists {
        return true
    }

    if !checkpoint.Hash.IsEqual(hash) {
        return false
    }

    log.Infof("Verified checkpoint at height %d/block %s", checkpoint.Height,
        checkpoint.Hash)
    return true
}

// findPreviousCheckpoint finds the most recent checkpoint that is already
// available in the downloaded portion of the block chain and returns the
// associated block node. It returns nil if a checkpoint can't be found (this
// should really only happen for blocks before the first checkpoint).
//
// This function MUST be called with the DAG lock held (for reads).
func (dag *BlockDAG) findPreviousCheckpoint() (*blockNode, error) {
    if !dag.HasCheckpoints() {
        return nil, nil
    }

    // Perform the initial search to find and cache the latest known
    // checkpoint if the best chain is not known yet or we haven't already
    // previously searched.
    checkpoints := dag.checkpoints
    numCheckpoints := len(checkpoints)
    if dag.checkpointNode == nil && dag.nextCheckpoint == nil {
        // Loop backwards through the available checkpoints to find one
        // that is already available.
        for i := numCheckpoints - 1; i >= 0; i-- {
            node := dag.index.LookupNode(checkpoints[i].Hash)
            if node == nil {
                continue
            }

            // Checkpoint found. Cache it for future lookups and
            // set the next expected checkpoint accordingly.
            dag.checkpointNode = node
            if i < numCheckpoints-1 {
                dag.nextCheckpoint = &checkpoints[i+1]
            }
            return dag.checkpointNode, nil
        }

        // No known latest checkpoint. This will only happen on blocks
        // before the first known checkpoint. So, set the next expected
        // checkpoint to the first checkpoint and return the fact there
        // is no latest known checkpoint block.
        dag.nextCheckpoint = &checkpoints[0]
        return nil, nil
    }

    // At this point we've already searched for the latest known checkpoint,
    // so when there is no next checkpoint, the current checkpoint lockin
    // will always be the latest known checkpoint.
    if dag.nextCheckpoint == nil {
        return dag.checkpointNode, nil
    }

    // When there is a next checkpoint and the height of the current best
    // chain does not exceed it, the current checkpoint lockin is still
    // the latest known checkpoint.
    if dag.selectedTip().height < dag.nextCheckpoint.Height {
        return dag.checkpointNode, nil
    }

    // We've reached or exceeded the next checkpoint height. Note that
    // once a checkpoint lockin has been reached, forks are prevented from
    // any blocks before the checkpoint, so we don't have to worry about the
    // checkpoint going away out from under us due to a chain reorganize.

    // Cache the latest known checkpoint for future lookups. Note that if
    // this lookup fails something is very wrong since the chain has already
    // passed the checkpoint which was verified as accurate before inserting
    // it.
    checkpointNode := dag.index.LookupNode(dag.nextCheckpoint.Hash)
    if checkpointNode == nil {
        return nil, AssertError(fmt.Sprintf("findPreviousCheckpoint "+
            "failed lookup of known good block node %s",
            dag.nextCheckpoint.Hash))
    }
    dag.checkpointNode = checkpointNode

    // Set the next expected checkpoint.
    checkpointIndex := -1
    for i := numCheckpoints - 1; i >= 0; i-- {
        if checkpoints[i].Hash.IsEqual(dag.nextCheckpoint.Hash) {
            checkpointIndex = i
            break
        }
    }
    dag.nextCheckpoint = nil
    if checkpointIndex != -1 && checkpointIndex < numCheckpoints-1 {
        dag.nextCheckpoint = &checkpoints[checkpointIndex+1]
    }

    return dag.checkpointNode, nil
}

// isNonstandardTransaction determines whether a transaction contains any
// scripts which are not one of the standard types.
func isNonstandardTransaction(tx *util.Tx) bool {
    // Check all of the output public key scripts for non-standard scripts.
    for _, txOut := range tx.MsgTx().TxOut {
        scriptClass := txscript.GetScriptClass(txOut.PkScript)
        if scriptClass == txscript.NonStandardTy {
            return true
        }
    }
    return false
}

// IsCheckpointCandidate returns whether or not the passed block is a good
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
//  - The block must be in the main chain
//  - The block must be at least 'CheckpointConfirmations' blocks prior to the
//    current end of the main chain
//  - The timestamps for the blocks before and after the checkpoint must have
//    timestamps which are also before and after the checkpoint, respectively
//    (due to the median time allowance this is not always the case)
//  - The block must not contain any strange transaction such as those with
//    nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
    dag.dagLock.RLock()
    defer dag.dagLock.RUnlock()

    // A checkpoint must be in the DAG.
    node := dag.index.LookupNode(block.Hash())
    if node == nil {
        return false, nil
    }

    // Ensure the height of the passed block and the entry for the block in
    // the main chain match. This should always be the case unless the
    // caller provided an invalid block.
    if node.height != block.Height() {
        return false, fmt.Errorf("passed block height of %d does not "+
            "match the main chain height of %d", block.Height(),
            node.height)
    }

    // A checkpoint must be at least CheckpointConfirmations blocks
    // before the end of the main chain.
    dagHeight := dag.selectedTip().height
    if node.height > (dagHeight - CheckpointConfirmations) {
        return false, nil
    }

    // A checkpoint must have at least one block after it.
    //
    // This should always succeed since the check above already made sure it
    // is CheckpointConfirmations back, but be safe in case the constant
    // changes.
    nextNode := node.diffChild
    if nextNode == nil {
        return false, nil
    }

    // A checkpoint must have at least one block before it.
    if &node.selectedParent == nil {
        return false, nil
    }

    // A checkpoint must have timestamps for the block and the blocks on
    // either side of it in order (due to the median time allowance this is
    // not always the case).
    prevTime := time.Unix(node.selectedParent.timestamp, 0)
    curTime := block.MsgBlock().Header.Timestamp
    nextTime := time.Unix(nextNode.timestamp, 0)
    if prevTime.After(curTime) || nextTime.Before(curTime) {
        return false, nil
    }

    // A checkpoint must have transactions that only contain standard
    // scripts.
    for _, tx := range block.Transactions() {
        if isNonstandardTransaction(tx) {
            return false, nil
        }
    }

    // All of the checks passed, so the block is a candidate.
    return true, nil
}
blockdag/coinbase.go (new file, 278 lines)
@@ -0,0 +1,278 @@
package blockdag

import (
    "bufio"
    "bytes"
    "encoding/binary"
    "github.com/kaspanet/kaspad/util/subnetworkid"
    "github.com/pkg/errors"
    "io"
    "math"

    "github.com/kaspanet/kaspad/database"
    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/util/txsort"
    "github.com/kaspanet/kaspad/wire"
)

// compactFeeData is a specialized data type to store a compact list of fees
// inside a block.
// Every transaction gets a single uint64 value, stored as a plain binary list.
// The transactions are ordered the same way they are ordered inside the block, making it easy
// to traverse every transaction in a block and extract its fee.
//
// compactFeeFactory is used to create such a list.
// compactFeeIterator is used to iterate over such a list.

type compactFeeData []byte

func (cfd compactFeeData) Len() int {
    return len(cfd) / 8
}

type compactFeeFactory struct {
    buffer *bytes.Buffer
    writer *bufio.Writer
}

func newCompactFeeFactory() *compactFeeFactory {
    buffer := bytes.NewBuffer([]byte{})
    return &compactFeeFactory{
        buffer: buffer,
        writer: bufio.NewWriter(buffer),
    }
}

func (cfw *compactFeeFactory) add(txFee uint64) error {
    return binary.Write(cfw.writer, binary.LittleEndian, txFee)
}

func (cfw *compactFeeFactory) data() (compactFeeData, error) {
    err := cfw.writer.Flush()

    return compactFeeData(cfw.buffer.Bytes()), err
}

type compactFeeIterator struct {
    reader io.Reader
}

func (cfd compactFeeData) iterator() *compactFeeIterator {
    return &compactFeeIterator{
        reader: bufio.NewReader(bytes.NewBuffer(cfd)),
    }
}

func (cfr *compactFeeIterator) next() (uint64, error) {
    var txFee uint64

    err := binary.Read(cfr.reader, binary.LittleEndian, &txFee)

    return txFee, err
}
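compactFeeData packs exactly eight little-endian bytes per transaction, which is why Len divides by 8 and why the factory and iterator are symmetric. A hedged round-trip sketch over the types above (the helper name is hypothetical):

// roundTripFees encodes fees with compactFeeFactory and decodes them with
// compactFeeIterator, returning them in the same (block) order.
func roundTripFees(fees []uint64) ([]uint64, error) {
    factory := newCompactFeeFactory()
    for _, fee := range fees {
        if err := factory.add(fee); err != nil {
            return nil, err
        }
    }
    data, err := factory.data()
    if err != nil {
        return nil, err
    }
    out := make([]uint64, 0, data.Len())
    iterator := data.iterator()
    for i := 0; i < data.Len(); i++ {
        fee, err := iterator.next()
        if err != nil {
            return nil, err
        }
        out = append(out, fee)
    }
    return out, nil
}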
// The following functions relate to storing and retrieving fee data from the database
var feeBucket = []byte("fees")

// getBluesFeeData returns the compactFeeData for all of the node's blues,
// used to calculate the fees this blockNode needs to pay
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
    bluesFeeData := make(map[daghash.Hash]compactFeeData)

    err := dag.db.View(func(dbTx database.Tx) error {
        for _, blueBlock := range node.blues {
            feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
            if err != nil {
                return errors.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
            }

            bluesFeeData[*blueBlock.hash] = feeData
        }

        return nil
    })
    if err != nil {
        return nil, err
    }

    return bluesFeeData, nil
}

func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
    feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
    if err != nil {
        return errors.Errorf("Error creating or retrieving fee bucket: %s", err)
    }

    return feeBucket.Put(blockHash.CloneBytes(), feeData)
}

func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
    feeBucket := dbTx.Metadata().Bucket(feeBucket)
    if feeBucket == nil {
        return nil, errors.New("Fee bucket does not exist")
    }

    feeData := feeBucket.Get(blockHash.CloneBytes())
    if feeData == nil {
        return nil, errors.Errorf("No fee data found for block %s", blockHash)
    }

    return feeData, nil
}

// The following functions deal with building and validating the coinbase transaction

func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
    if node.isGenesis() {
        return nil
    }
    blockCoinbaseTx := block.CoinbaseTransaction().MsgTx()
    scriptPubKey, extraData, err := DeserializeCoinbasePayload(blockCoinbaseTx)
    if err != nil {
        return err
    }
    expectedCoinbaseTransaction, err := node.expectedCoinbaseTransaction(dag, txsAcceptanceData, scriptPubKey, extraData)
    if err != nil {
        return err
    }

    if !expectedCoinbaseTransaction.Hash().IsEqual(block.CoinbaseTransaction().Hash()) {
        return ruleError(ErrBadCoinbaseTransaction, "Coinbase transaction is not built as expected")
    }

    return nil
}

// expectedCoinbaseTransaction returns the coinbase transaction for the current block
func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData, scriptPubKey []byte, extraData []byte) (*util.Tx, error) {
    bluesFeeData, err := node.getBluesFeeData(dag)
    if err != nil {
        return nil, err
    }

    txIns := []*wire.TxIn{}
    txOuts := []*wire.TxOut{}

    for _, blue := range node.blues {
        txIn, txOut, err := coinbaseInputAndOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
        if err != nil {
            return nil, err
        }
        txIns = append(txIns, txIn)
        if txOut != nil {
            txOuts = append(txOuts, txOut)
        }
    }
    payload, err := SerializeCoinbasePayload(scriptPubKey, extraData)
    if err != nil {
        return nil, err
    }
    coinbaseTx := wire.NewSubnetworkMsgTx(wire.TxVersion, txIns, txOuts, subnetworkid.SubnetworkIDCoinbase, 0, payload)
    sortedCoinbaseTx := txsort.Sort(coinbaseTx)
    return util.NewTx(sortedCoinbaseTx), nil
}

// SerializeCoinbasePayload builds the coinbase payload based on the provided scriptPubKey and extra data.
func SerializeCoinbasePayload(scriptPubKey []byte, extraData []byte) ([]byte, error) {
    w := &bytes.Buffer{}
    err := wire.WriteVarInt(w, uint64(len(scriptPubKey)))
    if err != nil {
        return nil, err
    }
    _, err = w.Write(scriptPubKey)
    if err != nil {
        return nil, err
    }
    _, err = w.Write(extraData)
    if err != nil {
        return nil, err
    }
    return w.Bytes(), nil
}

// DeserializeCoinbasePayload deserializes the coinbase payload into its components (scriptPubKey and extra data).
func DeserializeCoinbasePayload(tx *wire.MsgTx) (scriptPubKey []byte, extraData []byte, err error) {
    r := bytes.NewReader(tx.Payload)
    scriptPubKeyLen, err := wire.ReadVarInt(r)
    if err != nil {
        return nil, nil, err
    }
    scriptPubKey = make([]byte, scriptPubKeyLen)
    _, err = r.Read(scriptPubKey)
    if err != nil {
        return nil, nil, err
    }
    extraData = make([]byte, r.Len())
    if r.Len() != 0 {
        _, err = r.Read(extraData)
        if err != nil {
            return nil, nil, err
        }
    }
    return scriptPubKey, extraData, nil
}
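The payload layout is a varint length, the scriptPubKey bytes, then raw extra data running to the end of the payload. A hedged round-trip check over the two exported helpers above (the helper name is hypothetical; the MsgTx is a minimal stand-in carrying only a payload):

func coinbasePayloadRoundTrip(scriptPubKey, extraData []byte) error {
    payload, err := SerializeCoinbasePayload(scriptPubKey, extraData)
    if err != nil {
        return err
    }
    // A stand-in transaction: DeserializeCoinbasePayload only reads tx.Payload.
    tx := &wire.MsgTx{Payload: payload}
    gotScript, gotExtra, err := DeserializeCoinbasePayload(tx)
    if err != nil {
        return err
    }
    if !bytes.Equal(gotScript, scriptPubKey) || !bytes.Equal(gotExtra, extraData) {
        return errors.New("coinbase payload round trip mismatch")
    }
    return nil
}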
// coinbaseInputAndOutputForBlueBlock calculates the input and output that should go into the coinbase transaction of blueBlock
// If blueBlock gets no fee - returns only txIn and nil for txOut
func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
    txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
    *wire.TxIn, *wire.TxOut, error) {

    blockTxsAcceptanceData, ok := txsAcceptanceData.FindAcceptanceData(blueBlock.hash)
    if !ok {
        return nil, nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
    }
    blockFeeData, ok := feeData[*blueBlock.hash]
    if !ok {
        return nil, nil, errors.Errorf("No feeData for block %s", blueBlock.hash)
    }

    if len(blockTxsAcceptanceData.TxAcceptanceData) != blockFeeData.Len() {
        return nil, nil, errors.Errorf(
            "length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
            len(blockTxsAcceptanceData.TxAcceptanceData), blockFeeData.Len(), blueBlock.hash)
    }

    txIn := &wire.TxIn{
        SignatureScript: []byte{},
        PreviousOutpoint: wire.Outpoint{
            TxID:  daghash.TxID(*blueBlock.hash),
            Index: math.MaxUint32,
        },
        Sequence: wire.MaxTxInSequenceNum,
    }

    totalFees := uint64(0)
    feeIterator := blockFeeData.iterator()

    for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
        fee, err := feeIterator.next()
        if err != nil {
            return nil, nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
        }
        if txAcceptanceData.IsAccepted {
            totalFees += fee
        }
    }

    totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.dagParams) + totalFees

    if totalReward == 0 {
        return txIn, nil, nil
    }

    // the ScriptPubKey for the coinbase is parsed from the coinbase payload
    scriptPubKey, _, err := DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx())
    if err != nil {
        return nil, nil, err
    }

    txOut := &wire.TxOut{
        Value:        totalReward,
        ScriptPubKey: scriptPubKey,
    }

    return txIn, txOut, nil
}
@@ -7,80 +7,22 @@ package blockdag
import (
	"compress/bzip2"
	"encoding/binary"
	"fmt"
	"github.com/pkg/errors"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	_ "github.com/daglabs/btcd/database/ffldb"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/wire"
	"github.com/kaspanet/kaspad/dagconfig"
	_ "github.com/kaspanet/kaspad/database/ffldb"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

// loadBlocks reads files containing bitcoin block data (gzipped but otherwise
// in the format bitcoind writes) from disk and returns them as an array of
// util.Block. This is largely borrowed from the test code in btcdb.
func loadBlocks(filename string) (blocks []*util.Block, err error) {
	filename = filepath.Join("testdata/", filename)

	var network = wire.MainNet
	var dr io.Reader
	var fi io.ReadCloser

	fi, err = os.Open(filename)
	if err != nil {
		return
	}

	if strings.HasSuffix(filename, ".bz2") {
		dr = bzip2.NewReader(fi)
	} else {
		dr = fi
	}
	defer fi.Close()

	var block *util.Block

	err = nil
	for height := 0; err == nil; height++ {
		var rintbuf uint32
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err == io.EOF {
			// hit end of file at expected offset: no warning
			height--
			err = nil
			break
		}
		if err != nil {
			break
		}
		if rintbuf != uint32(network) {
			break
		}
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		blocklen := rintbuf

		rbytes := make([]byte, blocklen)

		// read block
		dr.Read(rbytes)

		block, err = util.NewBlockFromBytes(rbytes)
		if err != nil {
			return
		}
		block.SetHeight(int32(height))
		blocks = append(blocks, block)
	}

	return
}
// loadUTXOSet returns a utxo view loaded from a file.
func loadUTXOSet(filename string) (UTXOSet, error) {
	// The utxostore file format is:
@@ -143,88 +85,69 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
		if err != nil {
			return nil, err
		}
		utxoSet.utxoCollection[wire.OutPoint{TxID: txID, Index: index}] = entry
		utxoSet.utxoCollection[wire.Outpoint{TxID: txID, Index: index}] = entry
	}

	return utxoSet, nil
}

// TestSetBlockRewardMaturity makes the ability to set the block reward maturity
// TestSetCoinbaseMaturity makes the ability to set the coinbase maturity
// available when running tests.
func (dag *BlockDAG) TestSetBlockRewardMaturity(maturity uint16) {
	dag.dagParams.BlockRewardMaturity = maturity
func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
	dag.dagParams.BlockCoinbaseMaturity = maturity
}

// newTestDAG returns a DAG that is usable for synthetic tests. It is
// important to note that this chain has no database associated with it, so
// important to note that this DAG has no database associated with it, so
// it is not usable with all functions and the tests must take care when making
// use of it.
func newTestDAG(params *dagconfig.Params) *BlockDAG {
	// Create a genesis block node and block index populated with it
	// for use when creating the fake chain below.
	node := newBlockNode(&params.GenesisBlock.Header, newSet(), params.K)
	index := newBlockIndex(nil, params)
	index.AddNode(node)

	targetTimespan := int64(params.TargetTimespan / time.Second)
	targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
	adjustmentFactor := params.RetargetAdjustmentFactor
	return &BlockDAG{
		dagParams: params,
		timeSource: NewMedianTime(),
		minRetargetTimespan: targetTimespan / adjustmentFactor,
		maxRetargetTimespan: targetTimespan * adjustmentFactor,
		blocksPerRetarget: int32(targetTimespan / targetTimePerBlock),
		index: index,
		virtual: newVirtualBlock(setFromSlice(node), params.K),
		genesis: index.LookupNode(params.GenesisHash),
		warningCaches: newThresholdCaches(vbNumBits),
		deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments),
	dag := &BlockDAG{
		dagParams: params,
		timeSource: NewMedianTime(),
		targetTimePerBlock: targetTimePerBlock,
		difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
		TimestampDeviationTolerance: params.TimestampDeviationTolerance,
		powMaxBits: util.BigToCompact(params.PowMax),
		index: index,
		warningCaches: newThresholdCaches(vbNumBits),
		deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments),
	}

	// Create a genesis block node and block index populated with it
	// on the above fake DAG.
	dag.genesis, _ = dag.newBlockNode(&params.GenesisBlock.Header, newBlockSet())
	index.AddNode(dag.genesis)

	dag.virtual = newVirtualBlock(dag, blockSetFromSlice(dag.genesis))
	return dag
}

// newTestNode creates a block node connected to the passed parent with the
// provided fields populated and fake values for the other fields.
func newTestNode(parents blockSet, blockVersion int32, bits uint32, timestamp time.Time, phantomK uint32) *blockNode {
func newTestNode(dag *BlockDAG, parents blockSet, blockVersion int32, bits uint32, timestamp time.Time) *blockNode {
	// Make up a header and create a block node from it.
	header := &wire.BlockHeader{
		Version: blockVersion,
		ParentHashes: parents.hashes(),
		Bits: bits,
		Timestamp: timestamp,
		HashMerkleRoot: &daghash.ZeroHash,
		IDMerkleRoot: &daghash.ZeroHash,
		Version: blockVersion,
		ParentHashes: parents.hashes(),
		Bits: bits,
		Timestamp: timestamp,
		HashMerkleRoot: &daghash.ZeroHash,
		AcceptedIDMerkleRoot: &daghash.ZeroHash,
		UTXOCommitment: &daghash.ZeroHash,
	}
	return newBlockNode(header, parents, phantomK)
	node, _ := dag.newBlockNode(header, parents)
	return node
}

func addNodeAsChildToParents(node *blockNode) {
	for _, parent := range node.parents {
	for parent := range node.parents {
		parent.children.add(node)
	}
}

func buildNodeGenerator(phantomK uint32, withChildren bool) func(parents blockSet) *blockNode {
	// For the purposes of these tests, we'll create blockNodes whose hashes are a
	// series of numbers from 1 to 255.
	hashCounter := byte(1)
	buildNode := func(parents blockSet) *blockNode {
		block := newBlockNode(nil, parents, phantomK)
		block.hash = &daghash.Hash{hashCounter}
		hashCounter++

		return block
	}
	if withChildren {
		return func(parents blockSet) *blockNode {
			node := buildNode(parents)
			addNodeAsChildToParents(node)
			return node
		}
	}
	return buildNode
}

// checkRuleError ensures the two passed errors are of the same type (either
// both nil or both of type RuleError) and that their error codes match when
// not nil.
@@ -232,7 +155,7 @@ func checkRuleError(gotErr, wantErr error) error {
	// Ensure the error code is of the expected type and the error
	// code matches the value specified in the test instance.
	if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) {
		return fmt.Errorf("wrong error - got %T (%[1]v), want %T",
		return errors.Errorf("wrong error - got %T (%[1]v), want %T",
			gotErr, wantErr)
	}
	if gotErr == nil {
@@ -242,17 +165,49 @@ func checkRuleError(gotErr, wantErr error) error {
	// Ensure the want error type is a script error.
	werr, ok := wantErr.(RuleError)
	if !ok {
		return fmt.Errorf("unexpected test error type %T", wantErr)
		return errors.Errorf("unexpected test error type %T", wantErr)
	}

	// Ensure the error codes match. It's safe to use a raw type assert
	// here since the code above already proved they are the same type and
	// the want error is a script error.
	gotErrorCode := gotErr.(RuleError).ErrorCode
	if gotErrorCode != werr.ErrorCode {
		return fmt.Errorf("mismatched error code - got %v (%v), want %v",
		return errors.Errorf("mismatched error code - got %v (%v), want %v",
			gotErrorCode, gotErr, werr.ErrorCode)
	}

	return nil
}

func prepareAndProcessBlock(t *testing.T, dag *BlockDAG, parents ...*wire.MsgBlock) *wire.MsgBlock {
	parentHashes := make([]*daghash.Hash, len(parents))
	for i, parent := range parents {
		parentHashes[i] = parent.BlockHash()
	}
	daghash.Sort(parentHashes)
	block, err := PrepareBlockForTest(dag, parentHashes, nil)
	if err != nil {
		t.Fatalf("error in PrepareBlockForTest: %s", err)
	}
	utilBlock := util.NewBlock(block)
	isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
	if err != nil {
		t.Fatalf("unexpected error in ProcessBlock: %s", err)
	}
	if isDelayed {
		t.Fatalf("block is too far in the future")
	}
	if isOrphan {
		t.Fatalf("block was unexpectedly orphan")
	}
	return block
}

func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNode {
	node := dag.index.LookupNode(block.BlockHash())
	if node == nil {
		t.Fatalf("couldn't find block node with hash %s", block.BlockHash())
	}
	return node
}
@@ -5,22 +5,22 @@
package blockdag

import (
	"github.com/daglabs/btcd/btcec"
	"github.com/daglabs/btcd/txscript"
	"github.com/kaspanet/kaspad/ecc"
	"github.com/kaspanet/kaspad/txscript"
)

// -----------------------------------------------------------------------------
// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
// of binary octets to represent an arbitrarily large integer. The scheme
// employs a most significant byte (MSB) base-128 encoding where the high bit in
// each byte indicates whether or not the byte is the final one. In addition,
// to ensure there are no redundant encodings, an offset is subtracted every
// time a group of 7 bits is shifted out. Therefore each integer can be
// represented in exactly one way, and each representation stands for exactly
// one integer.
//
// Another nice property of this encoding is that it provides a compact
// representation of values that are typically used to indicate sizes. For
// example, the values 0 - 127 are represented with a single byte, 128 - 16511
// with two bytes, and 16512 - 2113663 with three bytes.
//
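The hunks below only touch the comments around putVLQ and deserializeVLQ, and their bodies are elided, so here is an illustrative sketch of the scheme just described (it follows the btcd lineage of this code; treat it as a sketch, not the file's exact contents):

// encodeVLQ writes n using 7 payload bits per byte, most significant group
// first, setting the high bit on every byte except the last. Subtracting 1
// per shifted group removes redundant encodings, as described above.
func encodeVLQ(n uint64) []byte {
	buf := []byte{byte(n & 0x7f)}
	for n > 0x7f {
		n = (n >> 7) - 1
		buf = append([]byte{byte(n&0x7f) | 0x80}, buf...)
	}
	return buf
}

// decodeVLQ is the inverse; it also returns the number of bytes consumed.
func decodeVLQ(serialized []byte) (n uint64, size int) {
	for _, val := range serialized {
		size++
		n = (n << 7) | uint64(val&0x7f)
		if val&0x80 == 0 {
			break
		}
		n++
	}
	return n, size
}

For example, encodeVLQ(127) is the single byte 0x7f, encodeVLQ(128) is 0x80 0x00, and encodeVLQ(4911) is 0xa5 0x2f — the two bytes that open the compressed pay-to-pubkey-hash test vector later in this diff.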
@@ -60,7 +60,7 @@ func serializeSizeVLQ(n uint64) int {

// putVLQ serializes the provided number to a variable-length quantity according
// to the format described above and returns the number of bytes of the encoded
// value. The result is placed directly into the passed byte slice which must
// be at least large enough to handle the number of bytes returned by the
// serializeSizeVLQ function or it will panic.
func putVLQ(target []byte, n uint64) int {
@@ -88,7 +88,7 @@ func putVLQ(target []byte, n uint64) int {
}

// deserializeVLQ deserializes the provided variable-length quantity according
// to the format described above. It also returns the number of bytes
// deserialized.
func deserializeVLQ(serialized []byte) (uint64, int) {
	var n uint64
@@ -108,8 +108,7 @@ func deserializeVLQ(serialized []byte) (uint64, int) {
// -----------------------------------------------------------------------------
// In order to reduce the size of stored scripts, a domain specific compression
// algorithm is used which recognizes standard scripts and stores them using
// less bytes than the original script. The compression algorithm used here was
// obtained from Bitcoin Core, so all credits for the algorithm go to it.
// less bytes than the original script.
//
// The general serialized format is:
//
@@ -147,22 +146,22 @@ const (
	cstPayToScriptHash = 1

	// cstPayToPubKeyComp2 identifies a compressed pay-to-pubkey script to
	// a compressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyComp2 = 2

	// cstPayToPubKeyComp3 identifies a compressed pay-to-pubkey script to
	// a compressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyComp3 = 3

	// cstPayToPubKeyUncomp4 identifies a compressed pay-to-pubkey script to
	// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyUncomp4 = 4

	// cstPayToPubKeyUncomp5 identifies a compressed pay-to-pubkey script to
	// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyUncomp5 = 5

@@ -206,8 +205,8 @@ func isScriptHash(script []byte) (bool, []byte) {
// key along with the serialized pubkey it is paying to if it is.
//
// NOTE: This function ensures the public key is actually valid since the
// compression algorithm requires valid pubkeys. It does not support hybrid
// pubkeys. This means that even if the script has the correct form for a
// pay-to-pubkey script, this function will only return true when it is paying
// to a valid compressed or uncompressed pubkey.
func isPubKey(script []byte) (bool, []byte) {
@@ -218,7 +217,7 @@ func isPubKey(script []byte) (bool, []byte) {

	// Ensure the public key is valid.
	serializedPubKey := script[1:34]
	_, err := btcec.ParsePubKey(serializedPubKey, btcec.S256())
	_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
	if err == nil {
		return true, serializedPubKey
	}
@@ -230,7 +229,7 @@ func isPubKey(script []byte) (bool, []byte) {

	// Ensure the public key is valid.
	serializedPubKey := script[1:66]
	_, err := btcec.ParsePubKey(serializedPubKey, btcec.S256())
	_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
	if err == nil {
		return true, serializedPubKey
	}
@@ -241,27 +240,27 @@ func isPubKey(script []byte) (bool, []byte) {

// compressedScriptSize returns the number of bytes the passed script would take
// when encoded with the domain specific compression algorithm described above.
func compressedScriptSize(pkScript []byte) int {
func compressedScriptSize(scriptPubKey []byte) int {
	// Pay-to-pubkey-hash script.
	if valid, _ := isPubKeyHash(pkScript); valid {
	if valid, _ := isPubKeyHash(scriptPubKey); valid {
		return 21
	}

	// Pay-to-script-hash script.
	if valid, _ := isScriptHash(pkScript); valid {
	if valid, _ := isScriptHash(scriptPubKey); valid {
		return 21
	}

	// Pay-to-pubkey (compressed or uncompressed) script.
	if valid, _ := isPubKey(pkScript); valid {
	if valid, _ := isPubKey(scriptPubKey); valid {
		return 33
	}

	// When none of the above special cases apply, encode the script as is
	// preceded by the sum of its size and the number of special cases
	// encoded as a variable length quantity.
	return serializeSizeVLQ(uint64(len(pkScript)+numSpecialScripts)) +
		len(pkScript)
	return serializeSizeVLQ(uint64(len(scriptPubKey)+numSpecialScripts)) +
		len(scriptPubKey)
}

// decodeCompressedScriptSize treats the passed serialized bytes as a compressed
@@ -293,26 +292,26 @@ func decodeCompressedScriptSize(serialized []byte) int {

// putCompressedScript compresses the passed script according to the domain
// specific compression algorithm described above directly into the passed
// target byte slice. The target byte slice must be at least large enough to
// handle the number of bytes returned by the compressedScriptSize function or
// it will panic.
func putCompressedScript(target, pkScript []byte) int {
func putCompressedScript(target, scriptPubKey []byte) int {
	// Pay-to-pubkey-hash script.
	if valid, hash := isPubKeyHash(pkScript); valid {
	if valid, hash := isPubKeyHash(scriptPubKey); valid {
		target[0] = cstPayToPubKeyHash
		copy(target[1:21], hash)
		return 21
	}

	// Pay-to-script-hash script.
	if valid, hash := isScriptHash(pkScript); valid {
	if valid, hash := isScriptHash(scriptPubKey); valid {
		target[0] = cstPayToScriptHash
		copy(target[1:21], hash)
		return 21
	}

	// Pay-to-pubkey (compressed or uncompressed) script.
	if valid, serializedPubKey := isPubKey(pkScript); valid {
	if valid, serializedPubKey := isPubKey(scriptPubKey); valid {
		pubKeyFormat := serializedPubKey[0]
		switch pubKeyFormat {
		case 0x02, 0x03:
@@ -331,10 +330,10 @@ func putCompressedScript(target, pkScript []byte) int {
	// When none of the above special cases apply, encode the unmodified
	// script preceded by the sum of its size and the number of special
	// cases encoded as a variable length quantity.
	encodedSize := uint64(len(pkScript) + numSpecialScripts)
	encodedSize := uint64(len(scriptPubKey) + numSpecialScripts)
	vlqSizeLen := putVLQ(target, encodedSize)
	copy(target[vlqSizeLen:], pkScript)
	return vlqSizeLen + len(pkScript)
	copy(target[vlqSizeLen:], scriptPubKey)
	return vlqSizeLen + len(scriptPubKey)
}

// decompressScript returns the original script obtained by decompressing the
@@ -343,119 +342,118 @@ func putCompressedScript(target, pkScript []byte) int {
//
// NOTE: The script parameter must already have been proven to be long enough
// to contain the number of bytes returned by decodeCompressedScriptSize or it
// will panic. This is acceptable since it is only an internal function.
func decompressScript(compressedPkScript []byte) []byte {
func decompressScript(compressedScriptPubKey []byte) []byte {
	// In practice this function will not be called with a zero-length or
	// nil script since the nil script encoding includes the length, however
	// the code below assumes the length exists, so just return nil now if
	// the function ever ends up being called with a nil script in the
	// future.
	if len(compressedPkScript) == 0 {
	if len(compressedScriptPubKey) == 0 {
		return nil
	}

	// Decode the script size and examine it for the special cases.
	encodedScriptSize, bytesRead := deserializeVLQ(compressedPkScript)
	encodedScriptSize, bytesRead := deserializeVLQ(compressedScriptPubKey)
	switch encodedScriptSize {
	// Pay-to-pubkey-hash script. The resulting script is:
	// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
	case cstPayToPubKeyHash:
		pkScript := make([]byte, 25)
		pkScript[0] = txscript.OpDup
		pkScript[1] = txscript.OpHash160
		pkScript[2] = txscript.OpData20
		copy(pkScript[3:], compressedPkScript[bytesRead:bytesRead+20])
		pkScript[23] = txscript.OpEqualVerify
		pkScript[24] = txscript.OpCheckSig
		return pkScript
		scriptPubKey := make([]byte, 25)
		scriptPubKey[0] = txscript.OpDup
		scriptPubKey[1] = txscript.OpHash160
		scriptPubKey[2] = txscript.OpData20
		copy(scriptPubKey[3:], compressedScriptPubKey[bytesRead:bytesRead+20])
		scriptPubKey[23] = txscript.OpEqualVerify
		scriptPubKey[24] = txscript.OpCheckSig
		return scriptPubKey

	// Pay-to-script-hash script. The resulting script is:
	// <OP_HASH160><20 byte script hash><OP_EQUAL>
	case cstPayToScriptHash:
		pkScript := make([]byte, 23)
		pkScript[0] = txscript.OpHash160
		pkScript[1] = txscript.OpData20
		copy(pkScript[2:], compressedPkScript[bytesRead:bytesRead+20])
		pkScript[22] = txscript.OpEqual
		return pkScript
		scriptPubKey := make([]byte, 23)
		scriptPubKey[0] = txscript.OpHash160
		scriptPubKey[1] = txscript.OpData20
		copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+20])
		scriptPubKey[22] = txscript.OpEqual
		return scriptPubKey

	// Pay-to-compressed-pubkey script. The resulting script is:
	// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
	case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
		pkScript := make([]byte, 35)
		pkScript[0] = txscript.OpData33
		pkScript[1] = byte(encodedScriptSize)
		copy(pkScript[2:], compressedPkScript[bytesRead:bytesRead+32])
		pkScript[34] = txscript.OpCheckSig
		return pkScript
		scriptPubKey := make([]byte, 35)
		scriptPubKey[0] = txscript.OpData33
		scriptPubKey[1] = byte(encodedScriptSize)
		copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+32])
		scriptPubKey[34] = txscript.OpCheckSig
		return scriptPubKey

	// Pay-to-uncompressed-pubkey script. The resulting script is:
	// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
	case cstPayToPubKeyUncomp4, cstPayToPubKeyUncomp5:
		// Change the leading byte to the appropriate compressed pubkey
		// identifier (0x02 or 0x03) so it can be decoded as a
		// compressed pubkey. This really should never fail since the
		// encoding ensures it is valid before compressing to this type.
		compressedKey := make([]byte, 33)
		compressedKey[0] = byte(encodedScriptSize - 2)
		copy(compressedKey[1:], compressedPkScript[1:])
		key, err := btcec.ParsePubKey(compressedKey, btcec.S256())
		copy(compressedKey[1:], compressedScriptPubKey[1:])
		key, err := ecc.ParsePubKey(compressedKey, ecc.S256())
		if err != nil {
			return nil
		}

		pkScript := make([]byte, 67)
		pkScript[0] = txscript.OpData65
		copy(pkScript[1:], key.SerializeUncompressed())
		pkScript[66] = txscript.OpCheckSig
		return pkScript
		scriptPubKey := make([]byte, 67)
		scriptPubKey[0] = txscript.OpData65
		copy(scriptPubKey[1:], key.SerializeUncompressed())
		scriptPubKey[66] = txscript.OpCheckSig
		return scriptPubKey
	}

	// When none of the special cases apply, the script was encoded using
	// the general format, so reduce the script size by the number of
	// special cases and return the unmodified script.
	scriptSize := int(encodedScriptSize - numSpecialScripts)
	pkScript := make([]byte, scriptSize)
	copy(pkScript, compressedPkScript[bytesRead:bytesRead+scriptSize])
	return pkScript
	scriptPubKey := make([]byte, scriptSize)
	copy(scriptPubKey, compressedScriptPubKey[bytesRead:bytesRead+scriptSize])
	return scriptPubKey
}

// -----------------------------------------------------------------------------
// In order to reduce the size of stored amounts, a domain specific compression
// algorithm is used which relies on there typically being a lot of zeroes at
// end of the amounts. The compression algorithm used here was obtained from
// Bitcoin Core, so all credits for the algorithm go to it.
// end of the amounts.
//
// While this is simply exchanging one uint64 for another, the resulting value
// for typical amounts has a much smaller magnitude which results in fewer bytes
// when encoded as variable length quantity. For example, consider the amount
// of 0.1 BTC which is 10000000 satoshi. Encoding 10000000 as a VLQ would take
// of 0.1 KAS which is 10000000 sompi. Encoding 10000000 as a VLQ would take
// 4 bytes while encoding the compressed value of 8 as a VLQ only takes 1 byte.
//
// Essentially the compression is achieved by splitting the value into an
// exponent in the range [0-9] and a digit in the range [1-9], when possible,
// and encoding them in a way that can be decoded. More specifically, the
// encoding is as follows:
// - 0 is 0
// - Find the exponent, e, as the largest power of 10 that evenly divides the
//   value up to a maximum of 9
// - When e < 9, the final digit can't be 0 so store it as d and remove it by
//   dividing the value by 10 (call the result n). The encoded value is thus:
//   1 + 10*(9*n + d-1) + e
// - When e==9, the only thing known is the amount is not 0. The encoded value
//   is thus:
//   1 + 10*(n-1) + e == 10 + 10*(n-1)
//
// Example encodings:
// (The numbers in parenthesis are the number of bytes when serialized as a VLQ)
// 0          (1) -> 0         (1) *  0.00000000 BTC
// 1000       (2) -> 4         (1) *  0.00001000 BTC
// 10000      (2) -> 5         (1) *  0.00010000 BTC
// 12345678   (4) -> 111111101 (4) *  0.12345678 BTC
// 50000000   (4) -> 48        (1) *  0.50000000 BTC
// 100000000  (4) -> 9         (1) *  1.00000000 BTC
// 500000000  (5) -> 49        (1) *  5.00000000 BTC
// 1000000000 (5) -> 10        (1) * 10.00000000 BTC
// 0          (1) -> 0         (1) *  0.00000000 KAS
// 1000       (2) -> 4         (1) *  0.00001000 KAS
// 10000      (2) -> 5         (1) *  0.00010000 KAS
// 12345678   (4) -> 111111101 (4) *  0.12345678 KAS
// 50000000   (4) -> 48        (1) *  0.50000000 KAS
// 100000000  (4) -> 9         (1) *  1.00000000 KAS
// 500000000  (5) -> 49        (1) *  5.00000000 KAS
// 1000000000 (5) -> 10        (1) * 10.00000000 KAS
// -----------------------------------------------------------------------------
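The compress/decompress bodies themselves are suppressed by the hunk below, so the following sketch implements exactly the rules stated above. It reproduces the example table and the test values further down (e.g. 546 -> 4911 and 50000000 -> 48; the original comment read "47" there, which contradicts both the formula and the test). Treat it as illustrative rather than the file's exact code:

func compressAmount(n uint64) uint64 {
	if n == 0 {
		return 0
	}
	// e is the largest power of 10 that evenly divides n, capped at 9.
	e := uint64(0)
	for n%10 == 0 && e < 9 {
		n /= 10
		e++
	}
	if e < 9 {
		d := n % 10 // the final digit, 1-9 by construction
		n /= 10
		return 1 + 10*(9*n+d-1) + e
	}
	return 10 + 10*(n-1)
}

func decompressAmount(x uint64) uint64 {
	if x == 0 {
		return 0
	}
	x--
	e := x % 10
	x /= 10
	var n uint64
	if e < 9 {
		d := x%9 + 1
		x /= 9
		n = 10*x + d
	} else {
		n = x + 1
	}
	for ; e > 0; e-- {
		n *= 10
	}
	return n
}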
// compressTxOutAmount compresses the passed amount according to the domain
@@ -543,19 +541,19 @@ func decompressTxOutAmount(amount uint64) uint64 {

// compressedTxOutSize returns the number of bytes the passed transaction output
// fields would take when encoded with the format described above.
func compressedTxOutSize(amount uint64, pkScript []byte) int {
func compressedTxOutSize(amount uint64, scriptPubKey []byte) int {
	return serializeSizeVLQ(compressTxOutAmount(amount)) +
		compressedScriptSize(pkScript)
		compressedScriptSize(scriptPubKey)
}

// putCompressedTxOut compresses the passed amount and script according to their
// domain specific compression algorithms and encodes them directly into the
// passed target byte slice with the format described above. The target byte
// slice must be at least large enough to handle the number of bytes returned by
// the compressedTxOutSize function or it will panic.
func putCompressedTxOut(target []byte, amount uint64, pkScript []byte) int {
func putCompressedTxOut(target []byte, amount uint64, scriptPubKey []byte) int {
	offset := putVLQ(target, compressTxOutAmount(amount))
	offset += putCompressedScript(target[offset:], pkScript)
	offset += putCompressedScript(target[offset:], scriptPubKey)
	return offset
}
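The decode side, decodeCompressedTxOut, is exercised by the tests below and by deserializeUTXOEntry later in this diff, but its body never appears here. A sketch in terms of the helpers shown above (illustrative only; errDeserialize is defined elsewhere in this package):

func decodeCompressedTxOut(serialized []byte) (uint64, []byte, int, error) {
	// Deserialize the compressed amount and ensure bytes remain for the script.
	compressedAmount, bytesRead := deserializeVLQ(serialized)
	if bytesRead >= len(serialized) {
		return 0, nil, bytesRead, errDeserialize("unexpected end of data after compressed amount")
	}

	// Determine the compressed script size and ensure it is all present.
	scriptSize := decodeCompressedScriptSize(serialized[bytesRead:])
	if len(serialized[bytesRead:]) < scriptSize {
		return 0, nil, bytesRead, errDeserialize("unexpected end of data after script size")
	}

	// Decompress both halves and report the total bytes consumed.
	amount := decompressTxOutAmount(compressedAmount)
	scriptPubKey := decompressScript(serialized[bytesRead : bytesRead+scriptSize])
	return amount, scriptPubKey, bytesRead + scriptSize, nil
}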
@@ -11,7 +11,7 @@ import (
)

// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
@@ -162,11 +162,6 @@ func TestScriptCompression(t *testing.T) {
			uncompressed: hexToBytes("3302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
			compressed:   hexToBytes("293302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
		},
		{
			name:         "null data",
			uncompressed: hexToBytes("6a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
			compressed:   hexToBytes("286a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
		},
		{
			name:         "requires 2 size bytes - data push 200 bytes",
			uncompressed: append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...),
@@ -265,47 +260,47 @@ func TestAmountCompression(t *testing.T) {
		compressed uint64
	}{
		{
			name: "0 BTC (sometimes used in nulldata)",
			name: "0 KAS",
			uncompressed: 0,
			compressed: 0,
		},
		{
			name: "546 Satoshi (current network dust value)",
			name: "546 Sompi (current network dust value)",
			uncompressed: 546,
			compressed: 4911,
		},
		{
			name: "0.00001 BTC (typical transaction fee)",
			name: "0.00001 KAS (typical transaction fee)",
			uncompressed: 1000,
			compressed: 4,
		},
		{
			name: "0.0001 BTC (typical transaction fee)",
			name: "0.0001 KAS (typical transaction fee)",
			uncompressed: 10000,
			compressed: 5,
		},
		{
			name: "0.12345678 BTC",
			name: "0.12345678 KAS",
			uncompressed: 12345678,
			compressed: 111111101,
		},
		{
			name: "0.5 BTC",
			name: "0.5 KAS",
			uncompressed: 50000000,
			compressed: 48,
		},
		{
			name: "1 BTC",
			name: "1 KAS",
			uncompressed: 100000000,
			compressed: 9,
		},
		{
			name: "5 BTC",
			name: "5 KAS",
			uncompressed: 500000000,
			compressed: 49,
		},
		{
			name: "21000000 BTC (max minted coins)",
			name: "21000000 KAS (max minted coins)",
			uncompressed: 2100000000000000,
			compressed: 21000000,
		},
@@ -338,35 +333,29 @@ func TestCompressedTxOut(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name       string
		amount     uint64
		pkScript   []byte
		compressed []byte
		name         string
		amount       uint64
		scriptPubKey []byte
		compressed   []byte
	}{
		{
			name: "nulldata with 0 BTC",
			amount: 0,
			pkScript: hexToBytes("6a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
			compressed: hexToBytes("00286a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
			name: "pay-to-pubkey-hash dust",
			amount: 546,
			scriptPubKey: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
			compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
		},
		{
			name: "pay-to-pubkey-hash dust",
			amount: 546,
			pkScript: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
			compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
		},
		{
			name: "pay-to-pubkey uncompressed 1 BTC",
			amount: 100000000,
			pkScript: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
			compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
			name: "pay-to-pubkey uncompressed 1 KAS",
			amount: 100000000,
			scriptPubKey: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
			compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
		},
	}

	for _, test := range tests {
		// Ensure the function to calculate the serialized size without
		// actually serializing the txout is calculated properly.
		gotSize := compressedTxOutSize(test.amount, test.pkScript)
		gotSize := compressedTxOutSize(test.amount, test.scriptPubKey)
		if gotSize != len(test.compressed) {
			t.Errorf("compressedTxOutSize (%s): did not get "+
				"expected size - got %d, want %d", test.name,
@@ -377,7 +366,7 @@ func TestCompressedTxOut(t *testing.T) {
		// Ensure the txout compresses to the expected value.
		gotCompressed := make([]byte, gotSize)
		gotBytesWritten := putCompressedTxOut(gotCompressed,
			test.amount, test.pkScript)
			test.amount, test.scriptPubKey)
		if !bytes.Equal(gotCompressed, test.compressed) {
			t.Errorf("compressTxOut (%s): did not get expected "+
				"bytes - got %x, want %x", test.name,
@@ -407,10 +396,10 @@ func TestCompressedTxOut(t *testing.T) {
				test.name, gotAmount, test.amount)
			continue
		}
		if !bytes.Equal(gotScript, test.pkScript) {
		if !bytes.Equal(gotScript, test.scriptPubKey) {
			t.Errorf("decodeCompressedTxOut (%s): did not get "+
				"expected script - got %x, want %x",
				test.name, gotScript, test.pkScript)
				test.name, gotScript, test.scriptPubKey)
			continue
		}
		if gotBytesRead != len(test.compressed) {
blockdag/dag.go (1817) — file diff suppressed because it is too large
blockdag/dag_test.go (1456) — file diff suppressed because it is too large
@@ -9,17 +9,21 @@ import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/pkg/errors"
	"io"
	"sync"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/subnetworkid"
	"github.com/daglabs/btcd/wire"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/binaryserializer"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/util/subnetworkid"
	"github.com/kaspanet/kaspad/wire"
)

const (
	// blockHdrSize is the size of a block header. This is simply the
	// constant from wire and is only provided here for convenience since
	// wire.MaxBlockHeaderPayload is quite long.
	blockHdrSize = wire.MaxBlockHeaderPayload
@@ -30,18 +34,10 @@ const (
)

var (
	// blockIndexBucketName is the name of the db bucket used to house to the
	// blockIndexBucketName is the name of the database bucket used to house the
	// block headers and contextual information.
	blockIndexBucketName = []byte("blockheaderidx")

	// hashIndexBucketName is the name of the db bucket used to house to the
	// block hash -> block height index.
	hashIndexBucketName = []byte("hashidx")

	// heightIndexBucketName is the name of the db bucket used to house to
	// the block height -> block hash index.
	heightIndexBucketName = []byte("heightidx")

	// dagStateKeyName is the name of the db key used to store the DAG
	// tip hashes.
	dagStateKeyName = []byte("dagstate")
@@ -50,15 +46,19 @@ var (
	// version of the utxo set currently in the database.
	utxoSetVersionKeyName = []byte("utxosetversion")

	// utxoSetBucketName is the name of the db bucket used to house the
	// utxoSetBucketName is the name of the database bucket used to house the
	// unspent transaction output set.
	utxoSetBucketName = []byte("utxoset")

	// utxoDiffsBucketName is the name of the db bucket used to house the
	// utxoDiffsBucketName is the name of the database bucket used to house the
	// diffs and diff children of blocks.
	utxoDiffsBucketName = []byte("utxodiffs")

	// subnetworksBucketName is the name of the db bucket used to store the
	// reachabilityDataBucketName is the name of the database bucket used to house the
	// reachability tree nodes and future covering sets of blocks.
	reachabilityDataBucketName = []byte("reachability")

	// subnetworksBucketName is the name of the database bucket used to store the
	// subnetwork registry.
	subnetworksBucketName = []byte("subnetworks")

@@ -83,8 +83,8 @@ func (e errNotInDAG) Error() string {
// isNotInDAGErr returns whether or not the passed error is an
// errNotInDAG error.
func isNotInDAGErr(err error) bool {
	_, ok := err.(errNotInDAG)
	return ok
	var notInDAGErr errNotInDAG
	return errors.As(err, &notInDAGErr)
}

// errDeserialize signifies that a problem was encountered when deserializing
@@ -99,12 +99,12 @@ func (e errDeserialize) Error() string {
// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
	_, ok := err.(errDeserialize)
	return ok
	var deserializeErr errDeserialize
	return errors.As(err, &deserializeErr)
}
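The switch from a raw type assertion to errors.As in the two helpers above matters once errors get wrapped. A standalone illustration using the standard library (errDeserialize here mirrors the string-based error type in this file):

package main

import (
	"errors"
	"fmt"
)

type errDeserialize string

func (e errDeserialize) Error() string { return string(e) }

func main() {
	wrapped := fmt.Errorf("loading utxo entry: %w", errDeserialize("unexpected end of data"))

	// A plain type assertion fails on the wrapped error...
	_, ok := wrapped.(errDeserialize)
	fmt.Println(ok) // false

	// ...while errors.As unwraps until it finds the target type.
	var target errDeserialize
	fmt.Println(errors.As(wrapped, &target)) // true
}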
// dbPutVersion uses an existing database transaction to update the provided
// key in the metadata bucket to the given version. It is primarily used to
// track versions on entities such as buckets.
func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
	var serialized [4]byte
@@ -115,10 +115,9 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
// -----------------------------------------------------------------------------
// The unspent transaction output (UTXO) set consists of an entry for each
// unspent output using a format that is optimized to reduce space using domain
// specific compression algorithms. This format is a slightly modified version
// of the format used in Bitcoin Core.
// specific compression algorithms.
//
// Each entry is keyed by an outpoint as specified below. It is important to
// note that the key encoding uses a VLQ, which employs an MSB encoding so
// iteration of UTXOs when doing byte-wise comparisons will produce them in
// order.
@@ -141,12 +140,11 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
//   compressed script   []byte   variable
//
// The serialized header code format is:
//   bit 0 - containing transaction is a block reward
//   bit 0 - containing transaction is a coinbase
//   bits 1-x - height of the block that contains the unspent txout
//
// Example 1:
// From tx in main blockchain:
// Blk 1, b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
// b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
//
//   03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52
//   <><------------------------------------------------------------------>
@@ -155,13 +153,12 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
//
//  - header code: 0x03 (coinbase, height 1)
//  - compressed txout:
//    - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC)
//    - 0x32: VLQ-encoded compressed amount for 5000000000 (50 KAS)
//    - 0x04: special script type pay-to-pubkey
//    - 0x96...52: x-coordinate of the pubkey
//
// Example 2:
// From tx in main blockchain:
// Blk 113931, 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
// 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
//
//   8cf316800900b8025be1b3efc63b0ad48e7f9f10e87544528d58
//   <----><------------------------------------------>
@@ -170,13 +167,12 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
//
//  - header code: 0x8cf316 (not coinbase, height 113931)
//  - compressed txout:
//    - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 BTC)
//    - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 KAS)
//    - 0x00: special script type pay-to-pubkey-hash
//    - 0xb8...58: pubkey hash
//
// Example 3:
// From tx in main blockchain:
// Blk 338156, 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
// 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
//
//   a8a2588ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6
//   <----><-------------------------------------------------->
@@ -185,7 +181,7 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
//
//  - header code: 0xa8a258 (not coinbase, height 338156)
//  - compressed txout:
//    - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 BTC)
//    - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 KAS)
//    - 0x01: special script type pay-to-script-hash
//    - 0x1d...e6: script hash
// -----------------------------------------------------------------------------
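To make the worked examples above concrete: decoding a header code follows directly from the layout (bit 0 is the coinbase flag, bits 1-x are the height), and deserializeUTXOEntry further down applies exactly these two operations:

// decodeHeaderCode splits a deserialized header code per the layout above.
func decodeHeaderCode(code uint64) (isCoinbase bool, blockHeight uint64) {
	return code&0x01 != 0, code >> 1
}

// Example 1: code 0x03 => isCoinbase = true, blockHeight = 1.
// Example 2: 0x8cf316 is the VLQ encoding on disk; once deserializeVLQ has
// produced the integer header code, the same bit split yields
// isCoinbase = false, blockHeight = 113931.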
@@ -204,12 +200,12 @@ var outpointKeyPool = sync.Pool{
}

// outpointKey returns a key suitable for use as a database key in the UTXO set
// while making use of a free list. A new buffer is allocated if there are not
// already any available on the free list. The returned byte slice should be
// returned to the free list by using the recycleOutpointKey function when the
// caller is done with it _unless_ the slice will need to live for longer than
// the caller can calculate such as when used to write to the database.
func outpointKey(outpoint wire.OutPoint) *[]byte {
func outpointKey(outpoint wire.Outpoint) *[]byte {
	// A VLQ employs an MSB encoding, so they are useful not only to reduce
	// the amount of storage space, but also so iteration of UTXOs when
	// doing byte-wise comparisons will produce them in order.
@@ -227,101 +223,14 @@ func recycleOutpointKey(key *[]byte) {
	outpointKeyPool.Put(key)
}
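The pool declaration and the outpointKey body are elided by the hunk above; a sketch of the free-list pattern being described, with the buffer-size constant assumed from this file's btcd lineage:

var outpointKeyPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, daghash.HashSize+maxUint32VLQSerializeSize) // constant name assumed
		return &b // a pointer to the slice avoids an allocation on Put
	},
}

func outpointKey(outpoint wire.Outpoint) *[]byte {
	// Reuse a buffer from the free list and trim it to <txid><VLQ-encoded index>.
	key := outpointKeyPool.Get().(*[]byte)
	idx := uint64(outpoint.Index)
	*key = (*key)[:daghash.HashSize+serializeSizeVLQ(idx)]
	copy(*key, outpoint.TxID[:])
	putVLQ((*key)[daghash.HashSize:], idx)
	return key
}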
|
||||
|
||||
// utxoEntryHeaderCode returns the calculated header code to be used when
|
||||
// serializing the provided utxo entry.
|
||||
func utxoEntryHeaderCode(entry *UTXOEntry) uint64 {
|
||||
|
||||
// As described in the serialization format comments, the header code
|
||||
// encodes the height shifted over one bit and the block reward flag in the
|
||||
// lowest bit.
|
||||
headerCode := uint64(entry.BlockHeight()) << 1
|
||||
if entry.IsBlockReward() {
|
||||
headerCode |= 0x01
|
||||
}
|
||||
|
||||
return headerCode
|
||||
}
|
||||
|
||||
// serializeUTXOEntry returns the entry serialized to a format that is suitable
|
||||
// for long-term storage. The format is described in detail above.
|
||||
func serializeUTXOEntry(entry *UTXOEntry) ([]byte, error) {
|
||||
|
||||
// Encode the header code.
|
||||
headerCode := utxoEntryHeaderCode(entry)
|
||||
|
||||
// Calculate the size needed to serialize the entry.
|
||||
size := serializeSizeVLQ(headerCode) +
|
||||
compressedTxOutSize(uint64(entry.Amount()), entry.PkScript())
|
||||
|
||||
// Serialize the header code followed by the compressed unspent
|
||||
// transaction output.
|
||||
serialized := make([]byte, size)
|
||||
offset := putVLQ(serialized, headerCode)
|
||||
offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
|
||||
entry.PkScript())
|
||||
|
||||
return serialized, nil
|
||||
}
|
||||
|
||||
// deserializeOutPoint decodes an outPoint from the passed serialized byte
|
||||
// slice into a new wire.OutPoint using a format that is suitable for long-
|
||||
// term storage. this format is described in detail above.
|
||||
func deserializeOutPoint(serialized []byte) (*wire.OutPoint, error) {
|
||||
if len(serialized) <= daghash.HashSize {
|
||||
return nil, errDeserialize("unexpected end of data")
|
||||
}
|
||||
|
||||
txID := daghash.TxID{}
|
||||
txID.SetBytes(serialized[:daghash.HashSize])
|
||||
index, _ := deserializeVLQ(serialized[daghash.HashSize:])
|
||||
return wire.NewOutPoint(&txID, uint32(index)), nil
|
||||
}
|
||||
|
||||
// deserializeUTXOEntry decodes a UTXO entry from the passed serialized byte
|
||||
// slice into a new UTXOEntry using a format that is suitable for long-term
|
||||
// storage. The format is described in detail above.
|
||||
func deserializeUTXOEntry(serialized []byte) (*UTXOEntry, error) {
|
||||
// Deserialize the header code.
|
||||
code, offset := deserializeVLQ(serialized)
|
||||
if offset >= len(serialized) {
|
||||
return nil, errDeserialize("unexpected end of data after header")
|
||||
}
|
||||
|
||||
// Decode the header code.
|
||||
//
|
||||
// Bit 0 indicates whether the containing transaction is a block reward.
|
||||
// Bits 1-x encode height of containing transaction.
|
||||
isBlockReward := code&0x01 != 0
|
||||
blockHeight := int32(code >> 1)
|
||||
|
||||
// Decode the compressed unspent transaction output.
|
||||
amount, pkScript, _, err := decodeCompressedTxOut(serialized[offset:])
|
||||
if err != nil {
|
||||
return nil, errDeserialize(fmt.Sprintf("unable to decode "+
|
||||
"UTXO: %s", err))
|
||||
}
|
||||
|
||||
entry := &UTXOEntry{
|
||||
amount: amount,
|
||||
pkScript: pkScript,
|
||||
blockHeight: blockHeight,
|
||||
packedFlags: 0,
|
||||
}
|
||||
if isBlockReward {
|
||||
entry.packedFlags |= tfBlockReward
|
||||
}
|
||||
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
// dbPutUTXODiff uses an existing database transaction to update the UTXO set
|
||||
// in the database based on the provided UTXO view contents and state. In
|
||||
// in the database based on the provided UTXO view contents and state. In
|
||||
// particular, only the entries that have been marked as modified are written
|
||||
// to the database.
|
||||
func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
|
||||
utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
|
||||
for outPoint := range diff.toRemove {
|
||||
key := outpointKey(outPoint)
|
||||
for outpoint := range diff.toRemove {
|
||||
key := outpointKey(outpoint)
|
||||
err := utxoBucket.Delete(*key)
|
||||
recycleOutpointKey(key)
|
||||
if err != nil {
|
||||
@@ -329,17 +238,14 @@ func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
|
||||
}
|
||||
}
|
||||
|
||||
for outPoint, entry := range diff.toAdd {
|
||||
for outpoint, entry := range diff.toAdd {
|
||||
// Serialize and store the UTXO entry.
|
||||
serialized, err := serializeUTXOEntry(entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
serialized := serializeUTXOEntry(entry)
|
||||
|
||||
key := outpointKey(outPoint)
|
||||
err = utxoBucket.Put(*key, serialized)
|
||||
key := outpointKey(outpoint)
|
||||
err := utxoBucket.Put(*key, serialized)
|
||||
// NOTE: The key is intentionally not recycled here since the
|
||||
// database interface contract prohibits modifications. It will
|
||||
// database interface contract prohibits modifications. It will
|
||||
// be garbage collected normally when the database is done with
|
||||
// it.
|
||||
if err != nil {
|
||||
@@ -350,58 +256,6 @@ func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// The block index consists of two buckets with an entry for every block in the
|
||||
// main chain. One bucket is for the hash to height mapping and the other is
|
||||
// for the height to hash mapping.
|
||||
//
|
||||
// The serialized format for values in the hash to height bucket is:
|
||||
// <height>
|
||||
//
|
||||
// Field Type Size
|
||||
// height uint32 4 bytes
|
||||
//
|
||||
// The serialized format for values in the height to hash bucket is:
|
||||
// <hash>
|
||||
//
|
||||
// Field Type Size
|
||||
// hash daghash.Hash daghash.HashSize
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
// dbPutBlockIndex uses an existing database transaction to update or add the
|
||||
// block index entries for the hash to height and height to hash mappings for
|
||||
// the provided values.
|
||||
func dbPutBlockIndex(dbTx database.Tx, hash *daghash.Hash, height int32) error {
|
||||
// Serialize the height for use in the index entries.
|
||||
var serializedHeight [4]byte
|
||||
byteOrder.PutUint32(serializedHeight[:], uint32(height))
|
||||
|
||||
// Add the block hash to height mapping to the index.
|
||||
meta := dbTx.Metadata()
|
||||
hashIndex := meta.Bucket(hashIndexBucketName)
|
||||
if err := hashIndex.Put(hash[:], serializedHeight[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add the block height to hash mapping to the index.
|
||||
heightIndex := meta.Bucket(heightIndexBucketName)
|
||||
return heightIndex.Put(serializedHeight[:], hash[:])
|
||||
}
|
||||
|
||||
// dbFetchHeightByHash uses an existing database transaction to retrieve the
|
||||
// height for the provided hash from the index.
|
||||
func dbFetchHeightByHash(dbTx database.Tx, hash *daghash.Hash) (int32, error) {
|
||||
meta := dbTx.Metadata()
|
||||
hashIndex := meta.Bucket(hashIndexBucketName)
|
||||
serializedHeight := hashIndex.Get(hash[:])
|
||||
if serializedHeight == nil {
|
||||
str := fmt.Sprintf("block %s is not in the main chain", hash)
|
||||
return 0, errNotInDAG(str)
|
||||
}
|
||||
|
||||
return int32(byteOrder.Uint32(serializedHeight)), nil
|
||||
}
|
||||
|
||||
type dagState struct {
|
||||
TipHashes []*daghash.Hash
|
||||
LastFinalityPoint *daghash.Hash
|
||||
@@ -442,7 +296,7 @@ func dbPutDAGState(dbTx database.Tx, state *dagState) error {
|
||||
}
|
||||
|
||||
// createDAGState initializes both the database and the DAG state to the
|
||||
// genesis block. This includes creating the necessary buckets, so it
|
||||
// genesis block. This includes creating the necessary buckets, so it
|
||||
// must only be called on an uninitialized database.
|
||||
func (dag *BlockDAG) createDAGState() error {
|
||||
// Create the initial the database DAG state including creating the
|
||||
@@ -456,20 +310,6 @@ func (dag *BlockDAG) createDAGState() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create the bucket that houses the chain block hash to height
|
||||
// index.
|
||||
_, err = meta.CreateBucket(hashIndexBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create the bucket that houses the chain block height to hash
|
||||
// index.
|
||||
_, err = meta.CreateBucket(heightIndexBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create the buckets that house the utxo set, the utxo diffs, and their
|
||||
// version.
|
||||
_, err = meta.CreateBucket(utxoSetBucketName)
|
||||
@@ -482,6 +322,11 @@ func (dag *BlockDAG) createDAGState() error {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = meta.CreateBucket(reachabilityDataBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dbPutVersion(dbTx, utxoSetVersionKeyName,
|
||||
latestUTXOSetBucketVersion)
|
||||
if err != nil {
|
||||
@@ -497,6 +342,61 @@ func (dag *BlockDAG) createDAGState() error {
|
||||
if err := dbPutLocalSubnetworkID(dbTx, dag.subnetworkID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := meta.CreateBucketIfNotExists(idByHashIndexBucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := meta.CreateBucketIfNotExists(hashByIDIndexBucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) removeDAGState() error {
|
||||
err := dag.db.Update(func(dbTx database.Tx) error {
|
||||
meta := dbTx.Metadata()
|
||||
|
||||
err := meta.DeleteBucket(blockIndexBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = meta.DeleteBucket(utxoSetBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = meta.DeleteBucket(utxoDiffsBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = meta.DeleteBucket(reachabilityDataBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dbTx.Metadata().Delete(utxoSetVersionKeyName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = meta.DeleteBucket(subnetworksBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dbTx.Metadata().Delete(localSubnetworkKeyName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
@@ -514,7 +414,7 @@ func dbPutLocalSubnetworkID(dbTx database.Tx, subnetworkID *subnetworkid.Subnetw
|
||||
}
|
||||
|
||||
// initDAGState attempts to load and initialize the DAG state from the
// database. When the db does not yet contain any DAG state, both it and the
// DAG state are initialized to the genesis block.
func (dag *BlockDAG) initDAGState() error {
    // Determine the state of the DAG database. We may need to initialize
@@ -530,10 +430,10 @@ func (dag *BlockDAG) initDAGState() error {
        localSubnetworkID.SetBytes(localSubnetworkIDBytes)
    }
    if !localSubnetworkID.IsEqual(dag.subnetworkID) {
        return fmt.Errorf("Cannot start btcd with subnetwork ID %s because"+
        return errors.Errorf("Cannot start kaspad with subnetwork ID %s because"+
            " its database is already built with subnetwork ID %s. If you"+
            " want to switch to a new database, please reset the"+
            " database by starting btcd with --reset-db flag", dag.subnetworkID, localSubnetworkID)
            " database by starting kaspad with --reset-db flag", dag.subnetworkID, localSubnetworkID)
        }
    }
    return nil
@@ -544,7 +444,7 @@ func (dag *BlockDAG) initDAGState() error {

    if !initialized {
        // At this point the database has not already been initialized, so
        // initialize both it and the chain state to the genesis block.
        // initialize both it and the DAG state to the genesis block.
        return dag.createDAGState()
    }

@@ -562,7 +462,7 @@ func (dag *BlockDAG) initDAGState() error {
    }

    // Load all of the headers from the data for the known DAG
    // and construct the block index accordingly. Since the
    // number of nodes is already known, perform a single alloc
    // for them versus a whole bunch of little ones to reduce
    // pressure on the GC.
@@ -570,65 +470,51 @@ func (dag *BlockDAG) initDAGState() error {

    blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)

    // Determine how many blocks will be loaded into the index so we can
    // allocate the right amount.
    var blockCount int32
    var unprocessedBlockNodes []*blockNode
    cursor := blockIndexBucket.Cursor()
    for ok := cursor.First(); ok; ok = cursor.Next() {
        blockCount++
    }
    blockNodes := make([]blockNode, blockCount)

    var i int32
    var lastNode *blockNode
    cursor = blockIndexBucket.Cursor()
    for ok := cursor.First(); ok; ok = cursor.Next() {
        header, status, err := deserializeBlockRow(cursor.Value())
        node, err := dag.deserializeBlockNode(cursor.Value())
        if err != nil {
            return err
        }

        parents := newSet()
        if lastNode == nil {
            blockHash := header.BlockHash()
            if !blockHash.IsEqual(dag.dagParams.GenesisHash) {
        // Check to see if this node had been stored in the block DB
        // but not yet accepted. If so, add it to a slice to be processed later.
        if node.status == statusDataStored {
            unprocessedBlockNodes = append(unprocessedBlockNodes, node)
            continue
        }

        // If the node is known to be invalid add it as-is to the block
        // index and continue.
        if node.status.KnownInvalid() {
            dag.index.addNode(node)
            continue
        }

        if dag.blockCount == 0 {
            if !node.hash.IsEqual(dag.dagParams.GenesisHash) {
                return AssertError(fmt.Sprintf("initDAGState: Expected "+
                    "first entry in block index to be genesis block, "+
                    "found %s", blockHash))
                    "found %s", node.hash))
            }
        } else {
            for _, hash := range header.ParentHashes {
                parent := dag.index.LookupNode(hash)
                if parent == nil {
                    return AssertError(fmt.Sprintf("initDAGState: Could "+
                        "not find parent %s for block %s", hash, header.BlockHash()))
                }
                parents.add(parent)
            }
            if len(parents) == 0 {
            if len(node.parents) == 0 {
                return AssertError(fmt.Sprintf("initDAGState: Could "+
                    "not find any parent for block %s", header.BlockHash()))
                    "not find any parent for block %s", node.hash))
            }
        }

        // Initialize the block node for the block, connect it,
        // Add the node to its parents' children, connect it,
        // and add it to the block index.
        node := &blockNodes[i]
        initBlockNode(node, header, parents, dag.dagParams.K)
        node.status = status
        node.updateParentsChildren()
        dag.index.addNode(node)

        if blockStatus(status).KnownValid() {
            dag.blockCount++
        }

        lastNode = node
        i++
        dag.blockCount++
    }

    // Load all of the known UTXO entries and construct the full
    // UTXO set accordingly. Since the number of entries is already
    // known, perform a single alloc for them versus a whole bunch
    // of little ones to reduce pressure on the GC.
    log.Infof("Loading UTXO set...")
@@ -645,15 +531,15 @@ func (dag *BlockDAG) initDAGState() error {

    fullUTXOCollection := make(utxoCollection, utxoEntryCount)
    for ok := cursor.First(); ok; ok = cursor.Next() {
        // Deserialize the outPoint
        outPoint, err := deserializeOutPoint(cursor.Key())
        // Deserialize the outpoint
        outpoint, err := deserializeOutpoint(cursor.Key())
        if err != nil {
            // Ensure any deserialization errors are returned as database
            // corruption errors.
            if isDeserializeErr(err) {
                return database.Error{
                    ErrorCode:   database.ErrCorruption,
                    Description: fmt.Sprintf("corrupt outPoint: %s", err),
                    Description: fmt.Sprintf("corrupt outpoint: %s", err),
                }
            }

@@ -675,14 +561,23 @@ func (dag *BlockDAG) initDAGState() error {
            return err
        }

        fullUTXOCollection[*outPoint] = entry
        fullUTXOCollection[*outpoint] = entry
    }

    // Initialize the reachability store
    err = dag.reachabilityStore.init(dbTx)
    if err != nil {
        return err
    }

    // Apply the loaded utxoCollection to the virtual block.
    dag.virtual.utxoSet.utxoCollection = fullUTXOCollection
    dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
    if err != nil {
        return AssertError(fmt.Sprintf("Error loading UTXOSet: %s", err))
    }

    // Apply the stored tips to the virtual block.
    tips := newSet()
    tips := newBlockSet()
    for _, tipHash := range state.TipHashes {
        tip := dag.index.LookupNode(tipHash)
        if tip == nil {
@@ -695,33 +590,148 @@ func (dag *BlockDAG) initDAGState() error {

    // Set the last finality point
    dag.lastFinalityPoint = dag.index.LookupNode(state.LastFinalityPoint)
    dag.finalizeNodesBelowFinalityPoint(false)

    // Go over any unprocessed blockNodes and process them now.
    for _, node := range unprocessedBlockNodes {
        // Check to see if the block exists in the block DB. If it
        // doesn't, the database has certainly been corrupted.
        blockExists, err := dbTx.HasBlock(node.hash)
        if err != nil {
            return AssertError(fmt.Sprintf("initDAGState: HasBlock "+
                "for block %s failed: %s", node.hash, err))
        }
        if !blockExists {
            return AssertError(fmt.Sprintf("initDAGState: block %s "+
                "exists in block index but not in block db", node.hash))
        }

        // Attempt to accept the block.
        block, err := dbFetchBlockByNode(dbTx, node)
        if err != nil {
            return err
        }
        isOrphan, isDelayed, err := dag.ProcessBlock(block, BFWasStored)
        if err != nil {
            log.Warnf("Block %s, which was not previously processed, "+
                "failed to be accepted to the DAG: %s", node.hash, err)
            continue
        }

        // If the block is an orphan or is delayed then it couldn't have
        // possibly been written to the block index in the first place.
        if isOrphan {
            return AssertError(fmt.Sprintf("Block %s, which was not "+
                "previously processed, turned out to be an orphan, which is "+
                "impossible.", node.hash))
        }
        if isDelayed {
            return AssertError(fmt.Sprintf("Block %s, which was not "+
                "previously processed, turned out to be delayed, which is "+
                "impossible.", node.hash))
        }
    }

    return nil
    })
}

// deserializeBlockRow parses a value in the block index bucket into a block
// header and block status bitfield.
func deserializeBlockRow(blockRow []byte) (*wire.BlockHeader, blockStatus, error) {
// deserializeBlockNode parses a value in the block index bucket and returns a block node.
func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
    buffer := bytes.NewReader(blockRow)

    var header wire.BlockHeader
    err := header.Deserialize(buffer)
    if err != nil {
        return nil, statusNone, err
        return nil, err
    }

    node := &blockNode{
        hash:                 header.BlockHash(),
        version:              header.Version,
        bits:                 header.Bits,
        nonce:                header.Nonce,
        timestamp:            header.Timestamp.Unix(),
        hashMerkleRoot:       header.HashMerkleRoot,
        acceptedIDMerkleRoot: header.AcceptedIDMerkleRoot,
        utxoCommitment:       header.UTXOCommitment,
    }

    node.children = newBlockSet()
    node.parents = newBlockSet()

    for _, hash := range header.ParentHashes {
        parent := dag.index.LookupNode(hash)
        if parent == nil {
            return nil, AssertError(fmt.Sprintf("deserializeBlockNode: Could "+
                "not find parent %s for block %s", hash, header.BlockHash()))
        }
        node.parents.add(parent)
    }

    statusByte, err := buffer.ReadByte()
    if err != nil {
        return nil, statusNone, err
        return nil, err
    }
    node.status = blockStatus(statusByte)

    selectedParentHash := &daghash.Hash{}
    if _, err := io.ReadFull(buffer, selectedParentHash[:]); err != nil {
        return nil, err
    }

    return &header, blockStatus(statusByte), nil
    // Because genesis doesn't have a selected parent, it's serialized as the zero hash
    if !selectedParentHash.IsEqual(&daghash.ZeroHash) {
        node.selectedParent = dag.index.LookupNode(selectedParentHash)
    }

    node.blueScore, err = binaryserializer.Uint64(buffer, byteOrder)
    if err != nil {
        return nil, err
    }

    bluesCount, err := wire.ReadVarInt(buffer)
    if err != nil {
        return nil, err
    }

    node.blues = make([]*blockNode, bluesCount)
    for i := uint64(0); i < bluesCount; i++ {
        hash := &daghash.Hash{}
        if _, err := io.ReadFull(buffer, hash[:]); err != nil {
            return nil, err
        }
        node.blues[i] = dag.index.LookupNode(hash)
    }

    bluesAnticoneSizesLen, err := wire.ReadVarInt(buffer)
    if err != nil {
        return nil, err
    }

    node.bluesAnticoneSizes = make(map[*blockNode]dagconfig.KType)
    for i := uint64(0); i < bluesAnticoneSizesLen; i++ {
        hash := &daghash.Hash{}
        if _, err := io.ReadFull(buffer, hash[:]); err != nil {
            return nil, err
        }
        bluesAnticoneSize, err := binaryserializer.Uint8(buffer)
        if err != nil {
            return nil, err
        }
        blue := dag.index.LookupNode(hash)
        if blue == nil {
            return nil, errors.Errorf("couldn't find block with hash %s", hash)
        }
        node.bluesAnticoneSizes[blue] = dagconfig.KType(bluesAnticoneSize)
    }

    return node, nil
}

// dbFetchBlockByNode uses an existing database transaction to retrieve the
// raw block for the provided node, deserialize it, and return a util.Block
// with the height set.
// of it.
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error) {
    // Load the raw block bytes from the database.
    blockBytes, err := dbTx.FetchBlock(node.hash)
@@ -729,36 +739,83 @@ func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error)
        return nil, err
    }

    // Create the encapsulated block and set the height appropriately.
    // Create the encapsulated block.
    block, err := util.NewBlockFromBytes(blockBytes)
    if err != nil {
        return nil, err
    }
    block.SetHeight(node.height)

    return block, nil
}

// dbStoreBlockNode stores the block header and validation status to the block
// index bucket. This overwrites the current entry if there exists one.
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
    // Serialize block data to be stored.
func serializeBlockNode(node *blockNode) ([]byte, error) {
    w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
    header := node.Header()
    err := header.Serialize(w)
    if err != nil {
        return err
        return nil, err
    }

    err = w.WriteByte(byte(node.status))
    if err != nil {
        return nil, err
    }

    // Because genesis doesn't have a selected parent, it's serialized as the zero hash
    selectedParentHash := &daghash.ZeroHash
    if node.selectedParent != nil {
        selectedParentHash = node.selectedParent.hash
    }
    _, err = w.Write(selectedParentHash[:])
    if err != nil {
        return nil, err
    }

    err = binaryserializer.PutUint64(w, byteOrder, node.blueScore)
    if err != nil {
        return nil, err
    }

    err = wire.WriteVarInt(w, uint64(len(node.blues)))
    if err != nil {
        return nil, err
    }

    for _, blue := range node.blues {
        _, err = w.Write(blue.hash[:])
        if err != nil {
            return nil, err
        }
    }

    err = wire.WriteVarInt(w, uint64(len(node.bluesAnticoneSizes)))
    if err != nil {
        return nil, err
    }
    for blue, blueAnticoneSize := range node.bluesAnticoneSizes {
        _, err = w.Write(blue.hash[:])
        if err != nil {
            return nil, err
        }

        err = binaryserializer.PutUint8(w, uint8(blueAnticoneSize))
        if err != nil {
            return nil, err
        }
    }
    return w.Bytes(), nil
}
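
For orientation, the row format that serializeBlockNode writes and deserializeBlockNode reads back works out to the layout below. This is a sketch inferred from the calls in the diff above, not an authoritative spec; in particular, the endianness of blueScore depends on the package-level byteOrder value, which is not shown in this diff.

// Inferred block-index row layout (illustrative only):
//
//   [header]              full wire.BlockHeader serialization
//   [status]              1 byte
//   [selectedParent]      32-byte hash; the zero hash stands in for genesis
//   [blueScore]           8 bytes, written with byteOrder
//   [bluesCount]          varint
//   [blues]               bluesCount 32-byte hashes
//   [anticone sizes]      varint count, then (32-byte hash, 1-byte size) pairs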

// dbStoreBlockNode stores the block node data into the block
// index bucket. This overwrites the current entry if there exists one.
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
    serializedNode, err := serializeBlockNode(node)
    if err != nil {
        return err
    }
    value := w.Bytes()

    // Write block header data to block index bucket.
    blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
    key := blockIndexKey(node.hash, uint32(node.height))
    return blockIndexBucket.Put(key, value)
    key := BlockIndexKey(node.hash, node.blueScore)
    return blockIndexBucket.Put(key, serializedNode)
}

// dbStoreBlock stores the provided block in the database if it is not already
@@ -774,26 +831,29 @@ func dbStoreBlock(dbTx database.Tx, block *util.Block) error {
    return dbTx.StoreBlock(block)
}

// blockIndexKey generates the binary key for an entry in the block index
// bucket. The key is composed of the block height encoded as a big-endian
// 32-bit unsigned int followed by the 32 byte block hash.
func blockIndexKey(blockHash *daghash.Hash, blockHeight uint32) []byte {
    indexKey := make([]byte, daghash.HashSize+4)
    binary.BigEndian.PutUint32(indexKey[0:4], blockHeight)
    copy(indexKey[4:daghash.HashSize+4], blockHash[:])
// BlockIndexKey generates the binary key for an entry in the block index
// bucket. The key is composed of the block blue score encoded as a big-endian
// 64-bit unsigned int followed by the 32 byte block hash.
// The blue score component is important for iteration order.
func BlockIndexKey(blockHash *daghash.Hash, blueScore uint64) []byte {
    indexKey := make([]byte, daghash.HashSize+8)
    binary.BigEndian.PutUint64(indexKey[0:8], blueScore)
    copy(indexKey[8:daghash.HashSize+8], blockHash[:])
    return indexKey
}
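
Why big-endian matters here: byte-wise key comparison, which is the order a database cursor iterates in, then coincides with numeric blue-score order. A minimal standalone sketch of the same scheme (plain byte arrays stand in for daghash.Hash; this snippet is illustrative and not part of the diff):

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// indexKey mirrors BlockIndexKey above: an 8-byte big-endian blue score
// followed by a 32-byte hash.
func indexKey(blueScore uint64, hash [32]byte) []byte {
    key := make([]byte, 8+32)
    binary.BigEndian.PutUint64(key[:8], blueScore)
    copy(key[8:], hash[:])
    return key
}

func main() {
    var hashA, hashB [32]byte
    hashA[0], hashB[0] = 0xff, 0x00 // hash bytes only break ties
    // 5 < 17, so keyA sorts before keyB no matter what the hashes are.
    keyA := indexKey(5, hashA)
    keyB := indexKey(17, hashB)
    fmt.Println(bytes.Compare(keyA, keyB) < 0) // prints: true
}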

// BlockByHash returns the block from the main chain with the given hash with
// the appropriate chain height set.
func blockHashFromBlockIndexKey(BlockIndexKey []byte) (*daghash.Hash, error) {
    return daghash.NewHash(BlockIndexKey[8 : daghash.HashSize+8])
}

// BlockByHash returns the block from the DAG with the given hash.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
    // Lookup the block hash in block index and ensure it is in the best
    // chain.
    // Lookup the block hash in block index and ensure it is in the DAG
    node := dag.index.LookupNode(hash)
    if node == nil {
        str := fmt.Sprintf("block %s is not in the main chain", hash)
        str := fmt.Sprintf("block %s is not in the DAG", hash)
        return nil, errNotInDAG(str)
    }

@@ -806,3 +866,49 @@ func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
    })
    return block, err
}

// BlockHashesFrom returns a slice of block hashes starting from lowHash,
// ordered by blueScore. If lowHash is nil then the genesis block is used.
//
// This method MUST be called with the DAG lock held.
func (dag *BlockDAG) BlockHashesFrom(lowHash *daghash.Hash, limit int) ([]*daghash.Hash, error) {
    blockHashes := make([]*daghash.Hash, 0, limit)
    if lowHash == nil {
        lowHash = dag.genesis.hash

        // If we're starting from the beginning we should include the
        // genesis hash in the result
        blockHashes = append(blockHashes, dag.genesis.hash)
    }
    if !dag.IsInDAG(lowHash) {
        return nil, errors.Errorf("block %s not found", lowHash)
    }
    blueScore, err := dag.BlueScoreByBlockHash(lowHash)
    if err != nil {
        return nil, err
    }

    err = dag.index.db.View(func(dbTx database.Tx) error {
        blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
        lowKey := BlockIndexKey(lowHash, blueScore)

        cursor := blockIndexBucket.Cursor()
        cursor.Seek(lowKey)
        for ok := cursor.Next(); ok; ok = cursor.Next() {
            key := cursor.Key()
            blockHash, err := blockHashFromBlockIndexKey(key)
            if err != nil {
                return err
            }
            blockHashes = append(blockHashes, blockHash)
            if len(blockHashes) == limit {
                break
            }
        }
        return nil
    })
    if err != nil {
        return nil, err
    }
    return blockHashes, nil
}
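
One detail worth noting: the cursor is positioned with Seek(lowKey) and the loop advances with Next() before reading, so lowHash itself is excluded from the results; it only appears via the explicit append when lowHash is nil (the genesis case). A hedged usage sketch, assuming locking is the caller's responsibility as the comment above requires (the lock field name is an assumption, not shown in this diff):

dag.dagLock.RLock()
hashes, err := dag.BlockHashesFrom(nil, 100) // genesis plus up to 99 successors, by blue score
dag.dagLock.RUnlock()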

@@ -6,12 +6,12 @@ package blockdag

import (
    "bytes"
    "errors"
    "github.com/pkg/errors"
    "reflect"
    "testing"

    "github.com/daglabs/btcd/dagconfig/daghash"
    "github.com/daglabs/btcd/database"
    "github.com/kaspanet/kaspad/database"
    "github.com/kaspanet/kaspad/util/daghash"
)

// TestErrNotInDAG ensures the functions related to errNotInDAG work
@@ -46,27 +46,23 @@ func TestUtxoSerialization(t *testing.T) {
        entry      *UTXOEntry
        serialized []byte
    }{
        // From tx in main blockchain:
        // b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
        {
            name: "height 1, coinbase",
            name: "blue score 1, coinbase",
            entry: &UTXOEntry{
                amount: 5000000000,
                pkScript: hexToBytes("410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
                blockHeight: 1,
                packedFlags: tfBlockReward,
                amount:         5000000000,
                scriptPubKey:   hexToBytes("410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
                blockBlueScore: 1,
                packedFlags:    tfCoinbase,
            },
            serialized: hexToBytes("03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"),
        },
        // From tx in main blockchain:
        // 8131ffb0a2c945ecaf9b9063e59558784f9c3a74741ce6ae2a18d0571dac15bb:1
        {
            name: "height 100001, not coinbase",
            name: "blue score 100001, not coinbase",
            entry: &UTXOEntry{
                amount: 1000000,
                pkScript: hexToBytes("76a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
                blockHeight: 100001,
                packedFlags: 0,
                amount:         1000000,
                scriptPubKey:   hexToBytes("76a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
                blockBlueScore: 100001,
                packedFlags:    0,
            },
            serialized: hexToBytes("8b99420700ee8bd501094a7d5ca318da2506de35e1cb025ddc"),
        },
@@ -74,12 +70,7 @@ func TestUtxoSerialization(t *testing.T) {

    for i, test := range tests {
        // Ensure the utxo entry serializes to the expected value.
        gotBytes, err := serializeUTXOEntry(test.entry)
        if err != nil {
            t.Errorf("serializeUTXOEntry #%d (%s) unexpected "+
                "error: %v", i, test.name, err)
            continue
        }
        gotBytes := serializeUTXOEntry(test.entry)
        if !bytes.Equal(gotBytes, test.serialized) {
            t.Errorf("serializeUTXOEntry #%d (%s): mismatched "+
                "bytes - got %x, want %x", i, test.name,
@@ -104,22 +95,22 @@ func TestUtxoSerialization(t *testing.T) {
            continue
        }

        if !bytes.Equal(utxoEntry.PkScript(), test.entry.PkScript()) {
        if !bytes.Equal(utxoEntry.ScriptPubKey(), test.entry.ScriptPubKey()) {
            t.Errorf("deserializeUTXOEntry #%d (%s) mismatched "+
                "scripts: got %x, want %x", i, test.name,
                utxoEntry.PkScript(), test.entry.PkScript())
                utxoEntry.ScriptPubKey(), test.entry.ScriptPubKey())
            continue
        }
        if utxoEntry.BlockHeight() != test.entry.BlockHeight() {
        if utxoEntry.BlockBlueScore() != test.entry.BlockBlueScore() {
            t.Errorf("deserializeUTXOEntry #%d (%s) mismatched "+
                "block height: got %d, want %d", i, test.name,
                utxoEntry.BlockHeight(), test.entry.BlockHeight())
                "block blue score: got %d, want %d", i, test.name,
                utxoEntry.BlockBlueScore(), test.entry.BlockBlueScore())
            continue
        }
        if utxoEntry.IsBlockReward() != test.entry.IsBlockReward() {
        if utxoEntry.IsCoinbase() != test.entry.IsCoinbase() {
            t.Errorf("deserializeUTXOEntry #%d (%s) mismatched "+
                "coinbase flag: got %v, want %v", i, test.name,
                utxoEntry.IsBlockReward(), test.entry.IsBlockReward())
                utxoEntry.IsCoinbase(), test.entry.IsCoinbase())
            continue
        }
    }
@@ -257,15 +248,28 @@ func TestDAGStateDeserializeErrors(t *testing.T) {
                test.name, err, test.errType)
            continue
        }
        if derr, ok := err.(database.Error); ok {
        var dbErr database.Error
        if ok := errors.As(err, &dbErr); ok {
            tderr := test.errType.(database.Error)
            if derr.ErrorCode != tderr.ErrorCode {
            if dbErr.ErrorCode != tderr.ErrorCode {
                t.Errorf("deserializeDAGState (%s): "+
                    "wrong error code got: %v, want: %v",
                    test.name, derr.ErrorCode,
                    test.name, dbErr.ErrorCode,
                    tderr.ErrorCode)
                continue
            }
        }
    }
}

// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it panics in case of an error since it will only (and must only) be
// called with hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
    hash, err := daghash.NewHashFromStr(hexStr)
    if err != nil {
        panic(err)
    }
    return hash
}

73  blockdag/delayedblockheap.go  Normal file
@@ -0,0 +1,73 @@
package blockdag

import (
    "container/heap"
)

type baseDelayedBlocksHeap []*delayedBlock

func (h baseDelayedBlocksHeap) Len() int {
    return len(h)
}
func (h baseDelayedBlocksHeap) Swap(i, j int) {
    h[i], h[j] = h[j], h[i]
}

func (h *baseDelayedBlocksHeap) Push(x interface{}) {
    *h = append(*h, x.(*delayedBlock))
}

func (h *baseDelayedBlocksHeap) Pop() interface{} {
    oldHeap := *h
    oldLength := len(oldHeap)
    popped := oldHeap[oldLength-1]
    *h = oldHeap[0 : oldLength-1]
    return popped
}

func (h baseDelayedBlocksHeap) peek() interface{} {
    if h.Len() > 0 {
        return h[h.Len()-1]
    }
    return nil
}

func (h baseDelayedBlocksHeap) Less(i, j int) bool {
    return h[j].processTime.After(h[i].processTime)
}

type delayedBlocksHeap struct {
    baseDelayedBlocksHeap *baseDelayedBlocksHeap
    impl                  heap.Interface
}

// newDelayedBlocksHeap initializes and returns a new delayedBlocksHeap
func newDelayedBlocksHeap() delayedBlocksHeap {
    baseHeap := &baseDelayedBlocksHeap{}
    h := delayedBlocksHeap{impl: baseHeap, baseDelayedBlocksHeap: baseHeap}
    heap.Init(h.impl)
    return h
}

// pop removes the block with the earliest processTime from this heap and returns it
func (dbh delayedBlocksHeap) pop() *delayedBlock {
    return heap.Pop(dbh.impl).(*delayedBlock)
}

// Push pushes the block onto the heap
func (dbh delayedBlocksHeap) Push(block *delayedBlock) {
    heap.Push(dbh.impl, block)
}

// Len returns the length of this heap
func (dbh delayedBlocksHeap) Len() int {
    return dbh.impl.Len()
}

// peek returns the topmost element in the queue without popping it
func (dbh delayedBlocksHeap) peek() *delayedBlock {
    if dbh.baseDelayedBlocksHeap.peek() == nil {
        return nil
    }
    return dbh.baseDelayedBlocksHeap.peek().(*delayedBlock)
}
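
The pattern above is a standard container/heap min-heap keyed on processTime: Less orders by earliest process time, so pop always yields the delayed block whose delay expires soonest. A self-contained sketch of the same pattern (delayedItem is a stand-in for kaspad's delayedBlock, which is defined elsewhere in the package):

package main

import (
    "container/heap"
    "fmt"
    "time"
)

type delayedItem struct{ processTime time.Time }

type itemHeap []*delayedItem

func (h itemHeap) Len() int           { return len(h) }
func (h itemHeap) Less(i, j int) bool { return h[i].processTime.Before(h[j].processTime) }
func (h itemHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *itemHeap) Push(x interface{}) { *h = append(*h, x.(*delayedItem)) }
func (h *itemHeap) Pop() interface{} {
    old := *h
    popped := old[len(old)-1]
    *h = old[:len(old)-1]
    return popped
}

func main() {
    h := &itemHeap{}
    heap.Init(h)
    now := time.Now()
    heap.Push(h, &delayedItem{processTime: now.Add(3 * time.Second)})
    heap.Push(h, &delayedItem{processTime: now.Add(1 * time.Second)})
    earliest := heap.Pop(h).(*delayedItem)
    fmt.Println(earliest.processTime.Sub(now)) // ~1s: earliest deadline comes out first
}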

@@ -8,162 +8,45 @@ import (
    "math/big"
    "time"

    "github.com/daglabs/btcd/util"
    "github.com/kaspanet/kaspad/util"
)

// calcEasiestDifficulty calculates the easiest possible difficulty that a block
// can have given starting difficulty bits and a duration. It is mainly used to
// verify that claimed proof of work by a block is sane as compared to a
// known good checkpoint.
func (dag *BlockDAG) calcEasiestDifficulty(bits uint32, duration time.Duration) uint32 {
    // Convert types used in the calculations below.
    durationVal := int64(duration / time.Second)
    adjustmentFactor := big.NewInt(dag.dagParams.RetargetAdjustmentFactor)

    // The test network rules allow minimum difficulty blocks after more
    // than twice the desired amount of time needed to generate a block has
    // elapsed.
    if dag.dagParams.ReduceMinDifficulty {
        reductionTime := int64(dag.dagParams.MinDiffReductionTime /
            time.Second)
        if durationVal > reductionTime {
            return dag.dagParams.PowLimitBits
        }
    }

    // Since easier difficulty equates to higher numbers, the easiest
    // difficulty for a given duration is the largest value possible given
    // the number of retargets for the duration and starting difficulty
    // multiplied by the max adjustment factor.
    newTarget := util.CompactToBig(bits)
    for durationVal > 0 && newTarget.Cmp(dag.dagParams.PowLimit) < 0 {
        newTarget.Mul(newTarget, adjustmentFactor)
        durationVal -= dag.maxRetargetTimespan
    }

    // Limit new value to the proof of work limit.
    if newTarget.Cmp(dag.dagParams.PowLimit) > 0 {
        newTarget.Set(dag.dagParams.PowLimit)
    }

    return util.BigToCompact(newTarget)
}

// findPrevTestNetDifficulty returns the difficulty of the previous block which
// did not have the special testnet minimum difficulty rule applied.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) findPrevTestNetDifficulty(startNode *blockNode) uint32 {
    // Search backwards through the chain for the last block without
    // the special rule applied.
    iterNode := startNode
    for iterNode != nil && iterNode.height%dag.blocksPerRetarget != 0 &&
        iterNode.bits == dag.dagParams.PowLimitBits {

        iterNode = iterNode.selectedParent
    }

    // Return the found difficulty or the minimum difficulty if no
    // appropriate block was found.
    lastBits := dag.dagParams.PowLimitBits
    if iterNode != nil {
        lastBits = iterNode.bits
    }
    return lastBits
}

// calcNextRequiredDifficulty calculates the required difficulty for the block
// after the passed previous block node based on the difficulty retarget rules.
// This function differs from the exported CalcNextRequiredDifficulty in that
// the exported version uses the current best chain as the previous block node
// while this function accepts any block node.
func (dag *BlockDAG) calcNextRequiredDifficulty(bluestParent *blockNode, newBlockTime time.Time) (uint32, error) {
// requiredDifficulty calculates the required difficulty for a
// block given its bluest parent.
func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime time.Time) uint32 {
    // Genesis block.
    if bluestParent == nil {
        return dag.dagParams.PowLimitBits, nil
    if bluestParent == nil || bluestParent.blueScore < dag.difficultyAdjustmentWindowSize+1 {
        return dag.powMaxBits
    }

    // Return the previous block's difficulty requirements if this block
    // is not at a difficulty retarget interval.
    if (bluestParent.height+1)%dag.blocksPerRetarget != 0 {
        // For networks that support it, allow special reduction of the
        // required difficulty once too much time has elapsed without
        // mining a block.
        if dag.dagParams.ReduceMinDifficulty {
            // Return minimum difficulty when more than the desired
            // amount of time has elapsed without mining a block.
            reductionTime := int64(dag.dagParams.MinDiffReductionTime /
                time.Second)
            allowMinTime := bluestParent.timestamp + reductionTime
            if newBlockTime.Unix() > allowMinTime {
                return dag.dagParams.PowLimitBits, nil
            }
    // Fetch a window of dag.difficultyAdjustmentWindowSize + 1 so we can have dag.difficultyAdjustmentWindowSize block intervals
    timestampsWindow := blueBlockWindow(bluestParent, dag.difficultyAdjustmentWindowSize+1)
    windowMinTimestamp, windowMaxTimeStamp := timestampsWindow.minMaxTimestamps()

            // The block was mined within the desired timeframe, so
            // return the difficulty for the last block which did
            // not have the special minimum difficulty rule applied.
            return dag.findPrevTestNetDifficulty(bluestParent), nil
        }

        // For the main network (or any unrecognized networks), simply
        // return the previous block's difficulty requirements.
        return bluestParent.bits, nil
    }

    // Get the block node at the previous retarget (targetTimespan days
    // worth of blocks).
    firstNode := bluestParent.RelativeAncestor(dag.blocksPerRetarget - 1)
    if firstNode == nil {
        return 0, AssertError("unable to obtain previous retarget block")
    }

    // Limit the amount of adjustment that can occur to the previous
    // difficulty.
    actualTimespan := bluestParent.timestamp - firstNode.timestamp
    adjustedTimespan := actualTimespan
    if actualTimespan < dag.minRetargetTimespan {
        adjustedTimespan = dag.minRetargetTimespan
    } else if actualTimespan > dag.maxRetargetTimespan {
        adjustedTimespan = dag.maxRetargetTimespan
    }
    // Remove the last block from the window so as to calculate the average target of dag.difficultyAdjustmentWindowSize blocks
    targetsWindow := timestampsWindow[:dag.difficultyAdjustmentWindowSize]

    // Calculate new target difficulty as:
    // currentDifficulty * (adjustedTimespan / targetTimespan)
    // averageWindowTarget * ((windowMaxTimestamp - windowMinTimestamp) / (targetTimePerBlock * windowSize))
    // The result uses integer division which means it will be slightly
    // rounded down. Bitcoind also uses integer division to calculate this
    // result.
    oldTarget := util.CompactToBig(bluestParent.bits)
    newTarget := new(big.Int).Mul(oldTarget, big.NewInt(adjustedTimespan))
    targetTimeSpan := int64(dag.dagParams.TargetTimespan / time.Second)
    newTarget.Div(newTarget, big.NewInt(targetTimeSpan))

    // Limit new value to the proof of work limit.
    if newTarget.Cmp(dag.dagParams.PowLimit) > 0 {
        newTarget.Set(dag.dagParams.PowLimit)
    // rounded down.
    newTarget := targetsWindow.averageTarget()
    newTarget.
        Mul(newTarget, big.NewInt(windowMaxTimeStamp-windowMinTimestamp)).
        Div(newTarget, big.NewInt(dag.targetTimePerBlock)).
        Div(newTarget, big.NewInt(int64(dag.difficultyAdjustmentWindowSize)))
    if newTarget.Cmp(dag.dagParams.PowMax) > 0 {
        return dag.powMaxBits
    }

    // Log new target difficulty and return it. The new target logging is
    // intentionally converting the bits back to a number instead of using
    // newTarget since conversion to the compact representation loses
    // precision.
    newTargetBits := util.BigToCompact(newTarget)
    log.Debugf("Difficulty retarget at block height %d", bluestParent.height+1)
    log.Debugf("Old target %08x (%064x)", bluestParent.bits, oldTarget)
    log.Debugf("New target %08x (%064x)", newTargetBits, util.CompactToBig(newTargetBits))
    log.Debugf("Actual timespan %s, adjusted timespan %s, target timespan %s",
        time.Duration(actualTimespan)*time.Second,
        time.Duration(adjustedTimespan)*time.Second,
        dag.dagParams.TargetTimespan)

    return newTargetBits, nil
    return newTargetBits
}

// CalcNextRequiredDifficulty calculates the required difficulty for the block
// after the end of the current best chain based on the difficulty retarget
// rules.
// NextRequiredDifficulty calculates the required difficulty for a block that will
// be built on top of the current tips.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, error) {
    difficulty, err := dag.calcNextRequiredDifficulty(dag.selectedTip(), timestamp)
    return difficulty, err
func (dag *BlockDAG) NextRequiredDifficulty(timestamp time.Time) uint32 {
    difficulty := dag.requiredDifficulty(dag.virtual.parents.bluest(), timestamp)
    return difficulty
}
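
To make the window-based retarget above concrete, here is a standalone arithmetic sketch. All numbers are illustrative assumptions, not kaspad parameters; it only demonstrates the newTarget = averageTarget * (windowMaxTimestamp - windowMinTimestamp) / (targetTimePerBlock * windowSize) computation with integer big.Int division.

package main

import (
    "fmt"
    "math/big"
)

func main() {
    averageTarget := big.NewInt(1000000) // stand-in for targetsWindow.averageTarget()
    windowSpan := int64(2940)            // windowMaxTimestamp - windowMinTimestamp, in seconds
    targetTimePerBlock := int64(10)      // desired seconds per block
    windowSize := int64(264)             // difficultyAdjustmentWindowSize

    newTarget := new(big.Int).Mul(averageTarget, big.NewInt(windowSpan))
    newTarget.Div(newTarget, big.NewInt(targetTimePerBlock))
    newTarget.Div(newTarget, big.NewInt(windowSize))

    // 2940 / (10 * 264) is about 1.11: blocks arrived slower than desired,
    // so the target grows, which means the difficulty drops. Faster blocks
    // would shrink the target and raise the difficulty instead.
    fmt.Println(newTarget) // prints: 1113636
}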

@@ -5,10 +5,12 @@
package blockdag

import (
    "github.com/kaspanet/kaspad/dagconfig"
    "math/big"
    "testing"
    "time"

    "github.com/daglabs/btcd/util"
    "github.com/kaspanet/kaspad/util"
)

// TestBigToCompact ensures BigToCompact converts big integers to the expected
@@ -75,3 +77,132 @@ func TestCalcWork(t *testing.T) {
        }
    }
}

func TestDifficulty(t *testing.T) {
    params := dagconfig.SimnetParams
    params.K = 1
    params.DifficultyAdjustmentWindowSize = 264
    dag, teardownFunc, err := DAGSetup("TestDifficulty", Config{
        DAGParams: &params,
    })
    if err != nil {
        t.Fatalf("Failed to setup DAG instance: %v", err)
    }
    defer teardownFunc()

    zeroTime := time.Unix(0, 0)
    addNode := func(parents blockSet, blockTime time.Time) *blockNode {
        bluestParent := parents.bluest()
        if blockTime == zeroTime {
            blockTime = time.Unix(bluestParent.timestamp+1, 0)
        }
        block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
        if err != nil {
            t.Fatalf("unexpected error in PrepareBlockForTest: %s", err)
        }
        block.Header.Timestamp = blockTime
        block.Header.Bits = dag.requiredDifficulty(bluestParent, blockTime)

        isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(block), BFNoPoWCheck)
        if err != nil {
            t.Fatalf("unexpected error in ProcessBlock: %s", err)
        }
        if isDelayed {
            t.Fatalf("block is too far in the future")
        }
        if isOrphan {
            t.Fatalf("block was unexpectedly orphan")
        }
        return dag.index.LookupNode(block.BlockHash())
    }
    tip := dag.genesis
    for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
        tip = addNode(blockSetFromSlice(tip), zeroTime)
        if tip.bits != dag.genesis.bits {
            t.Fatalf("As long as the bluest parent's blue score is less than the difficulty adjustment window size, the difficulty should be the same as genesis'")
        }
    }
    for i := uint64(0); i < dag.difficultyAdjustmentWindowSize+100; i++ {
        tip = addNode(blockSetFromSlice(tip), zeroTime)
        if tip.bits != dag.genesis.bits {
            t.Fatalf("As long as the block rate remains the same, the difficulty shouldn't change")
        }
    }
    nodeInThePast := addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
    if nodeInThePast.bits != tip.bits {
        t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block's bluest parent")
    }
    tip = nodeInThePast

    tip = addNode(blockSetFromSlice(tip), zeroTime)
    if tip.bits != nodeInThePast.bits {
        t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block's bluest parent")
    }
    tip = addNode(blockSetFromSlice(tip), zeroTime)
    if compareBits(tip.bits, nodeInThePast.bits) >= 0 {
        t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the block rate, so the difficulty should increase as well")
    }
    expectedBits := uint32(0x207f83df)
    if tip.bits != expectedBits {
        t.Errorf("tip.bits was expected to be %x but got %x", expectedBits, tip.bits)
    }

    // Increase block rate to increase difficulty
    for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
        tip = addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
        if compareBits(tip.bits, tip.parents.bluest().bits) > 0 {
            t.Fatalf("Because we're increasing the block rate, the difficulty can't decrease")
        }
    }

    // Add blocks until difficulty stabilizes
    lastBits := tip.bits
    sameBitsCount := uint64(0)
    for sameBitsCount < dag.difficultyAdjustmentWindowSize+1 {
        tip = addNode(blockSetFromSlice(tip), zeroTime)
        if tip.bits == lastBits {
            sameBitsCount++
        } else {
            lastBits = tip.bits
            sameBitsCount = 0
        }
    }
    slowNode := addNode(blockSetFromSlice(tip), time.Unix(tip.timestamp+2, 0))
    if slowNode.bits != tip.bits {
        t.Fatalf("The difficulty should only change when slowNode is in the past of a block's bluest parent")
    }

    tip = slowNode

    tip = addNode(blockSetFromSlice(tip), zeroTime)
    if tip.bits != slowNode.bits {
        t.Fatalf("The difficulty should only change when slowNode is in the past of a block's bluest parent")
    }
    tip = addNode(blockSetFromSlice(tip), zeroTime)
    if compareBits(tip.bits, slowNode.bits) <= 0 {
        t.Fatalf("tip.bits should be greater than slowNode.bits because slowNode decreased the block rate, so the difficulty should decrease as well")
    }

    splitNode := addNode(blockSetFromSlice(tip), zeroTime)
    tip = splitNode
    for i := 0; i < 100; i++ {
        tip = addNode(blockSetFromSlice(tip), zeroTime)
    }
    blueTip := tip

    redChainTip := splitNode
    for i := 0; i < 10; i++ {
        redChainTip = addNode(blockSetFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
    }
    tipWithRedPast := addNode(blockSetFromSlice(redChainTip, blueTip), zeroTime)
    tipWithoutRedPast := addNode(blockSetFromSlice(blueTip), zeroTime)
    if tipWithoutRedPast.bits != tipWithRedPast.bits {
        t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks shouldn't affect the difficulty")
    }
}

func compareBits(a uint32, b uint32) int {
    aTarget := util.CompactToBig(a)
    bTarget := util.CompactToBig(b)
    return aTarget.Cmp(bTarget)
}
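
For readers unfamiliar with the compact "bits" representation these tests compare: bits encodes a 256-bit target, and a numerically larger target means lower difficulty, which is why compareBits decodes both values instead of comparing the raw uint32s. A standalone sketch of the decoding (a simplified re-implementation of what util.CompactToBig does; it ignores the sign-bit handling of the full version):

package main

import (
    "fmt"
    "math/big"
)

// compactToBig decodes an nBits-style compact value: the high byte is a
// base-256 exponent and the low 23 bits are the mantissa.
func compactToBig(compact uint32) *big.Int {
    mantissa := compact & 0x007fffff
    exponent := uint(compact >> 24)
    if exponent <= 3 {
        return big.NewInt(int64(mantissa >> (8 * (3 - exponent))))
    }
    n := big.NewInt(int64(mantissa))
    return n.Lsh(n, 8*(exponent-3))
}

func main() {
    a := compactToBig(0x207f83df) // the expectedBits value from TestDifficulty
    b := compactToBig(0x207fffff)
    fmt.Println(a.Cmp(b)) // prints: -1 (a is the smaller target, i.e. higher difficulty)
}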

@@ -1,35 +1,26 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package blockdag implements bitcoin block handling and chain selection rules.
Package blockdag implements kaspa block handling and DAG selection rules.

The bitcoin block handling and chain selection rules are an integral, and quite
likely the most important, part of bitcoin. Unfortunately, at the time of
this writing, these rules are also largely undocumented and had to be
ascertained from the bitcoind source code. At its core, bitcoin is a
distributed consensus of which blocks are valid and which ones will comprise the
main block chain (public ledger) that ultimately determines accepted
transactions, so it is extremely important that fully validating nodes agree on
all rules.
The kaspa block handling and DAG selection rules are an integral, and quite
likely the most important, part of kaspa. At its core, kaspa is a distributed
consensus of which blocks are valid and which ones will comprise the DAG
(public ledger) that ultimately determines accepted transactions, so it is
extremely important that fully validating nodes agree on all rules.

At a high level, this package provides support for inserting new blocks into
the block chain according to the aforementioned rules. It includes
functionality such as rejecting duplicate blocks, ensuring blocks and
transactions follow all rules, orphan handling, and best chain selection along
with reorganization.
the block DAG according to the aforementioned rules. It includes functionality
such as rejecting duplicate blocks, ensuring blocks and transactions follow all
rules, orphan handling, and DAG order along with reorganization.

Since this package does not deal with other bitcoin specifics such as network
communication or wallets, it provides a notification system which gives the
caller a high level of flexibility in how they want to react to certain events
such as orphan blocks which need their parents requested and newly connected
main chain blocks which might result in wallet updates.
Since this package does not deal with other kaspa specifics such as network
communication, it provides a notification system which gives the caller a high
level of flexibility in how they want to react to certain events such as orphan
blocks which need their parents requested and newly connected DAG blocks.

Bitcoin Chain Processing Overview
Kaspa DAG Processing Overview

Before a block is allowed into the block chain, it must go through an intensive
Before a block is allowed into the block DAG, it must go through an intensive
series of validation rules. The following list serves as a general outline of
those rules to provide some intuition into what is going on under the hood, but
is by no means exhaustive:

@@ -37,26 +28,19 @@ is by no means exhaustive:
 - Perform a series of sanity checks on the block and its transactions such as
   verifying proof of work, timestamps, number and character of transactions,
   transaction amounts, script complexity, and merkle root calculations
 - Compare the block against predetermined checkpoints for expected timestamps
   and difficulty based on elapsed time since the checkpoint
 - Save the most recent orphan blocks for a limited time in case their parent
   blocks become available
 - Stop processing if the block is an orphan as the rest of the processing
   depends on the block's position within the block chain
   depends on the block's position within the block DAG
 - Perform a series of more thorough checks that depend on the block's position
   within the block chain such as verifying block difficulties adhere to
   within the block DAG such as verifying block difficulties adhere to
   difficulty retarget rules, timestamps are after the median of the last
   several blocks, all transactions are finalized, checkpoint blocks match, and
   several blocks, all transactions are finalized, and
   block versions are in line with the previous blocks
 - Determine how the block fits into the chain and perform different actions
   accordingly in order to ensure any side chains which have higher difficulty
   than the main chain become the new main chain
 - When a block is being connected to the main chain (either through
   reorganization of a side chain to the main chain or just extending the
   main chain), perform further checks on the block's transactions such as
   verifying transaction duplicates, script complexity for the combination of
   connected scripts, coinbase maturity, double spends, and connected
   transaction values
 - When a block is being connected to the DAG, perform further checks on the
   block's transactions such as verifying transaction duplicates, script
   complexity for the combination of connected scripts, coinbase maturity,
   double spends, and connected transaction values
 - Run the transaction scripts to verify the spender is allowed to spend the
   coins
 - Insert the block into the block database
@@ -64,18 +48,10 @@ is by no means exhaustive:
Errors

Errors returned by this package are either the raw errors provided by underlying
calls or of type blockchain.RuleError. This allows the caller to differentiate
calls or of type blockdag.RuleError. This allows the caller to differentiate
between unexpected errors, such as database errors, versus errors due to rule
violations through type assertions. In addition, callers can programmatically
determine the specific rule violation by examining the ErrorCode field of the
type asserted blockchain.RuleError.

Bitcoin Improvement Proposals

This package includes spec changes outlined by the following BIPs:

 BIP0016 (https://en.bitcoin.it/wiki/BIP_0016)
 BIP0030 (https://en.bitcoin.it/wiki/BIP_0030)
 BIP0034 (https://en.bitcoin.it/wiki/BIP_0034)
type asserted blockdag.RuleError.
*/
package blockdag

@@ -37,9 +37,9 @@ const (
    // exists.
    ErrDuplicateBlock ErrorCode = iota

    // ErrBlockTooBig indicates the serialized block size exceeds the
    // maximum allowed size.
    ErrBlockTooBig
    // ErrBlockMassTooHigh indicates the mass of a block exceeds the maximum
    // allowed limits.
    ErrBlockMassTooHigh

    // ErrBlockVersionTooOld indicates the block version is too old and is
    // no longer accepted since the majority of the network has upgraded
@@ -47,13 +47,12 @@ const (
    ErrBlockVersionTooOld

    // ErrInvalidTime indicates the time in the passed block has a precision
    // that is more than one second. The chain consensus rules require
    // that is more than one second. The DAG consensus rules require
    // timestamps to have a maximum precision of one second.
    ErrInvalidTime

    // ErrTimeTooOld indicates the time is either before the median time of
    // the last several blocks per the chain consensus rules or prior to the
    // most recent checkpoint.
    // the last several blocks per the DAG consensus rules.
    ErrTimeTooOld

    // ErrTimeTooNew indicates the time is too far in the future as compared
@@ -67,7 +66,7 @@ const (
    ErrWrongParentsOrder

    // ErrDifficultyTooLow indicates the difficulty for the block is lower
    // than the difficulty required by the most recent checkpoint.
    // than the difficulty required.
    ErrDifficultyTooLow

    // ErrUnexpectedDifficulty indicates specified bits do not align with
@@ -84,30 +83,26 @@ const (
    // the expected value.
    ErrBadMerkleRoot

    // ErrBadCheckpoint indicates a block that is expected to be at a
    // checkpoint height does not match the expected one.
    ErrBadCheckpoint
    // ErrBadUTXOCommitment indicates the calculated UTXO commitment does not match
    // the expected value.
    ErrBadUTXOCommitment

    // ErrForkTooOld indicates a block is attempting to fork the block chain
    // before the most recent checkpoint.
    ErrForkTooOld

    // ErrCheckpointTimeTooOld indicates a block has a timestamp before the
    // most recent checkpoint.
    ErrCheckpointTimeTooOld
    // ErrFinalityPointTimeTooOld indicates a block has a timestamp before the
    // last finality point.
    ErrFinalityPointTimeTooOld

    // ErrNoTransactions indicates the block does not have at least one
    // transaction. A valid block must have at least the coinbase
    // transaction.
    ErrNoTransactions

    // ErrNoTxInputs indicates a transaction does not have any inputs. A
    // valid transaction must have at least one input.
    ErrNoTxInputs

    // ErrTxTooBig indicates a transaction exceeds the maximum allowed size
    // when serialized.
    ErrTxTooBig
    // ErrTxMassTooHigh indicates the mass of a transaction exceeds the maximum
    // allowed limits.
    ErrTxMassTooHigh

    // ErrBadTxOutValue indicates an output value for a transaction is
    // invalid in some way such as being out of range.
@@ -131,7 +126,7 @@ const (
    ErrUnfinalizedTx

    // ErrDuplicateTx indicates a block contains an identical transaction
    // (or at least two transactions which hash to the same value). A
    // valid block may only contain unique transactions.
    ErrDuplicateTx

@@ -141,7 +136,7 @@ const (
    ErrOverwriteTx

    // ErrImmatureSpend indicates a transaction is attempting to spend a
    // block reward that has not yet reached the required maturity.
    // coinbase that has not yet reached the required maturity.
    ErrImmatureSpend

    // ErrSpendTooHigh indicates a transaction is attempting to spend more
@@ -164,42 +159,20 @@ const (
    // coinbase transaction.
    ErrMultipleCoinbases

    // ErrBadCoinbaseScriptLen indicates the length of the signature script
    // for a coinbase transaction is not within the valid range.
    ErrBadCoinbaseScriptLen
    // ErrBadCoinbasePayloadLen indicates the length of the payload
    // for a coinbase transaction is too high.
    ErrBadCoinbasePayloadLen

    // ErrBadCoinbaseValue indicates the amount of a coinbase value does
    // not match the expected value of the subsidy plus the sum of all fees.
    ErrBadCoinbaseValue

    // ErrMissingCoinbaseHeight indicates the coinbase transaction for a
    // block does not start with the serialized block height as
    // required for version 2 and higher blocks.
    ErrMissingCoinbaseHeight

    // ErrBadCoinbaseHeight indicates the serialized block height in the
    // coinbase transaction for version 2 and higher blocks does not match
    // the expected value.
    ErrBadCoinbaseHeight

    // ErrSecondTxNotFeeTransaction indicates the second transaction in
    // a block is not a fee transaction.
    ErrSecondTxNotFeeTransaction

    // ErrBadFeeTransaction indicates that the block's fee transaction is not built as expected
    ErrBadFeeTransaction

    // ErrMultipleFeeTransactions indicates a block contains more than one
    // fee transaction.
    ErrMultipleFeeTransactions
    // ErrBadCoinbaseTransaction indicates that the block's coinbase transaction is not built as expected
    ErrBadCoinbaseTransaction

    // ErrScriptMalformed indicates a transaction script is malformed in
    // some way. For example, it might be longer than the maximum allowed
    // length or fail to parse.
    ErrScriptMalformed

    // ErrScriptValidation indicates the result of executing transaction
    // script failed. The error covers any failure when executing scripts
    // such as signature verification failures and execution past the end of
    // the stack.
    ErrScriptValidation
@@ -240,12 +213,20 @@ const (
    // ErrSubnetwork indicates that a block doesn't adhere to the subnetwork
    // registry rules
    ErrSubnetworkRegistry

    // ErrInvalidParentsRelation indicates that one of the parents of a block
    // is also an ancestor of another parent
    ErrInvalidParentsRelation

    // ErrDelayedBlockIsNotAllowed indicates that a block with a delayed timestamp was
    // submitted with BFDisallowDelay flag raised.
    ErrDelayedBlockIsNotAllowed
)

// Map of ErrorCode values back to their constant names for pretty printing.
|
||||
var errorCodeStrings = map[ErrorCode]string{
|
||||
ErrDuplicateBlock: "ErrDuplicateBlock",
|
||||
ErrBlockTooBig: "ErrBlockTooBig",
|
||||
ErrBlockMassTooHigh: "ErrBlockMassTooHigh",
|
||||
ErrBlockVersionTooOld: "ErrBlockVersionTooOld",
|
||||
ErrInvalidTime: "ErrInvalidTime",
|
||||
ErrTimeTooOld: "ErrTimeTooOld",
|
||||
@@ -256,12 +237,10 @@ var errorCodeStrings = map[ErrorCode]string{
|
||||
ErrUnexpectedDifficulty: "ErrUnexpectedDifficulty",
|
||||
ErrHighHash: "ErrHighHash",
|
||||
ErrBadMerkleRoot: "ErrBadMerkleRoot",
|
||||
ErrBadCheckpoint: "ErrBadCheckpoint",
|
||||
ErrForkTooOld: "ErrForkTooOld",
|
||||
ErrCheckpointTimeTooOld: "ErrCheckpointTimeTooOld",
|
||||
ErrFinalityPointTimeTooOld: "ErrFinalityPointTimeTooOld",
|
||||
ErrNoTransactions: "ErrNoTransactions",
|
||||
ErrNoTxInputs: "ErrNoTxInputs",
|
||||
ErrTxTooBig: "ErrTxTooBig",
|
||||
ErrTxMassTooHigh: "ErrTxMassTooHigh",
|
||||
ErrBadTxOutValue: "ErrBadTxOutValue",
|
||||
ErrDuplicateTxInputs: "ErrDuplicateTxInputs",
|
||||
ErrBadTxInput: "ErrBadTxInput",
|
||||
@@ -275,13 +254,8 @@ var errorCodeStrings = map[ErrorCode]string{
|
||||
ErrTooManySigOps: "ErrTooManySigOps",
|
||||
ErrFirstTxNotCoinbase: "ErrFirstTxNotCoinbase",
|
||||
ErrMultipleCoinbases: "ErrMultipleCoinbases",
|
||||
ErrBadCoinbaseScriptLen: "ErrBadCoinbaseScriptLen",
|
||||
ErrBadCoinbaseValue: "ErrBadCoinbaseValue",
|
||||
ErrMissingCoinbaseHeight: "ErrMissingCoinbaseHeight",
|
||||
ErrBadCoinbaseHeight: "ErrBadCoinbaseHeight",
|
||||
ErrSecondTxNotFeeTransaction: "ErrSecondTxNotFeeTransaction",
|
||||
ErrBadFeeTransaction: "ErrBadFeeTransaction",
|
||||
ErrMultipleFeeTransactions: "ErrMultipleFeeTransactions",
|
||||
ErrBadCoinbasePayloadLen: "ErrBadCoinbasePayloadLen",
|
||||
ErrBadCoinbaseTransaction: "ErrBadCoinbaseTransaction",
|
||||
ErrScriptMalformed: "ErrScriptMalformed",
|
||||
ErrScriptValidation: "ErrScriptValidation",
|
||||
ErrParentBlockUnknown: "ErrParentBlockUnknown",
|
||||
@@ -293,6 +267,8 @@ var errorCodeStrings = map[ErrorCode]string{
|
||||
ErrInvalidGas: "ErrInvalidGas",
|
||||
ErrInvalidPayload: "ErrInvalidPayload",
|
||||
ErrInvalidPayloadHash: "ErrInvalidPayloadHash",
|
||||
ErrInvalidParentsRelation: "ErrInvalidParentsRelation",
|
||||
ErrDelayedBlockIsNotAllowed: "ErrDelayedBlockIsNotAllowed",
|
||||
}
|
||||
|
||||
// String returns the ErrorCode as a human-readable name.
|
||||
@@ -303,9 +279,9 @@ func (e ErrorCode) String() string {
|
||||
return fmt.Sprintf("Unknown ErrorCode (%d)", int(e))
|
||||
}
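The map lookup that precedes this fallback sits outside the diff context. A minimal sketch of the stringer's conventional shape in btcd-derived code (an assumption; only the fallback branch appears in this compare):

```go
// String returns the ErrorCode as a human-readable name.
// Sketch only: the map-lookup half is not shown in this diff.
func (e ErrorCode) String() string {
	if s := errorCodeStrings[e]; s != "" {
		return s
	}
	return fmt.Sprintf("Unknown ErrorCode (%d)", int(e))
}
```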

// RuleError identifies a rule violation. It is used to indicate that
// processing of a block or transaction failed due to one of the many validation
// rules. The caller can use type assertions to determine if a failure was
// specifically due to a rule violation and access the ErrorCode field to
// ascertain the specific reason for the rule violation.
type RuleError struct {
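The struct body is cut off by the hunk boundary, so a caller-side sketch of the pattern the comment describes may help. It assumes RuleError carries the ErrorCode field referenced above; the tests later in this compare use the same errors.As idiom from github.com/pkg/errors:

```go
package main

import (
	"log"

	"github.com/kaspanet/kaspad/blockdag"
	"github.com/pkg/errors"
)

// handleProcessBlockError is a hypothetical helper that separates
// consensus-rule violations from unexpected failures.
func handleProcessBlockError(err error) {
	if err == nil {
		return
	}
	var ruleErr blockdag.RuleError
	if errors.As(err, &ruleErr) {
		// A rule violation; ErrorCode identifies the violated rule.
		log.Printf("rule violation: %v", ruleErr.ErrorCode)
		return
	}
	log.Printf("unexpected processing error: %v", err)
}
```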
@@ -16,7 +16,7 @@ func TestErrorCodeStringer(t *testing.T) {
want string
}{
{ErrDuplicateBlock, "ErrDuplicateBlock"},
{ErrBlockTooBig, "ErrBlockTooBig"},
{ErrBlockMassTooHigh, "ErrBlockMassTooHigh"},
{ErrBlockVersionTooOld, "ErrBlockVersionTooOld"},
{ErrInvalidTime, "ErrInvalidTime"},
{ErrTimeTooOld, "ErrTimeTooOld"},
@@ -27,16 +27,13 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrUnexpectedDifficulty, "ErrUnexpectedDifficulty"},
{ErrHighHash, "ErrHighHash"},
{ErrBadMerkleRoot, "ErrBadMerkleRoot"},
{ErrBadCheckpoint, "ErrBadCheckpoint"},
{ErrForkTooOld, "ErrForkTooOld"},
{ErrCheckpointTimeTooOld, "ErrCheckpointTimeTooOld"},
{ErrFinalityPointTimeTooOld, "ErrFinalityPointTimeTooOld"},
{ErrNoTransactions, "ErrNoTransactions"},
{ErrNoTxInputs, "ErrNoTxInputs"},
{ErrTxTooBig, "ErrTxTooBig"},
{ErrTxMassTooHigh, "ErrTxMassTooHigh"},
{ErrBadTxOutValue, "ErrBadTxOutValue"},
{ErrDuplicateTxInputs, "ErrDuplicateTxInputs"},
{ErrBadTxInput, "ErrBadTxInput"},
{ErrBadCheckpoint, "ErrBadCheckpoint"},
{ErrMissingTxOut, "ErrMissingTxOut"},
{ErrUnfinalizedTx, "ErrUnfinalizedTx"},
{ErrDuplicateTx, "ErrDuplicateTx"},
@@ -47,13 +44,8 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrTooManySigOps, "ErrTooManySigOps"},
{ErrFirstTxNotCoinbase, "ErrFirstTxNotCoinbase"},
{ErrMultipleCoinbases, "ErrMultipleCoinbases"},
{ErrBadCoinbaseScriptLen, "ErrBadCoinbaseScriptLen"},
{ErrBadCoinbaseValue, "ErrBadCoinbaseValue"},
{ErrMissingCoinbaseHeight, "ErrMissingCoinbaseHeight"},
{ErrBadCoinbaseHeight, "ErrBadCoinbaseHeight"},
{ErrSecondTxNotFeeTransaction, "ErrSecondTxNotFeeTransaction"},
{ErrBadFeeTransaction, "ErrBadFeeTransaction"},
{ErrMultipleFeeTransactions, "ErrMultipleFeeTransactions"},
{ErrBadCoinbasePayloadLen, "ErrBadCoinbasePayloadLen"},
{ErrBadCoinbaseTransaction, "ErrBadCoinbaseTransaction"},
{ErrScriptMalformed, "ErrScriptMalformed"},
{ErrScriptValidation, "ErrScriptValidation"},
{ErrParentBlockUnknown, "ErrParentBlockUnknown"},
@@ -65,6 +57,8 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrInvalidGas, "ErrInvalidGas"},
{ErrInvalidPayload, "ErrInvalidPayload"},
{ErrInvalidPayloadHash, "ErrInvalidPayloadHash"},
{ErrInvalidParentsRelation, "ErrInvalidParentsRelation"},
{ErrDelayedBlockIsNotAllowed, "ErrDelayedBlockIsNotAllowed"},
{0xffff, "Unknown ErrorCode (65535)"},
}
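The assertion loop that walks this table is outside the hunk. A typical shape for it, assumed from btcd-style stringer tests (the `in` field name is a guess; only `want` is visible above):

```go
for i, test := range tests {
	// in is assumed to be the table's ErrorCode field.
	result := test.in.String()
	if result != test.want {
		t.Errorf("String #%d\n got: %s want: %s",
			i, result, test.want)
		continue
	}
}
```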

@@ -1,71 +0,0 @@
// Copyright (c) 2014-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag_test

import (
"fmt"
"os"
"path/filepath"

"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
_ "github.com/daglabs/btcd/database/ffldb"
"github.com/daglabs/btcd/util"
)

// This example demonstrates how to create a new chain instance and use
// ProcessBlock to attempt to add a block to the chain. As the package
// overview documentation describes, this includes all of the Bitcoin consensus
// rules. This example intentionally attempts to insert a duplicate genesis
// block to illustrate how an invalid block is handled.
func ExampleBlockDAG_ProcessBlock() {
// Create a new database to store the accepted blocks into. Typically
// this would be opening an existing database and would not be deleting
// and creating a new database like this, but it is done here so this is
// a complete working example and does not leave temporary files lying
// around.
dbPath := filepath.Join(os.TempDir(), "exampleprocessblock")
_ = os.RemoveAll(dbPath)
db, err := database.Create("ffldb", dbPath, dagconfig.MainNetParams.Net)
if err != nil {
fmt.Printf("Failed to create database: %v\n", err)
return
}
defer os.RemoveAll(dbPath)
defer db.Close()

// Create a new BlockDAG instance using the underlying database for
// the main bitcoin network. This example does not demonstrate some
// of the other available configuration options such as specifying a
// notification callback and signature cache. Also, the caller would
// ordinarily keep a reference to the median time source and add time
// values obtained from other peers on the network so the local time is
// adjusted to be in agreement with other peers.
chain, err := blockdag.New(&blockdag.Config{
DB: db,
DAGParams: &dagconfig.MainNetParams,
TimeSource: blockdag.NewMedianTime(),
})
if err != nil {
fmt.Printf("Failed to create chain instance: %v\n", err)
return
}

// Process a block. For this example, we are going to intentionally
// cause an error by trying to process the genesis block which already
// exists.
genesisBlock := util.NewBlock(dagconfig.MainNetParams.GenesisBlock)
isOrphan, err := chain.ProcessBlock(genesisBlock,
blockdag.BFNone)
if err != nil {
fmt.Printf("Failed to process block: %v\n", err)
return
}
fmt.Printf("Block accepted. Is it an orphan?: %v", isOrphan)

// Output:
// Failed to process block: already have block 6477863f190fac902e556da4671c7537da4fe367022b1f00fa5270e0d073cc08
}
@@ -2,30 +2,32 @@ package blockdag_test

import (
"fmt"
"github.com/pkg/errors"
"math"
"testing"

"github.com/daglabs/btcd/util/subnetworkid"
"github.com/kaspanet/kaspad/util/subnetworkid"

"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/testtools"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/testtools"

"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/mining"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
)

// TestFinality checks that the finality mechanism works as expected.
// This is how the flow goes:
// 1) We build a chain of blockdag.FinalityInterval blocks and call its tip altChainTip.
// 2) We build another chain (let's call it mainChain) of 2 * blockdag.FinalityInterval
// 1) We build a chain of params.FinalityInterval blocks and call its tip altChainTip.
// 2) We build another chain (let's call it mainChain) of 2 * params.FinalityInterval
// blocks, which points to genesis, and then we check that the block in that
// chain with height of blockdag.FinalityInterval is marked as finality point (This is
// chain with height of params.FinalityInterval is marked as finality point (This is
// very predictable, because the blue score of each new block in a chain is the
// parents plus one).
// 3) We make a new child to the block with height (2 * blockdag.FinalityInterval - 1)
// 3) We make a new child to the block with height (2 * params.FinalityInterval - 1)
// in mainChain, and we check that connecting it to the DAG
// doesn't affect the last finality point.
// 4) We make a block that points to genesis, and check that it
@@ -35,8 +37,9 @@ import (
// gets rejected because it doesn't have the last finality point in
// its selected parent chain.
func TestFinality(t *testing.T) {
params := dagconfig.SimNetParams
params := dagconfig.SimnetParams
params.K = 1
params.FinalityInterval = 100
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
DAGParams: &params,
})
@@ -45,18 +48,22 @@ func TestFinality(t *testing.T) {
}
defer teardownFunc()
buildNodeToDag := func(parentHashes []*daghash.Hash) (*util.Block, error) {
msgBlock, err := mining.PrepareBlockForTest(dag, &params, parentHashes, nil, false, 1)
msgBlock, err := mining.PrepareBlockForTest(dag, &params, parentHashes, nil, false)
if err != nil {
return nil, err
}
block := util.NewBlock(msgBlock)

isOrphan, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
if err != nil {
return nil, err
}
if isDelayed {
return nil, errors.Errorf("ProcessBlock: block " +
"is too far in the future")
}
if isOrphan {
return nil, fmt.Errorf("ProcessBlock: unexpected returned orphan block")
return nil, errors.Errorf("ProcessBlock: unexpected returned orphan block")
}

return block, nil
@@ -65,8 +72,8 @@ func TestFinality(t *testing.T) {
genesis := util.NewBlock(params.GenesisBlock)
currentNode := genesis

// First we build a chain of blockdag.FinalityInterval blocks for future use
for i := 0; i < blockdag.FinalityInterval; i++ {
// First we build a chain of params.FinalityInterval blocks for future use
for i := uint64(0); i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -75,10 +82,10 @@ func TestFinality(t *testing.T) {

altChainTip := currentNode

// Now we build a new chain of 2 * blockdag.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * blockdag.FinalityInterval to be the last finality point
// Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * params.FinalityInterval to be the last finality point
currentNode = genesis
for i := 0; i < blockdag.FinalityInterval; i++ {
for i := uint64(0); i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -87,7 +94,7 @@ func TestFinality(t *testing.T) {

expectedFinalityPoint := currentNode

for i := 0; i < blockdag.FinalityInterval; i++ {
for i := uint64(0); i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -111,17 +118,32 @@ func TestFinality(t *testing.T) {

// Here we check that a block with lower blue score than the last finality
// point will get rejected
_, err = buildNodeToDag([]*daghash.Hash{genesis.Hash()})
fakeCoinbaseTx, err := dag.NextBlockCoinbaseTransaction(nil, nil)
if err != nil {
t.Errorf("NextBlockCoinbaseTransaction: %s", err)
}
merkleRoot := blockdag.BuildHashMerkleTreeStore([]*util.Tx{fakeCoinbaseTx}).Root()
beforeFinalityBlock := wire.NewMsgBlock(&wire.BlockHeader{
Version: 0x10000000,
ParentHashes: []*daghash.Hash{genesis.Hash()},
HashMerkleRoot: merkleRoot,
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
Timestamp: dag.SelectedTipHeader().Timestamp,
Bits: genesis.MsgBlock().Header.Bits,
})
beforeFinalityBlock.AddTransaction(fakeCoinbaseTx.MsgTx())
_, _, err = dag.ProcessBlock(util.NewBlock(beforeFinalityBlock), blockdag.BFNoPoWCheck)
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
rErr, ok := err.(blockdag.RuleError)
if ok {
if rErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
var ruleErr blockdag.RuleError
if errors.As(err, &ruleErr) {
if ruleErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
}
} else {
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", rErr)
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", err)
}

// Here we check that a block that doesn't have the last finality point in
@@ -130,21 +152,39 @@ func TestFinality(t *testing.T) {
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
rErr, ok = err.(blockdag.RuleError)
if ok {
if rErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
if errors.As(err, &ruleErr) {
if ruleErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
}
} else {
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", rErr)
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", ruleErr)
}
}
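Two mechanical migrations repeat through the hunks above and below: ProcessBlock grows a third return value, isDelayed, and error inspection moves from a direct type assertion to errors.As, which keeps working when a RuleError is wrapped. A condensed sketch of the new calling convention (helper name hypothetical; the calls mirror the test code in this compare):

```go
// processAndCheck processes one block and fails the test on any of the
// three outcomes the new API reports: error, delayed, or orphan.
func processAndCheck(t *testing.T, dag *blockdag.BlockDAG, block *util.Block) {
	isOrphan, isDelayed, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
	if err != nil {
		var ruleErr blockdag.RuleError
		if errors.As(err, &ruleErr) {
			t.Fatalf("rule violation: %v", ruleErr.ErrorCode)
		}
		t.Fatalf("unexpected error: %v", err)
	}
	if isDelayed {
		t.Fatalf("block is too far in the future")
	}
	if isOrphan {
		t.Fatalf("block got unexpectedly orphaned")
	}
}
```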
// TestFinalityInterval tests that the finality interval is
// smaller than wire.MaxInvPerMsg, so when a peer receives
// a getblocks message it should always be able to send
// all the necessary invs.
func TestFinalityInterval(t *testing.T) {
netParams := []*dagconfig.Params{
&dagconfig.MainnetParams,
&dagconfig.TestnetParams,
&dagconfig.DevnetParams,
&dagconfig.RegressionNetParams,
&dagconfig.SimnetParams,
}
for _, params := range netParams {
if params.FinalityInterval > wire.MaxInvPerMsg {
t.Errorf("FinalityInterval in %s should be lower or equal to wire.MaxInvPerMsg", params.Name)
}
}
}

// TestSubnetworkRegistry tests the full subnetwork registry flow
func TestSubnetworkRegistry(t *testing.T) {
params := dagconfig.SimNetParams
params := dagconfig.SimnetParams
params.K = 1
params.BlockRewardMaturity = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
DAGParams: &params,
})
@@ -168,8 +208,8 @@ func TestSubnetworkRegistry(t *testing.T) {
}

func TestChainedTransactions(t *testing.T) {
params := dagconfig.SimNetParams
params.BlockRewardMaturity = 1
params := dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", blockdag.Config{
DAGParams: &params,
@@ -179,92 +219,188 @@ func TestChainedTransactions(t *testing.T) {
}
defer teardownFunc()

block1, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{params.GenesisHash}, nil, false, 1)
block1, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{params.GenesisHash}, nil, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err := dag.ProcessBlock(util.NewBlock(block1), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(block1), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if isDelayed {
t.Fatalf("ProcessBlock: block1 " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: block1 got unexpectedly orphaned")
}
cbTx := block1.Transactions[0]

signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("Failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutPoint: wire.OutPoint{TxID: cbTx.TxID(), Index: 0},
SignatureScript: nil,
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
PkScript: blockdag.OpTrueScript,
Value: uint64(1),
ScriptPubKey: blockdag.OpTrueScript,
Value: uint64(1),
}
tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})

chainedTxIn := &wire.TxIn{
PreviousOutPoint: wire.OutPoint{TxID: tx.TxID(), Index: 0},
SignatureScript: nil,
PreviousOutpoint: wire.Outpoint{TxID: *tx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}

scriptPubKey, err := txscript.PayToScriptHashScript(blockdag.OpTrueScript)
if err != nil {
t.Fatalf("Failed to build public key script: %s", err)
}
chainedTxOut := &wire.TxOut{
PkScript: blockdag.OpTrueScript,
Value: uint64(1),
ScriptPubKey: scriptPubKey,
Value: uint64(1),
}
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})

block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true, 1)
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}

// Checks that dag.ProcessBlock fails because we don't allow a transaction to spend another transaction from the same block
isOrphan, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
if err == nil {
t.Errorf("ProcessBlock expected an error")
} else if rErr, ok := err.(blockdag.RuleError); ok {
if rErr.ErrorCode != blockdag.ErrMissingTxOut {
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, rErr.ErrorCode)
}
} else {
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
var ruleErr blockdag.RuleError
if ok := errors.As(err, &ruleErr); ok {
if ruleErr.ErrorCode != blockdag.ErrMissingTxOut {
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, ruleErr.ErrorCode)
}
} else {
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
}
}
if isDelayed {
t.Fatalf("ProcessBlock: block2 " +
"is too far in the future")
}
if isOrphan {
t.Errorf("ProcessBlock: block2 got unexpectedly orphaned")
}

nonChainedTxIn := &wire.TxIn{
PreviousOutPoint: wire.OutPoint{TxID: cbTx.TxID(), Index: 0},
SignatureScript: nil,
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
nonChainedTxOut := &wire.TxOut{
PkScript: blockdag.OpTrueScript,
Value: uint64(1),
ScriptPubKey: scriptPubKey,
Value: uint64(1),
}
nonChainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{nonChainedTxIn}, []*wire.TxOut{nonChainedTxOut})

block3, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false, 1)
block3, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}

// Checks that dag.ProcessBlock doesn't fail because all of its transactions are dependent on transactions from previous blocks
isOrphan, err = dag.ProcessBlock(util.NewBlock(block3), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(block3), blockdag.BFNoPoWCheck)
if err != nil {
t.Errorf("ProcessBlock: %v", err)
}
if isDelayed {
t.Fatalf("ProcessBlock: block3 " +
"is too far in the future")
}
if isOrphan {
t.Errorf("ProcessBlock: block3 got unexpectedly orphaned")
}
}
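TestChainedTransactions above also reflects the scripting change in this range: outputs stop paying OpTrueScript directly (the removed PkScript lines) and instead pay a P2SH wrapper (ScriptPubKey), so spending inputs must supply a matching signature script. A sketch of the pairing, using only the txscript helpers that appear in the diff:

```go
// opTrueP2SHPair builds the two halves used throughout these tests:
// a script-hash output committing to the always-true redeem script,
// and the signature script that satisfies it.
func opTrueP2SHPair() (sigScript, scriptPubKey []byte, err error) {
	sigScript, err = txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
	if err != nil {
		return nil, nil, err
	}
	scriptPubKey, err = txscript.PayToScriptHashScript(blockdag.OpTrueScript)
	if err != nil {
		return nil, nil, err
	}
	return sigScript, scriptPubKey, nil
}
```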
// TestOrderInDiffFromAcceptanceData makes sure that the order of transactions in
// dag.diffFromAcceptanceData is such that if txA is spent by txB then txA is processed
// before txB.
func TestOrderInDiffFromAcceptanceData(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = math.MaxUint8
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", blockdag.Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
dag.TestSetCoinbaseMaturity(0)

createBlock := func(previousBlock *util.Block) *util.Block {
// Prepare a transaction that spends the previous block's coinbase transaction
var txs []*wire.MsgTx
if !previousBlock.IsGenesis() {
previousCoinbaseTx := previousBlock.MsgBlock().Transactions[0]
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{TxID: *previousCoinbaseTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: blockdag.OpTrueScript,
Value: uint64(1),
}
txs = append(txs, wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}))
}

// Create the block
msgBlock, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{previousBlock.Hash()}, txs, false)
if err != nil {
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to prepare block: %s", err)
}

// Add the block to the DAG
newBlock := util.NewBlock(msgBlock)
isOrphan, isDelayed, err := dag.ProcessBlock(newBlock, blockdag.BFNoPoWCheck)
if err != nil {
t.Errorf("TestOrderInDiffFromAcceptanceData: %s", err)
}
if isDelayed {
t.Fatalf("TestOrderInDiffFromAcceptanceData: block is too far in the future")
}
if isOrphan {
t.Fatalf("TestOrderInDiffFromAcceptanceData: block got unexpectedly orphaned")
}
return newBlock
}

// Create two block chains starting from the genesis block. Every time a block is added
// one of the chains is selected as the selected parent chain while all the blocks in
// the other chain (and their transactions) get accepted by the new virtual. If the
// transactions in the non-selected parent chain get processed in the wrong order then
// diffFromAcceptanceData panics.
blockAmountPerChain := 100
chainATip := util.NewBlock(params.GenesisBlock)
chainBTip := chainATip
for i := 0; i < blockAmountPerChain; i++ {
chainATip = createBlock(chainATip)
chainBTip = createBlock(chainBTip)
}
}
// TestGasLimit tests the gas limit rules
func TestGasLimit(t *testing.T) {
params := dagconfig.SimNetParams
params := dagconfig.SimnetParams
params.K = 1
params.BlockRewardMaturity = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
DAGParams: &params,
})
@@ -273,130 +409,169 @@ func TestGasLimit(t *testing.T) {
}
defer teardownFunc()

// First we prepare a subnetwrok and a block with coinbase outputs to fund our tests
// First we prepare a subnetwork and a block with coinbase outputs to fund our tests
gasLimit := uint64(12345)
subnetworkID, err := testtools.RegisterSubnetworkForTest(dag, &params, gasLimit)
if err != nil {
t.Fatalf("could not register network: %s", err)
}

fundsBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), nil, false, 2)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err := dag.ProcessBlock(util.NewBlock(fundsBlock), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if isOrphan {
t.Fatalf("ProcessBlock: funds block got unexpectedly orphan")
cbTxs := []*wire.MsgTx{}
for i := 0; i < 4; i++ {
fundsBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), nil, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(fundsBlock), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if isDelayed {
t.Fatalf("ProcessBlock: the funds block " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: fundsBlock got unexpectedly orphan")
}

cbTxs = append(cbTxs, fundsBlock.Transactions[util.CoinbaseTransactionIndex])
}

cbTxValue := fundsBlock.Transactions[0].TxOut[0].Value
cbTxID := fundsBlock.Transactions[0].TxID()
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("Failed to build signature script: %s", err)
}

scriptPubKey, err := txscript.PayToScriptHashScript(blockdag.OpTrueScript)
if err != nil {
t.Fatalf("Failed to build public key script: %s", err)
}

tx1In := &wire.TxIn{
PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 0),
PreviousOutpoint: *wire.NewOutpoint(cbTxs[0].TxID(), 0),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
tx1Out := &wire.TxOut{
Value: cbTxValue,
PkScript: blockdag.OpTrueScript,
Value: cbTxs[0].TxOut[0].Value,
ScriptPubKey: scriptPubKey,
}
tx1 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx1In}, []*wire.TxOut{tx1Out}, subnetworkID, 10000, []byte{})

tx2In := &wire.TxIn{
PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 1),
PreviousOutpoint: *wire.NewOutpoint(cbTxs[1].TxID(), 0),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
tx2Out := &wire.TxOut{
Value: cbTxValue,
PkScript: blockdag.OpTrueScript,
Value: cbTxs[1].TxOut[0].Value,
ScriptPubKey: scriptPubKey,
}
tx2 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx2In}, []*wire.TxOut{tx2Out}, subnetworkID, 10000, []byte{})

// Here we check that we can't process a block that has transactions that exceed the gas limit
overLimitBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true, 1)
overLimitBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err = dag.ProcessBlock(util.NewBlock(overLimitBlock), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(overLimitBlock), blockdag.BFNoPoWCheck)
if err == nil {
t.Fatalf("ProcessBlock expected to have an error")
t.Fatalf("ProcessBlock expected to have an error in block that exceeds gas limit")
}
rErr, ok := err.(blockdag.RuleError)
if !ok {
var ruleErr blockdag.RuleError
if !errors.As(err, &ruleErr) {
t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
} else if ruleErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, ruleErr.ErrorCode)
}
if isDelayed {
t.Fatalf("ProcessBlock: overLimitBlock " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
}

overflowGasTxIn := &wire.TxIn{
PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 1),
PreviousOutpoint: *wire.NewOutpoint(cbTxs[2].TxID(), 0),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
overflowGasTxOut := &wire.TxOut{
Value: cbTxValue,
PkScript: blockdag.OpTrueScript,
Value: cbTxs[2].TxOut[0].Value,
ScriptPubKey: scriptPubKey,
}
overflowGasTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{overflowGasTxIn}, []*wire.TxOut{overflowGasTxOut},
subnetworkID, math.MaxUint64, []byte{})

// Here we check that we can't process a block whose transactions' gas overflows uint64
overflowGasBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true, 1)
overflowGasBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err = dag.ProcessBlock(util.NewBlock(overflowGasBlock), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(overflowGasBlock), blockdag.BFNoPoWCheck)
if err == nil {
t.Fatalf("ProcessBlock expected to have an error")
}
rErr, ok = err.(blockdag.RuleError)
if !ok {
if !errors.As(err, &ruleErr) {
t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
} else if ruleErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, ruleErr.ErrorCode)
}
if isOrphan {
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
}
if isDelayed {
t.Fatalf("ProcessBlock: overflowGasBlock " +
"is too far in the future")
}

nonExistentSubnetwork := &subnetworkid.SubnetworkID{123}
nonExistentSubnetworkTxIn := &wire.TxIn{
PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 0),
PreviousOutpoint: *wire.NewOutpoint(cbTxs[3].TxID(), 0),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
nonExistentSubnetworkTxOut := &wire.TxOut{
Value: cbTxValue,
PkScript: blockdag.OpTrueScript,
Value: cbTxs[3].TxOut[0].Value,
ScriptPubKey: scriptPubKey,
}
nonExistentSubnetworkTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{nonExistentSubnetworkTxIn},
[]*wire.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{})

nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true, 1)
nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}

// Here we check that we can't process a block with a transaction from a non-existent subnetwork
isOrphan, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
expectedErrStr := fmt.Sprintf("subnetwork '%s' not found", nonExistentSubnetwork)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
expectedErrStr := fmt.Sprintf("Error getting gas limit for subnetworkID '%s': subnetwork '%s' not found",
nonExistentSubnetwork, nonExistentSubnetwork)
if err.Error() != expectedErrStr {
t.Fatalf("ProcessBlock expected error %v but got %v", expectedErrStr, err)
t.Fatalf("ProcessBlock expected error \"%v\" but got \"%v\"", expectedErrStr, err)
}
if isDelayed {
t.Fatalf("ProcessBlock: nonExistentSubnetworkBlock " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: nonExistentSubnetworkBlock got unexpectedly orphan")
}

// Here we check that we can process a block with a transaction that doesn't exceed the gas limit
validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true, 1)
validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err = dag.ProcessBlock(util.NewBlock(validBlock), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(validBlock), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if isDelayed {
t.Fatalf("ProcessBlock: overLimitBlock " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
}
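The overflowGasTx case above probes the sum rather than a single value: two transactions whose combined gas wraps around uint64 must still be rejected. An illustrative overflow-safe accumulation check of the kind such validation needs (not the kaspad implementation):

```go
// addGas returns the new running total, or an error when the addition
// would wrap around uint64 or exceed the subnetwork's gas limit.
func addGas(total, txGas, gasLimit uint64) (uint64, error) {
	if total+txGas < total { // wrap-around means uint64 overflow
		return 0, errors.New("transaction gas overflows uint64")
	}
	total += txGas
	if total > gasLimit {
		return 0, errors.New("total gas exceeds subnetwork gas limit")
	}
	return total, nil
}
```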
blockdag/fees.go
@@ -1,219 +0,0 @@
package blockdag

import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"

"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/txsort"
"github.com/daglabs/btcd/wire"
)

// compactFeeData is a specialized data type to store a compact list of fees
// inside a block.
// Every transaction gets a single uint64 value, stored as a plain binary list.
// The transactions are ordered the same way they are ordered inside the block, making it easy
// to traverse every transaction in a block and extract its fee.
//
// compactFeeFactory is used to create such a list.
// compactFeeIterator is used to iterate over such a list.

type compactFeeData []byte

func (cfd compactFeeData) Len() int {
return len(cfd) / 8
}

type compactFeeFactory struct {
buffer *bytes.Buffer
writer *bufio.Writer
}

func newCompactFeeFactory() *compactFeeFactory {
buffer := bytes.NewBuffer([]byte{})
return &compactFeeFactory{
buffer: buffer,
writer: bufio.NewWriter(buffer),
}
}

func (cfw *compactFeeFactory) add(txFee uint64) error {
return binary.Write(cfw.writer, binary.LittleEndian, txFee)
}

func (cfw *compactFeeFactory) data() (compactFeeData, error) {
err := cfw.writer.Flush()

return compactFeeData(cfw.buffer.Bytes()), err
}

type compactFeeIterator struct {
reader io.Reader
}

func (cfd compactFeeData) iterator() *compactFeeIterator {
return &compactFeeIterator{
reader: bufio.NewReader(bytes.NewBuffer(cfd)),
}
}

func (cfr *compactFeeIterator) next() (uint64, error) {
var txFee uint64

err := binary.Read(cfr.reader, binary.LittleEndian, &txFee)

return txFee, err
}

// The following functions relate to storing and retrieving fee data from the database
var feeBucket = []byte("fees")

// getBluesFeeData returns the compactFeeData for all the node's blues,
// used to calculate the fees this blockNode needs to pay
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
bluesFeeData := make(map[daghash.Hash]compactFeeData)

dag.db.View(func(dbTx database.Tx) error {
for _, blueBlock := range node.blues {
feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
if err != nil {
return fmt.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
}

bluesFeeData[*blueBlock.hash] = feeData
}

return nil
})

return bluesFeeData, nil
}

func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
if err != nil {
return fmt.Errorf("Error creating or retrieving fee bucket: %s", err)
}

return feeBucket.Put(blockHash.CloneBytes(), feeData)
}

func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
feeBucket := dbTx.Metadata().Bucket(feeBucket)
if feeBucket == nil {
return nil, errors.New("Fee bucket does not exist")
}

feeData := feeBucket.Get(blockHash.CloneBytes())
if feeData == nil {
return nil, fmt.Errorf("No fee data found for block %s", blockHash)
}

return feeData, nil
}

// The following functions deal with building and validating the fee transaction

func (node *blockNode) validateFeeTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
if node.isGenesis() {
return nil
}
expectedFeeTransaction, err := node.buildFeeTransaction(dag, txsAcceptanceData)
if err != nil {
return err
}

if !expectedFeeTransaction.TxHash().IsEqual(block.FeeTransaction().Hash()) {
return ruleError(ErrBadFeeTransaction, "Fee transaction is not built as expected")
}

return nil
}

// buildFeeTransaction returns the expected fee transaction for the current block
func (node *blockNode) buildFeeTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData) (*wire.MsgTx, error) {
bluesFeeData, err := node.getBluesFeeData(dag)
if err != nil {
return nil, err
}

txIns := []*wire.TxIn{}
txOuts := []*wire.TxOut{}

for _, blue := range node.blues {
txIn, txOut, err := feeInputAndOutputForBlueBlock(blue, txsAcceptanceData, bluesFeeData)
if err != nil {
return nil, err
}
txIns = append(txIns, txIn)
if txOut != nil {
txOuts = append(txOuts, txOut)
}
}
feeTx := wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
return txsort.Sort(feeTx), nil
}

// feeInputAndOutputForBlueBlock calculates the input and output that should go into the fee transaction of blueBlock
// If blueBlock gets no fee - returns only txIn and nil for txOut
func feeInputAndOutputForBlueBlock(blueBlock *blockNode, txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
*wire.TxIn, *wire.TxOut, error) {

blockTxsAcceptanceData, ok := txsAcceptanceData[*blueBlock.hash]
if !ok {
return nil, nil, fmt.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
}
blockFeeData, ok := feeData[*blueBlock.hash]
if !ok {
return nil, nil, fmt.Errorf("No feeData for block %s", blueBlock.hash)
}

if len(blockTxsAcceptanceData) != blockFeeData.Len() {
return nil, nil, fmt.Errorf(
"length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
len(blockTxsAcceptanceData), blockFeeData.Len(), blueBlock.hash)
}

txIn := &wire.TxIn{
SignatureScript: []byte{},
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(*blueBlock.hash),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
}

totalFees := uint64(0)
feeIterator := blockFeeData.iterator()

for _, txAcceptanceData := range blockTxsAcceptanceData {
fee, err := feeIterator.next()
if err != nil {
return nil, nil, fmt.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
}
if txAcceptanceData.IsAccepted {
totalFees += fee
}
}

if totalFees == 0 {
return txIn, nil, nil
}

// the scriptPubKey for the fee is the same as the coinbase's first scriptPubKey
pkScript := blockTxsAcceptanceData[0].Tx.MsgTx().TxOut[0].PkScript

txOut := &wire.TxOut{
Value: totalFees,
PkScript: pkScript,
}

return txIn, txOut, nil
}
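As a quick orientation for the removed file above, the compactFeeData helpers compose into a simple write-then-read round trip; this usage sketch only exercises the factory and iterator exactly as defined there:

```go
// exampleFeeDataRoundTrip writes three fees and reads them back in
// block order, one uint64 per transaction.
func exampleFeeDataRoundTrip() error {
	factory := newCompactFeeFactory()
	for _, fee := range []uint64{100, 250, 0} {
		if err := factory.add(fee); err != nil {
			return err
		}
	}
	feeData, err := factory.data()
	if err != nil {
		return err
	}
	iterator := feeData.iterator()
	for i := 0; i < feeData.Len(); i++ { // Len() == 3 here
		if _, err := iterator.next(); err != nil {
			return err
		}
	}
	return nil
}
```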
@@ -1,305 +0,0 @@
// Copyright (c) 2016 The Decred developers
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag_test

import (
"bytes"
"fmt"
"os"
"path/filepath"
"testing"

"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/blockdag/fullblocktests"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
_ "github.com/daglabs/btcd/database/ffldb"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
)

const (
// testDbType is the database backend type to use for the tests.
testDbType = "ffldb"

// testDbRoot is the root directory used to create all test databases.
testDbRoot = "testdbs"

// blockDataNet is the expected network in the test block data.
blockDataNet = wire.MainNet
)

// fileExists returns whether or not the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}

// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
supportedDrivers := database.SupportedDrivers()
for _, driver := range supportedDrivers {
if dbType == driver {
return true
}
}

return false
}

// DAGSetup is used to create a new db and chain instance with the genesis
// block already inserted. In addition to the new chain instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func(), error) {
if !isSupportedDbType(testDbType) {
return nil, nil, fmt.Errorf("unsupported db type %v", testDbType)
}

// Handle memory database specially since it doesn't need the disk
// specific handling.
var db database.DB
var teardown func()
if testDbType == "memdb" {
ndb, err := database.Create(testDbType)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
}
db = ndb

// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
db.Close()
}
} else {
// Create the root directory for test databases.
if !fileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := fmt.Errorf("unable to create test db "+
"root: %v", err)
return nil, nil, err
}
}

// Create a new database to store the accepted blocks into.
dbPath := filepath.Join(testDbRoot, dbName)
_ = os.RemoveAll(dbPath)
ndb, err := database.Create(testDbType, dbPath, blockDataNet)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
}
db = ndb

// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
db.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}
}

// Copy the chain params to ensure any modifications the tests do to
// the chain parameters do not affect the global instance.
paramsCopy := *params

// Create the main chain instance.
chain, err := blockdag.New(&blockdag.Config{
DB: db,
DAGParams: &paramsCopy,
Checkpoints: nil,
TimeSource: blockdag.NewMedianTime(),
SigCache: txscript.NewSigCache(1000),
})
if err != nil {
teardown()
err := fmt.Errorf("failed to create chain instance: %v", err)
return nil, nil, err
}
return chain, teardown, nil
}
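TestFullBlocks below consumes the removed test-local DAGSetup shown above; its contract is a name, a params pointer, and a teardown that must always be deferred. A minimal usage sketch (helper name hypothetical):

```go
// setupExample wires DAGSetup into a test; the caller defers teardown.
func setupExample(t *testing.T) (*blockdag.BlockDAG, func()) {
	dag, teardownFunc, err := DAGSetup("exampleTest", &dagconfig.RegressionNetParams)
	if err != nil {
		t.Fatalf("Failed to setup DAG instance: %v", err)
	}
	return dag, teardownFunc
}
```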
// TestFullBlocks ensures all tests generated by the fullblocktests package
// have the expected result when processed via ProcessBlock.
func TestFullBlocks(t *testing.T) {
// TODO: (Stas) This test was disabled until we have implemented Phantom
// Ticket: https://daglabs.atlassian.net/browse/DEV-60
t.SkipNow()

tests, err := fullblocktests.Generate(false)
if err != nil {
t.Fatalf("failed to generate tests: %v", err)
}

// Create a new database and chain instance to run tests against.
dag, teardownFunc, err := DAGSetup("fullblocktest",
&dagconfig.RegressionNetParams)
if err != nil {
t.Errorf("Failed to setup chain instance: %v", err)
return
}
defer teardownFunc()

// testAcceptedBlock attempts to process the block in the provided test
// instance and ensures that it was accepted according to the flags
// specified in the test.
testAcceptedBlock := func(item fullblocktests.AcceptedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)

isOrphan, err := dag.ProcessBlock(block,
blockdag.BFNone)
if err != nil {
t.Fatalf("block %q (hash %s, height %d) should "+
"have been accepted: %v", item.Name,
block.Hash(), blockHeight, err)
}

if isOrphan != item.IsOrphan {
t.Fatalf("block %q (hash %s, height %d) unexpected "+
"orphan flag -- got %v, want %v", item.Name,
block.Hash(), blockHeight, isOrphan,
item.IsOrphan)
}
}

// testRejectedBlock attempts to process the block in the provided test
// instance and ensures that it was rejected with the reject code
// specified in the test.
testRejectedBlock := func(item fullblocktests.RejectedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)

_, err := dag.ProcessBlock(block, blockdag.BFNone)
if err == nil {
t.Fatalf("block %q (hash %s, height %d) should not "+
"have been accepted", item.Name, block.Hash(),
blockHeight)
}

// Ensure the error code is of the expected type and the reject
// code matches the value specified in the test instance.
rerr, ok := err.(blockdag.RuleError)
if !ok {
t.Fatalf("block %q (hash %s, height %d) returned "+
"unexpected error type -- got %T, want "+
"blockchain.RuleError", item.Name, block.Hash(),
blockHeight, err)
}
if rerr.ErrorCode != item.RejectCode {
t.Fatalf("block %q (hash %s, height %d) does not have "+
"expected reject code -- got %v, want %v",
item.Name, block.Hash(), blockHeight,
rerr.ErrorCode, item.RejectCode)
}
}

// testRejectedNonCanonicalBlock attempts to decode the block in the
// provided test instance and ensures that it failed to decode with a
// message error.
testRejectedNonCanonicalBlock := func(item fullblocktests.RejectedNonCanonicalBlock) {
headerLen := len(item.RawBlock)
if headerLen > 80 {
headerLen = 80
}
blockHash := daghash.DoubleHashH(item.RawBlock[0:headerLen])
blockHeight := item.Height
t.Logf("Testing block %s (hash %s, height %d)", item.Name,
blockHash, blockHeight)

// Ensure there is an error due to deserializing the block.
var msgBlock wire.MsgBlock
err := msgBlock.BtcDecode(bytes.NewReader(item.RawBlock), 0)
if _, ok := err.(*wire.MessageError); !ok {
t.Fatalf("block %q (hash %s, height %d) should have "+
"failed to decode", item.Name, blockHash,
blockHeight)
}
}

// testOrphanOrRejectedBlock attempts to process the block in the
// provided test instance and ensures that it was either accepted as an
// orphan or rejected with a rule violation.
testOrphanOrRejectedBlock := func(item fullblocktests.OrphanOrRejectedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)

isOrphan, err := dag.ProcessBlock(block, blockdag.BFNone)
if err != nil {
// Ensure the error code is of the expected type.
if _, ok := err.(blockdag.RuleError); !ok {
t.Fatalf("block %q (hash %s, height %d) "+
"returned unexpected error type -- "+
"got %T, want blockchain.RuleError",
item.Name, block.Hash(), blockHeight,
err)
}
}

if !isOrphan {
t.Fatalf("block %q (hash %s, height %d) was accepted, "+
"but is not considered an orphan", item.Name,
block.Hash(), blockHeight)
}
}

// testExpectedTip ensures the current tip of the blockchain is the
// block specified in the provided test instance.
testExpectedTip := func(item fullblocktests.ExpectedTip) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetHeight(blockHeight)
t.Logf("Testing tip for block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)

// Ensure hash and height match.
if dag.HighestTipHash() != item.Block.BlockHash() ||
dag.Height() != blockHeight { //TODO: (Ori) the use of dag.Height() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation

t.Fatalf("block %q (hash %s, height %d) should be "+
"the current tip -- got (hash %s, height %d)",
item.Name, block.Hash(), blockHeight, dag.HighestTipHash(),
dag.Height()) //TODO: (Ori) the use of dag.Height() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
}
}

for testNum, test := range tests {
for itemNum, item := range test {
switch item := item.(type) {
case fullblocktests.AcceptedBlock:
testAcceptedBlock(item)
case fullblocktests.RejectedBlock:
testRejectedBlock(item)
case fullblocktests.RejectedNonCanonicalBlock:
testRejectedNonCanonicalBlock(item)
case fullblocktests.OrphanOrRejectedBlock:
testOrphanOrRejectedBlock(item)
case fullblocktests.ExpectedTip:
testExpectedTip(item)
default:
t.Fatalf("test #%d, item #%d is not one of "+
"the supported test instance types -- "+
"got type: %T", testNum, itemNum, item)
}
}
}
}
@@ -1,29 +0,0 @@
fullblocktests
==============

[](https://travis-ci.org/btcsuite/btcd)
[](http://copyfree.org)
[](http://godoc.org/github.com/daglabs/btcd/blockchain/fullblocktests)

Package fullblocktests provides a set of full block tests to be used for testing
the consensus validation rules. The tests are intended to be flexible enough to
allow both unit-style tests directly against the blockchain code as well as
integration style tests over the peer-to-peer network. To achieve that goal,
each test contains additional information about the expected result, however
that information can be ignored when doing comparison tests between two
independent versions over the peer-to-peer network.

This package has intentionally been designed so it can be used as a standalone
package for any projects needing to test their implementation against a full set
of blocks that exercise the consensus validation rules.

## Installation and Updating

```bash
$ go get -u github.com/daglabs/btcd/blockchain/fullblocktests
```

## License

Package fullblocktests is licensed under the [copyfree](http://copyfree.org) ISC
License.
@@ -1,20 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package fullblocktests provides a set of block consensus validation tests.

All of the generated test instances involve full blocks that are to be used for
testing the consensus validation rules. The tests are intended to be flexible
enough to allow both unit-style tests directly against the blockchain code as
well as integration style tests over the peer-to-peer network. To achieve that
goal, each test contains additional information about the expected result;
however, that information can be ignored when doing comparison tests between
two independent versions over the peer-to-peer network.

This package has intentionally been designed so it can be used as a standalone
package for any projects needing to test their implementation against a full set
of blocks that exercise the consensus validation rules.
*/
package fullblocktests
File diff suppressed because it is too large
@@ -1,146 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package fullblocktests

import (
	"encoding/hex"
	"math"
	"math/big"
	"time"

	"github.com/daglabs/btcd/util/hdkeychain"
	"github.com/daglabs/btcd/util/subnetworkid"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/wire"
)

// newHashFromStr converts the passed big-endian hex string into a
// wire.Hash. It only differs from the one available in daghash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
	hash, err := daghash.NewHashFromStr(hexStr)
	if err != nil {
		panic(err)
	}
	return hash
}

// newTxIDFromStr converts the passed big-endian hex string into a
// wire.TxID. It only differs from the one available in daghash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newTxIDFromStr(hexStr string) *daghash.TxID {
	txID, err := daghash.NewTxIDFromStr(hexStr)
	if err != nil {
		panic(err)
	}
	return txID
}

// fromHex converts the passed hex string into a byte slice and will panic if
// there is an error. This is only provided for the hard-coded constants so
// errors in the source code can be detected. It will only (and must only) be
// called for initialization purposes.
func fromHex(s string) []byte {
	r, err := hex.DecodeString(s)
	if err != nil {
		panic("invalid hex in source file: " + s)
	}
	return r
}

var (
	// bigOne is 1 represented as a big.Int. It is defined here to avoid
	// the overhead of creating it multiple times.
	bigOne = big.NewInt(1)

	// regressionPowLimit is the highest proof of work value a Bitcoin block
	// can have for the regression test network. It is the value 2^255 - 1.
	regressionPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne)

	// regTestGenesisBlock defines the genesis block of the block chain which serves
	// as the public transaction ledger for the regression test network.
	regTestGenesisBlock = wire.MsgBlock{
		Header: wire.BlockHeader{
			Version:        1,
			ParentHashes:   []*daghash.Hash{},
			HashMerkleRoot: newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"),
			Timestamp:      time.Unix(0x5b28c636, 0), // 2018-06-19 09:00:38 +0000 UTC
			Bits:           0x207fffff,               // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
			Nonce:          1,
		},
		Transactions: []*wire.MsgTx{{
			Version: 1,
			TxIn: []*wire.TxIn{{
				PreviousOutPoint: wire.OutPoint{
					TxID:  daghash.TxID{},
					Index: 0xffffffff,
				},
				SignatureScript: fromHex("04ffff001d010445" +
					"5468652054696d65732030332f4a616e2f" +
					"32303039204368616e63656c6c6f72206f" +
					"6e206272696e6b206f66207365636f6e64" +
					"206261696c6f757420666f72206261686b73"),
				Sequence: math.MaxUint64,
			}},
			TxOut: []*wire.TxOut{{
				Value: 0,
				PkScript: fromHex("4104678afdb0fe5548271967f1" +
					"a67130b7105cd6a828e03909a67962e0ea1f" +
					"61deb649f6bc3f4cef38c4f35504e51ec138" +
					"c4f35504e51ec112de5c384df7ba0b8d578a" +
					"4c702b6bf11d5fac"),
			}},
			LockTime:     0,
			SubnetworkID: *subnetworkid.SubnetworkIDNative,
		}},
	}
)

// regressionNetParams defines the network parameters for the regression test
// network.
//
// NOTE: The test generator intentionally does not use the existing definitions
// in the dagconfig package since the intent is to be able to generate known
// good tests which exercise that code. Using the dagconfig parameters would
// allow them to change out from under the tests, potentially invalidating them.
var regressionNetParams = &dagconfig.Params{
	Name:        "regtest",
	Net:         wire.TestNet,
	DefaultPort: "18444",

	// Chain parameters
	GenesisBlock:             &regTestGenesisBlock,
	GenesisHash:              newHashFromStr("5bec7567af40504e0994db3b573c186fffcc4edefe096ff2e58d00523bd7e8a6"),
	PowLimit:                 regressionPowLimit,
	PowLimitBits:             0x207fffff,
	BlockRewardMaturity:      100,
	SubsidyReductionInterval: 150,
	TargetTimespan:           time.Hour * 24 * 14, // 14 days
	TargetTimePerBlock:       time.Second * 10,    // 10 seconds
	RetargetAdjustmentFactor: 4,                   // 25% less, 400% more
	ReduceMinDifficulty:      true,
	MinDiffReductionTime:     time.Minute * 20, // TargetTimePerBlock * 2
	GenerateSupported:        true,

	// Checkpoints ordered from oldest to newest.
	Checkpoints: nil,

	// Mempool parameters
	RelayNonStdTxs: true,

	// Address encoding magics
	PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)

	// BIP32 hierarchical deterministic extended key magics
	HDKeyIDPair: hdkeychain.HDKeyPairRegressionNet,

	// BIP44 coin type used in the hierarchical deterministic path for
	// address generation.
	HDCoinType: 1,
}
blockdag/ghostdag.go (new file, 176 lines)
@@ -0,0 +1,176 @@
package blockdag

import (
	"sort"

	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/pkg/errors"
)

// ghostdag runs the GHOSTDAG protocol and updates newNode.blues,
// newNode.selectedParent and newNode.bluesAnticoneSizes accordingly.
// The function updates newNode.blues by iterating over the blocks in
// the anticone of newNode.selectedParent (which is the parent with the
// highest blue score) and adds any block to newNode.blues if by adding
// it to newNode.blues these conditions will not be violated:
//
// 1) |anticone-of-candidate-block ∩ blue-set-of-newNode| ≤ K
//
// 2) For every blue block in blue-set-of-newNode:
//    |(anticone-of-blue-block ∩ blue-set-newNode) ∪ {candidate-block}| ≤ K.
//    We validate this condition by maintaining a map bluesAnticoneSizes for
//    each block which holds all the blue anticone sizes that were affected by
//    the newly added blue blocks.
//    So to find out what |anticone-of-blue ∩ blue-set-of-newNode| is, we just
//    iterate over the selected parent chain of newNode until we find an
//    existing entry in bluesAnticoneSizes.
//
// For further details see the article https://eprint.iacr.org/2018/104.pdf
func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blockNode, err error) {
	newNode.selectedParent = newNode.parents.bluest()
	newNode.bluesAnticoneSizes[newNode.selectedParent] = 0
	newNode.blues = []*blockNode{newNode.selectedParent}
	selectedParentAnticone, err = dag.selectedParentAnticone(newNode)
	if err != nil {
		return nil, err
	}

	sort.Slice(selectedParentAnticone, func(i, j int) bool {
		return selectedParentAnticone[i].less(selectedParentAnticone[j])
	})

	for _, blueCandidate := range selectedParentAnticone {
		candidateBluesAnticoneSizes := make(map[*blockNode]dagconfig.KType)
		var candidateAnticoneSize dagconfig.KType
		possiblyBlue := true

		// Iterate over all blocks in the blue set of newNode that are not in the past
		// of blueCandidate, and check for each one of them if blueCandidate potentially
		// enlarges their blue anticone to be over K, or that they enlarge the blue anticone
		// of blueCandidate to be over K.
		for chainBlock := newNode; possiblyBlue; chainBlock = chainBlock.selectedParent {
			// If blueCandidate is in the future of chainBlock, it means
			// that all remaining blues are in the past of chainBlock and thus
			// in the past of blueCandidate. In this case we know for sure that
			// the anticone of blueCandidate will not exceed K, and we can mark
			// it as blue.
			//
			// newNode is always in the future of blueCandidate, so there's
			// no point in checking it.
			if chainBlock != newNode {
				if isAncestorOfBlueCandidate, err := dag.isAncestorOf(chainBlock, blueCandidate); err != nil {
					return nil, err
				} else if isAncestorOfBlueCandidate {
					break
				}
			}

			for _, block := range chainBlock.blues {
				// Skip blocks that exist in the past of blueCandidate.
				if isAncestorOfBlueCandidate, err := dag.isAncestorOf(block, blueCandidate); err != nil {
					return nil, err
				} else if isAncestorOfBlueCandidate {
					continue
				}

				candidateBluesAnticoneSizes[block], err = dag.blueAnticoneSize(block, newNode)
				if err != nil {
					return nil, err
				}
				candidateAnticoneSize++

				if candidateAnticoneSize > dag.dagParams.K {
					// k-cluster violation: The candidate's blue anticone exceeded k
					possiblyBlue = false
					break
				}

				if candidateBluesAnticoneSizes[block] == dag.dagParams.K {
					// k-cluster violation: A block in candidate's blue anticone already
					// has k blue blocks in its own anticone
					possiblyBlue = false
					break
				}

				// This is a sanity check that validates that a blue
				// block's blue anticone is not already larger than K.
				if candidateBluesAnticoneSizes[block] > dag.dagParams.K {
					return nil, errors.New("found blue anticone size larger than k")
				}
			}
		}

		if possiblyBlue {
			// No k-cluster violation found, we can now set the candidate block as blue
			newNode.blues = append(newNode.blues, blueCandidate)
			newNode.bluesAnticoneSizes[blueCandidate] = candidateAnticoneSize
			for blue, blueAnticoneSize := range candidateBluesAnticoneSizes {
				newNode.bluesAnticoneSizes[blue] = blueAnticoneSize + 1
			}

			// The maximum length of node.blues can be K+1 because
			// it contains the selected parent.
			if dagconfig.KType(len(newNode.blues)) == dag.dagParams.K+1 {
				break
			}
		}
	}

	newNode.blueScore = newNode.selectedParent.blueScore + uint64(len(newNode.blues))
	return selectedParentAnticone, nil
}

// selectedParentAnticone returns the blocks in the anticone of the selected parent of the given node.
// The function works as follows:
// We start by adding all parents of the node (other than the selected parent) to a process queue.
// For each node in the queue we check whether it is in the past of the selected parent.
// If not, we add the node to the resulting anticone-set and queue it for processing.
func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, error) {
	anticoneSet := newBlockSet()
	var anticoneSlice []*blockNode
	selectedParentPast := newBlockSet()
	var queue []*blockNode
	// Queueing all parents (other than the selected parent itself) for processing.
	for parent := range node.parents {
		if parent == node.selectedParent {
			continue
		}
		anticoneSet.add(parent)
		anticoneSlice = append(anticoneSlice, parent)
		queue = append(queue, parent)
	}
	for len(queue) > 0 {
		var current *blockNode
		current, queue = queue[0], queue[1:]
		// For each parent of the current node we check whether it is in the past
		// of the selected parent. If not, we add it to the resulting anticone-set
		// and queue it for further processing.
		for parent := range current.parents {
			if anticoneSet.contains(parent) || selectedParentPast.contains(parent) {
				continue
			}
			isAncestorOfSelectedParent, err := dag.isAncestorOf(parent, node.selectedParent)
			if err != nil {
				return nil, err
			}
			if isAncestorOfSelectedParent {
				selectedParentPast.add(parent)
				continue
			}
			anticoneSet.add(parent)
			anticoneSlice = append(anticoneSlice, parent)
			queue = append(queue, parent)
		}
	}
	return anticoneSlice, nil
}

// blueAnticoneSize returns the blue anticone size of 'block' from the worldview of 'context'.
// Expects 'block' to be in the blue set of 'context'.
func (dag *BlockDAG) blueAnticoneSize(block, context *blockNode) (dagconfig.KType, error) {
	for current := context; current != nil; current = current.selectedParent {
		if blueAnticoneSize, ok := current.bluesAnticoneSizes[block]; ok {
			return blueAnticoneSize, nil
		}
	}
	return 0, errors.Errorf("block %s is not in blue set of %s", block.hash, context.hash)
}
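The two k-cluster conditions enforced above are easiest to see on plain data. Below is a minimal standalone sketch (hypothetical, simplified types; not part of kaspad) that mirrors the per-candidate decision the loop makes, with a plain map standing in for blockNode.bluesAnticoneSizes:

```go
package main

import "fmt"

const k = 3

// tryAddBlue reports whether a candidate block may be colored blue.
// blueAnticoneSizes maps each existing blue block to the size of its blue
// anticone; candidateAnticone lists the blue blocks in the candidate's anticone.
func tryAddBlue(blueAnticoneSizes map[string]int, candidateAnticone []string) bool {
	// Condition 1: the candidate's own blue anticone must not exceed k.
	if len(candidateAnticone) > k {
		return false
	}
	// Condition 2: adding the candidate must not push any existing blue
	// block past k blue blocks in its anticone.
	for _, blue := range candidateAnticone {
		if blueAnticoneSizes[blue] == k {
			return false
		}
	}
	return true
}

func main() {
	sizes := map[string]int{"B": 2, "C": 3}
	fmt.Println(tryAddBlue(sizes, []string{"B"})) // true: B would go from 2 to 3 = k
	fmt.Println(tryAddBlue(sizes, []string{"C"})) // false: C already has k blues in its anticone
}
```

In the real implementation the sizes are not global: blueAnticoneSize walks the selected-parent chain to find each size from the worldview of newNode, which is why bluesAnticoneSizes is stored per block.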
blockdag/ghostdag_test.go (new file, 370 lines)
@@ -0,0 +1,370 @@
package blockdag

import (
	"fmt"
	"reflect"
	"sort"
	"strings"
	"testing"

	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/daghash"
)

type testBlockData struct {
	parents []string
	// id is a virtual entity that is used only for tests so we can define
	// relations between blocks without knowing their hash.
	id                     string
	expectedScore          uint64
	expectedSelectedParent string
	expectedBlues          []string
}
// TestGHOSTDAG iterates over several dag simulations, and checks
// that the blue score, blue set and selected parent of each
// block are calculated as expected.
func TestGHOSTDAG(t *testing.T) {
	dagParams := dagconfig.SimnetParams

	tests := []struct {
		k            dagconfig.KType
		expectedReds []string
		dagData      []*testBlockData
	}{
		{
			k:            3,
			expectedReds: []string{"F", "G", "H", "I", "O", "P"},
			dagData: []*testBlockData{
				{
					parents:                []string{"A"},
					id:                     "B",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"B"},
					id:                     "C",
					expectedScore:          2,
					expectedSelectedParent: "B",
					expectedBlues:          []string{"B"},
				},
				{
					parents:                []string{"A"},
					id:                     "D",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"C", "D"},
					id:                     "E",
					expectedScore:          4,
					expectedSelectedParent: "C",
					expectedBlues:          []string{"C", "D"},
				},
				{
					parents:                []string{"A"},
					id:                     "F",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"F"},
					id:                     "G",
					expectedScore:          2,
					expectedSelectedParent: "F",
					expectedBlues:          []string{"F"},
				},
				{
					parents:                []string{"A"},
					id:                     "H",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"A"},
					id:                     "I",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"E", "G"},
					id:                     "J",
					expectedScore:          5,
					expectedSelectedParent: "E",
					expectedBlues:          []string{"E"},
				},
				{
					parents:                []string{"J"},
					id:                     "K",
					expectedScore:          6,
					expectedSelectedParent: "J",
					expectedBlues:          []string{"J"},
				},
				{
					parents:                []string{"I", "K"},
					id:                     "L",
					expectedScore:          7,
					expectedSelectedParent: "K",
					expectedBlues:          []string{"K"},
				},
				{
					parents:                []string{"L"},
					id:                     "M",
					expectedScore:          8,
					expectedSelectedParent: "L",
					expectedBlues:          []string{"L"},
				},
				{
					parents:                []string{"M"},
					id:                     "N",
					expectedScore:          9,
					expectedSelectedParent: "M",
					expectedBlues:          []string{"M"},
				},
				{
					parents:                []string{"M"},
					id:                     "O",
					expectedScore:          9,
					expectedSelectedParent: "M",
					expectedBlues:          []string{"M"},
				},
				{
					parents:                []string{"M"},
					id:                     "P",
					expectedScore:          9,
					expectedSelectedParent: "M",
					expectedBlues:          []string{"M"},
				},
				{
					parents:                []string{"M"},
					id:                     "Q",
					expectedScore:          9,
					expectedSelectedParent: "M",
					expectedBlues:          []string{"M"},
				},
				{
					parents:                []string{"M"},
					id:                     "R",
					expectedScore:          9,
					expectedSelectedParent: "M",
					expectedBlues:          []string{"M"},
				},
				{
					parents:                []string{"R"},
					id:                     "S",
					expectedScore:          10,
					expectedSelectedParent: "R",
					expectedBlues:          []string{"R"},
				},
				{
					parents:                []string{"N", "O", "P", "Q", "S"},
					id:                     "T",
					expectedScore:          13,
					expectedSelectedParent: "S",
					expectedBlues:          []string{"S", "N", "Q"},
				},
			},
		},
	}

	for i, test := range tests {
		func() {
			resetExtraNonceForTest()
			dagParams.K = test.k
			dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), Config{
				DAGParams: &dagParams,
			})
			if err != nil {
				t.Fatalf("Failed to setup dag instance: %v", err)
			}
			defer teardownFunc()

			genesisNode := dag.genesis
			blockByIDMap := make(map[string]*blockNode)
			idByBlockMap := make(map[*blockNode]string)
			blockByIDMap["A"] = genesisNode
			idByBlockMap[genesisNode] = "A"

			for _, blockData := range test.dagData {
				parents := blockSet{}
				for _, parentID := range blockData.parents {
					parent := blockByIDMap[parentID]
					parents.add(parent)
				}

				block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
				if err != nil {
					t.Fatalf("TestGHOSTDAG: block %v got unexpected error from PrepareBlockForTest: %v", blockData.id, err)
				}

				utilBlock := util.NewBlock(block)
				isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
				if err != nil {
					t.Fatalf("TestGHOSTDAG: dag.ProcessBlock got unexpected error for block %v: %v", blockData.id, err)
				}
				if isDelayed {
					t.Fatalf("TestGHOSTDAG: block %s "+
						"is too far in the future", blockData.id)
				}
				if isOrphan {
					t.Fatalf("TestGHOSTDAG: block %v was unexpectedly orphan", blockData.id)
				}

				node := dag.index.LookupNode(utilBlock.Hash())

				blockByIDMap[blockData.id] = node
				idByBlockMap[node] = blockData.id

				bluesIDs := make([]string, 0, len(node.blues))
				for _, blue := range node.blues {
					bluesIDs = append(bluesIDs, idByBlockMap[blue])
				}
				selectedParentID := idByBlockMap[node.selectedParent]
				fullDataStr := fmt.Sprintf("blues: %v, selectedParent: %v, score: %v",
					bluesIDs, selectedParentID, node.blueScore)
				if blockData.expectedScore != node.blueScore {
					t.Errorf("Test %d: Block %v expected to have score %v but got %v (fulldata: %v)",
						i, blockData.id, blockData.expectedScore, node.blueScore, fullDataStr)
				}
				if blockData.expectedSelectedParent != selectedParentID {
					t.Errorf("Test %d: Block %v expected to have selected parent %v but got %v (fulldata: %v)",
						i, blockData.id, blockData.expectedSelectedParent, selectedParentID, fullDataStr)
				}
				if !reflect.DeepEqual(blockData.expectedBlues, bluesIDs) {
					t.Errorf("Test %d: Block %v expected to have blues %v but got %v (fulldata: %v)",
						i, blockData.id, blockData.expectedBlues, bluesIDs, fullDataStr)
				}
			}

			reds := make(map[string]bool)

			for id := range blockByIDMap {
				reds[id] = true
			}

			for tip := &dag.virtual.blockNode; tip.selectedParent != nil; tip = tip.selectedParent {
				tipID := idByBlockMap[tip]
				delete(reds, tipID)
				for _, blue := range tip.blues {
					blueID := idByBlockMap[blue]
					delete(reds, blueID)
				}
			}
			if !checkReds(test.expectedReds, reds) {
				redsIDs := make([]string, 0, len(reds))
				for id := range reds {
					redsIDs = append(redsIDs, id)
				}
				sort.Strings(redsIDs)
				sort.Strings(test.expectedReds)
				t.Errorf("Test %d: Expected reds %v but got %v", i, test.expectedReds, redsIDs)
			}
		}()
	}
}

func checkReds(expectedReds []string, reds map[string]bool) bool {
	if len(expectedReds) != len(reds) {
		return false
	}
	for _, redID := range expectedReds {
		if !reds[redID] {
			return false
		}
	}
	return true
}

func TestBlueAnticoneSizeErrors(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", Config{
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
		t.Fatalf("TestBlueAnticoneSizeErrors: Failed to setup DAG instance: %s", err)
	}
	defer teardownFunc()

	// Prepare a block chain with size K beginning with the genesis block
	currentBlockA := dag.dagParams.GenesisBlock
	for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
		newBlock := prepareAndProcessBlock(t, dag, currentBlockA)
		currentBlockA = newBlock
	}

	// Prepare another block chain with size K beginning with the genesis block
	currentBlockB := dag.dagParams.GenesisBlock
	for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
		newBlock := prepareAndProcessBlock(t, dag, currentBlockB)
		currentBlockB = newBlock
	}

	// Get references to the tips of the two chains
	blockNodeA := dag.index.LookupNode(currentBlockA.BlockHash())
	blockNodeB := dag.index.LookupNode(currentBlockB.BlockHash())

	// Try getting the blueAnticoneSize between them. Since the two
	// blocks are not in the anticones of each other, this should fail.
	_, err = dag.blueAnticoneSize(blockNodeA, blockNodeB)
	if err == nil {
		t.Fatalf("TestBlueAnticoneSizeErrors: blueAnticoneSize unexpectedly succeeded")
	}
	expectedErrSubstring := "is not in blue set of"
	if !strings.Contains(err.Error(), expectedErrSubstring) {
		t.Fatalf("TestBlueAnticoneSizeErrors: blueAnticoneSize returned wrong error. "+
			"Want: %s, got: %s", expectedErrSubstring, err)
	}
}

func TestGHOSTDAGErrors(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", Config{
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
		t.Fatalf("TestGHOSTDAGErrors: Failed to setup DAG instance: %s", err)
	}
	defer teardownFunc()

	// Add two child blocks to the genesis
	block1 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
	block2 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)

	// Add a child block to the previous two blocks
	block3 := prepareAndProcessBlock(t, dag, block1, block2)

	// Clear the reachability store
	dag.reachabilityStore.loaded = map[daghash.Hash]*reachabilityData{}
	err = dag.db.Update(func(dbTx database.Tx) error {
		bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)
		cursor := bucket.Cursor()
		for ok := cursor.First(); ok; ok = cursor.Next() {
			err := bucket.Delete(cursor.Key())
			if err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		t.Fatalf("TestGHOSTDAGErrors: db.Update failed: %s", err)
	}

	// Try to rerun GHOSTDAG on the last block. GHOSTDAG uses
	// reachability data, so we expect it to fail.
	blockNode3 := dag.index.LookupNode(block3.BlockHash())
	_, err = dag.ghostdag(blockNode3)
	if err == nil {
		t.Fatalf("TestGHOSTDAGErrors: ghostdag unexpectedly succeeded")
	}
	expectedErrSubstring := "Couldn't find reachability data"
	if !strings.Contains(err.Error(), expectedErrSubstring) {
		t.Fatalf("TestGHOSTDAGErrors: ghostdag returned wrong error. "+
			"Want: %s, got: %s", expectedErrSubstring, err)
	}
}
@@ -1,9 +1,8 @@
 indexers
 ========
 
-[Build Status](https://travis-ci.org/btcsuite/btcd)
-[ISC License](http://copyfree.org)
-[GoDoc](http://godoc.org/github.com/daglabs/btcd/blockchain/indexers)
+[ISC License](https://choosealicense.com/licenses/isc/)
+[GoDoc](http://godoc.org/github.com/kaspanet/kaspad/blockdag/indexers)
 
 Package indexers implements optional block chain indexes.
 
@@ -12,21 +11,14 @@ via an RPC interface.
 
 ## Supported Indexers
 
-- Transaction-by-hash (txbyhashidx) Index
+- Transaction-by-hash (txindex) Index
   - Creates a mapping from the hash of each transaction to the block that
     contains it along with its offset and length within the serialized block
-- Transaction-by-address (txbyaddridx) Index
+- Transaction-by-address (addrindex) Index
   - Creates a mapping from every address to all transactions which either credit
     or debit the address
   - Requires the transaction-by-hash index
+- AcceptanceData-by-block Index
+  - Creates a mapping from the hash of each block to the list of transactions
+    this block accepts from its .Blues
-
-## Installation
-
-```bash
-$ go get -u github.com/daglabs/btcd/blockchain/indexers
-```
 
 ## License
 
 Package indexers is licensed under the [copyfree](http://copyfree.org) ISC
 License.

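For orientation, enabling one of these indexers on a DAG instance follows the pattern used by the acceptance-index tests later in this diff. A minimal sketch (error handling elided; `params` and `db` are assumed to already exist):

```go
acceptanceIndex := indexers.NewAcceptanceIndex()
indexManager := indexers.NewManager([]indexers.Indexer{acceptanceIndex})
config := blockdag.Config{
	IndexManager: indexManager,
	DAGParams:    params, // e.g. &dagconfig.SimnetParams
	DB:           db,
}
dag, teardown, err := blockdag.DAGSetup("", config)
```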
blockdag/indexers/acceptanceindex.go (new file, 234 lines)
@@ -0,0 +1,234 @@
package indexers

import (
	"bytes"
	"encoding/gob"

	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
	"github.com/pkg/errors"
)

const (
	// acceptanceIndexName is the human-readable name for the index.
	acceptanceIndexName = "acceptance index"
)

var (
	// acceptanceIndexKey is the key of the acceptance index and the db bucket used
	// to house it.
	acceptanceIndexKey = []byte("acceptanceidx")
)

// AcceptanceIndex implements a txAcceptanceData by block hash index. That is to say,
// it stores a mapping between a block's hash and the set of transactions that the
// block accepts among its blue blocks.
type AcceptanceIndex struct {
	db  database.DB
	dag *blockdag.BlockDAG
}

// Ensure the AcceptanceIndex type implements the Indexer interface.
var _ Indexer = (*AcceptanceIndex)(nil)

// NewAcceptanceIndex returns a new instance of an indexer that is used to create a
// mapping between block hashes and their txAcceptanceData.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockdag package. This allows the index to be
// seamlessly maintained along with the DAG.
func NewAcceptanceIndex() *AcceptanceIndex {
	return &AcceptanceIndex{}
}

// DropAcceptanceIndex drops the acceptance index from the provided database if it
// exists.
func DropAcceptanceIndex(db database.DB, interrupt <-chan struct{}) error {
	return dropIndex(db, acceptanceIndexKey, acceptanceIndexName, interrupt)
}

// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Key() []byte {
	return acceptanceIndexKey
}

// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Name() string {
	return acceptanceIndexName
}

// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the
// acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Create(dbTx database.Tx) error {
	_, err := dbTx.Metadata().CreateBucket(acceptanceIndexKey)
	return err
}

// Init initializes the hash-based acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
	idx.db = db
	idx.dag = dag
	return nil
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) ConnectBlock(dbTx database.Tx, _ *util.Block, blockID uint64, _ *blockdag.BlockDAG,
	txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
	return dbPutTxsAcceptanceData(dbTx, blockID, txsAcceptanceData)
}

// TxsAcceptanceData returns the acceptance data of all the transactions that
// were accepted by the block with hash blockHash.
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
	var txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		txsAcceptanceData, err = dbFetchTxsAcceptanceDataByHash(dbTx, blockHash)
		return err
	})
	if err != nil {
		return nil, err
	}
	return txsAcceptanceData, nil
}

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
	for blockID := currentBlockID + 1; blockID <= lastKnownBlockID; blockID++ {
		// Fetch the hash of the block currently being recovered, not the
		// one recovery started from.
		hash, err := blockdag.DBFetchBlockHashByID(dbTx, blockID)
		if err != nil {
			return err
		}
		txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(hash)
		if err != nil {
			return err
		}
		err = idx.ConnectBlock(dbTx, nil, blockID, nil, txAcceptanceData, nil)
		if err != nil {
			return err
		}
	}
	return nil
}

func dbPutTxsAcceptanceData(dbTx database.Tx, blockID uint64,
	txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(txsAcceptanceData)
	if err != nil {
		return err
	}

	bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
	return bucket.Put(blockdag.SerializeBlockID(blockID), serializedTxsAcceptanceData)
}

func dbFetchTxsAcceptanceDataByHash(dbTx database.Tx,
	hash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {

	blockID, err := blockdag.DBFetchBlockIDByHash(dbTx, hash)
	if err != nil {
		return nil, err
	}

	return dbFetchTxsAcceptanceDataByID(dbTx, blockID)
}

func dbFetchTxsAcceptanceDataByID(dbTx database.Tx,
	blockID uint64) (blockdag.MultiBlockTxsAcceptanceData, error) {
	serializedBlockID := blockdag.SerializeBlockID(blockID)
	bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
	serializedTxsAcceptanceData := bucket.Get(serializedBlockID)
	if serializedTxsAcceptanceData == nil {
		return nil, errors.Errorf("no entry in the acceptance index for block id %d", blockID)
	}

	return deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
}

type serializableTxAcceptanceData struct {
	MsgTx      wire.MsgTx
	IsAccepted bool
}

type serializableBlockTxsAcceptanceData struct {
	BlockHash        daghash.Hash
	TxAcceptanceData []serializableTxAcceptanceData
}

type serializableMultiBlockTxsAcceptanceData []serializableBlockTxsAcceptanceData

func serializeMultiBlockTxsAcceptanceData(
	multiBlockTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) ([]byte, error) {
	// Convert MultiBlockTxsAcceptanceData to a serializable format
	serializableData := make(serializableMultiBlockTxsAcceptanceData, len(multiBlockTxsAcceptanceData))
	for i, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
		serializableBlockData := serializableBlockTxsAcceptanceData{
			BlockHash:        blockTxsAcceptanceData.BlockHash,
			TxAcceptanceData: make([]serializableTxAcceptanceData, len(blockTxsAcceptanceData.TxAcceptanceData)),
		}
		for i, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
			serializableBlockData.TxAcceptanceData[i] = serializableTxAcceptanceData{
				MsgTx:      *txAcceptanceData.Tx.MsgTx(),
				IsAccepted: txAcceptanceData.IsAccepted,
			}
		}
		serializableData[i] = serializableBlockData
	}

	// Serialize
	var buffer bytes.Buffer
	encoder := gob.NewEncoder(&buffer)
	err := encoder.Encode(serializableData)
	if err != nil {
		return nil, err
	}
	return buffer.Bytes(), nil
}

func deserializeMultiBlockTxsAcceptanceData(
	serializedTxsAcceptanceData []byte) (blockdag.MultiBlockTxsAcceptanceData, error) {
	// Deserialize
	buffer := bytes.NewBuffer(serializedTxsAcceptanceData)
	decoder := gob.NewDecoder(buffer)
	var serializedData serializableMultiBlockTxsAcceptanceData
	err := decoder.Decode(&serializedData)
	if err != nil {
		return nil, err
	}

	// Convert serializable format to MultiBlockTxsAcceptanceData
	multiBlockTxsAcceptanceData := make(blockdag.MultiBlockTxsAcceptanceData, len(serializedData))
	for i, serializableBlockData := range serializedData {
		blockTxsAcceptanceData := blockdag.BlockTxsAcceptanceData{
			BlockHash:        serializableBlockData.BlockHash,
			TxAcceptanceData: make([]blockdag.TxAcceptanceData, len(serializableBlockData.TxAcceptanceData)),
		}
		for i, txData := range serializableBlockData.TxAcceptanceData {
			msgTx := txData.MsgTx
			blockTxsAcceptanceData.TxAcceptanceData[i] = blockdag.TxAcceptanceData{
				Tx:         util.NewTx(&msgTx),
				IsAccepted: txData.IsAccepted,
			}
		}
		multiBlockTxsAcceptanceData[i] = blockTxsAcceptanceData
	}

	return multiBlockTxsAcceptanceData, nil
}
blockdag/indexers/acceptanceindex_test.go (new file, 340 lines)
@@ -0,0 +1,340 @@
package indexers

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"syscall"
	"testing"

	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
	"github.com/pkg/errors"
)

func TestAcceptanceIndexSerializationAndDeserialization(t *testing.T) {
	// Create test data
	hash, _ := daghash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")
	txIn1 := &wire.TxIn{SignatureScript: []byte{1}, PreviousOutpoint: wire.Outpoint{Index: 1}, Sequence: 0}
	txIn2 := &wire.TxIn{SignatureScript: []byte{2}, PreviousOutpoint: wire.Outpoint{Index: 2}, Sequence: 0}
	txOut1 := &wire.TxOut{ScriptPubKey: []byte{1}, Value: 10}
	txOut2 := &wire.TxOut{ScriptPubKey: []byte{2}, Value: 20}
	blockTxsAcceptanceData := blockdag.BlockTxsAcceptanceData{
		BlockHash: *hash,
		TxAcceptanceData: []blockdag.TxAcceptanceData{
			{
				Tx:         util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn1}, []*wire.TxOut{txOut1})),
				IsAccepted: true,
			},
			{
				Tx:         util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn2}, []*wire.TxOut{txOut2})),
				IsAccepted: false,
			},
		},
	}
	multiBlockTxsAcceptanceData := blockdag.MultiBlockTxsAcceptanceData{blockTxsAcceptanceData}

	// Serialize
	serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(multiBlockTxsAcceptanceData)
	if err != nil {
		t.Fatalf("TestAcceptanceIndexSerializationAndDeserialization: serialization failed: %s", err)
	}

	// Deserialize
	deserializedTxsAcceptanceData, err := deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
	if err != nil {
		t.Fatalf("TestAcceptanceIndexSerializationAndDeserialization: deserialization failed: %s", err)
	}

	// Check that they're the same
	if !reflect.DeepEqual(multiBlockTxsAcceptanceData, deserializedTxsAcceptanceData) {
		t.Fatalf("TestAcceptanceIndexSerializationAndDeserialization: original data and deserialized data aren't equal")
	}
}

// TestAcceptanceIndexRecover tests the recoverability of the
// acceptance index.
// It does so by following these steps:
// * It creates a DAG with the acceptance index enabled (let's call it dag1)
//   and makes it process some blocks.
// * It creates a copy of dag1 (let's call it dag2), and disables the
//   acceptance index in it.
// * It processes two more blocks in both dag1 and dag2.
// * A copy of dag2 is created (let's call it dag3) with the acceptance
//   index enabled.
// * It checks that the two missing blocks are added to dag3's acceptance
//   index by comparing dag1's last block acceptance data and dag3's last
//   block acceptance data.
func TestAcceptanceIndexRecover(t *testing.T) {
	params := &dagconfig.SimnetParams
	params.BlockCoinbaseMaturity = 0

	testFiles := []string{
		"blk_0_to_4.dat",
		"blk_3B.dat",
	}

	var blocks []*util.Block
	for _, file := range testFiles {
		blockTmp, err := blockdag.LoadBlocks(filepath.Join("../testdata/", file))
		if err != nil {
			t.Fatalf("Error loading file: %v\n", err)
		}
		blocks = append(blocks, blockTmp...)
	}

	db1AcceptanceIndex := NewAcceptanceIndex()
	db1IndexManager := NewManager([]Indexer{db1AcceptanceIndex})
	db1Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover1")
	if err != nil {
		t.Fatalf("Error creating temporary directory: %s", err)
	}
	defer os.RemoveAll(db1Path)

	db1, err := database.Create("ffldb", db1Path, params.Net)
	if err != nil {
		t.Fatalf("error creating db: %s", err)
	}

	db1Config := blockdag.Config{
		IndexManager: db1IndexManager,
		DAGParams:    params,
		DB:           db1,
	}

	db1DAG, teardown, err := blockdag.DAGSetup("", db1Config)
	if err != nil {
		t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
	}
	if teardown != nil {
		defer teardown()
	}

	for i := 1; i < len(blocks)-2; i++ {
		isOrphan, isDelayed, err := db1DAG.ProcessBlock(blocks[i], blockdag.BFNone)
		if err != nil {
			t.Fatalf("ProcessBlock fail on block %v: %v\n", i, err)
		}
		if isDelayed {
			t.Fatalf("ProcessBlock: block %d "+
				"is too far in the future", i)
		}
		if isOrphan {
			t.Fatalf("ProcessBlock incorrectly returned block %v "+
				"is an orphan\n", i)
		}
	}

	err = db1.FlushCache()
	if err != nil {
		t.Fatalf("Error flushing database to disk: %s", err)
	}

	db2Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover2")
	if err != nil {
		t.Fatalf("Error creating temporary directory: %s", err)
	}
	defer os.RemoveAll(db2Path)

	err = copyDirectory(db1Path, db2Path)
	if err != nil {
		t.Fatalf("copyDirectory: %s", err)
	}

	for i := len(blocks) - 2; i < len(blocks); i++ {
		isOrphan, isDelayed, err := db1DAG.ProcessBlock(blocks[i], blockdag.BFNone)
		if err != nil {
			t.Fatalf("ProcessBlock fail on block %v: %v\n", i, err)
		}
		if isDelayed {
			t.Fatalf("ProcessBlock: block %d "+
				"is too far in the future", i)
		}
		if isOrphan {
			t.Fatalf("ProcessBlock incorrectly returned block %v "+
				"is an orphan\n", i)
		}
	}

	db1LastBlockAcceptanceData, err := db1AcceptanceIndex.TxsAcceptanceData(blocks[len(blocks)-1].Hash())
	if err != nil {
		t.Fatalf("Error fetching acceptance data: %s", err)
	}

	db2, err := database.Open("ffldb", db2Path, params.Net)
	if err != nil {
		t.Fatalf("Error opening database: %s", err)
	}

	db2Config := blockdag.Config{
		DAGParams: params,
		DB:        db2,
	}

	db2DAG, teardown, err := blockdag.DAGSetup("", db2Config)
	if err != nil {
		t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
	}
	if teardown != nil {
		defer teardown()
	}

	for i := len(blocks) - 2; i < len(blocks); i++ {
		isOrphan, isDelayed, err := db2DAG.ProcessBlock(blocks[i], blockdag.BFNone)
		if err != nil {
			t.Fatalf("ProcessBlock fail on block %v: %v\n", i, err)
		}
		if isDelayed {
			t.Fatalf("ProcessBlock: block %d "+
				"is too far in the future", i)
		}
		if isOrphan {
			t.Fatalf("ProcessBlock incorrectly returned block %v "+
				"is an orphan\n", i)
		}
	}

	err = db2.FlushCache()
	if err != nil {
		t.Fatalf("Error flushing database to disk: %s", err)
	}
	db3Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover3")
	if err != nil {
		t.Fatalf("Error creating temporary directory: %s", err)
	}
	defer os.RemoveAll(db3Path)
	err = copyDirectory(db2Path, db3Path)
	if err != nil {
		t.Fatalf("copyDirectory: %s", err)
	}

	db3, err := database.Open("ffldb", db3Path, params.Net)
	if err != nil {
		t.Fatalf("Error opening database: %s", err)
	}

	db3AcceptanceIndex := NewAcceptanceIndex()
	db3IndexManager := NewManager([]Indexer{db3AcceptanceIndex})
	db3Config := blockdag.Config{
		IndexManager: db3IndexManager,
		DAGParams:    params,
		DB:           db3,
	}

	_, teardown, err = blockdag.DAGSetup("", db3Config)
	if err != nil {
		t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
	}
	if teardown != nil {
		defer teardown()
	}

	db3LastBlockAcceptanceData, err := db3AcceptanceIndex.TxsAcceptanceData(blocks[len(blocks)-1].Hash())
	if err != nil {
		t.Fatalf("Error fetching acceptance data: %s", err)
	}
	if !reflect.DeepEqual(db1LastBlockAcceptanceData, db3LastBlockAcceptanceData) {
		t.Fatalf("recovery failed")
	}
}

// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func copyDirectory(scrDir, dest string) error {
	entries, err := ioutil.ReadDir(scrDir)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		sourcePath := filepath.Join(scrDir, entry.Name())
		destPath := filepath.Join(dest, entry.Name())

		fileInfo, err := os.Stat(sourcePath)
		if err != nil {
			return err
		}

		stat, ok := fileInfo.Sys().(*syscall.Stat_t)
		if !ok {
			return errors.Errorf("failed to get raw syscall.Stat_t data for '%s'", sourcePath)
		}

		switch fileInfo.Mode() & os.ModeType {
		case os.ModeDir:
			if err := createIfNotExists(destPath, 0755); err != nil {
				return err
			}
			if err := copyDirectory(sourcePath, destPath); err != nil {
				return err
			}
		case os.ModeSymlink:
			if err := copySymLink(sourcePath, destPath); err != nil {
				return err
			}
		default:
			if err := copyFile(sourcePath, destPath); err != nil {
				return err
			}
		}

		if err := os.Lchown(destPath, int(stat.Uid), int(stat.Gid)); err != nil {
			return err
		}

		isSymlink := entry.Mode()&os.ModeSymlink != 0
		if !isSymlink {
			if err := os.Chmod(destPath, entry.Mode()); err != nil {
				return err
			}
		}
	}
	return nil
}

// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func copyFile(srcFile, dstFile string) error {
	out, err := os.Create(dstFile)
	if err != nil {
		return err
	}
	defer out.Close()

	in, err := os.Open(srcFile)
	if err != nil {
		return err
	}
	defer in.Close()

	_, err = io.Copy(out, in)
	if err != nil {
		return err
	}

	return nil
}

// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func createIfNotExists(dir string, perm os.FileMode) error {
	if blockdag.FileExists(dir) {
		return nil
	}

	if err := os.MkdirAll(dir, perm); err != nil {
		return errors.Errorf("failed to create directory: '%s', error: '%s'", dir, err.Error())
	}

	return nil
}

// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func copySymLink(source, dest string) error {
	link, err := os.Readlink(source)
	if err != nil {
		return err
	}
	return os.Symlink(link, dest)
}
@@ -1,929 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"errors"
	"fmt"
	"sync"

	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/wire"
)

const (
	// addrIndexName is the human-readable name for the index.
	addrIndexName = "address index"

	// level0MaxEntries is the maximum number of transactions that are
	// stored in level 0 of an address index entry. Subsequent levels store
	// 2^n * level0MaxEntries entries, or in other words, double the maximum
	// of the previous level.
	level0MaxEntries = 8

	// addrKeySize is the number of bytes an address key consumes in the
	// index. It consists of 1 byte address type + 20 bytes hash160.
	addrKeySize = 1 + 20

	// levelKeySize is the number of bytes a level key in the address index
	// consumes. It consists of the address key + 1 byte for the level.
	levelKeySize = addrKeySize + 1

	// levelOffset is the offset in the level key which identifies the level.
	levelOffset = levelKeySize - 1

	// addrKeyTypePubKeyHash is the address type in an address key which
	// represents both a pay-to-pubkey-hash and a pay-to-pubkey address.
	// This is done because both are identical for the purposes of the
	// address index.
	addrKeyTypePubKeyHash = 0

	// addrKeyTypeScriptHash is the address type in an address key which
	// represents a pay-to-script-hash address. This is necessary because
	// the hash of a pubkey address might be the same as that of a script
	// hash.
	addrKeyTypeScriptHash = 1

	// Size of a transaction entry. It consists of 4 bytes block id + 4
	// bytes offset + 4 bytes length.
	txEntrySize = 4 + 4 + 4
)

var (
	// addrIndexKey is the key of the address index and the db bucket used
	// to house it.
	addrIndexKey = []byte("txbyaddridx")

	// errUnsupportedAddressType is an error that is used to signal an
	// unsupported address type has been used.
	errUnsupportedAddressType = errors.New("address type is not supported " +
		"by the address index")
)

// -----------------------------------------------------------------------------
// The address index maps addresses referenced in the blockchain to a list of
// all the transactions involving that address. Transactions are stored
// according to their order of appearance in the blockchain. That is to say,
// first by block height and then by offset inside the block. It is also
// important to note that this implementation requires the transaction index
// since it is needed in order to catch up old blocks due to the fact the spent
// outputs will already be pruned from the utxo set.
//
// The approach used to store the index is similar to a log-structured merge
// tree (LSM tree) and is thus similar to how leveldb works internally.
//
// Every address consists of one or more entries identified by a level starting
// from 0 where each level holds a maximum number of entries such that each
// subsequent level holds double the maximum of the previous one. In equation
// form, the number of entries each level holds is 2^n * firstLevelMaxSize.
//
// New transactions are appended to level 0 until it becomes full, at which point
// the entire level 0 entry is appended to the level 1 entry and level 0 is
// cleared. This process continues until level 1 becomes full, at which point it
// will be appended to level 2 and cleared, and so on.
//
// The result of this is the lower levels contain newer transactions and the
// transactions within each level are ordered from oldest to newest.
//
// The intent of this approach is to provide a balance between space efficiency
// and indexing cost. Storing one entry per transaction would have the lowest
// indexing cost, but would waste a lot of space because the same address hash
// would be duplicated for every transaction key. On the other hand, storing a
// single entry with all transactions would be the most space efficient, but
// would cause indexing cost to grow quadratically with the number of
// transactions involving the same address. The approach used here provides
// logarithmic insertion and retrieval.
//
// The serialized key format is:
//
//   <addr type><addr hash><level>
//
//   Field           Type      Size
//   addr type       uint8     1 byte
//   addr hash       hash160   20 bytes
//   level           uint8     1 byte
//   -----
//   Total: 22 bytes
//
// The serialized value format is:
//
//   [<block id><start offset><tx length>,...]
//
//   Field           Type      Size
//   block id        uint32    4 bytes
//   start offset    uint32    4 bytes
//   tx length       uint32    4 bytes
//   -----
//   Total: 12 bytes per indexed tx
// -----------------------------------------------------------------------------
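// As a worked example of the scheme above: with level0MaxEntries = 8 and
// txEntrySize = 12, level 0 holds up to 8 entries (96 bytes), level 1 up to
// 16 entries (192 bytes), level 2 up to 32 entries (384 bytes), and so on.
// When level 0 fills, its entries cascade into the first higher level with
// room, which is what keeps insertion and retrieval logarithmic in the
// number of indexed transactions.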
|
||||
// fetchBlockHashFunc defines a callback function to use in order to convert a
|
||||
// serialized block ID to an associated block hash.
|
||||
type fetchBlockHashFunc func(serializedID []byte) (*daghash.Hash, error)
|
||||
|
||||
// serializeAddrIndexEntry serializes the provided block id and transaction
|
||||
// location according to the format described in detail above.
|
||||
func serializeAddrIndexEntry(blockID uint32, txLoc wire.TxLoc) []byte {
|
||||
// Serialize the entry.
|
||||
serialized := make([]byte, 12)
|
||||
byteOrder.PutUint32(serialized, blockID)
|
||||
byteOrder.PutUint32(serialized[4:], uint32(txLoc.TxStart))
|
||||
byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxLen))
|
||||
return serialized
|
||||
}
|
||||
|
||||
// deserializeAddrIndexEntry decodes the passed serialized byte slice into the
|
||||
// provided region struct according to the format described in detail above and
|
||||
// uses the passed block hash fetching function in order to conver the block ID
|
||||
// to the associated block hash.
|
||||
func deserializeAddrIndexEntry(serialized []byte, region *database.BlockRegion, fetchBlockHash fetchBlockHashFunc) error {
|
||||
// Ensure there are enough bytes to decode.
|
||||
if len(serialized) < txEntrySize {
|
||||
return errDeserialize("unexpected end of data")
|
||||
}
|
||||
|
||||
hash, err := fetchBlockHash(serialized[0:4])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
region.Hash = hash
|
||||
region.Offset = byteOrder.Uint32(serialized[4:8])
|
||||
region.Len = byteOrder.Uint32(serialized[8:12])
|
||||
return nil
|
||||
}
|
||||
|
||||
// keyForLevel returns the key for a specific address and level in the address
|
||||
// index entry.
|
||||
func keyForLevel(addrKey [addrKeySize]byte, level uint8) [levelKeySize]byte {
|
||||
var key [levelKeySize]byte
|
||||
copy(key[:], addrKey[:])
|
||||
key[levelOffset] = level
|
||||
return key
|
||||
}

// dbPutAddrIndexEntry updates the address index to include the provided entry
// according to the level-based scheme described in detail above.
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint32, txLoc wire.TxLoc) error {
    // Start with level 0 and its initial max number of entries.
    curLevel := uint8(0)
    maxLevelBytes := level0MaxEntries * txEntrySize

    // Simply append the new entry to level 0 and return now when it will
    // fit. This is the most common path.
    newData := serializeAddrIndexEntry(blockID, txLoc)
    level0Key := keyForLevel(addrKey, 0)
    level0Data := bucket.Get(level0Key[:])
    if len(level0Data)+len(newData) <= maxLevelBytes {
        mergedData := newData
        if len(level0Data) > 0 {
            mergedData = make([]byte, len(level0Data)+len(newData))
            copy(mergedData, level0Data)
            copy(mergedData[len(level0Data):], newData)
        }
        return bucket.Put(level0Key[:], mergedData)
    }

    // At this point, level 0 is full, so merge each level into higher
    // levels as many times as needed to free up level 0.
    prevLevelData := level0Data
    for {
        // Each new level holds twice as much as the previous one.
        curLevel++
        maxLevelBytes *= 2

        // Move to the next level as long as the current level is full.
        curLevelKey := keyForLevel(addrKey, curLevel)
        curLevelData := bucket.Get(curLevelKey[:])
        if len(curLevelData) == maxLevelBytes {
            prevLevelData = curLevelData
            continue
        }

        // The current level has room for the data in the previous one,
        // so merge the data from previous level into it.
        mergedData := prevLevelData
        if len(curLevelData) > 0 {
            mergedData = make([]byte, len(curLevelData)+
                len(prevLevelData))
            copy(mergedData, curLevelData)
            copy(mergedData[len(curLevelData):], prevLevelData)
        }
        err := bucket.Put(curLevelKey[:], mergedData)
        if err != nil {
            return err
        }

        // Move all of the levels before the previous one up a level.
        for mergeLevel := curLevel - 1; mergeLevel > 0; mergeLevel-- {
            mergeLevelKey := keyForLevel(addrKey, mergeLevel)
            prevLevelKey := keyForLevel(addrKey, mergeLevel-1)
            prevData := bucket.Get(prevLevelKey[:])
            err := bucket.Put(mergeLevelKey[:], prevData)
            if err != nil {
                return err
            }
        }
        break
    }

    // Finally, insert the new entry into level 0 now that it is empty.
    return bucket.Put(level0Key[:], newData)
}
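
// Illustrative sketch (not part of the original file): exercising
// dbPutAddrIndexEntry against a minimal in-memory internalBucket. The
// simpleBucket type below is hypothetical test scaffolding, not an API of this
// package; it exists only to show the level cascade in isolation.
type simpleBucket struct {
    store map[[levelKeySize]byte][]byte
}

func (b *simpleBucket) Get(key []byte) []byte {
    var k [levelKeySize]byte
    copy(k[:], key)
    return b.store[k]
}

func (b *simpleBucket) Put(key, value []byte) error {
    var k [levelKeySize]byte
    copy(k[:], key)
    b.store[k] = value
    return nil
}

func (b *simpleBucket) Delete(key []byte) error {
    var k [levelKeySize]byte
    copy(k[:], key)
    delete(b.store, k)
    return nil
}

// examplePutCascade inserts one more entry than level 0 can hold, after which
// level 0 contains only the newest entry and level 1 holds the older ones.
func examplePutCascade(addrKey [addrKeySize]byte) error {
    bucket := &simpleBucket{store: make(map[[levelKeySize]byte][]byte)}
    for i := 0; i <= level0MaxEntries; i++ {
        txLoc := wire.TxLoc{TxStart: i * 100, TxLen: 100}
        if err := dbPutAddrIndexEntry(bucket, addrKey, uint32(i), txLoc); err != nil {
            return err
        }
    }
    level1Key := keyForLevel(addrKey, 1)
    _ = bucket.Get(level1Key[:]) // the level0MaxEntries older entries now live here
    return nil
}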

// dbFetchAddrIndexEntries returns block regions for transactions referenced by
// the given address key and the number of entries skipped since it could have
// been less in the case where there are fewer total entries than the requested
// number of entries to skip.
func dbFetchAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, numToSkip, numRequested uint32, reverse bool, fetchBlockHash fetchBlockHashFunc) ([]database.BlockRegion, uint32, error) {
    // When the reverse flag is not set, all levels need to be fetched
    // because numToSkip and numRequested are counted from the oldest
    // transactions (highest level) and thus the total count is needed.
    // However, when the reverse flag is set, only enough records to satisfy
    // the requested amount are needed.
    var level uint8
    var serialized []byte
    for !reverse || len(serialized) < int(numToSkip+numRequested)*txEntrySize {
        curLevelKey := keyForLevel(addrKey, level)
        levelData := bucket.Get(curLevelKey[:])
        if levelData == nil {
            // Stop when there are no more levels.
            break
        }

        // Higher levels contain older transactions, so prepend them.
        prepended := make([]byte, len(serialized)+len(levelData))
        copy(prepended, levelData)
        copy(prepended[len(levelData):], serialized)
        serialized = prepended
        level++
    }

    // When the requested number of entries to skip is larger than the
    // number available, skip them all and return now with the actual number
    // skipped.
    numEntries := uint32(len(serialized) / txEntrySize)
    if numToSkip >= numEntries {
        return nil, numEntries, nil
    }

    // Nothing more to do when there are no requested entries.
    if numRequested == 0 {
        return nil, numToSkip, nil
    }

    // Limit the number to load based on the number of available entries,
    // the number to skip, and the number requested.
    numToLoad := numEntries - numToSkip
    if numToLoad > numRequested {
        numToLoad = numRequested
    }

    // Start the offset after all skipped entries and load the calculated
    // number.
    results := make([]database.BlockRegion, numToLoad)
    for i := uint32(0); i < numToLoad; i++ {
        // Calculate the read offset according to the reverse flag.
        var offset uint32
        if reverse {
            offset = (numEntries - numToSkip - i - 1) * txEntrySize
        } else {
            offset = (numToSkip + i) * txEntrySize
        }

        // Deserialize and populate the result.
        err := deserializeAddrIndexEntry(serialized[offset:],
            &results[i], fetchBlockHash)
        if err != nil {
            // Ensure any deserialization errors are returned as
            // database corruption errors.
            if isDeserializeErr(err) {
                err = database.Error{
                    ErrorCode: database.ErrCorruption,
                    Description: fmt.Sprintf("failed to "+
                        "deserialize address index "+
                        "for key %x: %s", addrKey, err),
                }
            }

            return nil, 0, err
        }
    }

    return results, numToSkip, nil
}
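
// Illustrative sketch (not part of the original file): the offset arithmetic
// above, isolated. Entries are laid out oldest-first, so forward paging reads
// left to right while reverse paging reads right to left from the tail.
// entryOffset mirrors the in-loop computation for the i'th result.
func entryOffset(numEntries, numToSkip, i uint32, reverse bool) uint32 {
    if reverse {
        // e.g. 10 entries, skip 2, i == 0 -> entry index 7 (newest unskipped).
        return (numEntries - numToSkip - i - 1) * txEntrySize
    }
    // e.g. skip 2, i == 0 -> entry index 2 (oldest unskipped).
    return (numToSkip + i) * txEntrySize
}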

// minEntriesToReachLevel returns the minimum number of entries that are
// required to reach the given address index level.
func minEntriesToReachLevel(level uint8) int {
    maxEntriesForLevel := level0MaxEntries
    minRequired := 1
    for l := uint8(1); l <= level; l++ {
        minRequired += maxEntriesForLevel
        maxEntriesForLevel *= 2
    }
    return minRequired
}

// maxEntriesForLevel returns the maximum number of entries allowed for the
// given address index level.
func maxEntriesForLevel(level uint8) int {
    numEntries := level0MaxEntries
    for l := level; l > 0; l-- {
        numEntries *= 2
    }
    return numEntries
}
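
// Illustrative sketch (not part of the original file): both helpers above have
// closed forms, which can be handy when reasoning about the level rules. Since
// each level doubles the previous one:
//
//	maxEntriesForLevel(L)     == level0MaxEntries * 2^L
//	minEntriesToReachLevel(L) == level0MaxEntries*(2^L - 1) + 1
//
// The second identity follows from summing the geometric series of all full
// levels below L plus the one extra entry that overflows into L.
func maxEntriesForLevelClosedForm(level uint8) int {
    return level0MaxEntries << level
}

func minEntriesToReachLevelClosedForm(level uint8) int {
    return level0MaxEntries*((1<<level)-1) + 1
}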

// dbRemoveAddrIndexEntries removes the specified number of entries from
// the address index for the provided key. An assertion error will be returned
// if the count exceeds the total number of entries in the index.
func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, count int) error {
    // Nothing to do if no entries are being deleted.
    if count <= 0 {
        return nil
    }

    // Make use of a local map to track pending updates and define a closure
    // to apply it to the database. This is done in order to reduce the
    // number of database reads and because there is more than one exit
    // path that needs to apply the updates.
    pendingUpdates := make(map[uint8][]byte)
    applyPending := func() error {
        for level, data := range pendingUpdates {
            curLevelKey := keyForLevel(addrKey, level)
            if len(data) == 0 {
                err := bucket.Delete(curLevelKey[:])
                if err != nil {
                    return err
                }
                continue
            }
            err := bucket.Put(curLevelKey[:], data)
            if err != nil {
                return err
            }
        }
        return nil
    }

    // Loop forwards through the levels while removing entries until the
    // specified number has been removed. This will potentially result in
    // entirely empty lower levels which will be backfilled below.
    var highestLoadedLevel uint8
    numRemaining := count
    for level := uint8(0); numRemaining > 0; level++ {
        // Load the data for the level from the database.
        curLevelKey := keyForLevel(addrKey, level)
        curLevelData := bucket.Get(curLevelKey[:])
        if len(curLevelData) == 0 && numRemaining > 0 {
            return AssertError(fmt.Sprintf("dbRemoveAddrIndexEntries "+
                "not enough entries for address key %x to "+
                "delete %d entries", addrKey, count))
        }
        pendingUpdates[level] = curLevelData
        highestLoadedLevel = level

        // Delete the entire level as needed.
        numEntries := len(curLevelData) / txEntrySize
        if numRemaining >= numEntries {
            pendingUpdates[level] = nil
            numRemaining -= numEntries
            continue
        }

        // Remove remaining entries to delete from the level.
        offsetEnd := len(curLevelData) - (numRemaining * txEntrySize)
        pendingUpdates[level] = curLevelData[:offsetEnd]
        break
    }

    // When all elements in level 0 were not removed there is nothing left
    // to do other than updating the database.
    if len(pendingUpdates[0]) != 0 {
        return applyPending()
    }

    // At this point there are one or more empty levels before the current
    // level which need to be backfilled and the current level might have
    // had some entries deleted from it as well. Since all levels after
    // level 0 are required to either be empty, half full, or completely
    // full, the current level must be adjusted accordingly by backfilling
    // each previous level in a way which satisfies the requirements. Any
    // entries that are left are assigned to level 0 after the loop as they
    // are guaranteed to fit by the logic in the loop. In other words, this
    // effectively squashes all remaining entries in the current level into
    // the lowest possible levels while following the level rules.
    //
    // Note that the level after the current level might also have entries
    // and gaps are not allowed, so this also keeps track of the lowest
    // empty level so the code below knows how far to backfill in case it is
    // required.
    lowestEmptyLevel := uint8(255)
    curLevelData := pendingUpdates[highestLoadedLevel]
    curLevelMaxEntries := maxEntriesForLevel(highestLoadedLevel)
    for level := highestLoadedLevel; level > 0; level-- {
        // When there are not enough entries left in the current level
        // for the number that would be required to reach it, clear the
        // current level which effectively moves them all up to the
        // previous level on the next iteration. Otherwise, there are
        // sufficient entries, so update the current level to
        // contain as many entries as possible while still leaving
        // enough remaining entries required to reach the level.
        numEntries := len(curLevelData) / txEntrySize
        prevLevelMaxEntries := curLevelMaxEntries / 2
        minPrevRequired := minEntriesToReachLevel(level - 1)
        if numEntries < prevLevelMaxEntries+minPrevRequired {
            lowestEmptyLevel = level
            pendingUpdates[level] = nil
        } else {
            // This level can only be completely full or half full,
            // so choose the appropriate offset to ensure enough
            // entries remain to reach the level.
            var offset int
            if numEntries-curLevelMaxEntries >= minPrevRequired {
                offset = curLevelMaxEntries * txEntrySize
            } else {
                offset = prevLevelMaxEntries * txEntrySize
            }
            pendingUpdates[level] = curLevelData[:offset]
            curLevelData = curLevelData[offset:]
        }

        curLevelMaxEntries = prevLevelMaxEntries
    }
    pendingUpdates[0] = curLevelData
    if len(curLevelData) == 0 {
        lowestEmptyLevel = 0
    }

    // When the highest loaded level is empty, it's possible the level after
    // it still has data and thus that data needs to be backfilled as well.
    for len(pendingUpdates[highestLoadedLevel]) == 0 {
        // When the next level is empty too, there is no data left to
        // continue backfilling, so there is nothing left to do.
        // Otherwise, populate the pending updates map with the newly
        // loaded data and update the highest loaded level accordingly.
        level := highestLoadedLevel + 1
        curLevelKey := keyForLevel(addrKey, level)
        levelData := bucket.Get(curLevelKey[:])
        if len(levelData) == 0 {
            break
        }
        pendingUpdates[level] = levelData
        highestLoadedLevel = level

        // At this point the highest level is not empty, but it might
        // be half full. When that is the case, move it up a level to
        // simplify the code below which backfills all lower levels that
        // are still empty. This also means the current level will be
        // empty, so the loop will perform another iteration to
        // potentially backfill this level with data from the next one.
        curLevelMaxEntries := maxEntriesForLevel(level)
        if len(levelData)/txEntrySize != curLevelMaxEntries {
            pendingUpdates[level] = nil
            pendingUpdates[level-1] = levelData
            level--
            curLevelMaxEntries /= 2
        }

        // Backfill all lower levels that are still empty by iteratively
        // halving the data until the lowest empty level is filled.
        for level > lowestEmptyLevel {
            offset := (curLevelMaxEntries / 2) * txEntrySize
            pendingUpdates[level] = levelData[:offset]
            levelData = levelData[offset:]
            pendingUpdates[level-1] = levelData
            level--
            curLevelMaxEntries /= 2
        }

        // The lowest possible empty level is now the highest loaded
        // level.
        lowestEmptyLevel = highestLoadedLevel
    }

    // Apply the pending updates.
    return applyPending()
}
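
// Illustrative sketch (not part of the original file): removing entries with
// the simpleBucket scaffolding from the earlier sketch. Deletion always takes
// the newest entries first, so after inserting N entries and removing k, the
// oldest N-k entries remain, redistributed to satisfy the level rules.
func exampleRemoveNewest(addrKey [addrKeySize]byte) error {
    bucket := &simpleBucket{store: make(map[[levelKeySize]byte][]byte)}
    numInsert := level0MaxEntries*2 + 1 // enough to fill level 1
    for i := 0; i < numInsert; i++ {
        txLoc := wire.TxLoc{TxStart: i * 100, TxLen: 100}
        if err := dbPutAddrIndexEntry(bucket, addrKey, uint32(i), txLoc); err != nil {
            return err
        }
    }
    // Drop the three newest entries; lower levels are backfilled so that
    // every level above 0 stays empty, half full, or full.
    return dbRemoveAddrIndexEntries(bucket, addrKey, 3)
}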

// addrToKey converts known address types to an addrindex key. An error is
// returned for unsupported types.
func addrToKey(addr util.Address) ([addrKeySize]byte, error) {
    switch addr := addr.(type) {
    case *util.AddressPubKeyHash:
        var result [addrKeySize]byte
        result[0] = addrKeyTypePubKeyHash
        copy(result[1:], addr.Hash160()[:])
        return result, nil

    case *util.AddressScriptHash:
        var result [addrKeySize]byte
        result[0] = addrKeyTypeScriptHash
        copy(result[1:], addr.Hash160()[:])
        return result, nil

    case *util.AddressPubKey:
        var result [addrKeySize]byte
        result[0] = addrKeyTypePubKeyHash
        copy(result[1:], addr.AddressPubKeyHash().Hash160()[:])
        return result, nil
    }

    return [addrKeySize]byte{}, errUnsupportedAddressType
}

// AddrIndex implements a transaction by address index. That is to say, it
// supports querying all transactions that reference a given address because
// they are either crediting or debiting the address. The returned transactions
// are ordered according to their order of appearance in the blockchain. In
// other words, first by block height and then by offset inside the block.
//
// In addition, support is provided for a memory-only index of unconfirmed
// transactions such as those which are kept in the memory pool before inclusion
// in a block.
type AddrIndex struct {
    // The following fields are set when the instance is created and can't
    // be changed afterwards, so there is no need to protect them with a
    // separate mutex.
    db        database.DB
    dagParams *dagconfig.Params

    // The following fields are used to quickly link transactions and
    // addresses that have not been included into a block yet when an
    // address index is being maintained. They are protected by the
    // unconfirmedLock field.
    //
    // The txnsByAddr field is used to keep an index of all transactions
    // which either create an output to a given address or spend from a
    // previous output to it keyed by the address.
    //
    // The addrsByTx field is essentially the reverse and is used to
    // keep an index of all addresses which a given transaction involves.
    // This allows fairly efficient updates when transactions are removed
    // once they are included into a block.
    unconfirmedLock sync.RWMutex
    txnsByAddr      map[[addrKeySize]byte]map[daghash.TxID]*util.Tx
    addrsByTx       map[daghash.TxID]map[[addrKeySize]byte]struct{}
}

// Ensure the AddrIndex type implements the Indexer interface.
var _ Indexer = (*AddrIndex)(nil)

// Ensure the AddrIndex type implements the NeedsInputser interface.
var _ NeedsInputser = (*AddrIndex)(nil)

// NeedsInputs signals that the index requires the referenced inputs in order
// to properly create the index.
//
// This implements the NeedsInputser interface.
func (idx *AddrIndex) NeedsInputs() bool {
    return true
}

// Init is only provided to satisfy the Indexer interface as there is nothing to
// initialize for this index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Init(db database.DB) error {
    idx.db = db
    return nil
}

// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Key() []byte {
    return addrIndexKey
}

// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Name() string {
    return addrIndexName
}

// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the address
// index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Create(dbTx database.Tx) error {
    _, err := dbTx.Metadata().CreateBucket(addrIndexKey)
    return err
}

// writeIndexData represents the address index data to be written for one block.
// It consists of the address mapped to an ordered list of the transactions
// that involve the address in the block. It is ordered so the transactions can
// be stored in the order they appear in the block.
type writeIndexData map[[addrKeySize]byte][]int

// indexPkScript extracts all standard addresses from the passed public key
// script and maps each of them to the associated transaction using the passed
// map.
func (idx *AddrIndex) indexPkScript(data writeIndexData, pkScript []byte, txIdx int) {
    // Nothing to index if the script is non-standard or otherwise doesn't
    // contain any addresses.
    _, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript,
        idx.dagParams)
    if err != nil || len(addrs) == 0 {
        return
    }

    for _, addr := range addrs {
        addrKey, err := addrToKey(addr)
        if err != nil {
            // Ignore unsupported address types.
            continue
        }

        // Avoid inserting the transaction more than once. Since the
        // transactions are indexed serially any duplicates will be
        // indexed in a row, so checking the most recent entry for the
        // address is enough to detect duplicates.
        indexedTxns := data[addrKey]
        numTxns := len(indexedTxns)
        if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
            continue
        }
        indexedTxns = append(indexedTxns, txIdx)
        data[addrKey] = indexedTxns
    }
}

// indexBlock extracts all of the standard addresses from all of the transactions
// in the passed block and maps each of them to the associated transaction using
// the passed map.
func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *blockdag.BlockDAG) {
    for txIdx, tx := range block.Transactions() {
        // Coinbases do not reference any inputs. Since the block is
        // required to have already gone through full validation, it has
        // already been proven that the first transaction in the block is
        // a coinbase, and the second one is a fee transaction.
        if txIdx > 1 {
            for _, txIn := range tx.MsgTx().TxIn {
                // The UTXO should always have the input since
                // the index contract requires it, however, be
                // safe and simply ignore any missing entries.
                entry, ok := dag.GetUTXOEntry(txIn.PreviousOutPoint)
                if !ok {
                    continue
                }

                idx.indexPkScript(data, entry.PkScript(), txIdx)
            }
        }

        for _, txOut := range tx.MsgTx().TxOut {
            idx.indexPkScript(data, txOut.PkScript, txIdx)
        }
    }
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData) error {
    // The offset and length of the transactions within the serialized
    // block.
    txLocs, err := block.TxLoc()
    if err != nil {
        return err
    }

    // Get the internal block ID associated with the block.
    blockID, err := dbFetchBlockIDByHash(dbTx, block.Hash())
    if err != nil {
        return err
    }

    // Build all of the address to transaction mappings in a local map.
    addrsToTxns := make(writeIndexData)
    idx.indexBlock(addrsToTxns, block, dag)

    // Add all of the index entries for each address.
    addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
    for addrKey, txIdxs := range addrsToTxns {
        for _, txIdx := range txIdxs {
            err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
                blockID, txLocs[txIdx])
            if err != nil {
                return err
            }
        }
    }

    return nil
}

// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain. This indexer removes the address mappings
// each transaction in the block involves.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) DisconnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG) error {
    // Build all of the address to transaction mappings in a local map.
    addrsToTxns := make(writeIndexData)
    idx.indexBlock(addrsToTxns, block, dag)

    // Remove all of the index entries for each address.
    bucket := dbTx.Metadata().Bucket(addrIndexKey)
    for addrKey, txIdxs := range addrsToTxns {
        err := dbRemoveAddrIndexEntries(bucket, addrKey, len(txIdxs))
        if err != nil {
            return err
        }
    }

    return nil
}

// TxRegionsForAddress returns a slice of block regions which identify each
// transaction that involves the passed address according to the specified
// number to skip, number requested, and whether or not the results should be
// reversed. It also returns the number actually skipped since it could be less
// in the case where there are not enough entries.
//
// NOTE: These results only include transactions confirmed in blocks. See the
// UnconfirmedTxnsForAddress method for obtaining unconfirmed transactions
// that involve a given address.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr util.Address, numToSkip, numRequested uint32, reverse bool) ([]database.BlockRegion, uint32, error) {
    addrKey, err := addrToKey(addr)
    if err != nil {
        return nil, 0, err
    }

    var regions []database.BlockRegion
    var skipped uint32
    err = idx.db.View(func(dbTx database.Tx) error {
        // Create closure to lookup the block hash given the ID using
        // the database transaction.
        fetchBlockHash := func(id []byte) (*daghash.Hash, error) {
            // Deserialize and populate the result.
            return dbFetchBlockHashBySerializedID(dbTx, id)
        }

        var err error
        addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
        regions, skipped, err = dbFetchAddrIndexEntries(addrIdxBucket,
            addrKey, numToSkip, numRequested, reverse,
            fetchBlockHash)
        return err
    })

    return regions, skipped, err
}
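
// Illustrative sketch (not part of the original file): paging through an
// address's confirmed transactions, newest first. The addrIndex and addr
// values are assumed to come from the caller; the page size of 25 is
// arbitrary. The dbTx parameter is shadowed by the internal View transaction
// above, hence nil here.
func exampleTxRegionPaging(addrIndex *AddrIndex, addr util.Address) error {
    const pageSize = 25
    for page := uint32(0); ; page++ {
        regions, _, err := addrIndex.TxRegionsForAddress(nil, addr,
            page*pageSize, pageSize, true)
        if err != nil {
            return err
        }
        if len(regions) == 0 {
            return nil // no more confirmed transactions
        }
        // Each region identifies the block hash, byte offset, and length
        // of a serialized transaction within that block.
    }
}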

// indexUnconfirmedAddresses modifies the unconfirmed (memory-only) address
// index to include mappings for the addresses encoded by the passed public key
// script to the transaction.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) indexUnconfirmedAddresses(pkScript []byte, tx *util.Tx) {
    // The error is ignored here since the only reason it can fail is if the
    // script fails to parse and it was already validated before being
    // admitted to the mempool.
    _, addresses, _, _ := txscript.ExtractPkScriptAddrs(pkScript,
        idx.dagParams)
    for _, addr := range addresses {
        // Ignore unsupported address types.
        addrKey, err := addrToKey(addr)
        if err != nil {
            continue
        }

        // Add a mapping from the address to the transaction.
        idx.unconfirmedLock.Lock()
        addrIndexEntry := idx.txnsByAddr[addrKey]
        if addrIndexEntry == nil {
            addrIndexEntry = make(map[daghash.TxID]*util.Tx)
            idx.txnsByAddr[addrKey] = addrIndexEntry
        }
        addrIndexEntry[*tx.ID()] = tx

        // Add a mapping from the transaction to the address.
        addrsByTxEntry := idx.addrsByTx[*tx.ID()]
        if addrsByTxEntry == nil {
            addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
            idx.addrsByTx[*tx.ID()] = addrsByTxEntry
        }
        addrsByTxEntry[addrKey] = struct{}{}
        idx.unconfirmedLock.Unlock()
    }
}

// AddUnconfirmedTx adds all addresses related to the transaction to the
// unconfirmed (memory-only) address index.
//
// NOTE: This transaction MUST have already been validated by the memory pool
// before calling this function with it and have all of the inputs available in
// the provided utxo view. Failure to do so could result in some or all
// addresses not being indexed.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) AddUnconfirmedTx(tx *util.Tx, utxoSet blockdag.UTXOSet) {
    // Index addresses of all referenced previous transaction outputs.
    //
    // The existence checks are elided since this is only called after the
    // transaction has already been validated and thus all inputs are
    // already known to exist.
    for _, txIn := range tx.MsgTx().TxIn {
        entry, ok := utxoSet.Get(txIn.PreviousOutPoint)
        if !ok {
            // Ignore missing entries. This should never happen
            // in practice since the function comments specifically
            // call out all inputs must be available.
            continue
        }
        idx.indexUnconfirmedAddresses(entry.PkScript(), tx)
    }

    // Index addresses of all created outputs.
    for _, txOut := range tx.MsgTx().TxOut {
        idx.indexUnconfirmedAddresses(txOut.PkScript, tx)
    }
}

// RemoveUnconfirmedTx removes the passed transaction from the unconfirmed
// (memory-only) address index.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) RemoveUnconfirmedTx(txID *daghash.TxID) {
    idx.unconfirmedLock.Lock()
    defer idx.unconfirmedLock.Unlock()

    // Remove all address references to the transaction from the address
    // index and remove the entry for the address altogether if it no longer
    // references any transactions.
    for addrKey := range idx.addrsByTx[*txID] {
        delete(idx.txnsByAddr[addrKey], *txID)
        if len(idx.txnsByAddr[addrKey]) == 0 {
            delete(idx.txnsByAddr, addrKey)
        }
    }

    // Remove the entry from the transaction to address lookup map as well.
    delete(idx.addrsByTx, *txID)
}

// UnconfirmedTxnsForAddress returns all transactions currently in the
// unconfirmed (memory-only) address index that involve the passed address.
// Unsupported address types are ignored and produce no results.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
    // Ignore unsupported address types.
    addrKey, err := addrToKey(addr)
    if err != nil {
        return nil
    }

    // Protect concurrent access.
    idx.unconfirmedLock.RLock()
    defer idx.unconfirmedLock.RUnlock()

    // Return a new slice with the results if there are any. This ensures
    // safe concurrency.
    if txns, exists := idx.txnsByAddr[addrKey]; exists {
        addressTxns := make([]*util.Tx, 0, len(txns))
        for _, tx := range txns {
            addressTxns = append(addressTxns, tx)
        }
        return addressTxns
    }

    return nil
}
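
// Illustrative sketch (not part of the original file): the lifecycle of the
// unconfirmed index around mempool events. The tx, utxoSet, and addr values
// are assumed to come from the surrounding node; only the index calls are the
// point here.
func exampleUnconfirmedLifecycle(idx *AddrIndex, tx *util.Tx, utxoSet blockdag.UTXOSet, addr util.Address) {
    // When a transaction is accepted to the mempool:
    idx.AddUnconfirmedTx(tx, utxoSet)

    // While it is pending, it shows up in per-address queries:
    _ = idx.UnconfirmedTxnsForAddress(addr)

    // Once it is mined (or evicted), drop it from the in-memory maps:
    idx.RemoveUnconfirmedTx(tx.ID())
}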

// NewAddrIndex returns a new instance of an indexer that is used to create a
// mapping of all addresses in the blockchain to the respective transactions
// that involve them.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewAddrIndex(dagParams *dagconfig.Params) *AddrIndex {
    return &AddrIndex{
        dagParams:  dagParams,
        txnsByAddr: make(map[[addrKeySize]byte]map[daghash.TxID]*util.Tx),
        addrsByTx:  make(map[daghash.TxID]map[[addrKeySize]byte]struct{}),
    }
}

// DropAddrIndex drops the address index from the provided database if it
// exists.
func DropAddrIndex(db database.DB, interrupt <-chan struct{}) error {
    return dropIndex(db, addrIndexKey, addrIndexName, interrupt)
}
@@ -1,276 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
    "bytes"
    "fmt"
    "testing"

    "github.com/daglabs/btcd/wire"
)

// addrIndexBucket provides a mock address index database bucket by implementing
// the internalBucket interface.
type addrIndexBucket struct {
    levels map[[levelKeySize]byte][]byte
}

// Clone returns a deep copy of the mock address index bucket.
func (b *addrIndexBucket) Clone() *addrIndexBucket {
    levels := make(map[[levelKeySize]byte][]byte)
    for k, v := range b.levels {
        vCopy := make([]byte, len(v))
        copy(vCopy, v)
        levels[k] = vCopy
    }
    return &addrIndexBucket{levels: levels}
}

// Get returns the value associated with the key from the mock address index
// bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Get(key []byte) []byte {
    var levelKey [levelKeySize]byte
    copy(levelKey[:], key)
    return b.levels[levelKey]
}

// Put stores the provided key/value pair to the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Put(key []byte, value []byte) error {
    var levelKey [levelKeySize]byte
    copy(levelKey[:], key)
    b.levels[levelKey] = value
    return nil
}

// Delete removes the provided key from the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Delete(key []byte) error {
    var levelKey [levelKeySize]byte
    copy(levelKey[:], key)
    delete(b.levels, levelKey)
    return nil
}

// printLevels returns a string with a visual representation of the provided
// address key taking into account the max size of each level. It is useful
// when creating and debugging test cases.
func (b *addrIndexBucket) printLevels(addrKey [addrKeySize]byte) string {
    highestLevel := uint8(0)
    for k := range b.levels {
        if !bytes.Equal(k[:levelOffset], addrKey[:]) {
            continue
        }
        level := uint8(k[levelOffset])
        if level > highestLevel {
            highestLevel = level
        }
    }

    var levelBuf bytes.Buffer
    _, _ = levelBuf.WriteString("\n")
    maxEntries := level0MaxEntries
    for level := uint8(0); level <= highestLevel; level++ {
        data := b.levels[keyForLevel(addrKey, level)]
        numEntries := len(data) / txEntrySize
        for i := 0; i < numEntries; i++ {
            start := i * txEntrySize
            num := byteOrder.Uint32(data[start:])
            _, _ = levelBuf.WriteString(fmt.Sprintf("%02d ", num))
        }
        for i := numEntries; i < maxEntries; i++ {
            _, _ = levelBuf.WriteString("_ ")
        }
        _, _ = levelBuf.WriteString("\n")
        maxEntries *= 2
    }

    return levelBuf.String()
}

// sanityCheck ensures that all data stored in the bucket for the given address
// adheres to the level-based rules described by the address index
// documentation.
func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal int) error {
    // Find the highest level for the key.
    highestLevel := uint8(0)
    for k := range b.levels {
        if !bytes.Equal(k[:levelOffset], addrKey[:]) {
            continue
        }
        level := uint8(k[levelOffset])
        if level > highestLevel {
            highestLevel = level
        }
    }

    // Ensure the expected total number of entries are present and that
    // all levels adhere to the rules described in the address index
    // documentation.
    var totalEntries int
    maxEntries := level0MaxEntries
    for level := uint8(0); level <= highestLevel; level++ {
        // Level 0 can't have more entries than the max allowed if the
        // levels after it have data and it can't be empty. All other
        // levels must either be half full or full.
        data := b.levels[keyForLevel(addrKey, level)]
        numEntries := len(data) / txEntrySize
        totalEntries += numEntries
        if level == 0 {
            if (highestLevel != 0 && numEntries == 0) ||
                numEntries > maxEntries {

                return fmt.Errorf("level %d has %d entries",
                    level, numEntries)
            }
        } else if numEntries != maxEntries && numEntries != maxEntries/2 {
            return fmt.Errorf("level %d has %d entries", level,
                numEntries)
        }
        maxEntries *= 2
    }
    if totalEntries != expectedTotal {
        return fmt.Errorf("expected %d entries - got %d", expectedTotal,
            totalEntries)
    }

    // Ensure all of the numbers are in order starting from the highest
    // level moving to the lowest level.
    expectedNum := uint32(0)
    for level := highestLevel + 1; level > 0; level-- {
        data := b.levels[keyForLevel(addrKey, level-1)]
        numEntries := len(data) / txEntrySize
        for i := 0; i < numEntries; i++ {
            start := i * txEntrySize
            num := byteOrder.Uint32(data[start:])
            if num != expectedNum {
                return fmt.Errorf("level %d offset %d does "+
                    "not contain the expected number of "+
                    "%d - got %d", level, i, num,
                    expectedNum)
            }
            expectedNum++
        }
    }

    return nil
}

// TestAddrIndexLevels ensures that adding and deleting entries to the address
// index creates multiple levels as described by the address index
// documentation.
func TestAddrIndexLevels(t *testing.T) {
    t.Parallel()

    tests := []struct {
        name        string
        key         [addrKeySize]byte
        numInsert   int
        printLevels bool // Set to help debug a specific test.
    }{
        {
            name:      "level 0 not full",
            numInsert: level0MaxEntries - 1,
        },
        {
            name:      "level 1 half",
            numInsert: level0MaxEntries + 1,
        },
        {
            name:      "level 1 full",
            numInsert: level0MaxEntries*2 + 1,
        },
        {
            name:      "level 2 half, level 1 half",
            numInsert: level0MaxEntries*3 + 1,
        },
        {
            name:      "level 2 half, level 1 full",
            numInsert: level0MaxEntries*4 + 1,
        },
        {
            name:      "level 2 full, level 1 half",
            numInsert: level0MaxEntries*5 + 1,
        },
        {
            name:      "level 2 full, level 1 full",
            numInsert: level0MaxEntries*6 + 1,
        },
        {
            name:      "level 3 half, level 2 half, level 1 half",
            numInsert: level0MaxEntries*7 + 1,
        },
        {
            name:      "level 3 full, level 2 half, level 1 full",
            numInsert: level0MaxEntries*12 + 1,
        },
    }

nextTest:
    for testNum, test := range tests {
        // Insert entries in order.
        populatedBucket := &addrIndexBucket{
            levels: make(map[[levelKeySize]byte][]byte),
        }
        for i := 0; i < test.numInsert; i++ {
            txLoc := wire.TxLoc{TxStart: i * 2}
            err := dbPutAddrIndexEntry(populatedBucket, test.key,
                uint32(i), txLoc)
            if err != nil {
                t.Errorf("dbPutAddrIndexEntry #%d (%s) - "+
                    "unexpected error: %v", testNum,
                    test.name, err)
                continue nextTest
            }
        }
        if test.printLevels {
            t.Log(populatedBucket.printLevels(test.key))
        }

        // Delete entries from the populated bucket until all entries
        // have been deleted. The bucket is reset to the fully
        // populated bucket on each iteration so every combination is
        // tested. Notice the upper limit purposely exceeds the number
        // of entries to ensure attempting to delete more entries than
        // there are works correctly.
        for numDelete := 0; numDelete <= test.numInsert+1; numDelete++ {
            // Clone populated bucket to run each delete against.
            bucket := populatedBucket.Clone()

            // Remove the number of entries for this iteration.
            err := dbRemoveAddrIndexEntries(bucket, test.key,
                numDelete)
            if err != nil {
                if numDelete <= test.numInsert {
                    t.Errorf("dbRemoveAddrIndexEntries (%s) "+
                        " delete %d - unexpected error: "+
                        "%v", test.name, numDelete, err)
                    continue nextTest
                }
            }
            if test.printLevels {
                t.Log(bucket.printLevels(test.key))
            }

            // Sanity check the levels to ensure they adhere to all
            // rules.
            numExpected := test.numInsert
            if numDelete <= test.numInsert {
                numExpected -= numDelete
            }
            err = bucket.sanityCheck(test.key, numExpected)
            if err != nil {
                t.Errorf("sanity check fail (%s) delete %d: %v",
                    test.name, numDelete, err)
                continue nextTest
            }
        }
    }
}
@@ -1,76 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
    "sync"
    "time"

    "github.com/btcsuite/btclog"
    "github.com/daglabs/btcd/util"
)

// blockProgressLogger provides periodic logging for other services in order
// to show users progress of certain "actions" involving some or all current
// blocks. Ex: syncing to best chain, indexing all blocks, etc.
type blockProgressLogger struct {
    receivedLogBlocks int64
    receivedLogTx     int64
    lastBlockLogTime  time.Time

    subsystemLogger btclog.Logger
    progressAction  string
    sync.Mutex
}

// newBlockProgressLogger returns a new block progress logger.
// The progress message is templated as follows:
// {progressAction} {numProcessed} {blocks|block} in the last {timePeriod}
// ({numTxs}, height {lastBlockHeight}, {lastBlockTimeStamp})
func newBlockProgressLogger(progressMessage string, logger btclog.Logger) *blockProgressLogger {
    return &blockProgressLogger{
        lastBlockLogTime: time.Now(),
        progressAction:   progressMessage,
        subsystemLogger:  logger,
    }
}

// LogBlockHeight logs a new block height as an information message to show
// progress to the user. In order to prevent spam, it limits logging to one
// message every 10 seconds with duration and totals included.
func (b *blockProgressLogger) LogBlockHeight(block *util.Block) {
    b.Lock()
    defer b.Unlock()

    b.receivedLogBlocks++
    b.receivedLogTx += int64(len(block.MsgBlock().Transactions))

    now := time.Now()
    duration := now.Sub(b.lastBlockLogTime)
    if duration < time.Second*10 {
        return
    }

    // Truncate the duration to 10s of milliseconds.
    durationMillis := int64(duration / time.Millisecond)
    tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)

    // Log information about new block height.
    blockStr := "blocks"
    if b.receivedLogBlocks == 1 {
        blockStr = "block"
    }
    txStr := "transactions"
    if b.receivedLogTx == 1 {
        txStr = "transaction"
    }
    b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, height %d, %s)",
        b.progressAction, b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx,
        txStr, block.Height(), block.MsgBlock().Header.Timestamp)

    b.receivedLogBlocks = 0
    b.receivedLogTx = 0
    b.lastBlockLogTime = now
}
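
// Illustrative sketch (not part of the original file): how a sync process
// would drive the logger above. Only one line is emitted per 10-second window
// no matter how many blocks are processed; log is the package-level logger.
func exampleProgress(blocks []*util.Block) {
    progressLogger := newBlockProgressLogger("Processed", log)
    for _, block := range blocks {
        // ... process the block here ...
        progressLogger.LogBlockHeight(block)
    }
}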
@@ -1,352 +0,0 @@
// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
    "errors"

    "github.com/daglabs/btcd/blockdag"
    "github.com/daglabs/btcd/dagconfig"
    "github.com/daglabs/btcd/dagconfig/daghash"
    "github.com/daglabs/btcd/database"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/gcs"
    "github.com/daglabs/btcd/util/gcs/builder"
    "github.com/daglabs/btcd/wire"
)

const (
    // cfIndexName is the human-readable name for the index.
    cfIndexName = "committed filter index"
)

// Committed filters come in two flavours: basic and extended. They are
// generated and dropped in pairs, and both are indexed by a block's hash.
// Besides holding different content, they also live in different buckets.
var (
    // cfIndexParentBucketKey is the name of the parent bucket used to house
    // the index. The rest of the buckets live below this bucket.
    cfIndexParentBucketKey = []byte("cfindexparentbucket")

    // cfIndexKeys is an array of db bucket names used to house indexes of
    // block hashes to cfilters.
    cfIndexKeys = [][]byte{
        []byte("cf0byhashidx"),
        []byte("cf1byhashidx"),
    }

    // cfHeaderKeys is an array of db bucket names used to house indexes of
    // block hashes to cf headers.
    cfHeaderKeys = [][]byte{
        []byte("cf0headerbyhashidx"),
        []byte("cf1headerbyhashidx"),
    }

    // cfHashKeys is an array of db bucket names used to house indexes of
    // block hashes to cf hashes.
    cfHashKeys = [][]byte{
        []byte("cf0hashbyhashidx"),
        []byte("cf1hashbyhashidx"),
    }

    maxFilterType = uint8(len(cfHeaderKeys) - 1)
)

// dbFetchFilterIdxEntry retrieves a data blob from the filter index database.
// An entry's absence is not considered an error.
func dbFetchFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash) ([]byte, error) {
    idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
    return idx.Get(h[:]), nil
}

// dbStoreFilterIdxEntry stores a data blob in the filter index database.
func dbStoreFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash, f []byte) error {
    idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
    return idx.Put(h[:], f)
}

// dbDeleteFilterIdxEntry deletes a data blob from the filter index database.
func dbDeleteFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash) error {
    idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
    return idx.Delete(h[:])
}

// CfIndex implements a committed filter (cf) by hash index.
type CfIndex struct {
    db        database.DB
    dagParams *dagconfig.Params
}

// Ensure the CfIndex type implements the Indexer interface.
var _ Indexer = (*CfIndex)(nil)

// Init initializes the hash-based cf index. This is part of the Indexer
// interface.
func (idx *CfIndex) Init(db database.DB) error {
    idx.db = db
    return nil
}

// Key returns the database key to use for the index as a byte slice. This is
// part of the Indexer interface.
func (idx *CfIndex) Key() []byte {
    return cfIndexParentBucketKey
}

// Name returns the human-readable name of the index. This is part of the
// Indexer interface.
func (idx *CfIndex) Name() string {
    return cfIndexName
}

// Create is invoked when the indexer manager determines the index needs to
// be created for the first time. It creates buckets for the two hash-based cf
// indexes (simple, extended).
func (idx *CfIndex) Create(dbTx database.Tx) error {
    meta := dbTx.Metadata()

    cfIndexParentBucket, err := meta.CreateBucket(cfIndexParentBucketKey)
    if err != nil {
        return err
    }

    for _, bucketName := range cfIndexKeys {
        _, err = cfIndexParentBucket.CreateBucket(bucketName)
        if err != nil {
            return err
        }
    }

    for _, bucketName := range cfHeaderKeys {
        _, err = cfIndexParentBucket.CreateBucket(bucketName)
        if err != nil {
            return err
        }
    }

    for _, bucketName := range cfHashKeys {
        _, err = cfIndexParentBucket.CreateBucket(bucketName)
        if err != nil {
            return err
        }
    }

    return nil
}

// storeFilter stores a given filter, and performs the steps needed to
// generate the filter's header.
func storeFilter(dbTx database.Tx, block *util.Block, f *gcs.Filter,
    filterType wire.FilterType) error {
    if uint8(filterType) > maxFilterType {
        return errors.New("unsupported filter type")
    }

    // Figure out which buckets to use.
    fkey := cfIndexKeys[filterType]
    hkey := cfHeaderKeys[filterType]
    hashkey := cfHashKeys[filterType]

    // Start by storing the filter.
    h := block.Hash()
    filterBytes, err := f.NBytes()
    if err != nil {
        return err
    }
    err = dbStoreFilterIdxEntry(dbTx, fkey, h, filterBytes)
    if err != nil {
        return err
    }

    // Next store the filter hash.
    filterHash, err := builder.GetFilterHash(f)
    if err != nil {
        return err
    }
    err = dbStoreFilterIdxEntry(dbTx, hashkey, h, filterHash[:])
    if err != nil {
        return err
    }

    // Then fetch the previous block's filter header.
    var prevHeader *daghash.Hash
    header := block.MsgBlock().Header
    if header.IsGenesis() {
        prevHeader = &daghash.ZeroHash
    } else {
        ph := header.SelectedParentHash()
        pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph)
        if err != nil {
            return err
        }

        // Construct the new block's filter header, and store it.
        prevHeader, err = daghash.NewHash(pfh)
        if err != nil {
            return err
        }
    }

    fh, err := builder.MakeHeaderForFilter(f, prevHeader)
    if err != nil {
        return err
    }
    return dbStoreFilterIdxEntry(dbTx, hkey, h, fh[:])
}
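
// Illustrative sketch (not part of the original file): the header chaining
// performed by storeFilter, isolated. A block's filter header commits both to
// its own filter and to the selected parent's filter header; the genesis block
// chains from the all-zero hash. The builder call mirrors the one used above.
func exampleFilterHeaderChain(f *gcs.Filter, parentHeader *daghash.Hash,
    isGenesis bool) ([]byte, error) {

    prevHeader := parentHeader
    if isGenesis {
        // Same base case as storeFilter above.
        prevHeader = &daghash.ZeroHash
    }
    fh, err := builder.MakeHeaderForFilter(f, prevHeader)
    if err != nil {
        return nil, err
    }
    return fh[:], nil
}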

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a hash-to-cf mapping for
// every passed block. This is part of the Indexer interface.
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *util.Block,
    _ *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData) error {

    f, err := builder.BuildBasicFilter(block.MsgBlock())
    if err != nil {
        return err
    }

    err = storeFilter(dbTx, block, f, wire.GCSFilterRegular)
    if err != nil {
        return err
    }

    f, err = builder.BuildExtFilter(block.MsgBlock())
    if err != nil {
        return err
    }

    return storeFilter(dbTx, block, f, wire.GCSFilterExtended)
}

// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain. This indexer removes the hash-to-cf
// mapping for every passed block. This is part of the Indexer interface.
func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *util.Block,
    _ *blockdag.BlockDAG) error {

    for _, key := range cfIndexKeys {
        err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
        if err != nil {
            return err
        }
    }

    for _, key := range cfHeaderKeys {
        err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
        if err != nil {
            return err
        }
    }

    for _, key := range cfHashKeys {
        err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
        if err != nil {
            return err
        }
    }

    return nil
}

// entryByBlockHash fetches a filter index entry of a particular type
// (e.g. filter, filter header, etc.) for a filter type and block hash.
func (idx *CfIndex) entryByBlockHash(filterTypeKeys [][]byte,
    filterType wire.FilterType, h *daghash.Hash) ([]byte, error) {

    if uint8(filterType) > maxFilterType {
        return nil, errors.New("unsupported filter type")
    }
    key := filterTypeKeys[filterType]

    var entry []byte
    err := idx.db.View(func(dbTx database.Tx) error {
        var err error
        entry, err = dbFetchFilterIdxEntry(dbTx, key, h)
        return err
    })
    return entry, err
}

// entriesByBlockHashes batch fetches a filter index entry of a particular type
// (e.g. filter, filter header, etc.) for a filter type and slice of block hashes.
func (idx *CfIndex) entriesByBlockHashes(filterTypeKeys [][]byte,
    filterType wire.FilterType, blockHashes []*daghash.Hash) ([][]byte, error) {

    if uint8(filterType) > maxFilterType {
        return nil, errors.New("unsupported filter type")
    }
    key := filterTypeKeys[filterType]

    entries := make([][]byte, 0, len(blockHashes))
    err := idx.db.View(func(dbTx database.Tx) error {
        for _, blockHash := range blockHashes {
            entry, err := dbFetchFilterIdxEntry(dbTx, key, blockHash)
            if err != nil {
                return err
            }
            entries = append(entries, entry)
        }
        return nil
    })
    return entries, err
}

// FilterByBlockHash returns the serialized contents of a block's basic or
// extended committed filter.
func (idx *CfIndex) FilterByBlockHash(h *daghash.Hash,
    filterType wire.FilterType) ([]byte, error) {
    return idx.entryByBlockHash(cfIndexKeys, filterType, h)
}

// FiltersByBlockHashes returns the serialized contents of a block's basic or
// extended committed filter for a set of blocks by hash.
func (idx *CfIndex) FiltersByBlockHashes(blockHashes []*daghash.Hash,
    filterType wire.FilterType) ([][]byte, error) {
    return idx.entriesByBlockHashes(cfIndexKeys, filterType, blockHashes)
}

// FilterHeaderByBlockHash returns the serialized contents of a block's basic
// or extended committed filter header.
func (idx *CfIndex) FilterHeaderByBlockHash(h *daghash.Hash,
    filterType wire.FilterType) ([]byte, error) {
    return idx.entryByBlockHash(cfHeaderKeys, filterType, h)
}

// FilterHeadersByBlockHashes returns the serialized contents of a block's basic
// or extended committed filter header for a set of blocks by hash.
func (idx *CfIndex) FilterHeadersByBlockHashes(blockHashes []*daghash.Hash,
    filterType wire.FilterType) ([][]byte, error) {
    return idx.entriesByBlockHashes(cfHeaderKeys, filterType, blockHashes)
}

// FilterHashByBlockHash returns the serialized contents of a block's basic
// or extended committed filter hash.
func (idx *CfIndex) FilterHashByBlockHash(h *daghash.Hash,
    filterType wire.FilterType) ([]byte, error) {
    return idx.entryByBlockHash(cfHashKeys, filterType, h)
}

// FilterHashesByBlockHashes returns the serialized contents of a block's basic
// or extended committed filter hash for a set of blocks by hash.
func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*daghash.Hash,
    filterType wire.FilterType) ([][]byte, error) {
    return idx.entriesByBlockHashes(cfHashKeys, filterType, blockHashes)
}
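
// Illustrative sketch (not part of the original file): fetching and decoding a
// block's basic filter via the accessors above. The gcs.FromNBytes and
// builder.DefaultP names are assumptions based on how filters are serialized
// with f.NBytes elsewhere in this file; substitute the fork's actual
// deserialization helper if it differs.
func exampleLoadFilter(idx *CfIndex, blockHash *daghash.Hash) (*gcs.Filter, error) {
    filterBytes, err := idx.FilterByBlockHash(blockHash, wire.GCSFilterRegular)
    if err != nil {
        return nil, err
    }
    if len(filterBytes) == 0 {
        return nil, nil // no filter stored for this block
    }
    return gcs.FromNBytes(builder.DefaultP, filterBytes)
}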

// NewCfIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all blocks in the blockchain to their respective
// committed filters.
//
// It implements the Indexer interface which plugs into the IndexManager that
// in turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewCfIndex(dagParams *dagconfig.Params) *CfIndex {
    return &CfIndex{dagParams: dagParams}
}

// DropCfIndex drops the CF index from the provided database if it exists.
func DropCfIndex(db database.DB, interrupt <-chan struct{}) error {
    return dropIndex(db, cfIndexParentBucketKey, cfIndexName, interrupt)
}
@@ -3,17 +3,16 @@
 // license that can be found in the LICENSE file.
 
 /*
-Package indexers implements optional block chain indexes.
+Package indexers implements optional block DAG indexes.
 */
 package indexers
 
 import (
 	"encoding/binary"
-	"errors"
-
-	"github.com/daglabs/btcd/blockdag"
-	"github.com/daglabs/btcd/database"
-	"github.com/daglabs/btcd/util"
+	"github.com/kaspanet/kaspad/blockdag"
+	"github.com/kaspanet/kaspad/database"
+	"github.com/kaspanet/kaspad/util"
+	"github.com/pkg/errors"
 )
 
 var (
@@ -46,13 +45,22 @@ type Indexer interface {
 	Create(dbTx database.Tx) error
 
 	// Init is invoked when the index manager is first initializing the
-	// index.  This differs from the Create method in that it is called on
+	// index. This differs from the Create method in that it is called on
 	// every load, including the case the index was just created.
-	Init(db database.DB) error
+	Init(db database.DB, dag *blockdag.BlockDAG) error
 
 	// ConnectBlock is invoked when the index manager is notified that a new
 	// block has been connected to the DAG.
-	ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData) error
+	ConnectBlock(dbTx database.Tx,
+		block *util.Block,
+		blockID uint64,
+		dag *blockdag.BlockDAG,
+		acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
+		virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error
+
+	// Recover is invoked when the indexer wasn't turned on for several blocks
+	// and the indexer needs to close the gaps.
+	Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error
 }
 
 // AssertError identifies an error that indicates an internal code consistency
@@ -77,11 +85,11 @@ func (e errDeserialize) Error() string {
|
||||
// isDeserializeErr returns whether or not the passed error is an errDeserialize
|
||||
// error.
|
||||
func isDeserializeErr(err error) bool {
|
||||
_, ok := err.(errDeserialize)
|
||||
return ok
|
||||
var deserializeErr errDeserialize
|
||||
return errors.As(err, &deserializeErr)
|
||||
}
|
||||
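
The switch from a direct type assertion to errors.As matters once errors are wrapped for context: a wrapped errDeserialize no longer has the concrete type errDeserialize, so the assertion would silently fail. The repo routes this through github.com/pkg/errors; the sketch below makes the same point with the standard library's wrapping, which behaves equivalently for errors.As:

package main

import (
	"errors"
	"fmt"
)

type errDeserialize string

func (e errDeserialize) Error() string { return string(e) }

func main() {
	// Wrapping adds context but changes the error's concrete type...
	err := fmt.Errorf("loading index entry: %w", errDeserialize("bad entry"))

	// ...so a direct type assertion on the outer error fails:
	_, ok := err.(errDeserialize)
	fmt.Println(ok) // false

	// errors.As walks the wrap chain until it finds a match:
	var target errDeserialize
	fmt.Println(errors.As(err, &target)) // true
}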

// internalBucket is an abstraction over a database bucket. It is used to make
// the code easier to test since it allows mock objects in the tests to only
// implement these functions instead of everything a database.Bucket supports.
type internalBucket interface {

@@ -5,16 +5,7 @@
package indexers

import (
-	"github.com/btcsuite/btclog"
-	"github.com/daglabs/btcd/logger"
+	"github.com/kaspanet/kaspad/logger"
)

-// log is a logger that is initialized with no output filters. This
-// means the package will not perform any logging by default until the caller
-// requests it.
-var log btclog.Logger
-
-// The default amount of logging is none.
-func init() {
-	log, _ = logger.Get(logger.SubsystemTags.INDX)
-}
+var log, _ = logger.Get(logger.SubsystemTags.INDX)

@@ -5,26 +5,29 @@
package indexers

import (
-	"github.com/daglabs/btcd/blockdag"
-	"github.com/daglabs/btcd/database"
-	"github.com/daglabs/btcd/util"
+	"github.com/kaspanet/kaspad/blockdag"
+	"github.com/kaspanet/kaspad/database"
+	"github.com/kaspanet/kaspad/util"
+	"github.com/kaspanet/kaspad/util/daghash"
)

var (
	// indexTipsBucketName is the name of the db bucket used to house the
	// current tip of each index.
	indexTipsBucketName = []byte("idxtips")
+
+	indexCurrentBlockIDBucketName = []byte("idxcurrentblockid")
)

// Manager defines an index manager that manages multiple optional indexes and
-// implements the blockchain.IndexManager interface so it can be seamlessly
-// plugged into normal chain processing.
+// implements the blockdag.IndexManager interface so it can be seamlessly
+// plugged into normal DAG processing.
type Manager struct {
	db             database.DB
	enabledIndexes []Indexer
}

-// Ensure the Manager type implements the blockchain.IndexManager interface.
+// Ensure the Manager type implements the blockdag.IndexManager interface.
var _ blockdag.IndexManager = (*Manager)(nil)

// indexDropKey returns the key for an index which indicates it is in the

@@ -37,7 +40,7 @@ func indexDropKey(idxKey []byte) []byte {
}

// maybeFinishDrops determines if each of the enabled indexes are in the middle
// of being dropped and finishes dropping them when they are. This is necessary
// because dropping an index has to be done in several atomic steps rather than
// one big atomic step due to the massive number of entries.
func (m *Manager) maybeFinishDrops(interrupt <-chan struct{}) error {

@@ -113,14 +116,14 @@ func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error {
	return nil
}

-// Init initializes the enabled indexes. This is called during chain
+// Init initializes the enabled indexes. This is called during DAG
// initialization and primarily consists of catching up all indexes to the
-// current best chain tip. This is necessary since each index can be disabled
+// current tips. This is necessary since each index can be disabled
// and re-enabled at any time and attempting to catch-up indexes at the same
// time new blocks are being downloaded would lead to an overall longer time to
// catch up due to the I/O contention.
//
-// This is part of the blockchain.IndexManager interface.
+// This is part of the blockdag.IndexManager interface.
func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) error {
	// Nothing to do when no indexes are enabled.
	if len(m.enabledIndexes) == 0 {

@@ -146,6 +149,9 @@ func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-
		if err != nil {
			return err
		}
+		if _, err := meta.CreateBucketIfNotExists(indexCurrentBlockIDBucketName); err != nil {
+			return err
+		}

		return m.maybeCreateIndexes(dbTx)
	})

@@ -155,25 +161,68 @@ func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-

	// Initialize each of the enabled indexes.
	for _, indexer := range m.enabledIndexes {
-		if err := indexer.Init(db); err != nil {
+		if err := indexer.Init(db, blockDAG); err != nil {
			return err
		}
	}

-	return nil
+	return m.recoverIfNeeded()
}

-// ConnectBlock must be invoked when a block is extending the main chain. It
+// recoverIfNeeded checks if the node worked for some time
+// without one of the currently enabled indexes and, if so,
+// recovers the missing blocks into the index.
+func (m *Manager) recoverIfNeeded() error {
+	return m.db.Update(func(dbTx database.Tx) error {
+		lastKnownBlockID := blockdag.DBFetchCurrentBlockID(dbTx)
+		for _, indexer := range m.enabledIndexes {
+			serializedCurrentIdxBlockID := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Get(indexer.Key())
+			currentIdxBlockID := uint64(0)
+			if serializedCurrentIdxBlockID != nil {
+				currentIdxBlockID = blockdag.DeserializeBlockID(serializedCurrentIdxBlockID)
+			}
+			if lastKnownBlockID > currentIdxBlockID {
+				err := indexer.Recover(dbTx, currentIdxBlockID, lastKnownBlockID)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	})
+}
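
recoverIfNeeded only decides whether an indexer fell behind; actually closing the gap is each indexer's job via Recover. A sketch of the shape such an implementation might take — fetchBlockAndDataByID is a hypothetical helper, not part of this diff, standing in for whatever lookup a concrete indexer uses to resolve a block ID:

// Hypothetical gap-closing loop for an indexer. The sketch passes nil
// for the DAG and virtual-acceptance arguments purely for brevity; a
// real implementation would supply them.
func (idx *exampleIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
	for blockID := currentBlockID + 1; blockID <= lastKnownBlockID; blockID++ {
		block, txsAcceptanceData, err := fetchBlockAndDataByID(dbTx, blockID)
		if err != nil {
			return err
		}
		// Replay the regular connect path for each missed block.
		err = idx.ConnectBlock(dbTx, block, blockID, nil, txsAcceptanceData, nil)
		if err != nil {
			return err
		}
	}
	return nil
}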

+// ConnectBlock must be invoked when a block is added to the DAG. It
// keeps track of the state of each index it is managing, performs some sanity
// checks, and invokes each indexer.
//
-// This is part of the blockchain.IndexManager interface.
-func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
+// This is part of the blockdag.IndexManager interface.
+func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
+	txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {

	// Call each of the currently active optional indexes with the block
	// being connected so they can update accordingly.
	for _, index := range m.enabledIndexes {
		// Notify the indexer with the connected block so it can index it.
-		if err := index.ConnectBlock(dbTx, block, dag, txsAcceptanceData); err != nil {
+		if err := index.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData); err != nil {
			return err
		}
	}

+	// Add the new block ID index entry for the block being connected and
+	// update the current internal block ID accordingly.
+	err := m.updateIndexersWithCurrentBlockID(dbTx, block.Hash(), blockID)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (m *Manager) updateIndexersWithCurrentBlockID(dbTx database.Tx, blockHash *daghash.Hash, blockID uint64) error {
+	serializedBlockID := blockdag.SerializeBlockID(blockID)
+	for _, index := range m.enabledIndexes {
+		err := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Put(index.Key(), serializedBlockID)
+		if err != nil {
+			return err
+		}
+	}

@@ -182,17 +231,17 @@ func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockda

// NewManager returns a new index manager with the provided indexes enabled.
//
-// The manager returned satisfies the blockchain.IndexManager interface and thus
-// cleanly plugs into the normal blockchain processing path.
+// The manager returned satisfies the blockdag.IndexManager interface and thus
+// cleanly plugs into the normal blockdag processing path.
func NewManager(enabledIndexes []Indexer) *Manager {
	return &Manager{
		enabledIndexes: enabledIndexes,
	}
}

// dropIndex drops the passed index from the database. Since indexes can be
// massive, it deletes the index in multiple database transactions in order to
// keep memory usage to reasonable levels. It also marks the drop in progress
// so the drop can be resumed if it is stopped before it is done, before the
// index can be used again.
func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan struct{}) error {

@@ -216,7 +265,7 @@ func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan s
	// Mark that the index is in the process of being dropped so that it
	// can be resumed on the next start if interrupted before the process is
	// complete.
	log.Infof("Dropping all %s entries. This might take a while...",
		idxName)
	err = db.Update(func(dbTx database.Tx) error {
		indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)

@@ -228,7 +277,7 @@ func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan s

	// Since the indexes can be so large, attempting to simply delete
	// the bucket in a single database transaction would result in massive
	// memory usage and likely crash many systems due to ulimits. In order
	// to avoid this, use a cursor to delete a maximum number of entries out
	// of the bucket at a time. Recurse buckets depth-first to delete any
	// sub-buckets.
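
The batching idea is independent of the storage engine: delete a bounded number of entries per transaction and loop until nothing is left, so no single transaction has to materialize the whole index. A self-contained toy illustrating the pattern (the store type and the tiny bound are stand-ins; the real code uses database transactions and a much larger limit):

package main

import "fmt"

// A toy key-value store standing in for a database bucket; the point is
// the batching pattern, not the storage engine.
type store struct{ keys []string }

const maxDeletionsPerTx = 3 // the real code uses a much larger bound

// deleteChunk removes at most maxDeletionsPerTx entries, simulating one
// bounded database transaction.
func (s *store) deleteChunk() (deleted int) {
	for deleted < maxDeletionsPerTx && len(s.keys) > 0 {
		s.keys = s.keys[1:] // like cursor.Delete() on the first remaining key
		deleted++
	}
	return deleted
}

func main() {
	s := &store{keys: []string{"a", "b", "c", "d", "e", "f", "g"}}
	// One bounded transaction at a time until the bucket is empty, so
	// memory usage stays flat no matter how large the index grew.
	for tx := 1; ; tx++ {
		n := s.deleteChunk()
		fmt.Printf("tx %d deleted %d entries\n", tx, n)
		if n < maxDeletionsPerTx {
			break
		}
	}
}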
@@ -314,11 +363,7 @@ func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan s
		}
		return bucket.DeleteBucket(bucketName[len(bucketName)-1])
	})
}

	// Call extra index specific deinitialization for the transaction index.
	if idxName == txIndexName {
-		if err := dropBlockIDIndex(db); err != nil {
+		if err != nil {
			return err
		}
	}

@@ -332,6 +377,10 @@ func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan s
		return err
	}

+	if err := meta.Bucket(indexCurrentBlockIDBucketName).Delete(idxKey); err != nil {
+		return err
+	}
+
	return indexesBucket.Delete(indexDropKey(idxKey))
})
if err != nil {

@@ -1,572 +0,0 @@
|
||||
// Copyright (c) 2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package indexers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/daglabs/btcd/blockdag"
|
||||
"github.com/daglabs/btcd/dagconfig/daghash"
|
||||
"github.com/daglabs/btcd/database"
|
||||
"github.com/daglabs/btcd/util"
|
||||
"github.com/daglabs/btcd/wire"
|
||||
)
|
||||
|
||||
const (
|
||||
// txIndexName is the human-readable name for the index.
|
||||
txIndexName = "transaction index"
|
||||
|
||||
includingBlocksIndexKeyEntrySize = 8 // 4 bytes for offset + 4 bytes for transaction length
|
||||
|
||||
acceptingBlocksIndexKeyEntrySize = 4 // 4 bytes for accepting block ID
|
||||
)
|
||||
|
||||
var (
|
||||
includingBlocksIndexKey = []byte("includingblocksidx")
|
||||
|
||||
acceptingBlocksIndexKey = []byte("acceptingblocksidx")
|
||||
|
||||
// idByHashIndexBucketName is the name of the db bucket used to house
|
||||
// the block id -> block hash index.
|
||||
idByHashIndexBucketName = []byte("idbyhashidx")
|
||||
|
||||
// hashByIDIndexBucketName is the name of the db bucket used to house
|
||||
// the block hash -> block id index.
|
||||
hashByIDIndexBucketName = []byte("hashbyididx")
|
||||
)
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// The transaction index consists of an entry for every transaction in the DAG.
|
||||
// In order to significantly optimize the space requirements a separate
|
||||
// index which provides an internal mapping between each block that has been
|
||||
// indexed and a unique ID for use within the hash to location mappings. The ID
|
||||
// is simply a sequentially incremented uint32. This is useful because it is
|
||||
// only 4 bytes versus 32 bytes hashes and thus saves a ton of space in the
|
||||
// index.
|
||||
//
|
||||
// There are four buckets used in total. The first bucket maps the hash of
|
||||
// each transaction to its location in each block it's included in. The second bucket
|
||||
// contains all of the blocks that from their viewpoint the transaction has been
|
||||
// accepted (i.e. the transaction is found in their blue set without double spends),
|
||||
// and their blue block (or themselves) that included the transaction. The third
|
||||
// bucket maps the hash of each block to the unique ID and the fourth maps
|
||||
// that ID back to the block hash.
|
||||
//
|
||||
// NOTE: Although it is technically possible for multiple transactions to have
|
||||
// the same hash as long as the previous transaction with the same hash is fully
|
||||
// spent, this code only stores the most recent one because doing otherwise
|
||||
// would add a non-trivial amount of space and overhead for something that will
|
||||
// realistically never happen per the probability and even if it did, the old
|
||||
// one must be fully spent and so the most likely transaction a caller would
|
||||
// want for a given hash is the most recent one anyways.
|
||||
//
|
||||
// The including blocks index contains a sub bucket for each transaction hash (32 byte each), that its serialized format is:
|
||||
//
|
||||
// <block id> = <start offset><tx length>
|
||||
//
|
||||
// Field Type Size
|
||||
// block id uint32 4 bytes
|
||||
// start offset uint32 4 bytes
|
||||
// tx length uint32 4 bytes
|
||||
// -----
|
||||
// Total: 12 bytes
|
||||
//
|
||||
// The accepting blocks index contains a sub bucket for each transaction hash (32 byte each), that its serialized format is:
|
||||
//
|
||||
// <accepting block id> = <including block id>
|
||||
//
|
||||
// Field Type Size
|
||||
// accepting block id uint32 4 bytes
|
||||
// including block id uint32 4 bytes
|
||||
// -----
|
||||
// Total: 8 bytes
|
||||
//
|
||||
// The serialized format for keys and values in the block hash to ID bucket is:
|
||||
// <hash> = <ID>
|
||||
//
|
||||
// Field Type Size
|
||||
// hash daghash.Hash 32 bytes
|
||||
// ID uint32 4 bytes
|
||||
// -----
|
||||
// Total: 36 bytes
|
||||
//
|
||||
// The serialized format for keys and values in the ID to block hash bucket is:
|
||||
// <ID> = <hash>
|
||||
//
|
||||
// Field Type Size
|
||||
// ID uint32 4 bytes
|
||||
// hash daghash.Hash 32 bytes
|
||||
// -----
|
||||
// Total: 36 bytes
|
||||
//
|
||||
// -----------------------------------------------------------------------------
|
||||
|

// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
// the index entries for the hash to id and id to hash mappings for the provided
// values.
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, id uint32) error {
	// Serialize the ID for use in the index entries.
	var serializedID [4]byte
	byteOrder.PutUint32(serializedID[:], id)

	// Add the block hash to ID mapping to the index.
	meta := dbTx.Metadata()
	hashIndex := meta.Bucket(idByHashIndexBucketName)
	if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
		return err
	}

	// Add the block ID to hash mapping to the index.
	idIndex := meta.Bucket(hashByIDIndexBucketName)
	return idIndex.Put(serializedID[:], hash[:])
}

// dbFetchBlockIDByHash uses an existing database transaction to retrieve the
// block id for the provided hash from the index.
func dbFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint32, error) {
	hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
	serializedID := hashIndex.Get(hash[:])
	if serializedID == nil {
		return 0, fmt.Errorf("No entry in the block ID index for block with hash %s", hash)
	}

	return byteOrder.Uint32(serializedID), nil
}

// dbFetchBlockHashBySerializedID uses an existing database transaction to
// retrieve the hash for the provided serialized block id from the index.
func dbFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
	idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
	hashBytes := idIndex.Get(serializedID)
	if hashBytes == nil {
		return nil, fmt.Errorf("No entry in the block ID index for block with id %d", byteOrder.Uint32(serializedID))
	}

	var hash daghash.Hash
	copy(hash[:], hashBytes)
	return &hash, nil
}

// dbFetchBlockHashByID uses an existing database transaction to retrieve the
// hash for the provided block id from the index.
func dbFetchBlockHashByID(dbTx database.Tx, id uint32) (*daghash.Hash, error) {
	var serializedID [4]byte
	byteOrder.PutUint32(serializedID[:], id)
	return dbFetchBlockHashBySerializedID(dbTx, serializedID[:])
}

func putIncludingBlocksEntry(target []byte, txLoc wire.TxLoc) {
	byteOrder.PutUint32(target, uint32(txLoc.TxStart))
	byteOrder.PutUint32(target[4:], uint32(txLoc.TxLen))
}

func putAcceptingBlocksEntry(target []byte, includingBlockID uint32) {
	byteOrder.PutUint32(target, includingBlockID)
}

func dbPutIncludingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint32, serializedData []byte) error {
	bucket, err := dbTx.Metadata().Bucket(includingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
	if err != nil {
		return err
	}
	blockIDBytes := make([]byte, 4)
	byteOrder.PutUint32(blockIDBytes, uint32(blockID))
	return bucket.Put(blockIDBytes, serializedData)
}

func dbPutAcceptingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint32, serializedData []byte) error {
	bucket, err := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
	if err != nil {
		return err
	}
	blockIDBytes := make([]byte, 4)
	byteOrder.PutUint32(blockIDBytes, uint32(blockID))
	return bucket.Put(blockIDBytes, serializedData)
}

// dbFetchFirstTxRegion uses an existing database transaction to fetch the block
// region for the provided transaction hash from the transaction index. When
// there is no entry for the provided hash, nil will be returned for both
// the region and the error.
//
// Note: because the transaction can be found in multiple blocks, this function
// arbitrarily returns the first block region that is stored in the txindex.
func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.BlockRegion, error) {
	// Load the record from the database and return now if it doesn't exist.
	txBucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txID[:])
	if txBucket == nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No block region "+
				"was found for %s", txID),
		}
	}
	cursor := txBucket.Cursor()
	if ok := cursor.First(); !ok {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No block region "+
				"was found for %s", txID),
		}
	}
	blockIDBytes := cursor.Key()
	serializedData := cursor.Value()
	if len(serializedData) == 0 {
		return nil, nil
	}

	// Ensure the serialized data has enough bytes to properly deserialize.
	if len(serializedData) < includingBlocksIndexKeyEntrySize {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("corrupt transaction index "+
				"entry for %s", txID),
		}
	}

	// Load the block hash associated with the block ID.
	hash, err := dbFetchBlockHashBySerializedID(dbTx, blockIDBytes)
	if err != nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("corrupt transaction index "+
				"entry for %s: %s", txID, err),
		}
	}

	// Deserialize the final entry.
	region := database.BlockRegion{Hash: &daghash.Hash{}}
	copy(region.Hash[:], hash[:])
	region.Offset = byteOrder.Uint32(serializedData[:4])
	region.Len = byteOrder.Uint32(serializedData[4:])

	return &region, nil
}

// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint32, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	// The offset and length of the transactions within the serialized
	// block.
	txLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	// As an optimization, allocate a single slice big enough to hold all
	// of the serialized transaction index entries for the block and
	// serialize them directly into the slice. Then, pass the appropriate
	// subslice to the database to be written. This approach significantly
	// cuts down on the number of required allocations.
	includingBlocksOffset := 0
	serializedIncludingBlocksValues := make([]byte, len(block.Transactions())*includingBlocksIndexKeyEntrySize)
	for i, tx := range block.Transactions() {
		putIncludingBlocksEntry(serializedIncludingBlocksValues[includingBlocksOffset:], txLocs[i])
		endOffset := includingBlocksOffset + includingBlocksIndexKeyEntrySize
		err := dbPutIncludingBlocksEntry(dbTx, tx.ID(), blockID,
			serializedIncludingBlocksValues[includingBlocksOffset:endOffset:endOffset])
		if err != nil {
			return err
		}
		includingBlocksOffset += includingBlocksIndexKeyEntrySize
	}

	for includingBlockHash, blockTxsAcceptanceData := range txsAcceptanceData {
		var includingBlockID uint32
		if includingBlockHash.IsEqual(block.Hash()) {
			includingBlockID = blockID
		} else {
			includingBlockID, err = dbFetchBlockIDByHash(dbTx, &includingBlockHash)
			if err != nil {
				return err
			}
		}

		includingBlockIDBytes := make([]byte, 4)
		byteOrder.PutUint32(includingBlockIDBytes, uint32(includingBlockID))

		for _, txAcceptanceData := range blockTxsAcceptanceData {
			err = dbPutAcceptingBlocksEntry(dbTx, txAcceptanceData.Tx.ID(), blockID, includingBlockIDBytes)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// TxIndex implements a transaction by hash index. That is to say, it supports
// querying all transactions by their hash.
type TxIndex struct {
	db         database.DB
	curBlockID uint32
}

// Ensure the TxIndex type implements the Indexer interface.
var _ Indexer = (*TxIndex)(nil)

// Init initializes the hash-based transaction index. In particular, it finds
// the highest used block ID and stores it for later use when connecting or
// disconnecting blocks.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Init(db database.DB) error {
	idx.db = db

	// Find the latest known block id field for the internal block id
	// index and initialize it. This is done because it's a lot more
	// efficient to do a single search at initialize time than it is to
	// write another value to the database on every update.
	err := idx.db.View(func(dbTx database.Tx) error {
		// Scan forward in large gaps to find a block id that doesn't
		// exist yet to serve as an upper bound for the binary search
		// below.
		var highestKnown, nextUnknown uint32
		testBlockID := uint32(1)
		increment := uint32(100000)
		for {
			_, err := dbFetchBlockHashByID(dbTx, testBlockID)
			if err != nil {
				nextUnknown = testBlockID
				break
			}

			highestKnown = testBlockID
			testBlockID += increment
		}
		log.Tracef("Forward scan (highest known %d, next unknown %d)",
			highestKnown, nextUnknown)

		// No used block IDs due to new database.
		if nextUnknown == 1 {
			return nil
		}

		// Use a binary search to find the final highest used block id.
		// This will take at most ceil(log_2(increment)) attempts.
		for {
			testBlockID = (highestKnown + nextUnknown) / 2
			_, err := dbFetchBlockHashByID(dbTx, testBlockID)
			if err != nil {
				nextUnknown = testBlockID
			} else {
				highestKnown = testBlockID
			}
			log.Tracef("Binary scan (highest known %d, next "+
				"unknown %d)", highestKnown, nextUnknown)
			if highestKnown+1 == nextUnknown {
				break
			}
		}

		idx.curBlockID = highestKnown
		return nil
	})
	if err != nil {
		return err
	}

	log.Debugf("Current internal block ID: %d", idx.curBlockID)
	return nil
}
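
The two-phase scan above is a general trick for finding the boundary of a monotonic predicate: probe forward in large strides to bracket the answer, then binary-search inside the bracket. A self-contained demonstration over a toy predicate (IDs 1..highest exist, everything above does not):

package main

import "fmt"

func findHighestUsedID(exists func(id uint32) bool) uint32 {
	var highestKnown, nextUnknown uint32
	testID := uint32(1)
	const increment = 100000

	// Phase 1: probe forward in large steps to bracket the answer.
	for {
		if !exists(testID) {
			nextUnknown = testID
			break
		}
		highestKnown = testID
		testID += increment
	}
	if nextUnknown == 1 {
		return 0 // empty database
	}

	// Phase 2: binary-search inside the bracket; at most
	// ceil(log2(increment)) probes.
	for highestKnown+1 != nextUnknown {
		testID = (highestKnown + nextUnknown) / 2
		if exists(testID) {
			highestKnown = testID
		} else {
			nextUnknown = testID
		}
	}
	return highestKnown
}

func main() {
	const highest = 123456
	fmt.Println(findHighestUsedID(func(id uint32) bool { return id <= highest }))
}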

// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Key() []byte {
	return includingBlocksIndexKey
}

// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Name() string {
	return txIndexName
}

// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the buckets for the hash-based
// transaction index and the internal block ID indexes.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Create(dbTx database.Tx) error {
	meta := dbTx.Metadata()
	if _, err := meta.CreateBucket(idByHashIndexBucketName); err != nil {
		return err
	}
	if _, err := meta.CreateBucket(hashByIDIndexBucketName); err != nil {
		return err
	}
	if _, err := meta.CreateBucket(includingBlocksIndexKey); err != nil {
		return err
	}
	_, err := meta.CreateBucket(acceptingBlocksIndexKey)
	return err
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG. This indexer adds a hash-to-transaction mapping
// for every transaction in the passed block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, _ *blockdag.BlockDAG, acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error {
	// Increment the internal block ID to use for the block being connected
	// and add all of the transactions in the block to the index.
	newBlockID := idx.curBlockID + 1
	if block.MsgBlock().Header.IsGenesis() {
		newBlockID = 0
	}
	if err := dbAddTxIndexEntries(dbTx, block, newBlockID, acceptedTxsData); err != nil {
		return err
	}

	// Add the new block ID index entry for the block being connected and
	// update the current internal block ID accordingly.
	err := dbPutBlockIDIndexEntry(dbTx, block.Hash(), newBlockID)
	if err != nil {
		return err
	}
	idx.curBlockID = newBlockID
	return nil
}

// TxFirstBlockRegion returns the first block region for the provided transaction hash
// from the transaction index. The block region can in turn be used to load the
// raw transaction bytes. When there is no entry for the provided hash, nil
// will be returned for both the entry and the error.
//
// This function is safe for concurrent access.
func (idx *TxIndex) TxFirstBlockRegion(txID *daghash.TxID) (*database.BlockRegion, error) {
	var region *database.BlockRegion
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		region, err = dbFetchFirstTxRegion(dbTx, txID)
		return err
	})
	return region, err
}
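
The returned region is just (hash, offset, length); turning it into raw transaction bytes takes one more database read. A sketch of that second step, assuming the database package still exposes btcd's FetchBlockRegion on database.Tx:

package txquery

import (
	"github.com/kaspanet/kaspad/blockdag/indexers"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util/daghash"
)

// fetchRawTx loads the serialized transaction bytes that the indexed
// block region points into. Returns nil bytes when the tx is unknown.
func fetchRawTx(db database.DB, idx *indexers.TxIndex, txID *daghash.TxID) ([]byte, error) {
	region, err := idx.TxFirstBlockRegion(txID)
	if err != nil || region == nil {
		return nil, err
	}
	var rawTx []byte
	err = db.View(func(dbTx database.Tx) error {
		rawTx, err = dbTx.FetchBlockRegion(region)
		return err
	})
	return rawTx, err
}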

// TxBlocks returns the hashes of the blocks where the transaction exists
func (idx *TxIndex) TxBlocks(txHash *daghash.Hash) ([]*daghash.Hash, error) {
	blockHashes := make([]*daghash.Hash, 0)
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		blockHashes, err = dbFetchTxBlocks(dbTx, txHash)
		if err != nil {
			return err
		}
		return nil
	})
	return blockHashes, err
}

func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]*daghash.Hash, error) {
	blockHashes := make([]*daghash.Hash, 0)
	bucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txHash[:])
	if bucket == nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No including blocks "+
				"were found for %s", txHash),
		}
	}
	err := bucket.ForEach(func(blockIDBytes, _ []byte) error {
		blockID := byteOrder.Uint32(blockIDBytes)
		blockHash, err := dbFetchBlockHashByID(dbTx, blockID)
		if err != nil {
			return err
		}
		blockHashes = append(blockHashes, blockHash)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return blockHashes, nil
}

// BlockThatAcceptedTx returns the hash of the block where the transaction got accepted (from the virtual block point of view)
func (idx *TxIndex) BlockThatAcceptedTx(dag *blockdag.BlockDAG, txID *daghash.TxID) (*daghash.Hash, error) {
	var acceptingBlock *daghash.Hash
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		acceptingBlock, err = dbFetchTxAcceptingBlock(dbTx, txID, dag)
		return err
	})
	return acceptingBlock, err
}

func dbFetchTxAcceptingBlock(dbTx database.Tx, txID *daghash.TxID, dag *blockdag.BlockDAG) (*daghash.Hash, error) {
	bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txID[:])
	if bucket == nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No accepting blocks "+
				"were found for %s", txID),
		}
	}
	cursor := bucket.Cursor()
	if !cursor.First() {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No accepting blocks "+
				"were found for %s", txID),
		}
	}
	for ; cursor.Key() != nil; cursor.Next() {
		blockID := byteOrder.Uint32(cursor.Key())
		blockHash, err := dbFetchBlockHashByID(dbTx, blockID)
		if err != nil {
			return nil, err
		}
		if dag.IsInSelectedPathChain(blockHash) {
			return blockHash, nil
		}
	}
	return nil, nil
}

// NewTxIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all transactions in the blockchain to the respective
// block, location within the block, and size of the transaction.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewTxIndex() *TxIndex {
	return &TxIndex{}
}

// dropBlockIDIndex drops the internal block id index.
func dropBlockIDIndex(db database.DB) error {
	return db.Update(func(dbTx database.Tx) error {
		meta := dbTx.Metadata()
		err := meta.DeleteBucket(idByHashIndexBucketName)
		if err != nil {
			return err
		}

		return meta.DeleteBucket(hashByIDIndexBucketName)
	})
}

// DropTxIndex drops the transaction index from the provided database if it
// exists. Since the address index relies on it, the address index will also be
// dropped when it exists.
func DropTxIndex(db database.DB, interrupt <-chan struct{}) error {
	err := dropIndex(db, addrIndexKey, addrIndexName, interrupt)
	if err != nil {
		return err
	}

	err = dropIndex(db, includingBlocksIndexKey, addrIndexName, interrupt)
	if err != nil {
		return err
	}

	return dropIndex(db, acceptingBlocksIndexKey, txIndexName, interrupt)
}
@@ -1,124 +0,0 @@
|
||||
package indexers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/daglabs/btcd/blockdag"
|
||||
"github.com/daglabs/btcd/dagconfig"
|
||||
"github.com/daglabs/btcd/dagconfig/daghash"
|
||||
"github.com/daglabs/btcd/mining"
|
||||
"github.com/daglabs/btcd/util"
|
||||
"github.com/daglabs/btcd/wire"
|
||||
)
|
||||
|
||||
func createTransaction(value uint64, originTx *wire.MsgTx, outputIndex uint32) *wire.MsgTx {
|
||||
txIn := &wire.TxIn{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: originTx.TxID(),
|
||||
Index: outputIndex,
|
||||
},
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
}
|
||||
txOut := wire.NewTxOut(value, blockdag.OpTrueScript)
|
||||
tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
|
||||
|
||||
return tx
|
||||
}
|
||||
|
||||
func TestTxIndexConnectBlock(t *testing.T) {
|
||||
blocks := make(map[daghash.Hash]*util.Block)
|
||||
|
||||
txIndex := NewTxIndex()
|
||||
indexManager := NewManager([]Indexer{txIndex})
|
||||
|
||||
params := dagconfig.SimNetParams
|
||||
params.BlockRewardMaturity = 1
|
||||
params.K = 1
|
||||
|
||||
config := blockdag.Config{
|
||||
IndexManager: indexManager,
|
||||
DAGParams: ¶ms,
|
||||
}
|
||||
|
||||
dag, teardown, err := blockdag.DAGSetup("TestTxIndexConnectBlock", config)
|
||||
if err != nil {
|
||||
t.Fatalf("TestTxIndexConnectBlock: Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
if teardown != nil {
|
||||
defer teardown()
|
||||
}
|
||||
|
||||
prepareAndProcessBlock := func(parentHashes []*daghash.Hash, transactions []*wire.MsgTx, blockName string) *wire.MsgBlock {
|
||||
block, err := mining.PrepareBlockForTest(dag, ¶ms, parentHashes, transactions, false, 1)
|
||||
if err != nil {
|
||||
t.Fatalf("TestTxIndexConnectBlock: block %v got unexpected error from PrepareBlockForTest: %v", blockName, err)
|
||||
}
|
||||
utilBlock := util.NewBlock(block)
|
||||
blocks[*block.BlockHash()] = utilBlock
|
||||
isOrphan, err := dag.ProcessBlock(utilBlock, blockdag.BFNoPoWCheck)
|
||||
if err != nil {
|
||||
t.Fatalf("TestTxIndexConnectBlock: dag.ProcessBlock got unexpected error for block %v: %v", blockName, err)
|
||||
}
|
||||
if isOrphan {
|
||||
t.Fatalf("TestTxIndexConnectBlock: block %v was unexpectedly orphan", blockName)
|
||||
}
|
||||
return block
|
||||
}
|
||||
|
||||
block1 := prepareAndProcessBlock([]*daghash.Hash{params.GenesisHash}, nil, "1")
|
||||
block2Tx := createTransaction(block1.Transactions[0].TxOut[0].Value, block1.Transactions[0], 0)
|
||||
block2 := prepareAndProcessBlock([]*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{block2Tx}, "2")
|
||||
block3Tx := createTransaction(block2.Transactions[0].TxOut[0].Value, block2.Transactions[0], 0)
|
||||
block3 := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3")
|
||||
|
||||
block3TxID := block3Tx.TxID()
|
||||
block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, &block3TxID)
|
||||
if err != nil {
|
||||
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
|
||||
}
|
||||
block3Hash := block3.BlockHash()
|
||||
if !block3TxNewAcceptedBlock.IsEqual(block3Hash) {
|
||||
t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
|
||||
"been accepted in block %v but instead got accepted in block %v", block3Hash, block3TxNewAcceptedBlock)
|
||||
}
|
||||
|
||||
block3A := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3A")
|
||||
block4 := prepareAndProcessBlock([]*daghash.Hash{block3.BlockHash()}, nil, "4")
|
||||
prepareAndProcessBlock([]*daghash.Hash{block3A.BlockHash(), block4.BlockHash()}, nil, "5")
|
||||
|
||||
block3TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, &block3TxID)
|
||||
if err != nil {
|
||||
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
|
||||
}
|
||||
block3AHash := block3A.BlockHash()
|
||||
if !block3TxAcceptedBlock.IsEqual(block3AHash) {
|
||||
t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
|
||||
"been accepted in block %v but instead got accepted in block %v", block3AHash, block3TxAcceptedBlock)
|
||||
}
|
||||
|
||||
region, err := txIndex.TxFirstBlockRegion(&block3TxID)
|
||||
if err != nil {
|
||||
t.Fatalf("TestTxIndexConnectBlock: no block region was found for block3Tx")
|
||||
}
|
||||
regionBlock, ok := blocks[*region.Hash]
|
||||
if !ok {
|
||||
t.Fatalf("TestTxIndexConnectBlock: couldn't find block with hash %v", region.Hash)
|
||||
}
|
||||
|
||||
regionBlockBytes, err := regionBlock.Bytes()
|
||||
if err != nil {
|
||||
t.Fatalf("TestTxIndexConnectBlock: Couldn't serialize block to bytes")
|
||||
}
|
||||
block3TxInBlock := regionBlockBytes[region.Offset : region.Offset+region.Len]
|
||||
|
||||
block3TxBuf := bytes.NewBuffer(make([]byte, 0, block3Tx.SerializeSize()))
|
||||
block3Tx.BtcEncode(block3TxBuf, 0)
|
||||
blockTxBytes := block3TxBuf.Bytes()
|
||||
|
||||
if !reflect.DeepEqual(blockTxBytes, block3TxInBlock) {
|
||||
t.Errorf("TestTxIndexConnectBlock: the block region that was in the bucket doesn't match block3Tx")
|
||||
}
|
||||
|
||||
}
|
||||
@@ -5,16 +5,9 @@
package blockdag

import (
-	"github.com/btcsuite/btclog"
-	"github.com/daglabs/btcd/logger"
+	"github.com/kaspanet/kaspad/logger"
+	"github.com/kaspanet/kaspad/util/panics"
)

-// log is a logger that is initialized with no output filters. This
-// means the package will not perform any logging by default until the caller
-// requests it.
-var log btclog.Logger
-
-// The default amount of logging is none.
-func init() {
-	log, _ = logger.Get(logger.SubsystemTags.CHAN)
-}
+var log, _ = logger.Get(logger.SubsystemTags.BDAG)
+var spawn = panics.GoroutineWrapperFunc(log)

@@ -13,7 +13,7 @@ import (

const (
	// maxAllowedOffsetSecs is the maximum number of seconds in either
	// direction that the local clock will be adjusted. When the median time
	// of the network is outside of this range, no offset will be applied.
	maxAllowedOffsetSecs = 70 * 60 // 1 hour 10 minutes

@@ -25,7 +25,7 @@ const (

var (
	// maxMedianTimeEntries is the maximum number of entries allowed in the
	// median time data. This is a variable as opposed to a constant so the
	// test code can modify it.
	maxMedianTimeEntries = 200
)

@@ -51,29 +51,26 @@ type MedianTimeSource interface {
// be sorted.
type int64Sorter []int64

// Len returns the number of 64-bit integers in the slice. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Len() int {
	return len(s)
}

// Swap swaps the 64-bit integers at the passed indices. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less returns whether the 64-bit integer with index i should sort before the
// 64-bit integer with index j. It is part of the sort.Interface
// implementation.
func (s int64Sorter) Less(i, j int) bool {
	return s[i] < s[j]
}

// medianTime provides an implementation of the MedianTimeSource interface.
// It is limited to maxMedianTimeEntries and includes the same buggy behavior
// as the time offset mechanism in Bitcoin Core. This is necessary because it
// is used in the consensus code.
type medianTime struct {
	mtx      sync.Mutex
	knownIDs map[string]struct{}

@@ -137,15 +134,6 @@ func (m *medianTime) AddTimeSample(sourceID string, timeVal time.Time) {
	log.Debugf("Added time sample of %s (total: %d)", offsetDuration,
		numOffsets)

-	// NOTE: The following code intentionally has a bug to mirror the
-	// buggy behavior in Bitcoin Core since the median time is used in the
-	// consensus rules.
-	//
-	// In particular, the offset is only updated when the number of entries
-	// is odd, but the max number of entries is 200, an even number. Thus,
-	// the offset will never be updated again once the max number of entries
-	// is reached.

	// The median offset is only updated when there are enough offsets and
	// the number of offsets is odd so the middle value is the true median.
	// Thus, there is nothing to do when those conditions are not met.
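
Concretely, the offset applied is the middle element of the sorted sample list, and it is only recomputed when there are enough samples and the count is odd. A self-contained sketch of that selection rule, assuming the same minimum of five samples as the surrounding code:

package main

import (
	"fmt"
	"sort"
)

// medianOffset mirrors AddTimeSample's selection rule: with fewer than
// five samples, or with an even count, no offset is applied.
func medianOffset(offsetSecs []int64) (int64, bool) {
	if len(offsetSecs) < 5 || len(offsetSecs)%2 != 1 {
		return 0, false
	}
	sorted := append([]int64(nil), offsetSecs...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	return sorted[len(sorted)/2], true
}

func main() {
	samples := []int64{-13, 57, -4, -23, -12}
	if m, ok := medianOffset(samples); ok {
		fmt.Println("median offset:", m) // -12, matching the test table below
	}
}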
@@ -163,7 +151,7 @@ func (m *medianTime) AddTimeSample(sourceID string, timeVal time.Time) {
		m.offsetSecs = median
	} else {
		// The median offset of all added time data is larger than the
		// maximum allowed offset, so don't use an offset. This
		// effectively limits how far the local clock can be skewed.
		m.offsetSecs = 0

@@ -183,7 +171,7 @@ func (m *medianTime) AddTimeSample(sourceID string, timeVal time.Time) {
	// Warn if none of the time samples are close.
	if !remoteHasCloseTime {
		log.Warnf("Please check your date and time " +
-			"are correct! btcd will not work " +
+			"are correct! kaspad will not work " +
			"properly with an invalid time")
	}
}

@@ -206,8 +194,8 @@ func (m *medianTime) Offset() time.Duration {
}

// NewMedianTime returns a new instance of a concurrency-safe implementation of
// the MedianTimeSource interface. The returned implementation contains the
-// rules necessary for proper time handling in the chain consensus rules and
+// rules necessary for proper time handling in the DAG consensus rules and
// expects the time samples to be added from the timestamp field of the version
// message received from remote peers that successfully connect and negotiate.
func NewMedianTime() MedianTimeSource {

@@ -23,7 +23,7 @@ func TestMedianTime(t *testing.T) {
	{in: []int64{1, 2, 3}, wantOffset: 0},
	{in: []int64{1, 2, 3, 4}, wantOffset: 0},

	// Various number of entries. The expected offset is only
	// updated on odd number of elements.
	{in: []int64{-13, 57, -4, -23, -12}, wantOffset: -12},
	{in: []int64{55, -13, 61, -52, 39, 55}, wantOffset: 39},

@@ -34,9 +34,7 @@ func TestMedianTime(t *testing.T) {
	{in: []int64{-5, -4, -3, -2, -1}, wantOffset: -3, useDupID: true},

	// The offset stops being updated once the max number of entries
-	// has been reached. This is actually a bug from Bitcoin Core,
-	// but since the time is ultimately used as a part of the
-	// consensus rules, it must be mirrored.
+	// has been reached.
	{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52}, wantOffset: 17},
	{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45}, wantOffset: 17},
	{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45, 4}, wantOffset: 17},

@@ -7,8 +7,8 @@ package blockdag
import (
	"math"

-	"github.com/daglabs/btcd/dagconfig/daghash"
-	"github.com/daglabs/btcd/util"
+	"github.com/kaspanet/kaspad/util"
+	"github.com/kaspanet/kaspad/util/daghash"
)

// MerkleTree holds the hashes of a merkle tree

@@ -20,7 +20,7 @@ func (mt MerkleTree) Root() *daghash.Hash {
}

// nextPowerOfTwo returns the next highest power of two from a given number if
// it is not already a power of two. This is a helper function used during the
// calculation of a merkle tree.
func nextPowerOfTwo(n int) int {
	// Return the number if it's already a power of 2.

@@ -34,7 +34,7 @@ func nextPowerOfTwo(n int) int {
}

// HashMerkleBranches takes two hashes, treated as the left and right tree
// nodes, and returns the hash of their concatenation. This is a helper
// function used to aid in the generation of a merkle tree.
func HashMerkleBranches(left *daghash.Hash, right *daghash.Hash) *daghash.Hash {
	// Concatenate the left and right nodes.

@@ -67,13 +67,13 @@ func BuildIDMerkleTreeStore(transactions []*util.Tx) MerkleTree {
}

// buildMerkleTreeStore creates a merkle tree from a slice of hashes,
// stores it using a linear array, and returns a slice of the backing array. A
// linear array was chosen as opposed to an actual tree structure since it uses
// about half as much memory. The following describes a merkle tree and how it
// is stored in a linear array.
//
// A merkle tree is a tree in which every non-leaf node is the hash of its
-// children nodes. A diagram depicting how this works for bitcoin transactions
+// children nodes. A diagram depicting how this works for kaspa transactions
// where h(x) is a double sha256 follows:
//
//	root = h1234 = h(h12 + h34)

@@ -89,7 +89,7 @@ func BuildIDMerkleTreeStore(transactions []*util.Tx) MerkleTree {
// As the above shows, the merkle root is always the last element in the array.
//
// The number of inputs is not always a power of two which results in a
// balanced tree structure as above. In that case, parent nodes with no
// children are also zero and parent nodes with only a single left node
// are calculated by concatenating the left node with itself before hashing.
// Since this function uses nodes that are pointers to the hashes, empty nodes

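
The space claim follows from the layout: for n leaves the backing array holds the padded leaf level plus every internal level, giving 2*nextPowerOfTwo(n)-1 slots with the root in the last slot. A small self-contained illustration (this nextPowerOfTwo is my own loop-based variant, not the package's math.Log2 implementation):

package main

import "fmt"

func nextPowerOfTwo(n int) int {
	if n&(n-1) == 0 {
		return n // already a power of two
	}
	exponent := 0
	for 1<<exponent < n {
		exponent++
	}
	return 1 << exponent
}

func main() {
	for _, leaves := range []int{3, 4, 6} {
		size := 2*nextPowerOfTwo(leaves) - 1
		fmt.Printf("%d leaves -> %d array slots (root at index %d)\n",
			leaves, size, size-1)
	}
}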
@@ -5,9 +5,10 @@
package blockdag

import (
+	"github.com/kaspanet/kaspad/util/daghash"
	"testing"

-	"github.com/daglabs/btcd/util"
+	"github.com/kaspanet/kaspad/util"
)

// TestMerkle tests the BuildHashMerkleTreeStore API.

@@ -24,8 +25,11 @@ func TestMerkle(t *testing.T) {

	idMerkleTree := BuildIDMerkleTreeStore(block.Transactions())
	calculatedIDMerkleRoot := idMerkleTree.Root()
-	wantIDMerkleRoot := Block100000.Header.IDMerkleRoot
-	if !wantIDMerkleRoot.IsEqual(calculatedIDMerkleRoot) {
+	wantIDMerkleRoot, err := daghash.NewHashFromStr("3f69feb7edf5d0d67930afc990c8ec931e3428d7c7a65d7af6b81079319eb110")
+	if err != nil {
+		t.Errorf("BuildIDMerkleTreeStore: unexpected error: %s", err)
+	}
+	if !calculatedIDMerkleRoot.IsEqual(wantIDMerkleRoot) {
		t.Errorf("BuildIDMerkleTreeStore: ID merkle root mismatch - "+
			"got %v, want %v", calculatedIDMerkleRoot, wantIDMerkleRoot)
	}

blockdag/mining.go (new file, 111 lines)
@@ -0,0 +1,111 @@
package blockdag

import (
	"bytes"
	"encoding/binary"
	"github.com/kaspanet/kaspad/txscript"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/wire"
	"time"
)

// BlockForMining returns a block with the given transactions
// that points to the current DAG tips, that is valid from
// all aspects except proof of work.
func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, error) {
	blockTimestamp := dag.NextBlockTime()
	requiredDifficulty := dag.NextRequiredDifficulty(blockTimestamp)

	// Calculate the next expected block version based on the state of the
	// rule change deployments.
	nextBlockVersion, err := dag.CalcNextBlockVersion()
	if err != nil {
		return nil, err
	}

	// Create a new block ready to be solved.
	hashMerkleTree := BuildHashMerkleTreeStore(transactions)
	acceptedIDMerkleRoot, err := dag.NextAcceptedIDMerkleRootNoLock()
	if err != nil {
		return nil, err
	}
	var msgBlock wire.MsgBlock
	for _, tx := range transactions {
		msgBlock.AddTransaction(tx.MsgTx())
	}

	utxoWithTransactions, err := dag.UTXOSet().WithTransactions(msgBlock.Transactions, UnacceptedBlueScore, false)
	if err != nil {
		return nil, err
	}
	utxoCommitment := utxoWithTransactions.Multiset().Hash()

	msgBlock.Header = wire.BlockHeader{
		Version:              nextBlockVersion,
		ParentHashes:         dag.TipHashes(),
		HashMerkleRoot:       hashMerkleTree.Root(),
		AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
		UTXOCommitment:       utxoCommitment,
		Timestamp:            blockTimestamp,
		Bits:                 requiredDifficulty,
	}

	return &msgBlock, nil
}

// CoinbasePayloadExtraData returns the coinbase payload extra data parameter,
// which is built from the extra nonce and coinbase flags.
func CoinbasePayloadExtraData(extraNonce uint64, coinbaseFlags string) ([]byte, error) {
	extraNonceBytes := make([]byte, 8)
	binary.LittleEndian.PutUint64(extraNonceBytes, extraNonce)
	w := &bytes.Buffer{}
	_, err := w.Write(extraNonceBytes)
	if err != nil {
		return nil, err
	}
	_, err = w.Write([]byte(coinbaseFlags))
	if err != nil {
		return nil, err
	}
	return w.Bytes(), nil
}
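
The payload layout is simply 8 little-endian bytes of extra nonce followed by the raw coinbase flags, so decoding is the same layout in reverse. A self-contained round-trip of that format (the flag string here is an illustrative value, not the daemon's actual default):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const extraNonce = uint64(42)
	const coinbaseFlags = "/kaspad-example/"

	// Encode: 8 bytes of nonce, then the flags.
	payload := make([]byte, 8, 8+len(coinbaseFlags))
	binary.LittleEndian.PutUint64(payload, extraNonce)
	payload = append(payload, coinbaseFlags...)

	// Decode: split at the fixed 8-byte boundary.
	fmt.Println(binary.LittleEndian.Uint64(payload[:8])) // 42
	fmt.Println(string(payload[8:]))                     // /kaspad-example/
}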

// NextCoinbaseFromAddress returns a coinbase transaction for the
// next block with the given address and extra data in its payload.
func (dag *BlockDAG) NextCoinbaseFromAddress(payToAddress util.Address, extraData []byte) (*util.Tx, error) {
	coinbasePayloadScriptPubKey, err := txscript.PayToAddrScript(payToAddress)
	if err != nil {
		return nil, err
	}
	coinbaseTx, err := dag.NextBlockCoinbaseTransactionNoLock(coinbasePayloadScriptPubKey, extraData)
	if err != nil {
		return nil, err
	}
	return coinbaseTx, nil
}

// NextBlockMinimumTime returns the minimum allowed timestamp for a block building
// on the end of the DAG. In particular, it is one second after
// the median timestamp of the last several blocks per the DAG consensus
// rules.
func (dag *BlockDAG) NextBlockMinimumTime() time.Time {
	return dag.CalcPastMedianTime().Add(time.Second)
}

// NextBlockTime returns a valid block time for the
// next block that will point to the existing DAG tips.
func (dag *BlockDAG) NextBlockTime() time.Time {
	// The timestamp for the block must not be before the median timestamp
	// of the last several blocks. Thus, choose the maximum between the
	// current time and one second after the past median time. The current
	// timestamp is truncated to a second boundary before comparison since a
	// block timestamp does not support a precision greater than one
	// second.
	newTimestamp := dag.AdjustedTime()
	minTimestamp := dag.NextBlockMinimumTime()
	if newTimestamp.Before(minTimestamp) {
		newTimestamp = minTimestamp
	}

	return newTimestamp
}
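
Put together, these helpers give a miner everything except the proof-of-work loop. A hedged sketch of how a caller might assemble a block template from them (the flag string is illustrative, txs is assumed to come from the mempool, and the solving loop is elided):

package miningsketch

import (
	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/wire"
)

// buildBlockTemplate builds an unsolved block paying to payTo.
func buildBlockTemplate(dag *blockdag.BlockDAG, payTo util.Address,
	txs []*util.Tx) (*wire.MsgBlock, error) {

	extraData, err := blockdag.CoinbasePayloadExtraData(0, "/kaspad-example/")
	if err != nil {
		return nil, err
	}
	coinbaseTx, err := dag.NextCoinbaseFromAddress(payTo, extraData)
	if err != nil {
		return nil, err
	}
	// The coinbase is conventionally the first transaction in the block.
	return dag.BlockForMining(append([]*util.Tx{coinbaseTx}, txs...))
}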
@@ -6,13 +6,15 @@ package blockdag

import (
	"fmt"
+	"github.com/kaspanet/kaspad/util"
+	"github.com/kaspanet/kaspad/util/daghash"
)

// NotificationType represents the type of a notification message.
type NotificationType int

// NotificationCallback is used for a caller to provide a callback for
-// notifications about various chain events.
+// notifications about various blockDAG events.
type NotificationCallback func(*Notification)

// Constants for the type of a notification message.

@@ -20,12 +22,17 @@ const (
	// NTBlockAdded indicates the associated block was added into
	// the blockDAG.
	NTBlockAdded NotificationType = iota

+	// NTChainChanged indicates that the selected parent
+	// chain has changed.
+	NTChainChanged
)

// notificationTypeStrings is a map of notification types back to their constant
// names for pretty printing.
var notificationTypeStrings = map[NotificationType]string{
	NTBlockAdded:   "NTBlockAdded",
+	NTChainChanged: "NTChainChanged",
}

// String returns the NotificationType in human-readable form.

@@ -45,13 +52,13 @@ type Notification struct {
	Data interface{}
}

-// Subscribe to block chain notifications. Registers a callback to be executed
+// Subscribe to block DAG notifications. Registers a callback to be executed
// when various events take place. See the documentation on Notification and
// NotificationType for details on the types and contents of notifications.
func (dag *BlockDAG) Subscribe(callback NotificationCallback) {
	dag.notificationsLock.Lock()
+	defer dag.notificationsLock.Unlock()
	dag.notifications = append(dag.notifications, callback)
-	dag.notificationsLock.Unlock()
}

// sendNotification sends a notification with the passed type and data if the

@@ -61,8 +68,22 @@ func (dag *BlockDAG) sendNotification(typ NotificationType, data interface{}) {
	// Generate and send the notification.
	n := Notification{Type: typ, Data: data}
	dag.notificationsLock.RLock()
+	defer dag.notificationsLock.RUnlock()
	for _, callback := range dag.notifications {
		callback(&n)
	}
-	dag.notificationsLock.RUnlock()
}

// BlockAddedNotificationData defines data to be sent along with a BlockAdded
|
||||
// notification
|
||||
type BlockAddedNotificationData struct {
|
||||
Block *util.Block
|
||||
WasUnorphaned bool
|
||||
}
|
||||
|
||||
// ChainChangedNotificationData defines data to be sent along with a ChainChanged
|
||||
// notification
|
||||
type ChainChangedNotificationData struct {
|
||||
RemovedChainBlockHashes []*daghash.Hash
|
||||
AddedChainBlockHashes []*daghash.Hash
|
||||
}
|
||||
|
||||
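From a caller's perspective a subscription is just a function value. A minimal sketch of wiring one up from outside the package (assuming the Data payload is delivered as pointers to the types above, and that log is the caller's own logger):

dag.Subscribe(func(notification *blockdag.Notification) {
	switch data := notification.Data.(type) {
	case *blockdag.BlockAddedNotificationData:
		log.Infof("block %s added (unorphaned: %t)",
			data.Block.Hash(), data.WasUnorphaned)
	case *blockdag.ChainChangedNotificationData:
		log.Infof("chain changed: %d blocks removed, %d added",
			len(data.RemovedChainBlockHashes), len(data.AddedChainBlockHashes))
	}
})
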
@@ -5,21 +5,22 @@
 package blockdag

 import (
 	"path/filepath"
 	"testing"

-	"github.com/daglabs/btcd/dagconfig"
+	"github.com/kaspanet/kaspad/dagconfig"
 )

 // TestNotifications ensures that notification callbacks are fired on events.
 func TestNotifications(t *testing.T) {
-	blocks, err := loadBlocks("blk_0_to_4.dat")
+	blocks, err := LoadBlocks(filepath.Join("testdata/blk_0_to_4.dat"))
 	if err != nil {
 		t.Fatalf("Error loading file: %v\n", err)
 	}

 	// Create a new database and dag instance to run tests against.
 	dag, teardownFunc, err := DAGSetup("notifications", Config{
-		DAGParams: &dagconfig.SimNetParams,
+		DAGParams: &dagconfig.SimnetParams,
 	})
 	if err != nil {
 		t.Fatalf("Failed to setup dag instance: %v", err)
@@ -40,14 +41,18 @@ func TestNotifications(t *testing.T) {
 		dag.Subscribe(callback)
 	}

-	isOrphan, err := dag.ProcessBlock(blocks[1], BFNone)
+	isOrphan, isDelayed, err := dag.ProcessBlock(blocks[1], BFNone)
+	if err != nil {
+		t.Fatalf("ProcessBlock fail on block 1: %v\n", err)
+	}
+	if isDelayed {
+		t.Fatalf("ProcessBlock: block 1 " +
+			"is too far in the future")
+	}
 	if isOrphan {
 		t.Fatalf("ProcessBlock incorrectly returned block " +
 			"is an orphan\n")
 	}
-	if err != nil {
-		t.Fatalf("ProcessBlock fail on block 1: %v\n", err)
-	}

 	if notificationCount != numSubscribers {
 		t.Fatalf("Expected notification callback to be executed %d "+

@@ -1,105 +0,0 @@
package blockdag

import (
	"github.com/daglabs/btcd/dagconfig/daghash"
)

// phantom calculates and returns the block's blue set, selected parent and blue score.
// Chain start is determined by going down the DAG through the selected path
// (follow the selected parent of each block) k + 1 steps.
// The blue set of a block is the set of all blue blocks in its past.
// To optimize memory usage, for each block we are storing only the blue blocks in
// its selected parent's anticone that are in the future of the chain start,
// as well as the selected parent itself - the rest of the
// blue set can be restored by traversing the selected parent chain and combining
// the .blues of all blocks in it.
// The blue score is the total number of blocks in this block's blue set plus the blue score
// of the selected parent (the blue score of the genesis block is defined as 0).
// The selected parent is chosen by determining which block's parent will give this block the highest blue score.
func phantom(block *blockNode, k uint32) (blues []*blockNode, selectedParent *blockNode, score uint64) {
	bestScore := uint64(0)
	var bestParent *blockNode
	var bestBlues []*blockNode
	var bestHash *daghash.Hash
	for _, parent := range block.parents {
		chainStart := digToChainStart(parent, k)
		candidates := blueCandidates(chainStart)
		blues := traverseCandidates(block, candidates, parent)
		score := uint64(len(blues)) + parent.blueScore

		if score > bestScore || (score == bestScore && (bestHash == nil || daghash.Less(parent.hash, bestHash))) {
			bestScore = score
			bestBlues = blues
			bestParent = parent
			bestHash = parent.hash
		}
	}

	return bestBlues, bestParent, bestScore
}

// digToChainStart digs through the selected path and returns the block at depth k+1
func digToChainStart(parent *blockNode, k uint32) *blockNode {
	current := parent

	for i := uint32(0); i < k; i++ {
		if current.isGenesis() {
			break
		}
		current = current.selectedParent
	}

	return current
}

func blueCandidates(chainStart *blockNode) blockSet {
	candidates := newSet()
	candidates.add(chainStart)

	queue := []*blockNode{chainStart}
	for len(queue) > 0 {
		var current *blockNode
		current, queue = queue[0], queue[1:]

		children := current.children
		for _, child := range children {
			if !candidates.contains(child) {
				candidates.add(child)
				queue = append(queue, child)
			}
		}
	}

	return candidates
}

// traverseCandidates returns all the blocks that are in the future of the chain start and in the anticone of the selected parent
func traverseCandidates(newBlock *blockNode, candidates blockSet, selectedParent *blockNode) []*blockNode {
	blues := []*blockNode{}
	selectedParentPast := newSet()
	queue := NewDownHeap()
	visited := newSet()

	for _, parent := range newBlock.parents {
		queue.Push(parent)
	}

	for queue.Len() > 0 {
		current := queue.pop()
		if candidates.contains(current) {
			if current == selectedParent || selectedParentPast.anyChildInSet(current) {
				selectedParentPast.add(current)
			} else {
				blues = append(blues, current)
			}
			for _, parent := range current.parents {
				if !visited.contains(parent) {
					visited.add(parent)
					queue.Push(parent)
				}
			}
		}
	}

	return append(blues, selectedParent)
}

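The selection rule in phantom above can be restated compactly: the parent yielding the highest blue score wins, and ties break toward the lexicographically smaller hash so every node makes the same deterministic choice. A self-contained sketch of just that rule (the helper name selectBestParent is illustrative, not part of the codebase):

// selectBestParent restates the comparison used in phantom: highest
// score wins; equal scores fall back to daghash.Less on the hash.
func selectBestParent(parents []*blockNode, scoreOf func(*blockNode) uint64) (best *blockNode) {
	bestScore := uint64(0)
	for _, parent := range parents {
		score := scoreOf(parent)
		if best == nil || score > bestScore ||
			(score == bestScore && daghash.Less(parent.hash, best.hash)) {
			best, bestScore = parent, score
		}
	}
	return best
}
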
@@ -1,897 +0,0 @@
package blockdag

import (
	"fmt"
	"reflect"
	"sort"
	"testing"
	"time"

	"github.com/daglabs/btcd/dagconfig/daghash"

	"github.com/daglabs/btcd/dagconfig"
)

type testBlockData struct {
	parents                []string
	id                     string // id is a virtual entity that is used only for tests, so we can define relations between blocks without knowing their hash
	expectedScore          uint64
	expectedSelectedParent string
	expectedBlues          []string
}

type hashIDPair struct {
	hash *daghash.Hash
	id   string
}

// TestPhantom iterates over several DAG simulations and checks
// that the blue score, blue set and selected parent of each
// block are calculated as expected.
func TestPhantom(t *testing.T) {
	netParams := dagconfig.SimNetParams

	blockVersion := int32(0x10000000)

	tests := []struct {
		k              uint32
		dagData        []*testBlockData
		virtualBlockID string
		expectedReds   []string
	}{
		{
			// Block hash order: AKJIHGFEDCB
			k:              1,
			virtualBlockID: "K",
			expectedReds:   []string{"D"},
			dagData: []*testBlockData{
				{parents: []string{"A"}, id: "B", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"A"}, id: "C", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"B"}, id: "D", expectedScore: 2, expectedSelectedParent: "B", expectedBlues: []string{"B"}},
				{parents: []string{"B"}, id: "E", expectedScore: 2, expectedSelectedParent: "B", expectedBlues: []string{"B"}},
				{parents: []string{"C"}, id: "F", expectedScore: 2, expectedSelectedParent: "C", expectedBlues: []string{"C"}},
				{parents: []string{"C", "D"}, id: "G", expectedScore: 4, expectedSelectedParent: "C", expectedBlues: []string{"D", "B", "C"}},
				{parents: []string{"C", "E"}, id: "H", expectedScore: 4, expectedSelectedParent: "C", expectedBlues: []string{"E", "B", "C"}},
				{parents: []string{"E", "G"}, id: "I", expectedScore: 5, expectedSelectedParent: "E", expectedBlues: []string{"G", "D", "E"}},
				{parents: []string{"F"}, id: "J", expectedScore: 3, expectedSelectedParent: "F", expectedBlues: []string{"F"}},
				{parents: []string{"H", "I", "J"}, id: "K", expectedScore: 9, expectedSelectedParent: "H", expectedBlues: []string{"I", "J", "G", "F", "H"}},
			},
		},
		{
			// Block hash order: AVUTSRQPONMLKJIHGFEDCB
			k:              2,
			virtualBlockID: "V",
			expectedReds:   []string{"D", "J", "P"},
			dagData: []*testBlockData{
				{parents: []string{"A"}, id: "B", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"A"}, id: "C", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"B"}, id: "D", expectedScore: 2, expectedSelectedParent: "B", expectedBlues: []string{"B"}},
				{parents: []string{"B"}, id: "E", expectedScore: 2, expectedSelectedParent: "B", expectedBlues: []string{"B"}},
				{parents: []string{"C"}, id: "F", expectedScore: 2, expectedSelectedParent: "C", expectedBlues: []string{"C"}},
				{parents: []string{"C"}, id: "G", expectedScore: 2, expectedSelectedParent: "C", expectedBlues: []string{"C"}},
				{parents: []string{"G"}, id: "H", expectedScore: 3, expectedSelectedParent: "G", expectedBlues: []string{"G"}},
				{parents: []string{"E"}, id: "I", expectedScore: 3, expectedSelectedParent: "E", expectedBlues: []string{"E"}},
				{parents: []string{"E"}, id: "J", expectedScore: 3, expectedSelectedParent: "E", expectedBlues: []string{"E"}},
				{parents: []string{"I"}, id: "K", expectedScore: 4, expectedSelectedParent: "I", expectedBlues: []string{"I"}},
				{parents: []string{"K", "H"}, id: "L", expectedScore: 5, expectedSelectedParent: "K", expectedBlues: []string{"K"}},
				{parents: []string{"F", "L"}, id: "M", expectedScore: 10, expectedSelectedParent: "F", expectedBlues: []string{"L", "K", "I", "H", "G", "E", "B", "F"}},
				{parents: []string{"G", "K"}, id: "N", expectedScore: 7, expectedSelectedParent: "G", expectedBlues: []string{"K", "I", "E", "B", "G"}},
				{parents: []string{"J", "N"}, id: "O", expectedScore: 8, expectedSelectedParent: "N", expectedBlues: []string{"N"}},
				{parents: []string{"D"}, id: "P", expectedScore: 3, expectedSelectedParent: "D", expectedBlues: []string{"D"}},
				{parents: []string{"O", "P"}, id: "Q", expectedScore: 10, expectedSelectedParent: "P", expectedBlues: []string{"O", "N", "K", "J", "I", "E", "P"}},
				{parents: []string{"L", "Q"}, id: "R", expectedScore: 11, expectedSelectedParent: "Q", expectedBlues: []string{"Q"}},
				{parents: []string{"M", "R"}, id: "S", expectedScore: 15, expectedSelectedParent: "M", expectedBlues: []string{"R", "Q", "O", "N", "M"}},
				{parents: []string{"H", "F"}, id: "T", expectedScore: 5, expectedSelectedParent: "F", expectedBlues: []string{"H", "G", "F"}},
				{parents: []string{"M", "T"}, id: "U", expectedScore: 12, expectedSelectedParent: "M", expectedBlues: []string{"T", "M"}},
				{parents: []string{"S", "U"}, id: "V", expectedScore: 18, expectedSelectedParent: "S", expectedBlues: []string{"U", "T", "S"}},
			},
		},
		{
			// Block hash order: AXWVUTSRQPONMLKJIHGFEDCB
			k:              1,
			virtualBlockID: "X",
			expectedReds:   []string{"D", "F", "G", "H", "J", "K", "L", "N", "O", "Q", "R", "S", "U", "V"},
			dagData: []*testBlockData{
				{parents: []string{"A"}, id: "B", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"A"}, id: "C", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"A"}, id: "D", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"A"}, id: "E", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"B"}, id: "F", expectedScore: 2, expectedSelectedParent: "B", expectedBlues: []string{"B"}},
				{parents: []string{"B"}, id: "G", expectedScore: 2, expectedSelectedParent: "B", expectedBlues: []string{"B"}},
				{parents: []string{"C"}, id: "H", expectedScore: 2, expectedSelectedParent: "C", expectedBlues: []string{"C"}},
				{parents: []string{"C"}, id: "I", expectedScore: 2, expectedSelectedParent: "C", expectedBlues: []string{"C"}},
				{parents: []string{"B"}, id: "J", expectedScore: 2, expectedSelectedParent: "B", expectedBlues: []string{"B"}},
				{parents: []string{"D"}, id: "K", expectedScore: 2, expectedSelectedParent: "D", expectedBlues: []string{"D"}},
				{parents: []string{"D"}, id: "L", expectedScore: 2, expectedSelectedParent: "D", expectedBlues: []string{"D"}},
				{parents: []string{"E"}, id: "M", expectedScore: 2, expectedSelectedParent: "E", expectedBlues: []string{"E"}},
				{parents: []string{"E"}, id: "N", expectedScore: 2, expectedSelectedParent: "E", expectedBlues: []string{"E"}},
				{parents: []string{"F", "G", "J"}, id: "O", expectedScore: 5, expectedSelectedParent: "F", expectedBlues: []string{"J", "G", "F"}},
				{parents: []string{"B", "M", "I"}, id: "P", expectedScore: 6, expectedSelectedParent: "B", expectedBlues: []string{"M", "I", "E", "C", "B"}},
				{parents: []string{"K", "E"}, id: "Q", expectedScore: 4, expectedSelectedParent: "E", expectedBlues: []string{"K", "D", "E"}},
				{parents: []string{"L", "N"}, id: "R", expectedScore: 3, expectedSelectedParent: "L", expectedBlues: []string{"L"}},
				{parents: []string{"I", "Q"}, id: "S", expectedScore: 5, expectedSelectedParent: "Q", expectedBlues: []string{"Q"}},
				{parents: []string{"K", "P"}, id: "T", expectedScore: 7, expectedSelectedParent: "P", expectedBlues: []string{"P"}},
				{parents: []string{"K", "L"}, id: "U", expectedScore: 4, expectedSelectedParent: "K", expectedBlues: []string{"L", "K"}},
				{parents: []string{"U", "R"}, id: "V", expectedScore: 5, expectedSelectedParent: "R", expectedBlues: []string{"U", "R"}},
				{parents: []string{"S", "U", "T"}, id: "W", expectedScore: 8, expectedSelectedParent: "T", expectedBlues: []string{"T"}},
				{parents: []string{"V", "W", "H"}, id: "X", expectedScore: 9, expectedSelectedParent: "W", expectedBlues: []string{"W"}},
			},
		},
		{
			// Secret mining attack: the attacker is mining
			// blocks B, C, D, E, F, G, T in secret without propagating
			// them, so all of them except T should be red, because
			// they don't follow the rules of PHANTOM that require
			// you to point to all the parents that you know, and
			// to propagate your block as soon as it's mined.

			// Block hash order: AYXWVUTSRQPONMLKJIHGFEDCB
			k:              1,
			virtualBlockID: "Y",
			expectedReds:   []string{"B", "C", "D", "E", "F", "G", "L"},
			dagData: []*testBlockData{
				{parents: []string{"A"}, id: "B", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"B"}, id: "C", expectedScore: 2, expectedSelectedParent: "B", expectedBlues: []string{"B"}},
				{parents: []string{"C"}, id: "D", expectedScore: 3, expectedSelectedParent: "C", expectedBlues: []string{"C"}},
				{parents: []string{"D"}, id: "E", expectedScore: 4, expectedSelectedParent: "D", expectedBlues: []string{"D"}},
				{parents: []string{"E"}, id: "F", expectedScore: 5, expectedSelectedParent: "E", expectedBlues: []string{"E"}},
				{parents: []string{"F"}, id: "G", expectedScore: 6, expectedSelectedParent: "F", expectedBlues: []string{"F"}},
				{parents: []string{"A"}, id: "H", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"A"}, id: "I", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"H", "I"}, id: "J", expectedScore: 3, expectedSelectedParent: "H", expectedBlues: []string{"I", "H"}},
				{parents: []string{"H", "I"}, id: "K", expectedScore: 3, expectedSelectedParent: "H", expectedBlues: []string{"I", "H"}},
				{parents: []string{"I"}, id: "L", expectedScore: 2, expectedSelectedParent: "I", expectedBlues: []string{"I"}},
				{parents: []string{"J", "K", "L"}, id: "M", expectedScore: 5, expectedSelectedParent: "J", expectedBlues: []string{"K", "J"}},
				{parents: []string{"J", "K", "L"}, id: "N", expectedScore: 5, expectedSelectedParent: "J", expectedBlues: []string{"K", "J"}},
				{parents: []string{"N", "M"}, id: "O", expectedScore: 7, expectedSelectedParent: "M", expectedBlues: []string{"N", "M"}},
				{parents: []string{"N", "M"}, id: "P", expectedScore: 7, expectedSelectedParent: "M", expectedBlues: []string{"N", "M"}},
				{parents: []string{"N", "M"}, id: "Q", expectedScore: 7, expectedSelectedParent: "M", expectedBlues: []string{"N", "M"}},
				{parents: []string{"O", "P", "Q"}, id: "R", expectedScore: 10, expectedSelectedParent: "O", expectedBlues: []string{"Q", "P", "O"}},
				{parents: []string{"O", "P", "Q"}, id: "S", expectedScore: 10, expectedSelectedParent: "O", expectedBlues: []string{"Q", "P", "O"}},
				{parents: []string{"G", "S", "R"}, id: "T", expectedScore: 12, expectedSelectedParent: "R", expectedBlues: []string{"S", "R"}},
				{parents: []string{"S", "R"}, id: "U", expectedScore: 12, expectedSelectedParent: "R", expectedBlues: []string{"S", "R"}},
				{parents: []string{"T", "U"}, id: "V", expectedScore: 14, expectedSelectedParent: "T", expectedBlues: []string{"U", "T"}},
				{parents: []string{"T", "U"}, id: "W", expectedScore: 14, expectedSelectedParent: "T", expectedBlues: []string{"U", "T"}},
				{parents: []string{"U", "T"}, id: "X", expectedScore: 14, expectedSelectedParent: "T", expectedBlues: []string{"U", "T"}},
				{parents: []string{"V", "W", "X"}, id: "Y", expectedScore: 17, expectedSelectedParent: "V", expectedBlues: []string{"X", "W", "V"}},
			},
		},
		{
			// Censorship mining attack: the attacker is mining blocks B, C, D, E, F, G in secret without propagating them,
			// so all of them except B and C should be red, because they don't follow the rules of
			// PHANTOM that require you to point to all the parents that you know.

			// Block hash order: AYXWVUTSRQPONMLKJIHGFEDCB
			k:              1,
			virtualBlockID: "Y",
			expectedReds:   []string{"D", "E", "F", "G", "L"},
			dagData: []*testBlockData{
				{parents: []string{"A"}, id: "B", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"B"}, id: "C", expectedScore: 2, expectedSelectedParent: "B", expectedBlues: []string{"B"}},
				{parents: []string{"C"}, id: "D", expectedScore: 3, expectedSelectedParent: "C", expectedBlues: []string{"C"}},
				{parents: []string{"D"}, id: "E", expectedScore: 4, expectedSelectedParent: "D", expectedBlues: []string{"D"}},
				{parents: []string{"E"}, id: "F", expectedScore: 5, expectedSelectedParent: "E", expectedBlues: []string{"E"}},
				{parents: []string{"F"}, id: "G", expectedScore: 6, expectedSelectedParent: "F", expectedBlues: []string{"F"}},
				{parents: []string{"A"}, id: "H", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"A"}, id: "I", expectedScore: 1, expectedSelectedParent: "A", expectedBlues: []string{"A"}},
				{parents: []string{"H", "I", "B"}, id: "J", expectedScore: 4, expectedSelectedParent: "B", expectedBlues: []string{"I", "H", "B"}},
				{parents: []string{"H", "I", "B"}, id: "K", expectedScore: 4, expectedSelectedParent: "B", expectedBlues: []string{"I", "H", "B"}},
				{parents: []string{"I"}, id: "L", expectedScore: 2, expectedSelectedParent: "I", expectedBlues: []string{"I"}},
				{parents: []string{"J", "K", "L", "C"}, id: "M", expectedScore: 7, expectedSelectedParent: "J", expectedBlues: []string{"K", "C", "J"}},
				{parents: []string{"J", "K", "L", "C"}, id: "N", expectedScore: 7, expectedSelectedParent: "J", expectedBlues: []string{"K", "C", "J"}},
				{parents: []string{"N", "M", "D"}, id: "O", expectedScore: 9, expectedSelectedParent: "M", expectedBlues: []string{"N", "M"}},
				{parents: []string{"N", "M", "D"}, id: "P", expectedScore: 9, expectedSelectedParent: "M", expectedBlues: []string{"N", "M"}},
				{parents: []string{"N", "M", "D"}, id: "Q", expectedScore: 9, expectedSelectedParent: "M", expectedBlues: []string{"N", "M"}},
				{parents: []string{"O", "P", "Q", "E"}, id: "R", expectedScore: 12, expectedSelectedParent: "O", expectedBlues: []string{"Q", "P", "O"}},
				{parents: []string{"O", "P", "Q", "E"}, id: "S", expectedScore: 12, expectedSelectedParent: "O", expectedBlues: []string{"Q", "P", "O"}},
				{parents: []string{"G", "S", "R"}, id: "T", expectedScore: 14, expectedSelectedParent: "R", expectedBlues: []string{"S", "R"}},
				{parents: []string{"S", "R", "F"}, id: "U", expectedScore: 14, expectedSelectedParent: "R", expectedBlues: []string{"S", "R"}},
				{parents: []string{"T", "U"}, id: "V", expectedScore: 16, expectedSelectedParent: "T", expectedBlues: []string{"U", "T"}},
				{parents: []string{"T", "U"}, id: "W", expectedScore: 16, expectedSelectedParent: "T", expectedBlues: []string{"U", "T"}},
				{parents: []string{"T", "U"}, id: "X", expectedScore: 16, expectedSelectedParent: "T", expectedBlues: []string{"U", "T"}},
				{parents: []string{"V", "W", "X"}, id: "Y", expectedScore: 19, expectedSelectedParent: "V", expectedBlues: []string{"X", "W", "V"}},
			},
		},
	}

	for i, test := range tests {
		netParams.K = test.k
		// Generate enough synthetic blocks for the rest of the test
		blockDAG := newTestDAG(&netParams)
		genesisNode := blockDAG.genesis
		blockTime := genesisNode.Header().Timestamp
		blockByIDMap := make(map[string]*blockNode)
		idByBlockMap := make(map[*blockNode]string)
		blockByIDMap["A"] = genesisNode
		idByBlockMap[genesisNode] = "A"

		for _, blockData := range test.dagData {
			blockTime = blockTime.Add(time.Second)
			parents := blockSet{}
			for _, parentID := range blockData.parents {
				parent := blockByIDMap[parentID]
				parents.add(parent)
			}
			node := newTestNode(parents, blockVersion, 0, blockTime, test.k)
			node.hash = &daghash.Hash{} // It helps to predict hash order
			for i, char := range blockData.id {
				node.hash[i] = byte(char)
			}

			blockDAG.index.AddNode(node)
			addNodeAsChildToParents(node)

			blockByIDMap[blockData.id] = node
			idByBlockMap[node] = blockData.id

			bluesIDs := make([]string, 0, len(node.blues))
			for _, blue := range node.blues {
				bluesIDs = append(bluesIDs, idByBlockMap[blue])
			}
			selectedParentID := idByBlockMap[node.selectedParent]
			fullDataStr := fmt.Sprintf("blues: %v, selectedParent: %v, score: %v",
				bluesIDs, selectedParentID, node.blueScore)
			if blockData.expectedScore != node.blueScore {
				t.Errorf("Test %d: Block %v expected to have score %v but got %v (fulldata: %v)",
					i, blockData.id, blockData.expectedScore, node.blueScore, fullDataStr)
			}
			if blockData.expectedSelectedParent != selectedParentID {
				t.Errorf("Test %d: Block %v expected to have selected parent %v but got %v (fulldata: %v)",
					i, blockData.id, blockData.expectedSelectedParent, selectedParentID, fullDataStr)
			}
			if !reflect.DeepEqual(blockData.expectedBlues, bluesIDs) {
				t.Errorf("Test %d: Block %v expected to have blues %v but got %v (fulldata: %v)",
					i, blockData.id, blockData.expectedBlues, bluesIDs, fullDataStr)
			}
		}

		reds := make(map[string]bool)

		for id := range blockByIDMap {
			reds[id] = true
		}

		for tip := blockByIDMap[test.virtualBlockID]; tip.selectedParent != nil; tip = tip.selectedParent {
			tipID := idByBlockMap[tip]
			delete(reds, tipID)
			for _, blue := range tip.blues {
				blueID := idByBlockMap[blue]
				delete(reds, blueID)
			}
		}
		if !checkReds(test.expectedReds, reds) {
			redsIDs := make([]string, 0, len(reds))
			for id := range reds {
				redsIDs = append(redsIDs, id)
			}
			sort.Strings(redsIDs)
			sort.Strings(test.expectedReds)
			t.Errorf("Test %d: Expected reds %v but got %v", i, test.expectedReds, redsIDs)
		}

	}
}

func checkReds(expectedReds []string, reds map[string]bool) bool {
	if len(expectedReds) != len(reds) {
		return false
	}
	for _, redID := range expectedReds {
		if !reds[redID] {
			return false
		}
	}
	return true
}

@@ -6,22 +6,22 @@ package blockdag

 import (
 	"fmt"
+	"github.com/kaspanet/kaspad/dagconfig"
+	"github.com/pkg/errors"
 	"time"

-	"github.com/daglabs/btcd/dagconfig/daghash"
-	"github.com/daglabs/btcd/database"
-	"github.com/daglabs/btcd/util"
+	"github.com/kaspanet/kaspad/util"
+	"github.com/kaspanet/kaspad/util/daghash"
 )

 // BehaviorFlags is a bitmask defining tweaks to the normal behavior when
-// performing chain processing and consensus rules checks.
+// performing DAG processing and consensus rules checks.
 type BehaviorFlags uint32

 const (
 	// BFFastAdd may be set to indicate that several checks can be avoided
-	// for the block since it is already known to fit into the chain due to
-	// already proving it correct links into the chain up to a known
-	// checkpoint. This is primarily used for headers-first mode.
+	// for the block since it is already known to fit into the DAG due to
+	// already proving it correct links into the DAG.
 	BFFastAdd BehaviorFlags = 1 << iota

 	// BFNoPoWCheck may be set to indicate the proof of work check which
@@ -29,46 +29,36 @@ const (
 	// not be performed.
 	BFNoPoWCheck

 	// BFWasUnorphaned may be set to indicate that a block was just now
 	// unorphaned
 	BFWasUnorphaned

 	// BFAfterDelay may be set to indicate that a block had a timestamp too far
 	// in the future and has just finished its delay
 	BFAfterDelay

 	// BFIsSync may be set to indicate that the block was sent as part of the
 	// netsync process
 	BFIsSync

 	// BFWasStored is set to indicate that the block was previously stored
 	// in the block index but was never fully processed
 	BFWasStored

 	// BFDisallowDelay is set to indicate that a delayed block should be rejected.
 	// This is used for the case where a block is submitted through RPC.
 	BFDisallowDelay

 	// BFNone is a convenience value to specifically indicate no flags.
 	BFNone BehaviorFlags = 0
 )

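Since BehaviorFlags is a plain bitmask, callers combine flags with | and the code below tests them with &. A small illustrative sketch (variable names are made up):

flags := BFFastAdd | BFIsSync
fastAdd := flags&BFFastAdd == BFFastAdd          // true: the BFFastAdd bit is set
noPoWCheck := flags&BFNoPoWCheck == BFNoPoWCheck // false: that bit was not set
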
-// BlockExists determines whether a block with the given hash exists in
+// IsInDAG determines whether a block with the given hash exists in
 // the DAG.
 //
 // This function is safe for concurrent access.
-func (dag *BlockDAG) BlockExists(hash *daghash.Hash) (bool, error) {
-	// Check block index first (could be main chain or side chain blocks).
-	if dag.index.HaveBlock(hash) {
-		return true, nil
-	}
-
-	// Check in the database.
-	var exists bool
-	err := dag.db.View(func(dbTx database.Tx) error {
-		var err error
-		exists, err = dbTx.HasBlock(hash)
-		if err != nil || !exists {
-			return err
-		}
-
-		// Ignore side chain blocks in the database. This is necessary
-		// because there is not currently any record of the associated
-		// block index data such as its block height, so it's not yet
-		// possible to efficiently load the block and do anything useful
-		// with it.
-		//
-		// Ultimately the entire block index should be serialized
-		// instead of only the current main chain so it can be consulted
-		// directly.
-		_, err = dbFetchHeightByHash(dbTx, hash)
-		if isNotInDAGErr(err) {
-			exists = false
-			return nil
-		}
-		return err
-	})
-	return exists, err
+func (dag *BlockDAG) IsInDAG(hash *daghash.Hash) bool {
+	return dag.index.HaveBlock(hash)
 }

 // processOrphans determines if there are any orphans which depend on the passed
@@ -79,9 +69,9 @@ func (dag *BlockDAG) BlockExists(hash *daghash.Hash) (bool, error) {
 // The flags do not modify the behavior of this function directly, however they
 // are needed to pass along to maybeAcceptBlock.
 //
-// This function MUST be called with the chain state lock held (for writes).
+// This function MUST be called with the DAG state lock held (for writes).
 func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) error {
 	// Start with processing at least the passed hash. Leave a little room
 	// for additional orphan blocks that need to be processed without
 	// needing to grow the array in the common case.
 	processHashes := make([]*daghash.Hash, 0, 10)
@@ -93,10 +83,7 @@ func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) err
 		processHashes = processHashes[1:]

 		// Look up all orphans that are parented by the block we just
-		// accepted. This will typically only be one, but it could
-		// be multiple if multiple blocks are mined and broadcast
-		// around the same time. The one with the most proof of work
-		// will eventually win out. An indexing for loop is
+		// accepted. An indexing for loop is
 		// intentionally used over a range here as range does not
 		// reevaluate the slice on each iteration nor does it adjust the
 		// index for the modified slice.
@@ -109,15 +96,31 @@ func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) err
 			continue
 		}

+		// Skip this orphan if one or more of its parents are
+		// still missing.
+		_, err := lookupParentNodes(orphan.block, dag)
+		if err != nil {
+			var ruleErr RuleError
+			if ok := errors.As(err, &ruleErr); ok && ruleErr.ErrorCode == ErrParentBlockUnknown {
+				continue
+			}
+			return err
+		}
+
 		// Remove the orphan from the orphan pool.
 		orphanHash := orphan.block.Hash()
 		dag.removeOrphanBlock(orphan)
 		i--

-		// Potentially accept the block into the block chain.
-		err := dag.maybeAcceptBlock(orphan.block, flags)
+		// Potentially accept the block into the block DAG.
+		err = dag.maybeAcceptBlock(orphan.block, flags|BFWasUnorphaned)
 		if err != nil {
-			return err
+			// Since we don't want to reject the original block because of
+			// a bad unorphaned child, only return an error if it's not a RuleError.
+			if !errors.As(err, &RuleError{}) {
+				return err
+			}
+			log.Warnf("Verification failed for orphan block %s: %s", orphanHash, err)
 		}

 		// Add this block to the list of blocks to process so
@@ -130,7 +133,7 @@ func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) err
 }

 // ProcessBlock is the main workhorse for handling insertion of new blocks into
-// the block chain. It includes functionality such as rejecting duplicate
+// the block DAG. It includes functionality such as rejecting duplicate
 // blocks, ensuring blocks follow all rules, orphan handling, and insertion into
 // the block DAG.
 //
@@ -138,102 +141,101 @@ func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) err
 // whether or not the block is an orphan.
 //
 // This function is safe for concurrent access.
-func (dag *BlockDAG) ProcessBlock(block *util.Block, flags BehaviorFlags) (bool, error) {
+func (dag *BlockDAG) ProcessBlock(block *util.Block, flags BehaviorFlags) (isOrphan bool, isDelayed bool, err error) {
 	dag.dagLock.Lock()
 	defer dag.dagLock.Unlock()
+	return dag.processBlockNoLock(block, flags)
+}

-	fastAdd := flags&BFFastAdd == BFFastAdd
+func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags) (isOrphan bool, isDelayed bool, err error) {
+	isAfterDelay := flags&BFAfterDelay == BFAfterDelay
+	wasBlockStored := flags&BFWasStored == BFWasStored
+	disallowDelay := flags&BFDisallowDelay == BFDisallowDelay

 	blockHash := block.Hash()
 	log.Tracef("Processing block %s", blockHash)

-	// The block must not already exist in the main chain or side chains.
-	exists, err := dag.BlockExists(blockHash)
-	if err != nil {
-		return false, err
-	}
-	if exists {
+	// The block must not already exist in the DAG.
+	if dag.IsInDAG(blockHash) && !wasBlockStored {
 		str := fmt.Sprintf("already have block %s", blockHash)
-		return false, ruleError(ErrDuplicateBlock, str)
+		return false, false, ruleError(ErrDuplicateBlock, str)
 	}

 	// The block must not already exist as an orphan.
 	if _, exists := dag.orphans[*blockHash]; exists {
 		str := fmt.Sprintf("already have block (orphan) %s", blockHash)
-		return false, ruleError(ErrDuplicateBlock, str)
+		return false, false, ruleError(ErrDuplicateBlock, str)
 	}

-	// Perform preliminary sanity checks on the block and its transactions.
-	err = dag.checkBlockSanity(block, flags)
-	if err != nil {
-		return false, err
-	}
+	if dag.isKnownDelayedBlock(blockHash) {
+		str := fmt.Sprintf("already have block (delayed) %s", blockHash)
+		return false, false, ruleError(ErrDuplicateBlock, str)
+	}

-	// Find the previous checkpoint and perform some additional checks based
-	// on the checkpoint. This provides a few nice properties such as
-	// preventing old side chain blocks before the last checkpoint,
-	// rejecting easy to mine, but otherwise bogus, blocks that could be
-	// used to eat memory, and ensuring expected (versus claimed) proof of
-	// work requirements since the previous checkpoint are met.
-	blockHeader := &block.MsgBlock().Header
-	checkpointNode, err := dag.findPreviousCheckpoint()
-	if err != nil {
-		return false, err
-	}
-	if checkpointNode != nil {
-		// Ensure the block timestamp is after the checkpoint timestamp.
-		checkpointTime := time.Unix(checkpointNode.timestamp, 0)
-		if blockHeader.Timestamp.Before(checkpointTime) {
-			str := fmt.Sprintf("block %s has timestamp %s before "+
-				"last checkpoint timestamp %s", blockHash,
-				blockHeader.Timestamp, checkpointTime)
-			return false, ruleError(ErrCheckpointTimeTooOld, str)
-		}
-		if !fastAdd {
-			// Even though the checks prior to now have already ensured the
-			// proof of work exceeds the claimed amount, the claimed amount
-			// is a field in the block header which could be forged. This
-			// check ensures the proof of work is at least the minimum
-			// expected based on elapsed time since the last checkpoint and
-			// maximum adjustment allowed by the retarget rules.
-			duration := blockHeader.Timestamp.Sub(checkpointTime)
-			requiredTarget := util.CompactToBig(dag.calcEasiestDifficulty(
-				checkpointNode.bits, duration))
-			currentTarget := util.CompactToBig(blockHeader.Bits)
-			if currentTarget.Cmp(requiredTarget) > 0 {
-				str := fmt.Sprintf("block target difficulty of %064x "+
-					"is too low when compared to the previous "+
-					"checkpoint", currentTarget)
-				return false, ruleError(ErrDifficultyTooLow, str)
-			}
-		}
-	}
+	if !isAfterDelay {
+		// Perform preliminary sanity checks on the block and its transactions.
+		delay, err := dag.checkBlockSanity(block, flags)
+		if err != nil {
+			return false, false, err
+		}
+
+		if delay != 0 && disallowDelay {
+			str := fmt.Sprintf("Cannot process blocks beyond the allowed time offset while the BFDisallowDelay flag is raised %s", blockHash)
+			return false, true, ruleError(ErrDelayedBlockIsNotAllowed, str)
+		}
+
+		if delay != 0 {
+			err = dag.addDelayedBlock(block, delay)
+			if err != nil {
+				return false, false, err
+			}
+			return false, true, nil
+		}
+	}

+	var missingParents []*daghash.Hash
+	for _, parentHash := range block.MsgBlock().Header.ParentHashes {
+		if !dag.IsInDAG(parentHash) {
+			missingParents = append(missingParents, parentHash)
+		}
+	}
+
+	// Handle the case of a block with a valid timestamp (non-delayed) which points to a delayed block.
+	delay, isParentDelayed := dag.maxDelayOfParents(missingParents)
+	if isParentDelayed {
+		// Add Nanosecond to ensure that parent process time will be after its child.
+		delay += time.Nanosecond
+		err := dag.addDelayedBlock(block, delay)
+		if err != nil {
+			return false, false, err
+		}
+		return false, true, err
+	}
+
 	// Handle orphan blocks.
-	allParentsExist := true
-	for _, parentHash := range blockHeader.ParentHashes {
-		parentExists, err := dag.BlockExists(parentHash)
-		if err != nil {
-			return false, err
-		}
-
-		if !parentExists {
-			log.Infof("Adding orphan block %s with parent %s", blockHash, parentHash)
-			dag.addOrphanBlock(block)
-
-			allParentsExist = false
-		}
-	}
-
-	if !allParentsExist {
-		return true, nil
+	if len(missingParents) > 0 {
+		// Some orphans during netsync are a normal part of the process, since the anticone
+		// of the chain-split is never explicitly requested.
+		// Therefore, if we are during netsync - don't report orphans to default logs.
+		//
+		// The number K*2 was chosen since in peace times anticone is limited to K blocks,
+		// while some red block can make it a bit bigger, but much more than that indicates
+		// there might be some problem with the netsync process.
+		if flags&BFIsSync == BFIsSync && dagconfig.KType(len(dag.orphans)) < dag.dagParams.K*2 {
+			log.Debugf("Adding orphan block %s. This is normal part of netsync process", blockHash)
+		} else {
+			log.Infof("Adding orphan block %s", blockHash)
+		}
+		dag.addOrphanBlock(block)
+
+		return true, false, nil
 	}

 	// The block has passed all context independent checks and appears sane
 	// enough to potentially accept it into the block DAG.
 	err = dag.maybeAcceptBlock(block, flags)
 	if err != nil {
-		return false, err
+		return false, false, err
 	}

 	// Accept any orphan blocks that depend on this block (they are
@@ -241,10 +243,33 @@ func (dag *BlockDAG) ProcessBlock(block *util.Block, flags BehaviorFlags) (bool,
 	// there are no more.
 	err = dag.processOrphans(blockHash, flags)
 	if err != nil {
-		return false, err
+		return false, false, err
 	}

+	if !isAfterDelay {
+		err = dag.processDelayedBlocks()
+		if err != nil {
+			return false, false, err
+		}
+	}
+
 	log.Debugf("Accepted block %s", blockHash)

-	return false, nil
+	return false, false, nil
 }
+
+// maxDelayOfParents returns the maximum delay of the given block hashes.
+// Note that delay could be 0, but isDelayed will return true. This is the case where the parent process time is due.
+func (dag *BlockDAG) maxDelayOfParents(parentHashes []*daghash.Hash) (delay time.Duration, isDelayed bool) {
+	for _, parentHash := range parentHashes {
+		if delayedParent, exists := dag.delayedBlocks[*parentHash]; exists {
+			isDelayed = true
+			parentDelay := delayedParent.processTime.Sub(dag.AdjustedTime())
+			if parentDelay > delay {
+				delay = parentDelay
+			}
+		}
+	}
+
+	return delay, isDelayed
+}

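A caller-side sketch of the three-value ProcessBlock contract introduced above (dag and block are assumed to be in scope): a delayed block is not an error, it is queued internally and retried later.

isOrphan, isDelayed, err := dag.ProcessBlock(block, BFNone)
if err != nil {
	// A rule violation or storage failure; the block was rejected.
	return err
}
if isDelayed {
	// Timestamp too far in the future; the DAG holds the block and
	// reprocesses it via processDelayedBlocks once its time arrives.
}
if isOrphan {
	// One or more parents are unknown; the block waits in the orphan
	// pool until its parents arrive and processOrphans picks it up.
}
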
238
blockdag/process_test.go
Normal file
@@ -0,0 +1,238 @@
package blockdag

import (
	"github.com/kaspanet/kaspad/util"
	"path/filepath"
	"testing"
	"time"

	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/util/daghash"
)

func TestProcessOrphans(t *testing.T) {
	dag, teardownFunc, err := DAGSetup("TestProcessOrphans", Config{
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
		t.Errorf("Failed to setup dag instance: %v", err)
		return
	}
	defer teardownFunc()

	dag.TestSetCoinbaseMaturity(0)

	blocksFile := "blk_0_to_4.dat"
	blocks, err := LoadBlocks(filepath.Join("testdata/", blocksFile))
	if err != nil {
		t.Fatalf("TestProcessOrphans: "+
			"Error loading file '%s': %s\n", blocksFile, err)
	}

	// Get a reference to a parent block
	parentBlock := blocks[1]

	// Get a reference to a child block and mess with it so that:
	// a. It gets added to the orphan pool
	// b. It gets rejected once it's unorphaned
	childBlock := blocks[2]
	childBlock.MsgBlock().Header.UTXOCommitment = &daghash.ZeroHash

	// Process the child block so that it gets added to the orphan pool
	isOrphan, isDelayed, err := dag.ProcessBlock(childBlock, BFNoPoWCheck)
	if err != nil {
		t.Fatalf("TestProcessOrphans: child block unexpectedly returned an error: %s", err)
	}
	if isDelayed {
		t.Fatalf("TestProcessOrphans: child block is too far in the future")
	}
	if !isOrphan {
		t.Fatalf("TestProcessOrphans: incorrectly returned that child block is not an orphan")
	}

	// Process the parent block. Note that this will attempt to unorphan the child block
	isOrphan, isDelayed, err = dag.ProcessBlock(parentBlock, BFNone)
	if err != nil {
		t.Fatalf("TestProcessOrphans: parent block unexpectedly returned an error: %s", err)
	}
	if isDelayed {
		t.Fatalf("TestProcessOrphans: parent block is too far in the future")
	}
	if isOrphan {
		t.Fatalf("TestProcessOrphans: incorrectly returned that parent block is an orphan")
	}

	// Make sure that the child block had been rejected
	node := dag.index.LookupNode(childBlock.Hash())
	if node == nil {
		t.Fatalf("TestProcessOrphans: child block missing from block index")
	}
	if !dag.index.NodeStatus(node).KnownInvalid() {
		t.Fatalf("TestProcessOrphans: child block erroneously not marked as invalid")
	}
}

type fakeTimeSource struct {
	time time.Time
}

func (fts *fakeTimeSource) AdjustedTime() time.Time {
	return fts.time
}

func (fts *fakeTimeSource) AddTimeSample(_ string, _ time.Time) {
}

func (fts *fakeTimeSource) Offset() time.Duration {
	return 0
}

func TestProcessDelayedBlocks(t *testing.T) {
	// We use dag1 so we can build the test blocks with the proper
	// block header (UTXO commitment, acceptedIDMerkleRoot, etc.), and
	// then we use dag2 for the actual test.
	dag1, teardownFunc, err := DAGSetup("TestProcessDelayedBlocks1", Config{
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
		t.Fatalf("Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc()

	initialTime := dag1.dagParams.GenesisBlock.Header.Timestamp
	// Here we use a fake time source that returns a timestamp
	// one hour into the future to make delayedBlock artificially
	// valid.
	dag1.timeSource = &fakeTimeSource{initialTime.Add(time.Hour)}

	delayedBlock, err := PrepareBlockForTest(dag1, []*daghash.Hash{dag1.dagParams.GenesisBlock.BlockHash()}, nil)
	if err != nil {
		t.Fatalf("error in PrepareBlockForTest: %s", err)
	}

	blockDelay := time.Duration(dag1.dagParams.TimestampDeviationTolerance+5) * time.Second
	delayedBlock.Header.Timestamp = initialTime.Add(blockDelay)

	isOrphan, isDelayed, err := dag1.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
	if err != nil {
		t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
	}
	if isOrphan {
		t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
			"is an orphan\n")
	}
	if isDelayed {
		t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
			"is delayed\n")
	}

	delayedBlockChild, err := PrepareBlockForTest(dag1, []*daghash.Hash{delayedBlock.BlockHash()}, nil)
	if err != nil {
		t.Fatalf("error in PrepareBlockForTest: %s", err)
	}

	// Here the actual test begins. We add a delayed block and
	// its child and check that they are not added to the DAG,
	// and check that they're added only if we add a new block
	// after the delayed block timestamp is valid.
	dag2, teardownFunc2, err := DAGSetup("TestProcessDelayedBlocks2", Config{
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
		t.Fatalf("Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc2()
	dag2.timeSource = &fakeTimeSource{initialTime}

	isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
	if err != nil {
		t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
	}
	if isOrphan {
		t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
			"is an orphan\n")
	}
	if !isDelayed {
		t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
			"is not delayed\n")
	}

	if dag2.IsInDAG(delayedBlock.BlockHash()) {
		t.Errorf("dag.IsInDAG should return false for a delayed block")
	}
	if !dag2.IsKnownBlock(delayedBlock.BlockHash()) {
		t.Errorf("dag.IsKnownBlock should return true for a delayed block")
	}

	isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(delayedBlockChild), BFNoPoWCheck)
	if err != nil {
		t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
	}
	if isOrphan {
		t.Fatalf("ProcessBlock incorrectly returned delayedBlockChild " +
			"is an orphan\n")
	}
	if !isDelayed {
		t.Fatalf("ProcessBlock incorrectly returned delayedBlockChild " +
			"is not delayed\n")
	}

	if dag2.IsInDAG(delayedBlockChild.BlockHash()) {
		t.Errorf("dag.IsInDAG should return false for a child of a delayed block")
	}
	if !dag2.IsKnownBlock(delayedBlockChild.BlockHash()) {
		t.Errorf("dag.IsKnownBlock should return true for a child of a delayed block")
	}

	blockBeforeDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()}, nil)
	if err != nil {
		t.Fatalf("error in PrepareBlockForTest: %s", err)
	}
	isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(blockBeforeDelay), BFNoPoWCheck)
	if err != nil {
		t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
	}
	if isOrphan {
		t.Fatalf("ProcessBlock incorrectly returned blockBeforeDelay " +
			"is an orphan\n")
	}
	if isDelayed {
		t.Fatalf("ProcessBlock incorrectly returned blockBeforeDelay " +
			"is delayed\n")
	}

	if dag2.IsInDAG(delayedBlock.BlockHash()) {
		t.Errorf("delayedBlock shouldn't be added to the DAG because its time hasn't been reached yet")
	}
	if dag2.IsInDAG(delayedBlockChild.BlockHash()) {
		t.Errorf("delayedBlockChild shouldn't be added to the DAG because its parent is not in the DAG")
	}

	// We advance the clock to the point where delayedBlock timestamp is valid.
	secondsUntilDelayedBlockIsValid := delayedBlock.Header.Timestamp.Unix() - int64(dag2.TimestampDeviationTolerance) - dag2.AdjustedTime().Unix() + 1
	dag2.timeSource = &fakeTimeSource{initialTime.Add(time.Duration(secondsUntilDelayedBlockIsValid) * time.Second)}

	blockAfterDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()}, nil)
	if err != nil {
		t.Fatalf("error in PrepareBlockForTest: %s", err)
	}
	isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(blockAfterDelay), BFNoPoWCheck)
	if err != nil {
		t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
	}
	if isOrphan {
		t.Fatalf("ProcessBlock incorrectly returned blockAfterDelay " +
			"is an orphan\n")
	}
	if isDelayed {
		t.Fatalf("ProcessBlock incorrectly returned blockAfterDelay " +
			"is delayed\n")
	}

	if !dag2.IsInDAG(delayedBlock.BlockHash()) {
		t.Fatalf("delayedBlock should be added to the DAG because its time has been reached")
	}
	if !dag2.IsInDAG(delayedBlockChild.BlockHash()) {
		t.Errorf("delayedBlockChild should be added to the DAG because its parent has been added to the DAG")
	}
}
577
blockdag/reachability.go
Normal file
@@ -0,0 +1,577 @@
package blockdag

import (
	"fmt"
	"github.com/pkg/errors"
	"math"
	"strings"
	"time"
)

// reachabilityInterval represents an interval to be used within the
// tree reachability algorithm. See reachabilityTreeNode for further
// details.
type reachabilityInterval struct {
	start uint64
	end   uint64
}

func newReachabilityInterval(start uint64, end uint64) *reachabilityInterval {
	return &reachabilityInterval{start: start, end: end}
}

// size returns the size of this interval. Note that intervals are
// inclusive from both sides.
func (ri *reachabilityInterval) size() uint64 {
	return ri.end - ri.start + 1
}

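Because both endpoints are inclusive, a single-point interval already has size 1. A quick illustration (values made up):

fmt.Println(newReachabilityInterval(1, 10).size()) // 10
fmt.Println(newReachabilityInterval(5, 5).size())  // 1, not 0
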
// splitInHalf splits this interval by a fraction of 0.5.
|
||||
// See splitFraction for further details.
|
||||
func (ri *reachabilityInterval) splitInHalf() (
|
||||
left *reachabilityInterval, right *reachabilityInterval, err error) {
|
||||
|
||||
return ri.splitFraction(0.5)
|
||||
}
|
||||
|
||||
// splitFraction splits this interval to two parts such that their
|
||||
// union is equal to the original interval and the first (left) part
|
||||
// contains the given fraction of the original interval's size.
|
||||
// Note: if the split results in fractional parts, this method rounds
|
||||
// the first part up and the last part down.
|
||||
func (ri *reachabilityInterval) splitFraction(fraction float64) (
|
||||
left *reachabilityInterval, right *reachabilityInterval, err error) {
|
||||
|
||||
if fraction < 0 || fraction > 1 {
|
||||
return nil, nil, errors.Errorf("fraction must be between 0 and 1")
|
||||
}
|
||||
if ri.size() == 0 {
|
||||
return nil, nil, errors.Errorf("cannot split an empty interval")
|
||||
}
|
||||
|
||||
allocationSize := uint64(math.Ceil(float64(ri.size()) * fraction))
|
||||
left = newReachabilityInterval(ri.start, ri.start+allocationSize-1)
|
||||
right = newReachabilityInterval(ri.start+allocationSize, ri.end)
|
||||
return left, right, nil
|
||||
}
|
||||
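
splitFraction's rounding is easy to pin down with a companion test. The following is a hypothetical addition (not part of this diff) that relies only on the functions above; on an odd-sized interval the left part rounds up:

package blockdag

import "testing"

func TestSplitFractionRounding(t *testing.T) {
	left, right, err := newReachabilityInterval(1, 99).splitFraction(0.5)
	if err != nil {
		t.Fatal(err)
	}
	// [1,99] has size 99; the left part rounds up to 50, the right gets 49.
	if left.size() != 50 || right.size() != 49 {
		t.Fatalf("want sizes 50/49, got %d/%d", left.size(), right.size())
	}
}
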
// splitExact splits this interval into exactly |sizes| parts where
// |part_i| = sizes[i]. This method expects sum(sizes) to be exactly
// equal to the interval's size.
func (ri *reachabilityInterval) splitExact(sizes []uint64) ([]*reachabilityInterval, error) {
	sizesSum := uint64(0)
	for _, size := range sizes {
		sizesSum += size
	}
	if sizesSum != ri.size() {
		return nil, errors.Errorf("sum of sizes must be equal to the interval's size")
	}

	intervals := make([]*reachabilityInterval, len(sizes))
	start := ri.start
	for i, size := range sizes {
		intervals[i] = newReachabilityInterval(start, start+size-1)
		start += size
	}
	return intervals, nil
}

// splitWithExponentialBias splits this interval into |sizes| parts
// by the allocation rule described below. This method expects sum(sizes)
// to be smaller than or equal to the interval's size. Every part_i is
// allocated at least sizes[i] capacity. The remaining budget is
// split by an exponentially biased rule described below.
//
// This rule follows the GHOSTDAG protocol behavior, where the child
// with the largest subtree is expected to dominate the competition
// for new blocks and thus grow the most. However, we may need to
// add slack for non-largest subtrees in order to make CPU reindexing
// attacks not worthwhile.
func (ri *reachabilityInterval) splitWithExponentialBias(sizes []uint64) ([]*reachabilityInterval, error) {
	intervalSize := ri.size()
	sizesSum := uint64(0)
	for _, size := range sizes {
		sizesSum += size
	}
	if sizesSum > intervalSize {
		return nil, errors.Errorf("sum of sizes must be less than or equal to the interval's size")
	}
	if sizesSum == intervalSize {
		return ri.splitExact(sizes)
	}

	// Add a fractional bias to every size in the given sizes
	totalBias := intervalSize - sizesSum
	remainingBias := totalBias
	biasedSizes := make([]uint64, len(sizes))
	fractions := exponentialFractions(sizes)
	for i, fraction := range fractions {
		var bias uint64
		if i == len(fractions)-1 {
			bias = remainingBias
		} else {
			bias = uint64(math.Round(float64(totalBias) * fraction))
			if bias > remainingBias {
				bias = remainingBias
			}
		}
		biasedSizes[i] = sizes[i] + bias
		remainingBias -= bias
	}
	return ri.splitExact(biasedSizes)
}
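
To see how strongly the bias favors the largest subtree, consider the interval [1,10000] with subtree sizes {10, 10, 20}: the roughly 9,960 spare indices go almost entirely to the last part. A hypothetical companion test (the expected intervals mirror a case from TestSplitWithExponentialBias later in this diff):

package blockdag

import "testing"

func TestExponentialBiasFavorsLargestSubtree(t *testing.T) {
	intervals, err := newReachabilityInterval(1, 10_000).
		splitWithExponentialBias([]uint64{10, 10, 20})
	if err != nil {
		t.Fatal(err)
	}
	// Expected allocation: [1,20] [21,40] [41,10000]
	if intervals[2].start != 41 || intervals[2].end != 10_000 {
		t.Fatalf("unexpected last interval: %s", intervals[2])
	}
}
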
// exponentialFractions returns a fraction of each size in sizes
// as follows:
//   fraction[i] = 2^size[i] / sum_j(2^size[j])
// In the code below the above equation is divided by 2^max(size)
// to avoid exploding numbers. Note that in 1 / 2^(max(size)-size[i])
// we divide 1 by a potentially very large number, which will
// result in loss of float precision. This is not a problem - all
// numbers close to 0 bear effectively the same weight.
func exponentialFractions(sizes []uint64) []float64 {
	maxSize := uint64(0)
	for _, size := range sizes {
		if size > maxSize {
			maxSize = size
		}
	}
	fractions := make([]float64, len(sizes))
	for i, size := range sizes {
		fractions[i] = 1 / math.Pow(2, float64(maxSize-size))
	}
	fractionsSum := float64(0)
	for _, fraction := range fractions {
		fractionsSum += fraction
	}
	for i, fraction := range fractions {
		fractions[i] = fraction / fractionsSum
	}
	return fractions
}

// isAncestorOf checks if this interval's node is a reachability tree
// ancestor of the other interval's node. The condition below relies on the
// property of reachability intervals that intervals are either completely disjoint,
// or one strictly contains the other.
func (ri *reachabilityInterval) isAncestorOf(other *reachabilityInterval) bool {
	return ri.start <= other.end && other.end <= ri.end
}
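
This one-line containment check is sound only because sibling subtrees receive disjoint intervals while ancestors strictly contain their descendants, so checking the end point alone suffices. A standalone illustration with hypothetical plain structs:

package main

import "fmt"

type interval struct{ start, end uint64 }

// isAncestorOf mirrors the check above: sufficient because tree intervals
// are either disjoint or strictly nested.
func (a interval) isAncestorOf(b interval) bool {
	return a.start <= b.end && b.end <= a.end
}

func main() {
	root := interval{1, 100}
	child := interval{1, 50}
	sibling := interval{51, 100}
	fmt.Println(root.isAncestorOf(child))    // true: nested
	fmt.Println(child.isAncestorOf(sibling)) // false: disjoint
}
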
// String returns a string representation of the interval.
func (ri *reachabilityInterval) String() string {
	return fmt.Sprintf("[%d,%d]", ri.start, ri.end)
}

// reachabilityTreeNode represents a node in the reachability tree
// of some DAG block. It mainly provides the ability to query *tree*
// reachability with O(1) query time. It does so by managing an
// index interval for each node and making sure all nodes in its
// subtree are indexed within the interval, so the query
// B ∈ subtree(A) simply becomes B.interval ⊂ A.interval.
//
// The main challenge of maintaining such intervals is that our tree
// is ever-growing, and as such pre-allocated intervals may
// not suffice for future events. This is where the reindexing
// algorithm below comes into play.
// We use the reasonable assumption that the initial root interval
// (e.g., [0, 2^64-1]) should always suffice for any practical use-
// case, and so reindexing should always succeed unless more than
// 2^64 blocks are added to the DAG/tree.
type reachabilityTreeNode struct {
	blockNode *blockNode

	children []*reachabilityTreeNode
	parent   *reachabilityTreeNode

	// interval is the index interval containing all intervals of
	// blocks in this node's subtree
	interval *reachabilityInterval

	// remainingInterval is the not-yet allocated interval (within
	// this node's interval) awaiting new children
	remainingInterval *reachabilityInterval
}

func newReachabilityTreeNode(blockNode *blockNode) *reachabilityTreeNode {
	// Please see the comment above reachabilityTreeNode to understand why
	// we use these initial values.
	interval := newReachabilityInterval(1, math.MaxUint64-1)
	// We subtract 1 from the end of the remaining interval to prevent the node from allocating
	// the entire interval to its child, so that its interval would *strictly* contain the interval of its child.
	remainingInterval := newReachabilityInterval(interval.start, interval.end-1)
	return &reachabilityTreeNode{blockNode: blockNode, interval: interval, remainingInterval: remainingInterval}
}

// addChild adds child to this tree node. If this node has no
// remaining interval to allocate, a reindexing is triggered.
// This method returns a list of reachabilityTreeNodes modified
// by it.
func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode) ([]*reachabilityTreeNode, error) {
	// Set the parent-child relationship
	rtn.children = append(rtn.children, child)
	child.parent = rtn

	// No allocation space left -- reindex
	if rtn.remainingInterval.size() == 0 {
		reindexStartTime := time.Now()
		modifiedNodes, err := rtn.reindexIntervals()
		if err != nil {
			return nil, err
		}
		reindexTimeElapsed := time.Since(reindexStartTime)
		log.Debugf("Reachability reindex triggered for "+
			"block %s. Modified %d tree nodes and took %dms.",
			rtn.blockNode.hash, len(modifiedNodes), reindexTimeElapsed.Milliseconds())
		return modifiedNodes, nil
	}

	// Allocate from the remaining space
	allocated, remaining, err := rtn.remainingInterval.splitInHalf()
	if err != nil {
		return nil, err
	}
	child.setInterval(allocated)
	rtn.remainingInterval = remaining
	return []*reachabilityTreeNode{rtn, child}, nil
}
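
Since each addChild hands the new child half of the parent's remaining interval, and setInterval reserves one index for the node itself, a chain growing under an interval of size 100 exhausts its allocation after six children; the seventh triggers the reindex. A standalone sketch of that arithmetic (this is exactly the scenario TestAddChild, later in this diff, exercises):

package main

import "fmt"

func main() {
	// Remaining interval of a fresh node whose interval has size 100.
	remaining := uint64(99)
	children := 0
	for remaining > 0 {
		allocated := (remaining + 1) / 2 // splitInHalf rounds the left part up
		remaining = allocated - 1        // the child reserves one index for itself
		children++
	}
	fmt.Println(children) // 6 children fit; the 7th addChild must reindex
}
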
// setInterval sets the reachability interval for this node.
func (rtn *reachabilityTreeNode) setInterval(interval *reachabilityInterval) {
	rtn.interval = interval

	// Reserve a single interval index for the current node. This
	// is necessary to ensure that ancestor intervals are strictly
	// supersets of any descendant intervals and not equal
	rtn.remainingInterval = newReachabilityInterval(interval.start, interval.end-1)
}

// reindexIntervals traverses the reachability subtree that's
// defined by this node and reallocates reachability interval space
// such that another reindexing is unlikely to occur shortly
// thereafter. It does this by traversing up the reachability
// tree from this node until it finds an ancestor whose interval
// size is no smaller than its subtree size. See propagateInterval
// for further details.
// This method returns a list of reachabilityTreeNodes modified by it.
func (rtn *reachabilityTreeNode) reindexIntervals() ([]*reachabilityTreeNode, error) {
	current := rtn

	// Initial interval and subtree sizes
	intervalSize := current.interval.size()
	subTreeSizeMap := make(map[*reachabilityTreeNode]uint64)
	current.countSubtrees(subTreeSizeMap)
	currentSubtreeSize := subTreeSizeMap[current]

	// Find the first ancestor that has sufficient interval space
	for intervalSize < currentSubtreeSize {
		if current.parent == nil {
			// If we ended up here it means that there are more
			// than 2^64 blocks, which shouldn't ever happen.
			return nil, errors.Errorf("missing tree " +
				"parent during reindexing. Theoretically, this " +
				"should only ever happen if there are more " +
				"than 2^64 blocks in the DAG.")
		}
		current = current.parent
		intervalSize = current.interval.size()
		current.countSubtrees(subTreeSizeMap)
		currentSubtreeSize = subTreeSizeMap[current]
	}

	// Propagate the interval to the subtree
	return current.propagateInterval(subTreeSizeMap)
}

// countSubtrees counts the size of each subtree under this node,
// and populates the provided subTreeSizeMap with the results.
// It is equivalent to the following recursive implementation:
//
//	func (rtn *reachabilityTreeNode) countSubtrees() uint64 {
//		subtreeSize := uint64(0)
//		for _, child := range rtn.children {
//			subtreeSize += child.countSubtrees()
//		}
//		return subtreeSize + 1
//	}
//
// However, we are expecting (linearly) deep trees, and so a
// recursive stack-based approach is inefficient and will hit
// recursion limits. Instead, the same logic was implemented
// using a (queue-based) BFS method. At a high level, the
// algorithm uses BFS for reaching all leaves and pushes
// intermediate updates from leaves via parent chains until all
// size information is gathered at the root of the operation
// (i.e. at rtn).
func (rtn *reachabilityTreeNode) countSubtrees(subTreeSizeMap map[*reachabilityTreeNode]uint64) {
	queue := []*reachabilityTreeNode{rtn}
	calculatedChildrenCount := make(map[*reachabilityTreeNode]uint64)
	for len(queue) > 0 {
		var current *reachabilityTreeNode
		current, queue = queue[0], queue[1:]
		if len(current.children) == 0 {
			// We reached a leaf
			subTreeSizeMap[current] = 1
		} else if calculatedChildrenCount[current] <= uint64(len(current.children)) {
			// We haven't yet calculated the subtree size of
			// the current node. Add all its children to the
			// queue
			queue = append(queue, current.children...)
			continue
		}

		// We reached a leaf or a pre-calculated subtree.
		// Push information up
		for current != rtn {
			current = current.parent
			calculatedChildrenCount[current]++
			if calculatedChildrenCount[current] != uint64(len(current.children)) {
				// Not all subtrees of the current node are ready
				break
			}
			// All children of `current` have calculated their subtree size.
			// Sum them all together and add 1 to get the subtree size of
			// `current`.
			childSubtreeSizeSum := uint64(0)
			for _, child := range current.children {
				childSubtreeSizeSum += subTreeSizeMap[child]
			}
			subTreeSizeMap[current] = childSubtreeSizeSum + 1
		}
	}
}

// propagateInterval propagates the new interval using a BFS traversal.
// Subtree intervals are recursively allocated according to subtree sizes and
// the allocation rule in splitWithExponentialBias. This method returns
// a list of reachabilityTreeNodes modified by it.
func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabilityTreeNode]uint64) ([]*reachabilityTreeNode, error) {
	// We set the interval to reset its remainingInterval, so we could reallocate it while reindexing.
	rtn.setInterval(rtn.interval)
	queue := []*reachabilityTreeNode{rtn}
	var modifiedNodes []*reachabilityTreeNode
	for len(queue) > 0 {
		var current *reachabilityTreeNode
		current, queue = queue[0], queue[1:]
		if len(current.children) > 0 {
			sizes := make([]uint64, len(current.children))
			for i, child := range current.children {
				sizes[i] = subTreeSizeMap[child]
			}
			intervals, err := current.remainingInterval.splitWithExponentialBias(sizes)
			if err != nil {
				return nil, err
			}
			for i, child := range current.children {
				childInterval := intervals[i]
				child.setInterval(childInterval)
				queue = append(queue, child)
			}

			// Empty up the remaining interval
			current.remainingInterval.start = current.remainingInterval.end + 1
		}

		modifiedNodes = append(modifiedNodes, current)
	}
	return modifiedNodes, nil
}

// isAncestorOf checks if this node is a reachability tree ancestor
// of the other node.
func (rtn *reachabilityTreeNode) isAncestorOf(other *reachabilityTreeNode) bool {
	return rtn.interval.isAncestorOf(other.interval)
}

// String returns a string representation of a reachability tree node
// and its children.
func (rtn *reachabilityTreeNode) String() string {
	queue := []*reachabilityTreeNode{rtn}
	lines := []string{rtn.interval.String()}
	for len(queue) > 0 {
		var current *reachabilityTreeNode
		current, queue = queue[0], queue[1:]
		if len(current.children) == 0 {
			continue
		}

		line := ""
		for _, child := range current.children {
			line += child.interval.String()
			queue = append(queue, child)
		}
		lines = append([]string{line}, lines...)
	}
	return strings.Join(lines, "\n")
}

// futureCoveringBlockSet represents a collection of blocks in the future of
// a certain block. Once a block B is added to the DAG, every block A_i in
// B's selected parent anticone must register B in its futureCoveringBlockSet.
// This makes it relatively quick (O(log(|futureCoveringBlockSet|))) to query
// whether B is a descendant (is in the "future") of any block that previously
// registered it.
//
// Note that futureCoveringBlockSet is meant to be queried only if B is not
// a reachability tree descendant of the block in question, as reachability
// tree queries are always O(1).
//
// See insertBlock, isInFuture, and dag.isAncestorOf for further details.
type futureCoveringBlockSet []*futureCoveringBlock

// futureCoveringBlock represents a block in the future of some other block.
type futureCoveringBlock struct {
	blockNode *blockNode
	treeNode  *reachabilityTreeNode
}

// insertBlock inserts the given block into this futureCoveringBlockSet
// while keeping futureCoveringBlockSet ordered by interval.
// If a block B ∈ futureCoveringBlockSet exists such that its interval
// contains block's interval, block need not be added. If block's
// interval contains B's interval, it replaces it.
//
// Notes:
// * Intervals never intersect unless one contains the other
//   (this follows from the tree structure and the indexing rule).
// * Since futureCoveringBlockSet is kept ordered, a binary search can be
//   used for insertion/queries.
// * Although reindexing may change a block's interval, the
//   is-superset relation will by definition always be preserved.
func (fb *futureCoveringBlockSet) insertBlock(block *futureCoveringBlock) {
	blockInterval := block.treeNode.interval
	i := fb.findIndex(block)
	if i > 0 {
		candidate := (*fb)[i-1]
		candidateInterval := candidate.treeNode.interval
		if candidateInterval.isAncestorOf(blockInterval) {
			// candidate is an ancestor of block, no need to insert
			return
		}
		if blockInterval.isAncestorOf(candidateInterval) {
			// block is an ancestor of candidate, and can thus replace it
			(*fb)[i-1] = block
			return
		}
	}

	// Insert block at the correct index to maintain futureCoveringBlockSet as
	// a sorted-by-interval list.
	// Note that i might be equal to len(futureCoveringBlockSet)
	left := (*fb)[:i]
	right := append([]*futureCoveringBlock{block}, (*fb)[i:]...)
	*fb = append(left, right...)
}

// isInFuture resolves whether the given block is in the subtree of
// any block in this futureCoveringBlockSet.
// See the insertBlock method for the complementary insertion behavior.
//
// Like the insert method, this method also relies on the fact that
// futureCoveringBlockSet is kept ordered by interval to efficiently perform a
// binary search over futureCoveringBlockSet and answer the query in
// O(log(|futureCoveringBlockSet|)).
func (fb futureCoveringBlockSet) isInFuture(block *futureCoveringBlock) bool {
	i := fb.findIndex(block)
	if i == 0 {
		// No candidate to contain block
		return false
	}

	candidate := fb[i-1]
	return candidate.treeNode.isAncestorOf(block.treeNode)
}

// findIndex returns the index at which the given block should be inserted:
// one past the last block whose interval start does not exceed the given
// block's interval end.
func (fb futureCoveringBlockSet) findIndex(block *futureCoveringBlock) int {
	blockInterval := block.treeNode.interval
	end := blockInterval.end

	low := 0
	high := len(fb)
	for low < high {
		middle := (low + high) / 2
		middleInterval := fb[middle].treeNode.interval
		if end < middleInterval.start {
			high = middle
		} else {
			low = middle + 1
		}
	}
	return low
}
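
findIndex is a textbook upper-bound binary search, and the same result can be had from the standard library, which makes the invariant easier to state: it returns the number of intervals whose start does not exceed the query block's end. A standalone sketch using sort.Search over the interval starts from TestIsInFuture (later in this diff):

package main

import (
	"fmt"
	"sort"
)

// findIndex returns the insertion point for a block whose interval ends at
// `end`, given interval starts sorted ascending - equivalent to the
// hand-rolled loop above.
func findIndex(starts []uint64, end uint64) int {
	return sort.Search(len(starts), func(i int) bool { return end < starts[i] })
}

func main() {
	starts := []uint64{2, 4, 67, 657, 1000, 1920}
	fmt.Println(findIndex(starts, 7))    // 2: candidate is the interval starting at 4
	fmt.Println(findIndex(starts, 1))    // 0: no candidate below the block
	fmt.Println(findIndex(starts, 1920)) // 6: candidate is the interval starting at 1920
}
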
// String returns a string representation of the intervals in this futureCoveringBlockSet.
func (fb futureCoveringBlockSet) String() string {
	intervalsString := ""
	for _, block := range fb {
		intervalsString += block.treeNode.interval.String()
	}
	return intervalsString
}

func (dag *BlockDAG) updateReachability(node *blockNode, selectedParentAnticone []*blockNode) error {
	// Allocate a new reachability tree node
	newTreeNode := newReachabilityTreeNode(node)

	// If this is the genesis node, simply initialize it and return
	if node.isGenesis() {
		dag.reachabilityStore.setTreeNode(newTreeNode)
		return nil
	}

	// Insert the node into the selected parent's reachability tree
	selectedParentTreeNode, err := dag.reachabilityStore.treeNodeByBlockNode(node.selectedParent)
	if err != nil {
		return err
	}
	modifiedTreeNodes, err := selectedParentTreeNode.addChild(newTreeNode)
	if err != nil {
		return err
	}
	for _, modifiedTreeNode := range modifiedTreeNodes {
		dag.reachabilityStore.setTreeNode(modifiedTreeNode)
	}

	// Add the block to the futureCoveringSets of all the blocks
	// in the selected parent's anticone
	for _, current := range selectedParentAnticone {
		currentFutureCoveringSet, err := dag.reachabilityStore.futureCoveringSetByBlockNode(current)
		if err != nil {
			return err
		}
		currentFutureCoveringSet.insertBlock(&futureCoveringBlock{blockNode: node, treeNode: newTreeNode})
		err = dag.reachabilityStore.setFutureCoveringSet(current, currentFutureCoveringSet)
		if err != nil {
			return err
		}
	}
	return nil
}

// isAncestorOf returns true if this node is in the past of the other node
// in the DAG. The complexity of this method is O(log(|this.futureCoveringBlockSet|))
func (dag *BlockDAG) isAncestorOf(this *blockNode, other *blockNode) (bool, error) {
	// First, check if this node is a reachability tree ancestor of the
	// other node
	thisTreeNode, err := dag.reachabilityStore.treeNodeByBlockNode(this)
	if err != nil {
		return false, err
	}
	otherTreeNode, err := dag.reachabilityStore.treeNodeByBlockNode(other)
	if err != nil {
		return false, err
	}
	if thisTreeNode.isAncestorOf(otherTreeNode) {
		return true, nil
	}

	// Otherwise, use previously registered future blocks to complete the
	// reachability test
	thisFutureCoveringSet, err := dag.reachabilityStore.futureCoveringSetByBlockNode(this)
	if err != nil {
		return false, err
	}
	return thisFutureCoveringSet.isInFuture(&futureCoveringBlock{blockNode: other, treeNode: otherTreeNode}), nil
}
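
Reachability queries therefore resolve in two phases: a constant-time interval containment check on the tree, and, only on a miss, an O(log n) binary search through the future covering set. A hypothetical call site inside the package (a sketch; names assumed):

// Hypothetical in-package usage of the two-phase query above.
isAncestor, err := dag.isAncestorOf(blockA, blockB)
if err != nil {
	return err
}
if isAncestor {
	// blockA is in blockB's past; blockB is in blockA's future.
}
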
648 blockdag/reachability_test.go Normal file
@@ -0,0 +1,648 @@
package blockdag

import (
	"reflect"
	"strings"
	"testing"
)

func TestAddChild(t *testing.T) {
	// Scenario 1: test addChild in a chain
	// root -> a -> b -> c...
	// Create the root node of a new reachability tree
	root := newReachabilityTreeNode(&blockNode{})
	root.setInterval(newReachabilityInterval(1, 100))

	// Add a chain of child nodes just before a reindex occurs (2^6=64 < 100)
	currentTip := root
	for i := 0; i < 6; i++ {
		node := newReachabilityTreeNode(&blockNode{})
		modifiedNodes, err := currentTip.addChild(node)
		if err != nil {
			t.Fatalf("TestAddChild: addChild failed: %s", err)
		}

		// Expect only the node and its parent to be affected
		expectedModifiedNodes := []*reachabilityTreeNode{currentTip, node}
		if !reflect.DeepEqual(modifiedNodes, expectedModifiedNodes) {
			t.Fatalf("TestAddChild: unexpected modifiedNodes. "+
				"want: %s, got: %s", expectedModifiedNodes, modifiedNodes)
		}

		currentTip = node
	}

	// Add another node to the tip of the chain to trigger a reindex (100 < 2^7=128)
	lastChild := newReachabilityTreeNode(&blockNode{})
	modifiedNodes, err := currentTip.addChild(lastChild)
	if err != nil {
		t.Fatalf("TestAddChild: addChild failed: %s", err)
	}

	// Expect more than just the node and its parent to be modified, but not
	// all the nodes
	if len(modifiedNodes) <= 2 || len(modifiedNodes) >= 7 {
		t.Fatalf("TestAddChild: unexpected number of modifiedNodes.")
	}

	// Expect the tip to have an interval of 1 and a remaining interval of 0
	tipInterval := lastChild.interval.size()
	if tipInterval != 1 {
		t.Fatalf("TestAddChild: unexpected tip interval size: want: 1, got: %d", tipInterval)
	}
	tipRemainingInterval := lastChild.remainingInterval.size()
	if tipRemainingInterval != 0 {
		t.Fatalf("TestAddChild: unexpected tip remainingInterval size: want: 0, got: %d", tipRemainingInterval)
	}

	// Expect all nodes to be descendant nodes of root
	currentNode := currentTip
	for currentNode != nil {
		if !root.isAncestorOf(currentNode) {
			t.Fatalf("TestAddChild: currentNode is not a descendant of root")
		}
		currentNode = currentNode.parent
	}

	// Scenario 2: test addChild where all nodes are direct descendants of root
	// root -> a, b, c...
	// Create the root node of a new reachability tree
	root = newReachabilityTreeNode(&blockNode{})
	root.setInterval(newReachabilityInterval(1, 100))

	// Add child nodes to root just before a reindex occurs (2^6=64 < 100)
	childNodes := make([]*reachabilityTreeNode, 6)
	for i := 0; i < len(childNodes); i++ {
		childNodes[i] = newReachabilityTreeNode(&blockNode{})
		modifiedNodes, err := root.addChild(childNodes[i])
		if err != nil {
			t.Fatalf("TestAddChild: addChild failed: %s", err)
		}

		// Expect only the node and the root to be affected
		expectedModifiedNodes := []*reachabilityTreeNode{root, childNodes[i]}
		if !reflect.DeepEqual(modifiedNodes, expectedModifiedNodes) {
			t.Fatalf("TestAddChild: unexpected modifiedNodes. "+
				"want: %s, got: %s", expectedModifiedNodes, modifiedNodes)
		}
	}

	// Add another node to the root to trigger a reindex (100 < 2^7=128)
	lastChild = newReachabilityTreeNode(&blockNode{})
	modifiedNodes, err = root.addChild(lastChild)
	if err != nil {
		t.Fatalf("TestAddChild: addChild failed: %s", err)
	}

	// Expect more than just the node and the root to be modified, but not
	// all the nodes
	if len(modifiedNodes) <= 2 || len(modifiedNodes) >= 7 {
		t.Fatalf("TestAddChild: unexpected number of modifiedNodes.")
	}

	// Expect the last-added child to have an interval of 1 and a remaining interval of 0
	lastChildInterval := lastChild.interval.size()
	if lastChildInterval != 1 {
		t.Fatalf("TestAddChild: unexpected lastChild interval size: want: 1, got: %d", lastChildInterval)
	}
	lastChildRemainingInterval := lastChild.remainingInterval.size()
	if lastChildRemainingInterval != 0 {
		t.Fatalf("TestAddChild: unexpected lastChild remainingInterval size: want: 0, got: %d", lastChildRemainingInterval)
	}

	// Expect all nodes to be descendant nodes of root
	for _, childNode := range childNodes {
		if !root.isAncestorOf(childNode) {
			t.Fatalf("TestAddChild: childNode is not a descendant of root")
		}
	}
}

func TestSplitFraction(t *testing.T) {
	tests := []struct {
		interval      *reachabilityInterval
		fraction      float64
		expectedLeft  *reachabilityInterval
		expectedRight *reachabilityInterval
	}{
		{
			interval:      newReachabilityInterval(1, 100),
			fraction:      0.5,
			expectedLeft:  newReachabilityInterval(1, 50),
			expectedRight: newReachabilityInterval(51, 100),
		},
		{
			interval:      newReachabilityInterval(2, 100),
			fraction:      0.5,
			expectedLeft:  newReachabilityInterval(2, 51),
			expectedRight: newReachabilityInterval(52, 100),
		},
		{
			interval:      newReachabilityInterval(1, 99),
			fraction:      0.5,
			expectedLeft:  newReachabilityInterval(1, 50),
			expectedRight: newReachabilityInterval(51, 99),
		},
		{
			interval:      newReachabilityInterval(1, 100),
			fraction:      0.2,
			expectedLeft:  newReachabilityInterval(1, 20),
			expectedRight: newReachabilityInterval(21, 100),
		},
		{
			interval:      newReachabilityInterval(1, 100),
			fraction:      0,
			expectedLeft:  newReachabilityInterval(1, 0),
			expectedRight: newReachabilityInterval(1, 100),
		},
		{
			interval:      newReachabilityInterval(1, 100),
			fraction:      1,
			expectedLeft:  newReachabilityInterval(1, 100),
			expectedRight: newReachabilityInterval(101, 100),
		},
	}

	for i, test := range tests {
		left, right, err := test.interval.splitFraction(test.fraction)
		if err != nil {
			t.Fatalf("TestSplitFraction: splitFraction unexpectedly failed in test #%d: %s", i, err)
		}
		if !reflect.DeepEqual(left, test.expectedLeft) {
			t.Errorf("TestSplitFraction: unexpected left in test #%d. "+
				"want: %s, got: %s", i, test.expectedLeft, left)
		}
		if !reflect.DeepEqual(right, test.expectedRight) {
			t.Errorf("TestSplitFraction: unexpected right in test #%d. "+
				"want: %s, got: %s", i, test.expectedRight, right)
		}
	}
}

func TestSplitExact(t *testing.T) {
	tests := []struct {
		interval          *reachabilityInterval
		sizes             []uint64
		expectedIntervals []*reachabilityInterval
	}{
		{
			interval: newReachabilityInterval(1, 100),
			sizes:    []uint64{100},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 100),
			},
		},
		{
			interval: newReachabilityInterval(1, 100),
			sizes:    []uint64{50, 50},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 50),
				newReachabilityInterval(51, 100),
			},
		},
		{
			interval: newReachabilityInterval(1, 100),
			sizes:    []uint64{10, 20, 30, 40},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 10),
				newReachabilityInterval(11, 30),
				newReachabilityInterval(31, 60),
				newReachabilityInterval(61, 100),
			},
		},
		{
			interval: newReachabilityInterval(1, 100),
			sizes:    []uint64{0, 100},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 0),
				newReachabilityInterval(1, 100),
			},
		},
		{
			interval: newReachabilityInterval(1, 100),
			sizes:    []uint64{100, 0},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 100),
				newReachabilityInterval(101, 100),
			},
		},
	}

	for i, test := range tests {
		intervals, err := test.interval.splitExact(test.sizes)
		if err != nil {
			t.Fatalf("TestSplitExact: splitExact unexpectedly failed in test #%d: %s", i, err)
		}
		if !reflect.DeepEqual(intervals, test.expectedIntervals) {
			t.Errorf("TestSplitExact: unexpected intervals in test #%d. "+
				"want: %s, got: %s", i, test.expectedIntervals, intervals)
		}
	}
}

func TestSplitWithExponentialBias(t *testing.T) {
	tests := []struct {
		interval          *reachabilityInterval
		sizes             []uint64
		expectedIntervals []*reachabilityInterval
	}{
		{
			interval: newReachabilityInterval(1, 100),
			sizes:    []uint64{100},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 100),
			},
		},
		{
			interval: newReachabilityInterval(1, 100),
			sizes:    []uint64{50, 50},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 50),
				newReachabilityInterval(51, 100),
			},
		},
		{
			interval: newReachabilityInterval(1, 100),
			sizes:    []uint64{10, 20, 30, 40},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 10),
				newReachabilityInterval(11, 30),
				newReachabilityInterval(31, 60),
				newReachabilityInterval(61, 100),
			},
		},
		{
			interval: newReachabilityInterval(1, 100),
			sizes:    []uint64{25, 25},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 50),
				newReachabilityInterval(51, 100),
			},
		},
		{
			interval: newReachabilityInterval(1, 100),
			sizes:    []uint64{1, 1},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 50),
				newReachabilityInterval(51, 100),
			},
		},
		{
			interval: newReachabilityInterval(1, 100),
			sizes:    []uint64{33, 33, 33},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 33),
				newReachabilityInterval(34, 66),
				newReachabilityInterval(67, 100),
			},
		},
		{
			interval: newReachabilityInterval(1, 100),
			sizes:    []uint64{10, 15, 25},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 10),
				newReachabilityInterval(11, 25),
				newReachabilityInterval(26, 100),
			},
		},
		{
			interval: newReachabilityInterval(1, 100),
			sizes:    []uint64{25, 15, 10},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 75),
				newReachabilityInterval(76, 90),
				newReachabilityInterval(91, 100),
			},
		},
		{
			interval: newReachabilityInterval(1, 10_000),
			sizes:    []uint64{10, 10, 20},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 20),
				newReachabilityInterval(21, 40),
				newReachabilityInterval(41, 10_000),
			},
		},
		{
			interval: newReachabilityInterval(1, 100_000),
			sizes:    []uint64{31_000, 31_000, 30_001},
			expectedIntervals: []*reachabilityInterval{
				newReachabilityInterval(1, 35_000),
				newReachabilityInterval(35_001, 69_999),
				newReachabilityInterval(70_000, 100_000),
			},
		},
	}

	for i, test := range tests {
		intervals, err := test.interval.splitWithExponentialBias(test.sizes)
		if err != nil {
			t.Fatalf("TestSplitWithExponentialBias: splitWithExponentialBias unexpectedly failed in test #%d: %s", i, err)
		}
		if !reflect.DeepEqual(intervals, test.expectedIntervals) {
			t.Errorf("TestSplitWithExponentialBias: unexpected intervals in test #%d. "+
				"want: %s, got: %s", i, test.expectedIntervals, intervals)
		}
	}
}

func TestIsInFuture(t *testing.T) {
	blocks := futureCoveringBlockSet{
		{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(2, 3)}},
		{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
		{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
		{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
		{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
		{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
	}

	tests := []struct {
		block          *futureCoveringBlock
		expectedResult bool
	}{
		{
			block:          &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 1)}},
			expectedResult: false,
		},
		{
			block:          &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(5, 7)}},
			expectedResult: true,
		},
		{
			block:          &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 76)}},
			expectedResult: true,
		},
		{
			block:          &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(78, 100)}},
			expectedResult: false,
		},
		{
			block:          &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1980, 2000)}},
			expectedResult: false,
		},
		{
			block:          &futureCoveringBlock{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1920)}},
			expectedResult: true,
		},
	}

	for i, test := range tests {
		result := blocks.isInFuture(test.block)
		if result != test.expectedResult {
			t.Errorf("TestIsInFuture: unexpected result in test #%d. Want: %t, got: %t",
				i, test.expectedResult, result)
		}
	}
}

func TestInsertBlock(t *testing.T) {
	blocks := futureCoveringBlockSet{
		{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
		{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
		{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
		{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
		{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
		{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
	}

	tests := []struct {
		toInsert       []*futureCoveringBlock
		expectedResult futureCoveringBlockSet
	}{
		{
			toInsert: []*futureCoveringBlock{
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(5, 7)}},
			},
			expectedResult: futureCoveringBlockSet{
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
			},
		},
		{
			toInsert: []*futureCoveringBlock{
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(65, 78)}},
			},
			expectedResult: futureCoveringBlockSet{
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(65, 78)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
			},
		},
		{
			toInsert: []*futureCoveringBlock{
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
			},
			expectedResult: futureCoveringBlockSet{
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
			},
		},
		{
			toInsert: []*futureCoveringBlock{
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)}},
			},
			expectedResult: futureCoveringBlockSet{
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1, 3)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(4, 67)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(67, 77)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(88, 97)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(657, 789)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1000, 1000)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(1920, 1921)}},
				{treeNode: &reachabilityTreeNode{interval: newReachabilityInterval(3000, 3010)}},
			},
		},
	}

	for i, test := range tests {
		// Create a clone of blocks so that we have a clean start for every test
		blocksClone := make(futureCoveringBlockSet, len(blocks))
		for i, block := range blocks {
			blocksClone[i] = block
		}

		for _, block := range test.toInsert {
			blocksClone.insertBlock(block)
		}
		if !reflect.DeepEqual(blocksClone, test.expectedResult) {
			t.Errorf("TestInsertBlock: unexpected result in test #%d. Want: %s, got: %s",
				i, test.expectedResult, blocksClone)
		}
	}
}

func TestSplitFractionErrors(t *testing.T) {
	interval := newReachabilityInterval(100, 200)

	// Negative fraction
	_, _, err := interval.splitFraction(-0.5)
	if err == nil {
		t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " +
			"didn't return an error for a negative fraction")
	}
	expectedErrSubstring := "fraction must be between 0 and 1"
	if !strings.Contains(err.Error(), expectedErrSubstring) {
		t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+
			"for a negative fraction. "+
			"Want: %s, got: %s", expectedErrSubstring, err)
	}

	// Fraction > 1
	_, _, err = interval.splitFraction(1.5)
	if err == nil {
		t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " +
			"didn't return an error for a fraction greater than 1")
	}
	expectedErrSubstring = "fraction must be between 0 and 1"
	if !strings.Contains(err.Error(), expectedErrSubstring) {
		t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+
			"for a fraction greater than 1. "+
			"Want: %s, got: %s", expectedErrSubstring, err)
	}

	// Splitting an empty interval
	emptyInterval := newReachabilityInterval(1, 0)
	_, _, err = emptyInterval.splitFraction(0.5)
	if err == nil {
		t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " +
			"didn't return an error for an empty interval")
	}
	expectedErrSubstring = "cannot split an empty interval"
	if !strings.Contains(err.Error(), expectedErrSubstring) {
		t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+
			"for an empty interval. "+
			"Want: %s, got: %s", expectedErrSubstring, err)
	}
}

func TestSplitExactErrors(t *testing.T) {
	interval := newReachabilityInterval(100, 199)

	// Sum of sizes greater than the size of the interval
	sizes := []uint64{50, 51}
	_, err := interval.splitExact(sizes)
	if err == nil {
		t.Fatalf("TestSplitExactErrors: splitExact unexpectedly " +
			"didn't return an error for (sum of sizes) > (size of interval)")
	}
	expectedErrSubstring := "sum of sizes must be equal to the interval's size"
	if !strings.Contains(err.Error(), expectedErrSubstring) {
		t.Fatalf("TestSplitExactErrors: splitExact returned wrong error "+
			"for (sum of sizes) > (size of interval). "+
			"Want: %s, got: %s", expectedErrSubstring, err)
	}

	// Sum of sizes smaller than the size of the interval
	sizes = []uint64{50, 49}
	_, err = interval.splitExact(sizes)
	if err == nil {
		t.Fatalf("TestSplitExactErrors: splitExact unexpectedly " +
			"didn't return an error for (sum of sizes) < (size of interval)")
	}
	expectedErrSubstring = "sum of sizes must be equal to the interval's size"
	if !strings.Contains(err.Error(), expectedErrSubstring) {
		t.Fatalf("TestSplitExactErrors: splitExact returned wrong error "+
			"for (sum of sizes) < (size of interval). "+
			"Want: %s, got: %s", expectedErrSubstring, err)
	}
}

func TestSplitWithExponentialBiasErrors(t *testing.T) {
	interval := newReachabilityInterval(100, 199)

	// Sum of sizes greater than the size of the interval
	sizes := []uint64{50, 51}
	_, err := interval.splitWithExponentialBias(sizes)
	if err == nil {
		t.Fatalf("TestSplitWithExponentialBiasErrors: splitWithExponentialBias " +
			"unexpectedly didn't return an error")
	}
	expectedErrSubstring := "sum of sizes must be less than or equal to the interval's size"
	if !strings.Contains(err.Error(), expectedErrSubstring) {
		t.Fatalf("TestSplitWithExponentialBiasErrors: splitWithExponentialBias "+
			"returned wrong error. Want: %s, got: %s", expectedErrSubstring, err)
	}
}

func TestReindexIntervalErrors(t *testing.T) {
	// Create a treeNode and give it size = 100
	treeNode := newReachabilityTreeNode(&blockNode{})
	treeNode.setInterval(newReachabilityInterval(0, 99))

	// Add a chain of 100 child treeNodes to treeNode
	var err error
	currentTreeNode := treeNode
	for i := 0; i < 100; i++ {
		childTreeNode := newReachabilityTreeNode(&blockNode{})
		_, err = currentTreeNode.addChild(childTreeNode)
		if err != nil {
			break
		}
		currentTreeNode = childTreeNode
	}

	// At the 100th addChild we expect a reindex. This reindex should
	// fail because our initial treeNode only has size = 100, and the
	// reindex requires size > 100.
	// This simulates the case when (somehow) there's more than 2^64
	// blocks in the DAG, since the genesis block has size = 2^64.
	if err == nil {
		t.Fatalf("TestReindexIntervalErrors: reindexIntervals " +
			"unexpectedly didn't return an error")
	}
	if !strings.Contains(err.Error(), "missing tree parent during reindexing") {
		t.Fatalf("TestReindexIntervalErrors: reindexIntervals "+
			"returned an unexpected error: %s", err)
	}
}

func TestFutureCoveringBlockSetString(t *testing.T) {
	treeNodeA := newReachabilityTreeNode(&blockNode{})
	treeNodeA.setInterval(newReachabilityInterval(123, 456))
	treeNodeB := newReachabilityTreeNode(&blockNode{})
	treeNodeB.setInterval(newReachabilityInterval(457, 789))
	futureCoveringSet := futureCoveringBlockSet{
		&futureCoveringBlock{treeNode: treeNodeA},
		&futureCoveringBlock{treeNode: treeNodeB},
	}

	str := futureCoveringSet.String()
	expectedStr := "[123,456][457,789]"
	if str != expectedStr {
		t.Fatalf("TestFutureCoveringBlockSetString: unexpected "+
			"string. Want: %s, got: %s", expectedStr, str)
	}
}

func TestReachabilityTreeNodeString(t *testing.T) {
	treeNodeA := newReachabilityTreeNode(&blockNode{})
	treeNodeA.setInterval(newReachabilityInterval(100, 199))
	treeNodeB1 := newReachabilityTreeNode(&blockNode{})
	treeNodeB1.setInterval(newReachabilityInterval(100, 150))
	treeNodeB2 := newReachabilityTreeNode(&blockNode{})
	treeNodeB2.setInterval(newReachabilityInterval(150, 199))
	treeNodeC := newReachabilityTreeNode(&blockNode{})
	treeNodeC.setInterval(newReachabilityInterval(100, 149))
	treeNodeA.children = []*reachabilityTreeNode{treeNodeB1, treeNodeB2}
	treeNodeB2.children = []*reachabilityTreeNode{treeNodeC}

	str := treeNodeA.String()
	expectedStr := "[100,149]\n[100,150][150,199]\n[100,199]"
	if str != expectedStr {
		t.Fatalf("TestReachabilityTreeNodeString: unexpected "+
			"string. Want: %s, got: %s", expectedStr, str)
	}
}
392 blockdag/reachabilitystore.go Normal file
@@ -0,0 +1,392 @@
package blockdag

import (
	"bytes"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
	"github.com/pkg/errors"
	"io"
)

type reachabilityData struct {
	treeNode          *reachabilityTreeNode
	futureCoveringSet futureCoveringBlockSet
}

type reachabilityStore struct {
	dag    *BlockDAG
	dirty  map[daghash.Hash]struct{}
	loaded map[daghash.Hash]*reachabilityData
}

func newReachabilityStore(dag *BlockDAG) *reachabilityStore {
	return &reachabilityStore{
		dag:    dag,
		dirty:  make(map[daghash.Hash]struct{}),
		loaded: make(map[daghash.Hash]*reachabilityData),
	}
}
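
reachabilityStore is a write-through cache: every mutation lands in loaded, the touched hash is recorded in dirty, and flushToDB later persists only the dirty entries. A minimal standalone sketch of the pattern, with hypothetical types that are not part of the kaspad API:

package main

import "fmt"

type dirtyCache struct {
	loaded map[string]int
	dirty  map[string]struct{}
}

func (c *dirtyCache) set(key string, value int) {
	c.loaded[key] = value
	c.dirty[key] = struct{}{} // remember what needs flushing
}

func (c *dirtyCache) flush(persist func(string, int)) {
	for key := range c.dirty {
		persist(key, c.loaded[key])
	}
	c.dirty = make(map[string]struct{}) // mirrors clearDirtyEntries below
}

func main() {
	c := &dirtyCache{loaded: map[string]int{}, dirty: map[string]struct{}{}}
	c.set("a", 1)
	c.flush(func(k string, v int) { fmt.Println("persist", k, v) })
}
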
func (store *reachabilityStore) setTreeNode(treeNode *reachabilityTreeNode) {
	// Create an empty entry in store.loaded if one doesn't already exist
	node := treeNode.blockNode
	_, exists := store.reachabilityDataByHash(node.hash)
	if !exists {
		store.loaded[*node.hash] = &reachabilityData{}
	}

	store.loaded[*node.hash].treeNode = treeNode
	store.setBlockAsDirty(node.hash)
}

func (store *reachabilityStore) setFutureCoveringSet(node *blockNode, futureCoveringSet futureCoveringBlockSet) error {
	// Make sure the reachability data for this block exists in store.loaded
	_, exists := store.reachabilityDataByHash(node.hash)
	if !exists {
		return reachabilityNotFoundError(node)
	}

	store.loaded[*node.hash].futureCoveringSet = futureCoveringSet
	store.setBlockAsDirty(node.hash)
	return nil
}

func (store *reachabilityStore) setBlockAsDirty(blockHash *daghash.Hash) {
	store.dirty[*blockHash] = struct{}{}
}

func reachabilityNotFoundError(node *blockNode) error {
	return errors.Errorf("couldn't find reachability data for block %s", node.hash)
}

func (store *reachabilityStore) treeNodeByBlockNode(node *blockNode) (*reachabilityTreeNode, error) {
	reachabilityData, exists := store.reachabilityDataByHash(node.hash)
	if !exists {
		return nil, reachabilityNotFoundError(node)
	}
	return reachabilityData.treeNode, nil
}

func (store *reachabilityStore) futureCoveringSetByBlockNode(node *blockNode) (futureCoveringBlockSet, error) {
	reachabilityData, exists := store.reachabilityDataByHash(node.hash)
	if !exists {
		return nil, reachabilityNotFoundError(node)
	}
	return reachabilityData.futureCoveringSet, nil
}

func (store *reachabilityStore) reachabilityDataByHash(hash *daghash.Hash) (*reachabilityData, bool) {
	reachabilityData, ok := store.loaded[*hash]
	return reachabilityData, ok
}

// flushToDB writes all dirty reachability data to the database.
func (store *reachabilityStore) flushToDB(dbTx database.Tx) error {
	if len(store.dirty) == 0 {
		return nil
	}

	for hash := range store.dirty {
		hash := hash // Copy hash to a new variable to avoid passing the same pointer
		reachabilityData := store.loaded[hash]
		err := store.dbStoreReachabilityData(dbTx, &hash, reachabilityData)
		if err != nil {
			return err
		}
	}
	return nil
}
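
The hash := hash line matters: in the Go versions this code targets, a range variable is a single variable reused across iterations, so taking its address without the per-iteration copy would pass the same pointer to every dbStoreReachabilityData call. A standalone illustration of the pitfall:

package main

import "fmt"

func main() {
	keys := map[string]struct{}{"a": {}, "b": {}}
	var pointers []*string
	for key := range keys {
		key := key // drop this line and both pointers may alias one variable
		pointers = append(pointers, &key)
	}
	// With the copy, the two pointers refer to distinct strings.
	for _, p := range pointers {
		fmt.Println(*p)
	}
}
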
|
||||
func (store *reachabilityStore) clearDirtyEntries() {
|
||||
store.dirty = make(map[daghash.Hash]struct{})
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) init(dbTx database.Tx) error {
|
||||
bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)
|
||||
|
||||
// TODO: (Stas) This is a quick and dirty hack.
|
||||
// We iterate over the entire bucket twice:
|
||||
// * First, populate the loaded set with all entries
|
||||
// * Second, connect the parent/children pointers in each entry
|
||||
// with other nodes, which are now guaranteed to exist
|
||||
cursor := bucket.Cursor()
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
err := store.initReachabilityData(cursor)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
cursor = bucket.Cursor()
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
err := store.loadReachabilityDataFromCursor(cursor)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) error {
|
||||
hash, err := daghash.NewHash(cursor.Key())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
store.loaded[*hash] = &reachabilityData{
|
||||
treeNode: &reachabilityTreeNode{},
|
||||
futureCoveringSet: nil,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.Cursor) error {
|
||||
hash, err := daghash.NewHash(cursor.Key())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reachabilityData, ok := store.reachabilityDataByHash(hash)
|
||||
if !ok {
|
||||
return errors.Errorf("cannot find reachability data for block hash: %s", hash)
|
||||
}
|
||||
|
||||
err = store.deserializeReachabilityData(cursor.Value(), reachabilityData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Connect the treeNode with its blockNode
|
||||
reachabilityData.treeNode.blockNode = store.dag.index.LookupNode(hash)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dbStoreReachabilityData stores the reachability data to the database.
|
||||
// This overwrites the current entry if there exists one.
|
||||
func (store *reachabilityStore) dbStoreReachabilityData(dbTx database.Tx, hash *daghash.Hash, reachabilityData *reachabilityData) error {
|
||||
serializedReachabilyData, err := store.serializeReachabilityData(reachabilityData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dbTx.Metadata().Bucket(reachabilityDataBucketName).Put(hash[:], serializedReachabilyData)
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) serializeReachabilityData(reachabilityData *reachabilityData) ([]byte, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := store.serializeTreeNode(w, reachabilityData.treeNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = store.serializeFutureCoveringSet(w, reachabilityData.futureCoveringSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return w.Bytes(), nil
|
||||
}
func (store *reachabilityStore) serializeTreeNode(w io.Writer, treeNode *reachabilityTreeNode) error {
	// Serialize the interval
	err := store.serializeReachabilityInterval(w, treeNode.interval)
	if err != nil {
		return err
	}

	// Serialize the remaining interval
	err = store.serializeReachabilityInterval(w, treeNode.remainingInterval)
	if err != nil {
		return err
	}

	// Serialize the parent
	// If this is the genesis block, write the zero hash instead
	parentHash := &daghash.ZeroHash
	if treeNode.parent != nil {
		parentHash = treeNode.parent.blockNode.hash
	}
	err = wire.WriteElement(w, parentHash)
	if err != nil {
		return err
	}

	// Serialize the number of children
	err = wire.WriteVarInt(w, uint64(len(treeNode.children)))
	if err != nil {
		return err
	}

	// Serialize the children
	for _, child := range treeNode.children {
		err = wire.WriteElement(w, child.blockNode.hash)
		if err != nil {
			return err
		}
	}

	return nil
}

func (store *reachabilityStore) serializeReachabilityInterval(w io.Writer, interval *reachabilityInterval) error {
	// Serialize start
	err := wire.WriteElement(w, interval.start)
	if err != nil {
		return err
	}

	// Serialize end
	err = wire.WriteElement(w, interval.end)
	if err != nil {
		return err
	}

	return nil
}

func (store *reachabilityStore) serializeFutureCoveringSet(w io.Writer, futureCoveringSet futureCoveringBlockSet) error {
	// Serialize the set size
	err := wire.WriteVarInt(w, uint64(len(futureCoveringSet)))
	if err != nil {
		return err
	}

	// Serialize each block in the set
	for _, block := range futureCoveringSet {
		err = wire.WriteElement(w, block.blockNode.hash)
		if err != nil {
			return err
		}
	}

	return nil
}
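Taken together, these serializers lay out a reachability entry as follows, assuming wire.WriteElement writes uint64 values as 8 bytes and hashes as 32 raw bytes (the element encoding itself is not shown in this diff):

| interval.start (8 bytes) | interval.end (8 bytes) |
| remainingInterval.start (8 bytes) | remainingInterval.end (8 bytes) |
| parentHash (32 bytes) | childCount (varint) | childHash (32 bytes) × childCount |
| futureCoveringSet size (varint) | blockHash (32 bytes) × size |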
func (store *reachabilityStore) deserializeReachabilityData(
	serializedReachabilityDataBytes []byte, destination *reachabilityData) error {

	r := bytes.NewBuffer(serializedReachabilityDataBytes)

	// Deserialize the tree node
	err := store.deserializeTreeNode(r, destination)
	if err != nil {
		return err
	}

	// Deserialize the future covering set
	err = store.deserializeFutureCoveringSet(r, destination)
	if err != nil {
		return err
	}

	return nil
}

func (store *reachabilityStore) deserializeTreeNode(r io.Reader, destination *reachabilityData) error {
	// Deserialize the interval
	interval, err := store.deserializeReachabilityInterval(r)
	if err != nil {
		return err
	}
	destination.treeNode.interval = interval

	// Deserialize the remaining interval
	remainingInterval, err := store.deserializeReachabilityInterval(r)
	if err != nil {
		return err
	}
	destination.treeNode.remainingInterval = remainingInterval

	// Deserialize the parent
	// If this is the zero hash, this node is the genesis and as such doesn't have a parent
	parentHash := &daghash.Hash{}
	err = wire.ReadElement(r, parentHash)
	if err != nil {
		return err
	}
	if !daghash.ZeroHash.IsEqual(parentHash) {
		parentReachabilityData, ok := store.reachabilityDataByHash(parentHash)
		if !ok {
			return errors.Errorf("parent reachability data not found for hash: %s", parentHash)
		}
		destination.treeNode.parent = parentReachabilityData.treeNode
	}

	// Deserialize the number of children
	childCount, err := wire.ReadVarInt(r)
	if err != nil {
		return err
	}

	// Deserialize the children
	children := make([]*reachabilityTreeNode, childCount)
	for i := uint64(0); i < childCount; i++ {
		childHash := &daghash.Hash{}
		err = wire.ReadElement(r, childHash)
		if err != nil {
			return err
		}
		childReachabilityData, ok := store.reachabilityDataByHash(childHash)
		if !ok {
			return errors.Errorf("child reachability data not found for hash: %s", childHash)
		}
		children[i] = childReachabilityData.treeNode
	}
	destination.treeNode.children = children

	return nil
}
func (store *reachabilityStore) deserializeReachabilityInterval(r io.Reader) (*reachabilityInterval, error) {
	interval := &reachabilityInterval{}

	// Deserialize start
	start := uint64(0)
	err := wire.ReadElement(r, &start)
	if err != nil {
		return nil, err
	}
	interval.start = start

	// Deserialize end
	end := uint64(0)
	err = wire.ReadElement(r, &end)
	if err != nil {
		return nil, err
	}
	interval.end = end

	return interval, nil
}

func (store *reachabilityStore) deserializeFutureCoveringSet(r io.Reader, destination *reachabilityData) error {
	// Deserialize the set size
	setSize, err := wire.ReadVarInt(r)
	if err != nil {
		return err
	}

	// Deserialize each block in the set
	futureCoveringSet := make(futureCoveringBlockSet, setSize)
	for i := uint64(0); i < setSize; i++ {
		blockHash := &daghash.Hash{}
		err = wire.ReadElement(r, blockHash)
		if err != nil {
			return err
		}
		blockNode := store.dag.index.LookupNode(blockHash)
		if blockNode == nil {
			return errors.Errorf("blockNode not found for hash %s", blockHash)
		}
		blockReachabilityData, ok := store.reachabilityDataByHash(blockHash)
		if !ok {
			return errors.Errorf("block reachability data not found for hash: %s", blockHash)
		}
		futureCoveringSet[i] = &futureCoveringBlock{
			blockNode: blockNode,
			treeNode:  blockReachabilityData.treeNode,
		}
	}
	destination.futureCoveringSet = futureCoveringSet

	return nil
}
@@ -6,13 +6,12 @@ package blockdag

import (
	"fmt"
	"math"
	"runtime"
	"time"

	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/wire"
	"github.com/kaspanet/kaspad/txscript"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/wire"
)
// txValidateItem holds a transaction along with which input to validate.
@@ -23,7 +22,7 @@ type txValidateItem struct {
}

// txValidator provides a type which asynchronously validates transaction
// inputs. It provides several channels for communication and a processing
// function that is intended to be run in multiple goroutines.
type txValidator struct {
	validateChan chan *txValidateItem
@@ -35,7 +34,7 @@ type txValidator struct {
}
// sendResult sends the result of a script pair validation on the internal
// result channel while respecting the quit channel. This allows orderly
// shutdown when the validation process is aborted early due to a validation
// error in one of the other goroutines.
func (v *txValidator) sendResult(result error) {
@@ -55,12 +54,12 @@ out:
		case txVI := <-v.validateChan:
			// Ensure the referenced input utxo is available.
			txIn := txVI.txIn
			entry, ok := v.utxoSet.Get(txIn.PreviousOutPoint)
			entry, ok := v.utxoSet.Get(txIn.PreviousOutpoint)
			if !ok {
				str := fmt.Sprintf("unable to find unspent "+
					"output %s referenced from "+
					"transaction %s:%d",
					txIn.PreviousOutPoint, txVI.tx.ID(),
					"transaction %s input %d",
					txIn.PreviousOutpoint, txVI.tx.ID(),
					txVI.txInIndex)
				err := ruleError(ErrMissingTxOut, str)
				v.sendResult(err)
@@ -69,8 +68,8 @@ out:

			// Create a new script engine for the script pair.
			sigScript := txIn.SignatureScript
			pkScript := entry.PkScript()
			vm, err := txscript.NewEngine(pkScript, txVI.tx.MsgTx(),
			scriptPubKey := entry.ScriptPubKey()
			vm, err := txscript.NewEngine(scriptPubKey, txVI.tx.MsgTx(),
				txVI.txInIndex, v.flags, v.sigCache)
			if err != nil {
				str := fmt.Sprintf("failed to parse input "+
@@ -78,7 +77,7 @@ out:
					"%s (input script bytes %x, prev "+
					"output script bytes %x)",
					txVI.tx.ID(), txVI.txInIndex,
					txIn.PreviousOutPoint, err, sigScript, pkScript)
					txIn.PreviousOutpoint, err, sigScript, scriptPubKey)
				err := ruleError(ErrScriptMalformed, str)
				v.sendResult(err)
				break out
@@ -91,7 +90,7 @@ out:
					"%s (input script bytes %x, prev output "+
					"script bytes %x)",
					txVI.tx.ID(), txVI.txInIndex,
					txIn.PreviousOutPoint, err, sigScript, pkScript)
					txIn.PreviousOutpoint, err, sigScript, scriptPubKey)
				err := ruleError(ErrScriptValidation, str)
				v.sendResult(err)
				break out
@@ -114,7 +113,7 @@ func (v *txValidator) Validate(items []*txValidateItem) error {
}

	// Limit the number of goroutines to do script validation based on the
	// number of processor cores. This helps ensure the system stays
	// reasonably responsive under heavy load.
	maxGoRoutines := runtime.NumCPU() * 3
	if maxGoRoutines <= 0 {
@@ -127,10 +126,10 @@ func (v *txValidator) Validate(items []*txValidateItem) error {
	// Start up validation handlers that are used to asynchronously
	// validate each transaction input.
	for i := 0; i < maxGoRoutines; i++ {
		go v.validateHandler()
		spawn(v.validateHandler)
	}

	// Validate each of the inputs. The quit channel is closed when any
	// errors occur so all processing goroutines exit regardless of which
	// input had the validation error.
	numInputs := len(items)
@@ -138,7 +137,7 @@ func (v *txValidator) Validate(items []*txValidateItem) error {
	processedItems := 0
	for processedItems < numInputs {
		// Only send items while there are still items that need to
		// be processed. The select statement will never select a nil
		// channel.
		var validateChan chan *txValidateItem
		var item *txValidateItem
@@ -180,16 +179,16 @@ func newTxValidator(utxoSet UTXOSet, flags txscript.ScriptFlags, sigCache *txscr

// ValidateTransactionScripts validates the scripts for the passed transaction
// using multiple goroutines.
func ValidateTransactionScripts(tx *util.Tx, utxoSet UTXOSet, flags txscript.ScriptFlags, sigCache *txscript.SigCache) error {
	// Don't validate coinbase transaction scripts.
	if tx.IsCoinBase() {
		return nil
	}

	// Collect all of the transaction inputs and required information for
	// validation.
	txIns := tx.MsgTx().TxIn
	txValItems := make([]*txValidateItem, 0, len(txIns))
	for txInIdx, txIn := range txIns {
		// Skip block reward transactions.
		if txIn.PreviousOutPoint.Index == math.MaxUint32 {
			continue
		}

		txVI := &txValidateItem{
			txInIndex: txInIdx,
			txIn:      txIn,
@@ -214,12 +213,11 @@ func checkBlockScripts(block *blockNode, utxoSet UTXOSet, transactions []*util.T
}
	txValItems := make([]*txValidateItem, 0, numInputs)
	for _, tx := range transactions {
		// Skip coinbase transactions.
		if tx.IsCoinBase() {
			continue
		}
		for txInIdx, txIn := range tx.MsgTx().TxIn {
			// Skip block reward transactions.
			if txIn.PreviousOutPoint.Index == math.MaxUint32 {
				continue
			}

			txVI := &txValidateItem{
				txInIndex: txInIdx,
				txIn:      txIn,
@@ -6,10 +6,11 @@ package blockdag

import (
	"fmt"
	"path/filepath"
	"runtime"
	"testing"

	"github.com/daglabs/btcd/txscript"
	"github.com/kaspanet/kaspad/txscript"
)

// TestCheckBlockScripts ensures that validating all of the scripts in a
@@ -20,7 +21,7 @@ func TestCheckBlockScripts(t *testing.T) {

	testBlockNum := 277647
	blockDataFile := fmt.Sprintf("%d.dat", testBlockNum)
	blocks, err := loadBlocks(blockDataFile)
	blocks, err := LoadBlocks(filepath.Join("testdata/", blockDataFile))
	if err != nil {
		t.Errorf("Error loading file: %v\n", err)
		return
@@ -4,12 +4,13 @@ import (
	"bytes"
	"encoding/binary"
	"fmt"
	"github.com/pkg/errors"

	"github.com/daglabs/btcd/util"
	"github.com/kaspanet/kaspad/util"

	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util/subnetworkid"
	"github.com/daglabs/btcd/wire"
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util/subnetworkid"
	"github.com/kaspanet/kaspad/wire"
)
// SubnetworkStore stores the subnetworks data
@@ -23,38 +24,28 @@ func newSubnetworkStore(db database.DB) *SubnetworkStore {
	}
}

// registerSubnetworks scans a list of accepted transactions, singles out
// registerSubnetworks scans a list of transactions, singles out
// subnetwork registry transactions, validates them, and registers a new
// subnetwork based on each one.
// This function returns an error if one or more transactions are invalid
func registerSubnetworks(dbTx database.Tx, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
	validSubnetworkRegistryTxs := make([]*wire.MsgTx, 0)
func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
	subnetworkRegistryTxs := make([]*wire.MsgTx, 0)
	for _, tx := range txs {
		msgTx := tx.MsgTx()

	for _, txs := range txsAcceptanceData {
		for _, txData := range txs {
			if !txData.IsAccepted {
				continue
			}
		if msgTx.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDRegistry) {
			subnetworkRegistryTxs = append(subnetworkRegistryTxs, msgTx)
		}

			tx := txData.Tx.MsgTx()
			if tx.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDRegistry) {
				err := validateSubnetworkRegistryTransaction(tx)
				if err != nil {
					return err
				}
				validSubnetworkRegistryTxs = append(validSubnetworkRegistryTxs, tx)
			}

			if subnetworkid.Less(subnetworkid.SubnetworkIDRegistry, &tx.SubnetworkID) {
				// Transactions are ordered by subnetwork, so we can safely assume
				// that the rest of the transactions will not be subnetwork registry
				// transactions.
				break
			}
		if subnetworkid.Less(subnetworkid.SubnetworkIDRegistry, &msgTx.SubnetworkID) {
			// Transactions are ordered by subnetwork, so we can safely assume
			// that the rest of the transactions will not be subnetwork registry
			// transactions.
			break
		}
	}
	for _, registryTx := range validSubnetworkRegistryTxs {
	for _, registryTx := range subnetworkRegistryTxs {
		subnetworkID, err := TxToSubnetworkID(registryTx)
		if err != nil {
			return err
@@ -67,7 +58,7 @@ func registerSubnetworks(dbTx database.Tx, txsAcceptanceData MultiBlockTxsAccept
		createdSubnetwork := newSubnetwork(registryTx)
		err := dbRegisterSubnetwork(dbTx, subnetworkID, createdSubnetwork)
		if err != nil {
			return fmt.Errorf("failed registering subnetwork "+
			return errors.Errorf("failed registering subnetwork "+
				"for tx '%s': %s", registryTx.TxHash(), err)
		}
	}
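The early break in the scan above is sound only because the transaction list is ordered by subnetwork ID. A minimal self-contained sketch of the same collect-then-break pattern over a sorted slice (illustrative types, not this codebase's):

package main

import "fmt"

// collectMatching gathers every element equal to target and stops at the
// first element greater than it — valid only on a sorted slice.
func collectMatching(sorted []int, target int) []int {
	var matches []int
	for _, v := range sorted {
		if v == target {
			matches = append(matches, v)
		}
		if v > target {
			break
		}
	}
	return matches
}

func main() {
	fmt.Println(collectMatching([]int{1, 2, 2, 3, 5}, 2)) // Output: [2 2]
}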
@@ -104,10 +95,10 @@ func (s *SubnetworkStore) subnetwork(subnetworkID *subnetworkid.SubnetworkID) (*
		return nil
	})
	if dbErr != nil {
		return nil, fmt.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, dbErr)
		return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, dbErr)
	}
	if err != nil {
		return nil, fmt.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, err)
		return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, err)
	}

	return sNet, nil
@@ -121,7 +112,7 @@ func (s *SubnetworkStore) GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uin
		return 0, err
	}
	if sNet == nil {
		return 0, fmt.Errorf("subnetwork '%s' not found", subnetworkID)
		return 0, errors.Errorf("subnetwork '%s' not found", subnetworkID)
	}

	return sNet.gasLimit, nil
@@ -132,14 +123,14 @@ func dbRegisterSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.Subnetwor
	// Serialize the subnetwork
	serializedSubnetwork, err := serializeSubnetwork(network)
	if err != nil {
		return fmt.Errorf("failed to serialize subnetwork '%s': %s", subnetworkID, err)
		return errors.Errorf("failed to serialize subnetwork '%s': %s", subnetworkID, err)
	}

	// Store the subnetwork
	subnetworksBucket := dbTx.Metadata().Bucket(subnetworksBucketName)
	err = subnetworksBucket.Put(subnetworkID[:], serializedSubnetwork)
	if err != nil {
		return fmt.Errorf("failed to write subnetwork '%s': %s", subnetworkID, err)
		return errors.Errorf("failed to write subnetwork '%s': %s", subnetworkID, err)
	}

	return nil
@@ -161,13 +152,16 @@ type subnetwork struct {
}

func newSubnetwork(tx *wire.MsgTx) *subnetwork {
	gasLimit := binary.LittleEndian.Uint64(tx.Payload[:8])

	return &subnetwork{
		gasLimit: gasLimit,
		gasLimit: ExtractGasLimit(tx),
	}
}

// ExtractGasLimit extracts the gas limit from the transaction payload
func ExtractGasLimit(tx *wire.MsgTx) uint64 {
	return binary.LittleEndian.Uint64(tx.Payload[:8])
}
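ExtractGasLimit assumes the payload's first eight bytes hold the gas limit in little-endian order. A self-contained sketch of that decoding round trip (made-up value, standard library only):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode a made-up gas limit of 1000 into the first 8 payload bytes,
	// little-endian, then read it back the same way ExtractGasLimit does.
	payload := make([]byte, 8)
	binary.LittleEndian.PutUint64(payload, 1000)
	fmt.Println(binary.LittleEndian.Uint64(payload[:8])) // Output: 1000
}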
// serializeSubnetwork serializes a subnetwork into the following binary format:
// | gasLimit (8 bytes) |
func serializeSubnetwork(sNet *subnetwork) ([]byte, error) {
@@ -176,7 +170,7 @@ func serializeSubnetwork(sNet *subnetwork) ([]byte, error) {
	// Write the gas limit
	err := binary.Write(serializedSNet, byteOrder, sNet.gasLimit)
	if err != nil {
		return nil, fmt.Errorf("failed to serialize subnetwork: %s", err)
		return nil, errors.Errorf("failed to serialize subnetwork: %s", err)
	}

	return serializedSNet.Bytes(), nil
@@ -191,7 +185,7 @@ func deserializeSubnetwork(serializedSNetBytes []byte) (*subnetwork, error) {
	var gasLimit uint64
	err := binary.Read(serializedSNet, byteOrder, &gasLimit)
	if err != nil {
		return nil, fmt.Errorf("failed to deserialize subnetwork: %s", err)
		return nil, errors.Errorf("failed to deserialize subnetwork: %s", err)
	}

	return &subnetwork{
@@ -3,29 +3,32 @@ package blockdag

// The functions in this file are not considered safe for regular use, and should be used for test purposes only.

import (
	"fmt"
	"compress/bzip2"
	"encoding/binary"
	"github.com/kaspanet/kaspad/util"
	"github.com/pkg/errors"
	"io"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"

	"github.com/daglabs/btcd/util/subnetworkid"
	"github.com/kaspanet/kaspad/util/subnetworkid"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	_ "github.com/daglabs/btcd/database/ffldb" // blank import ffldb so that its init() function runs before tests
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/wire"
	"github.com/kaspanet/kaspad/database"
	_ "github.com/kaspanet/kaspad/database/ffldb" // blank import ffldb so that its init() function runs before tests
	"github.com/kaspanet/kaspad/txscript"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

const (
	// testDbType is the database backend type to use for the tests.
	testDbType = "ffldb"

	// testDbRoot is the root directory used to create all test databases.
	testDbRoot = "testdbs"

	// blockDataNet is the expected network in the test block data.
	blockDataNet = wire.MainNet
	blockDataNet = wire.Mainnet
)

// isSupportedDbType returns whether or not the passed database type is
@@ -41,8 +44,8 @@ func isSupportedDbType(dbType string) bool {
	return false
}

// filesExists returns whether or not the named file or directory exists.
func fileExists(name string) bool {
// FileExists returns whether or not the named file or directory exists.
func FileExists(name string) bool {
	if _, err := os.Stat(name); err != nil {
		if os.IsNotExist(err) {
			return false
@@ -51,40 +54,52 @@ func fileExists(name string) bool {
	return true
}

// DAGSetup is used to create a new db and chain instance with the genesis
// block already inserted. In addition to the new chain instance, it returns
// DAGSetup is used to create a new db and DAG instance with the genesis
// block already inserted. In addition to the new DAG instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
	if !isSupportedDbType(testDbType) {
		return nil, nil, fmt.Errorf("unsupported db type %s", testDbType)
		return nil, nil, errors.Errorf("unsupported db type %s", testDbType)
	}

	var teardown func()

	if config.DB == nil {
		// Create the root directory for test databases.
		if !fileExists(testDbRoot) {
			if err := os.MkdirAll(testDbRoot, 0700); err != nil {
				err := fmt.Errorf("unable to create test db "+
					"root: %s", err)
				return nil, nil, err
			}
		}
	// To make sure that the teardown function is not called before all spawned goroutines have finished running,
	// overwrite `spawn` to count the number of running goroutines
	spawnWaitGroup := sync.WaitGroup{}
	realSpawn := spawn
	spawn = func(f func()) {
		spawnWaitGroup.Add(1)
		realSpawn(func() {
			f()
			spawnWaitGroup.Done()
		})
	}
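	// Every goroutine started through the wrapped spawn now registers with
	// spawnWaitGroup, so the teardown functions below can wait for all of
	// them to finish before closing the database.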
	dbPath := filepath.Join(testDbRoot, dbName)
	if config.DB == nil {
		tmpDir := os.TempDir()

		dbPath := filepath.Join(tmpDir, dbName)
		_ = os.RemoveAll(dbPath)
		var err error
		config.DB, err = database.Create(testDbType, dbPath, blockDataNet)
		if err != nil {
			return nil, nil, fmt.Errorf("error creating db: %s", err)
			return nil, nil, errors.Errorf("error creating db: %s", err)
		}

		// Setup a teardown function for cleaning up. This function is
		// returned to the caller to be invoked when it is done testing.
		teardown = func() {
			spawnWaitGroup.Wait()
			spawn = realSpawn
			config.DB.Close()
			os.RemoveAll(dbPath)
			os.RemoveAll(testDbRoot)
		}
	} else {
		teardown = func() {
			spawnWaitGroup.Wait()
			spawn = realSpawn
			config.DB.Close()
		}
	}

@@ -95,7 +110,7 @@ func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
	dag, err := New(&config)
	if err != nil {
		teardown()
		err := fmt.Errorf("failed to create dag instance: %s", err)
		err := errors.Errorf("failed to create dag instance: %s", err)
		return nil, nil, err
	}
	return dag, teardown, nil
@@ -116,7 +131,7 @@ func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, su

	for i := uint32(0); i < numInputs; i++ {
		txIns = append(txIns, &wire.TxIn{
			PreviousOutPoint: *wire.NewOutPoint(&daghash.TxID{}, i),
			PreviousOutpoint: *wire.NewOutpoint(&daghash.TxID{}, i),
			SignatureScript:  []byte{},
			Sequence:         wire.MaxTxInSequenceNum,
		})
@@ -124,8 +139,8 @@ func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, su

	for i := uint32(0); i < numOutputs; i++ {
		txOuts = append(txOuts, &wire.TxOut{
			PkScript: OpTrueScript,
			Value:    outputValue,
			ScriptPubKey: OpTrueScript,
			Value:        outputValue,
		})
	}

@@ -136,74 +151,162 @@ func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, su
	return wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
}

// createCoinbaseTxForTest returns a coinbase transaction with the requested number of
// outputs paying an appropriate subsidy based on the passed block height to the
// address associated with the harness. It automatically uses a standard
// signature script that starts with the block height
func createCoinbaseTxForTest(blockHeight int32, numOutputs uint32, extraNonce int64, params *dagconfig.Params) (*wire.MsgTx, error) {
	// Create standard coinbase script.
	coinbaseScript, err := txscript.NewScriptBuilder().
		AddInt64(int64(blockHeight)).AddInt64(extraNonce).Script()
	if err != nil {
		return nil, err
	}

	txIns := []*wire.TxIn{&wire.TxIn{
		// Coinbase transactions have no inputs, so previous outpoint is
		// zero hash and max index.
		PreviousOutPoint: *wire.NewOutPoint(&daghash.TxID{},
			wire.MaxPrevOutIndex),
		SignatureScript: coinbaseScript,
		Sequence:        wire.MaxTxInSequenceNum,
	}}

	txOuts := []*wire.TxOut{}

	totalInput := CalcBlockSubsidy(blockHeight, params)
	amountPerOutput := totalInput / uint64(numOutputs)
	remainder := totalInput - amountPerOutput*uint64(numOutputs)
	for i := uint32(0); i < numOutputs; i++ {
		// Ensure the final output accounts for any remainder that might
		// be left from splitting the input amount.
		amount := amountPerOutput
		if i == numOutputs-1 {
			amount = amountPerOutput + remainder
		}
		txOuts = append(txOuts, &wire.TxOut{
			PkScript: OpTrueScript,
			Value:    amount,
		})
	}

	return wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts), nil
}
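The output-splitting arithmetic above guarantees the outputs always sum back to the subsidy. A tiny self-contained check with made-up numbers:

package main

import "fmt"

func main() {
	totalInput := uint64(10)
	numOutputs := uint64(3)
	amountPerOutput := totalInput / numOutputs           // 3
	remainder := totalInput - amountPerOutput*numOutputs // 1
	// The final output absorbs the remainder: 3 + 3 + 4 == 10.
	fmt.Println(amountPerOutput, amountPerOutput, amountPerOutput+remainder)
}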
// VirtualForTest is an exported version of virtualBlock, so that it can be returned by exported test_util methods
type VirtualForTest *virtualBlock

// SetVirtualForTest replaces the dag's virtual block. This function is used for test purposes only
func SetVirtualForTest(dag *BlockDAG, virtual *virtualBlock) *virtualBlock {
func SetVirtualForTest(dag *BlockDAG, virtual VirtualForTest) VirtualForTest {
	oldVirtual := dag.virtual
	dag.virtual = virtual
	return oldVirtual
	return VirtualForTest(oldVirtual)
}

// GetVirtualFromParentsForTest generates a virtual block with the given parents.
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (*virtualBlock, error) {
	parents := newSet()
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (VirtualForTest, error) {
	parents := newBlockSet()
	for _, hash := range parentHashes {
		parent := dag.index.LookupNode(hash)
		if parent == nil {
			return nil, fmt.Errorf("GetVirtualFromParentsForTest: didn't find node for hash %s", hash)
			return nil, errors.Errorf("GetVirtualFromParentsForTest: didn't find node for hash %s", hash)
		}
		parents.add(parent)
	}
	virtual := newVirtualBlock(parents, dag.dagParams.K)
	virtual := newVirtualBlock(dag, parents)

	pastUTXO, _, err := virtual.pastUTXO(dag)
	pastUTXO, _, err := dag.pastUTXO(&virtual.blockNode)
	if err != nil {
		return nil, err
	}
	diffPastUTXO := pastUTXO.clone().(*DiffUTXOSet)
	diffPastUTXO.meldToBase()
	virtual.utxoSet = diffPastUTXO.base
	diffUTXO := pastUTXO.clone().(*DiffUTXOSet)
	err = diffUTXO.meldToBase()
	if err != nil {
		return nil, err
	}
	virtual.utxoSet = diffUTXO.base

	return virtual, nil
	return VirtualForTest(virtual), nil
}

// LoadBlocks reads files containing kaspa block data (raw or bzip2-compressed) from disk
// and returns them as an array of util.Block.
func LoadBlocks(filename string) (blocks []*util.Block, err error) {
	var network = wire.Mainnet
	var dr io.Reader
	var fi io.ReadCloser

	fi, err = os.Open(filename)
	if err != nil {
		return
	}

	if strings.HasSuffix(filename, ".bz2") {
		dr = bzip2.NewReader(fi)
	} else {
		dr = fi
	}
	defer fi.Close()

	var block *util.Block

	err = nil
	for height := uint64(0); err == nil; height++ {
		var rintbuf uint32
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err == io.EOF {
			// hit end of file at expected offset: no warning
			height--
			err = nil
			break
		}
		if err != nil {
			break
		}
		if rintbuf != uint32(network) {
			break
		}
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		blocklen := rintbuf

		rbytes := make([]byte, blocklen)

		// read block
		dr.Read(rbytes)

		block, err = util.NewBlockFromBytes(rbytes)
		if err != nil {
			return
		}
		blocks = append(blocks, block)
	}

	return
}
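For reference, a writer producing the record layout LoadBlocks reads back would emit, per block: a 4-byte little-endian network magic, a 4-byte little-endian length, then the raw serialized block. A minimal sketch (the helper name and values are hypothetical):

package main

import (
	"bytes"
	"encoding/binary"
)

// appendBlockRecord writes one record in the layout LoadBlocks expects.
func appendBlockRecord(buf *bytes.Buffer, network uint32, rawBlock []byte) {
	binary.Write(buf, binary.LittleEndian, network)               // network magic
	binary.Write(buf, binary.LittleEndian, uint32(len(rawBlock))) // block length
	buf.Write(rawBlock)                                           // raw block bytes
}

func main() {
	var buf bytes.Buffer
	appendBlockRecord(&buf, 0x1d07afc4, []byte{0x01, 0x02}) // made-up magic and payload
	_ = buf
}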
// opTrueAddress returns an address pointing to a P2SH anyone-can-spend script
func opTrueAddress(prefix util.Bech32Prefix) (util.Address, error) {
	return util.NewAddressScriptHash(OpTrueScript, prefix)
}

// PrepareBlockForTest generates a block with the proper merkle roots, coinbase transaction etc. This function is used for test purposes only
func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*wire.MsgTx) (*wire.MsgBlock, error) {
	newVirtual, err := GetVirtualFromParentsForTest(dag, parentHashes)
	if err != nil {
		return nil, err
	}
	oldVirtual := SetVirtualForTest(dag, newVirtual)
	defer SetVirtualForTest(dag, oldVirtual)

	OpTrueAddr, err := opTrueAddress(dag.dagParams.Prefix)
	if err != nil {
		return nil, err
	}

	blockTransactions := make([]*util.Tx, len(transactions)+1)

	extraNonce := generateDeterministicExtraNonceForTest()
	coinbasePayloadExtraData, err := CoinbasePayloadExtraData(extraNonce, "")
	if err != nil {
		return nil, err
	}

	blockTransactions[0], err = dag.NextCoinbaseFromAddress(OpTrueAddr, coinbasePayloadExtraData)
	if err != nil {
		return nil, err
	}

	for i, tx := range transactions {
		blockTransactions[i+1] = util.NewTx(tx)
	}

	// Sort transactions by subnetwork ID, keeping the coinbase transaction first
	sort.Slice(blockTransactions, func(i, j int) bool {
		if blockTransactions[i].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
			return true
		}
		if blockTransactions[j].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
			return false
		}
		return subnetworkid.Less(&blockTransactions[i].MsgTx().SubnetworkID, &blockTransactions[j].MsgTx().SubnetworkID)
	})

	block, err := dag.BlockForMining(blockTransactions)
	if err != nil {
		return nil, err
	}
	block.Header.Timestamp = dag.NextBlockMinimumTime()
	block.Header.Bits = dag.NextRequiredDifficulty(block.Header.Timestamp)

	return block, nil
}

// generateDeterministicExtraNonceForTest returns a unique deterministic extra nonce for coinbase data, in order to create unique coinbase transactions.
func generateDeterministicExtraNonceForTest() uint64 {
	extraNonceForTest++
	return extraNonceForTest
}

func resetExtraNonceForTest() {
	extraNonceForTest = 0
}

var extraNonceForTest = uint64(0)
@@ -1,14 +1,7 @@
package blockdag

import (
	"errors"
	"os"
	"strings"
	"testing"

	"bou.ke/monkey"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/database"
)

func TestIsSupportedDbType(t *testing.T) {
@@ -19,38 +12,3 @@ func TestIsSupportedDbType(t *testing.T) {
		t.Errorf("madeUpDb should not be a supported DB driver")
	}
}

// TestDAGSetupErrors tests all error-cases in DAGSetup.
// The non-error-cases are tested in the more general tests.
func TestDAGSetupErrors(t *testing.T) {
	os.RemoveAll(testDbRoot)
	testDAGSetupErrorThroughPatching(t, "unable to create test db root: ", os.MkdirAll, func(path string, perm os.FileMode) error {
		return errors.New("Made up error")
	})

	testDAGSetupErrorThroughPatching(t, "failed to create dag instance: ", New, func(config *Config) (*BlockDAG, error) {
		return nil, errors.New("Made up error")
	})

	testDAGSetupErrorThroughPatching(t, "unsupported db type ", isSupportedDbType, func(dbType string) bool {
		return false
	})

	testDAGSetupErrorThroughPatching(t, "error creating db: ", database.Create, func(dbType string, args ...interface{}) (database.DB, error) {
		return nil, errors.New("Made up error")
	})
}

func testDAGSetupErrorThroughPatching(t *testing.T, expectedErrorMessage string, targetFunction interface{}, replacementFunction interface{}) {
	guard := monkey.Patch(targetFunction, replacementFunction)
	defer guard.Unpatch()
	_, tearDown, err := DAGSetup("TestDAGSetup", Config{
		DAGParams: &dagconfig.MainNetParams,
	})
	if tearDown != nil {
		defer tearDown()
	}
	if err == nil || !strings.HasPrefix(err.Error(), expectedErrorMessage) {
		t.Errorf("DAGSetup: expected error to have prefix '%s' but got error '%v'", expectedErrorMessage, err)
	}
}
BIN blockdag/testdata/blk_0_to_4.dat (binary file not shown)
BIN blockdag/testdata/blk_3A.dat (binary file not shown)
BIN blockdag/testdata/blk_3B.dat (binary file not shown)
BIN blockdag/testdata/blk_3C.dat (binary file not shown)
BIN blockdag/testdata/blk_3D.dat (binary file not shown)
@@ -7,7 +7,7 @@ package blockdag
import (
	"fmt"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/kaspanet/kaspad/util/daghash"
)

// ThresholdState define the various threshold states used when voting on
@@ -77,14 +77,14 @@ type thresholdConditionChecker interface {

	// RuleChangeActivationThreshold is the number of blocks for which the
	// condition must be true in order to lock in a rule change.
	RuleChangeActivationThreshold() uint32
	RuleChangeActivationThreshold() uint64

	// MinerConfirmationWindow is the number of blocks in each threshold
	// state retarget window.
	MinerConfirmationWindow() uint32
	MinerConfirmationWindow() uint64

	// Condition returns whether or not the rule change activation condition
	// has been met. This typically involves checking whether or not the
	// bit associated with the condition is set, but can be more complex as
	// needed.
	Condition(*blockNode) (bool, error)
@@ -122,23 +122,23 @@ func newThresholdCaches(numCaches uint32) []thresholdStateCache {
}

// thresholdState returns the current rule change threshold state for the block
// AFTER the given node and deployment ID. The cache is used to ensure the
// threshold states for previous windows are only calculated once.
//
// This function MUST be called with the chain state lock held (for writes).
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdConditionChecker, cache *thresholdStateCache) (ThresholdState, error) {
	// The threshold state for the window that contains the genesis block is
	// defined by definition.
	confirmationWindow := int32(checker.MinerConfirmationWindow())
	if prevNode == nil || (prevNode.height+1) < confirmationWindow {
	confirmationWindow := checker.MinerConfirmationWindow()
	if prevNode == nil || (prevNode.blueScore+1) < confirmationWindow {
		return ThresholdDefined, nil
	}

	// Get the ancestor that is the last block of the previous confirmation
	// window in order to get its threshold state. This can be done because
	// the state is the same for all blocks within a given window.
	prevNode = prevNode.SelectedAncestor(prevNode.height -
		(prevNode.height+1)%confirmationWindow)
	prevNode = prevNode.SelectedAncestor(prevNode.blueScore -
		(prevNode.blueScore+1)%confirmationWindow)
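	// For example (illustrative numbers): with confirmationWindow = 2016 and
	// prevNode.blueScore = 4030, this selects the ancestor at blueScore
	// 4030 - (4030+1)%2016 = 4030 - 2015 = 2015, the last block of the
	// previous complete window.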

	// Iterate backwards through each of the previous confirmation windows
	// to find the most recently cached threshold state.
@@ -152,7 +152,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit

		// The start and expiration times are based on the median block
		// time, so calculate it now.
		medianTime := prevNode.PastMedianTime()
		medianTime := prevNode.PastMedianTime(dag)

		// The state is simply defined if the start time hasn't
		// been reached yet.
@@ -192,7 +192,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
		case ThresholdDefined:
			// The deployment of the rule change fails if it expires
			// before it is accepted and locked in.
			medianTime := prevNode.PastMedianTime()
			medianTime := prevNode.PastMedianTime(dag)
			medianTimeUnix := uint64(medianTime.Unix())
			if medianTimeUnix >= checker.EndTime() {
				state = ThresholdFailed
@@ -209,7 +209,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
		case ThresholdStarted:
			// The deployment of the rule change fails if it expires
			// before it is accepted and locked in.
			medianTime := prevNode.PastMedianTime()
			medianTime := prevNode.PastMedianTime(dag)
			if uint64(medianTime.Unix()) >= checker.EndTime() {
				state = ThresholdFailed
				break
@@ -218,19 +218,18 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
			// At this point, the rule change is still being voted
			// on by the miners, so iterate backwards through the
			// confirmation window to count all of the votes in it.
			var count uint32
			countNode := prevNode
			for i := int32(0); i < confirmationWindow; i++ {
				condition, err := checker.Condition(countNode)
			var count uint64
			windowNodes := make([]*blockNode, 0, confirmationWindow)
			windowNodes = append(windowNodes, prevNode)
			windowNodes = append(windowNodes, blueBlockWindow(prevNode, confirmationWindow-1)...)
			for _, current := range windowNodes {
				condition, err := checker.Condition(current)
				if err != nil {
					return ThresholdFailed, err
				}
				if condition {
					count++
				}

				// Get the previous block node.
				countNode = countNode.selectedParent
			}

			// The state is locked in if the number of blocks in the
@@ -260,13 +259,13 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
}

// ThresholdState returns the current rule change threshold state of the given
// deployment ID for the block AFTER the end of the current best chain.
// deployment ID for the block AFTER the blueScore of the current DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) ThresholdState(deploymentID uint32) (ThresholdState, error) {
	dag.dagLock.Lock()
	defer dag.dagLock.Unlock()
	state, err := dag.deploymentState(dag.selectedTip(), deploymentID)
	dag.dagLock.Unlock()

	return state, err
}
@@ -277,8 +276,8 @@ func (dag *BlockDAG) ThresholdState(deploymentID uint32) (ThresholdState, error)
// This function is safe for concurrent access.
func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
	dag.dagLock.Lock()
	defer dag.dagLock.Unlock()
	state, err := dag.deploymentState(dag.selectedTip(), deploymentID)
	dag.dagLock.Unlock()
	if err != nil {
		return false, err
	}
@@ -292,33 +291,33 @@ func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
//
// It is important to note that, as the variable name indicates, this function
// expects the block node prior to the block for which the deployment state is
// desired. In other words, the returned deployment state is for the block
// AFTER the passed node.
//
// This function MUST be called with the chain state lock held (for writes).
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) {
	if deploymentID > uint32(len(dag.dagParams.Deployments)) {
		return ThresholdFailed, DeploymentError(deploymentID)
	}

	deployment := &dag.dagParams.Deployments[deploymentID]
	checker := deploymentChecker{deployment: deployment, chain: dag}
	checker := deploymentChecker{deployment: deployment, dag: dag}
	cache := &dag.deploymentCaches[deploymentID]

	return dag.thresholdState(prevNode, checker, cache)
}

// initThresholdCaches initializes the threshold state caches for each warning
// bit and defined deployment and provides warnings if the chain is current per
// bit and defined deployment and provides warnings if the DAG is current per
// the warnUnknownVersions and warnUnknownRuleActivations functions.
func (dag *BlockDAG) initThresholdCaches() error {
	// Initialize the warning and deployment caches by calculating the
	// threshold state for each of them. This will ensure the caches are
	// populated and any states that needed to be recalculated due to
	// definition changes are done now.
	prevNode := dag.selectedTip().selectedParent
	for bit := uint32(0); bit < vbNumBits; bit++ {
		checker := bitConditionChecker{bit: bit, chain: dag}
		checker := bitConditionChecker{bit: bit, dag: dag}
		cache := &dag.warningCaches[bit]
		_, err := dag.thresholdState(prevNode, checker, cache)
		if err != nil {
@@ -328,14 +327,14 @@ func (dag *BlockDAG) initThresholdCaches() error {
	for id := 0; id < len(dag.dagParams.Deployments); id++ {
		deployment := &dag.dagParams.Deployments[id]
		cache := &dag.deploymentCaches[id]
		checker := deploymentChecker{deployment: deployment, chain: dag}
		checker := deploymentChecker{deployment: deployment, dag: dag}
		_, err := dag.thresholdState(prevNode, checker, cache)
		if err != nil {
			return err
		}
	}

	// No warnings about unknown rules or versions until the chain is
	// No warnings about unknown rules or versions until the DAG is
	// current.
	if dag.isCurrent() {
		// Warn if a high enough percentage of the last blocks have
@@ -7,7 +7,7 @@ package blockdag
import (
	"testing"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/kaspanet/kaspad/util/daghash"
)

// TestThresholdStateStringer tests the stringized output for the

@@ -8,20 +8,20 @@ package blockdag
// be sorted.
type timeSorter []int64

// Len returns the number of timestamps in the slice. It is part of the
// sort.Interface implementation.
func (s timeSorter) Len() int {
	return len(s)
}

// Swap swaps the timestamps at the passed indices. It is part of the
// sort.Interface implementation.
func (s timeSorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less returns whether the timestamp with index i should sort before the
// timestamp with index j. It is part of the sort.Interface implementation.
func (s timeSorter) Less(i, j int) bool {
	return s[i] < s[j]
}
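Because timeSorter implements sort.Interface, sorting a slice of timestamps is a one-liner. A minimal self-contained usage sketch (the type is redeclared here so the snippet compiles on its own):

package main

import (
	"fmt"
	"sort"
)

type timeSorter []int64

func (s timeSorter) Len() int           { return len(s) }
func (s timeSorter) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s timeSorter) Less(i, j int) bool { return s[i] < s[j] }

func main() {
	ts := timeSorter{30, 10, 20}
	sort.Sort(ts)
	fmt.Println(ts) // Output: [10 20 30]
}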
blockdag/utxo_ecmh.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package blockdag

import (
	"bytes"
	"github.com/golang/groupcache/lru"
	"github.com/kaspanet/kaspad/ecc"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

const ecmhCacheSize = 4_000_000

var (
	utxoToECMHCache = lru.New(ecmhCacheSize)
)

func utxoMultiset(entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
	w := &bytes.Buffer{}
	err := serializeUTXO(w, entry, outpoint)
	if err != nil {
		return nil, err
	}
	serializedUTXO := w.Bytes()
	utxoHash := daghash.DoubleHashH(serializedUTXO)

	if cachedMSPoint, ok := utxoToECMHCache.Get(utxoHash); ok {
		return cachedMSPoint.(*ecc.Multiset), nil
	}
	msPoint := ecc.NewMultiset(ecc.S256()).Add(serializedUTXO)
	utxoToECMHCache.Add(utxoHash, msPoint)
	return msPoint, nil
}
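The cache keyed by the UTXO's double-hash turns the relatively expensive multiset-point computation into a lookup on repeat calls. The same memoize-through-LRU pattern, reduced to a self-contained sketch against the github.com/golang/groupcache/lru API (the expensive function here is a stand-in):

package main

import (
	"fmt"
	"strings"

	"github.com/golang/groupcache/lru"
)

var cache = lru.New(2) // keep at most 2 entries; least-recently-used ones are evicted

// expensive stands in for a costly computation worth caching.
func expensive(key string) string { return strings.ToUpper(key) }

func memoized(key string) string {
	if v, ok := cache.Get(key); ok {
		return v.(string) // cache hit: skip the computation
	}
	v := expensive(key)
	cache.Add(key, v)
	return v
}

func main() {
	fmt.Println(memoized("utxo"), memoized("utxo")) // second call is served from the cache
}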
Some files were not shown because too many files have changed in this diff.