mirror of https://github.com/kaspanet/kaspad.git, synced 2026-02-22 11:39:15 +00:00
Compare commits
401 Commits
v1.2.3-tes...v0.6.0-dev
| SHA1 |
|---|
| e1318aa326 |
| 2bd4a71913 |
| 5b206f4c9d |
| 3f969a2921 |
| 90be14fd57 |
| 1a5d9fc65c |
| ec03a094e5 |
| 9d60bb1ee7 |
| cd10de2dce |
| 658fb08c02 |
| 3b40488877 |
| d3d0ad0cf3 |
| 473cc37a75 |
| 966cba4a4e |
| da90755530 |
| fa58623815 |
| 26af4da507 |
| b527470153 |
| e70561141d |
| 20b547984e |
| 16a658a5be |
| 42e50e6dc2 |
| 3d942ce355 |
| 94f617b06a |
| 211c4d05e8 |
| a9f3bdf4ab |
| 2303aecab4 |
| 7655841e9f |
| c4bbcf9de6 |
| 0cec1ce23e |
| 089fe828aa |
| 24a09fb3df |
| b2901454d6 |
| 6cf589dc9b |
| 683ceda3a7 |
| 6a18b56587 |
| 2c9e5be816 |
| 5d5a0ef335 |
| 428f16ffef |
| f93e54b63c |
| c30b350e8e |
| 8fdb5aa024 |
| 83a3c30d01 |
| 63646c8c92 |
| 097e7ab42a |
| 3d45c8de50 |
| 8e1958c20b |
| 3e6c1792ef |
| 6b5b4bfb2a |
| b797436884 |
| 2de3c1d0d4 |
| 7e81757e2f |
| 4773f87875 |
| aa5bc34280 |
| b9a25c1141 |
| b42b8b16fd |
| e0aac68759 |
| 9939671ccc |
| eaa8515442 |
| 04b578cee1 |
| f8e53d309c |
| 6076309b3e |
| 05db135d23 |
| 433cdb6006 |
| 4a4dca1926 |
| 6d591dde74 |
| 8e624e057e |
| eb2642ba90 |
| 1a43cabfb9 |
| 580e37943b |
| 749775c7ea |
| 8ff8c30fb4 |
| 9893b7396c |
| 8c90344f28 |
| e4955729d2 |
| 8a7b0314e5 |
| e87d00c9cf |
| 336347b3c5 |
| 15d0899406 |
| ad096f9781 |
| d3c6a3dffc |
| 57b1653383 |
| a86255ba51 |
| 0a7a4ce7d6 |
| 4c3735a897 |
| 22fd38c053 |
| 895f67a8d4 |
| 56e807b663 |
| af64c7dc2d |
| 1e6458973b |
| 7bf8bb5436 |
| 1358911d95 |
| 1271d2f113 |
| bc0227b49b |
| dc643c2d76 |
| 0744e8ebc0 |
| d4c9fdf6ac |
| 829979b6c7 |
| 32cd29bf70 |
| 03cb6cbd4d |
| ba4a89488e |
| b0d4a92e47 |
| 3e5a840c5a |
| d6d34238d2 |
| 8bbced5925 |
| 20da1b9c9a |
| b6a6e577c4 |
| 84888221ae |
| 222477b33e |
| 4a50d94633 |
| b4dba782fb |
| 9c78a797e4 |
| 35c733a4c1 |
| e5810d023e |
| 96930bd6ea |
| e09ce32146 |
| d15c009b3c |
| 95c8b8e9d8 |
| 2d798a5611 |
| 3a22249be9 |
| a4c1898624 |
| 672f02490a |
| fc00275d9c |
| 6219b93430 |
| 3a4571d671 |
| 96052ac69a |
| 6463a4b5d0 |
| 0ca127853d |
| b884ba128e |
| fe25ea3d8c |
| e0f587f599 |
| e9e1ef4772 |
| eb8b841850 |
| 28681affda |
| 378f0b659a |
| 35b943e04f |
| 65f75c17fc |
| 806eab817c |
| 585510d76c |
| c8a381d5bb |
| 3d04e6bded |
| f8e851a6ed |
| e70a615135 |
| 73ad0adf72 |
| 5b74e51db1 |
| 2e2492cc5d |
| 2ef5c2cbac |
| 3c89e1f7b3 |
| 2910724b49 |
| 3af945692e |
| 5fe9dae557 |
| 42c53ec3e2 |
| 291df8bfef |
| d015286f65 |
| fe91b4c878 |
| 7609c50641 |
| df934990d7 |
| 3c4a80f16d |
| a31139d4a5 |
| 6da3606721 |
| bfbc72724d |
| 956b6f7d95 |
| c1a039de3f |
| f8b18e09d6 |
| b20a7a679b |
| 36d866375e |
| 024edc30a3 |
| 6aa5e0b5a8 |
| 1a38550fdd |
| 3e7ebb5a84 |
| 4bca7342d3 |
| f80908fb4e |
| e000e10738 |
| d83862f36c |
| 1020402b34 |
| bc6ce6ed53 |
| d3b1953deb |
| 3c67215e76 |
| 586624c836 |
| 49855e6333 |
| 624249c0f3 |
| 1cf443a63b |
| 8909679f44 |
| e58efbf0ea |
| 34fb066590 |
| 299826f392 |
| 3d8dd8724d |
| b8a00f7519 |
| 4dfc8cf5b0 |
| 5a99e4d2f3 |
| 606cd668ff |
| dd537f5143 |
| a1c631be62 |
| 707a728656 |
| 80b5631a48 |
| 2373965551 |
| 65cbb6655b |
| cdd96d0670 |
| ad04bbde83 |
| 5374d95416 |
| de9aa39cc5 |
| 98987f4a8f |
| 9745f31b69 |
| ee08531a52 |
| 61baf7b260 |
| 650e4f735e |
| 550b12b041 |
| a4bb070722 |
| 30fe0c279b |
| e405dd5981 |
| 243b4b8021 |
| dd4c93e1ef |
| a07335d74d |
| 7567cd4cb9 |
| 51ff9e2562 |
| 5b8ab63890 |
| 3dd7dc4496 |
| d90a08ecfa |
| 45dc1a3e7b |
| 4ffb5daa37 |
| b9138b720d |
| d8954f1339 |
| eb953286ec |
| 41c8178ad3 |
| aa74b51e6f |
| f7800eb5c4 |
| 193add502f |
| 44c55900f8 |
| 4c0ea78026 |
| 03a93fe51e |
| eca0514465 |
| aadbebb720 |
| 5daab45947 |
| 607b838ded |
| 25bdaeed31 |
| 8b2d3f07ce |
| a3dc2f7da7 |
| bf36f9ceb6 |
| 11de12304e |
| a10320ad7b |
| fd2bbf3557 |
| 7f9cf17274 |
| ba0e239557 |
| ed606bfda3 |
| c0463a8a68 |
| 52e0a0967d |
| 29bcc271b5 |
| 94ec159147 |
| 9d434de4a5 |
| 49418f4222 |
| 38b4749f20 |
| 045984e6b9 |
| 38883d1a98 |
| b5f365d282 |
| a7d3a40465 |
| 359b16fca9 |
| 8b8e73feb5 |
| 6044b6ac1a |
| a177ea4f15 |
| 3a15aa4bae |
| 427185b6a8 |
| b282734a3f |
| 6d765f58ba |
| 20819ca4cd |
| 2174a0a7f2 |
| ea6f7a28c2 |
| ac9aa74a75 |
| d46857677f |
| cd719b1d5b |
| 7cf15ac93b |
| d8e3191469 |
| 784d3de4ca |
| 733d06af5a |
| df91643976 |
| ebf635e6ff |
| e41d9866c3 |
| d984151549 |
| 6099ce56bd |
| e0b5c145f7 |
| cf37f733ef |
| 66a92a243c |
| 4a88eea57e |
| fbaf360a42 |
| 1346810af8 |
| 9cbab94264 |
| 48f29cc11f |
| e2b57e6231 |
| f72afc8bbb |
| 0d1f447cb7 |
| 818f8c93eb |
| 264ffaae93 |
| 03b7af9a13 |
| e3d7e83d44 |
| 07651e51c8 |
| 1cd2eb9308 |
| a140327dd2 |
| c1f7ae72e0 |
| 3a12fe9b1d |
| c25c9b25bd |
| f46dec449d |
| 60ab6330ff |
| 89dee3e005 |
| 70d7009985 |
| 3322a892e9 |
| 61d066e958 |
| 7b9ffc6c25 |
| 7a163d4dd7 |
| 189a3380a2 |
| 8680231e5a |
| 30f0e95969 |
| c94becf144 |
| 369ec449a8 |
| f4c6859e51 |
| 683dd52fcf |
| 11e936d109 |
| 9adb105e37 |
| 7b6ed9a778 |
| 3218fc5a04 |
| 3f94f8ca4c |
| 0842778c2c |
| 1332e1aa68 |
| e872ebc7b3 |
| e68b242243 |
| 9cc2a7260b |
| bcd73012de |
| 1fea2a9421 |
| bb7d68deda |
| 3ab861227d |
| 8f0d98ef9b |
| dbd8bf3d2c |
| 1b6b02e0d2 |
| 2402bae1ff |
| 3dcf8d88b8 |
| dbf9c09a2e |
| 5e9fc2defc |
| bdc3cbceaa |
| a71528fefb |
| 6725742d2c |
| 9a510e2e23 |
| 08a4b0dbf6 |
| 0c9e55a358 |
| 532e57b61c |
| b1f59914d2 |
| 9a54b286c9 |
| 6e4b18a498 |
| b5f8a0452e |
| fab043ef14 |
| 8e0e62f21a |
| 9a1c2e2641 |
| 8cbc6670cc |
| 28ee6a8026 |
| af39e96e3e |
| db6e9c773f |
| 47214121a7 |
| 7b07609fd8 |
| acb4b3f260 |
| e0221aa8ab |
| cba346d753 |
| 0f34cfb1a2 |
| ea846a3284 |
| 63bfac9740 |
| 7284815c21 |
| 80307d108b |
| 722437afe9 |
| 684cf4b5fa |
| c95a7b13a6 |
| 1ce7f21026 |
| 7d7df10493 |
| 8179862e0b |
| 6828f623b4 |
| 2c88a5b2fe |
| a7f08598f3 |
| 83bad65d3a |
| 1f35378a4d |
| 39eab7a6d5 |
| 9dd025d4da |
| bb75ea5020 |
| 8dbd4a2bed |
| 24305cda68 |
| 770dfd147d |
| a9ff9b0e70 |
| 3cc6f2d648 |
| a8f0d7b05b |
| 13f06ca293 |
| c88fa1492e |
| 40657a83f5 |
| 44dd58b461 |
| 47891b17ab |
| f7fbfbf5c4 |
| 0e278ca22b |
| c66fb294c8 |
| 88b7e7ca03 |
| a9b659a36f |
| 90fc6ba3e7 |
| 8ea97aa3fd |
| 7c9f5a65d8 |
| e2d3c4c821 |
| 92578e2853 |
| 3018c18616 |
| 3ac9fa83c1 |
| c5b0398dac |
2 .gitignore vendored
@@ -2,7 +2,7 @@
 *~

 # Databases
-btcd.db
+kaspad.db
 *-shm
 *-wal
955 CHANGES
@@ -1,955 +0,0 @@
============================================================================
User visible changes for btcd
A full-node bitcoin implementation written in Go
============================================================================

Changes in 0.12.0 (Fri Nov 20 2015)
- Protocol and network related changes:
  - Add a new checkpoint at block height 382320 (#555)
  - Implement BIP0065 which includes support for version 4 blocks, a new
    consensus opcode (OP_CHECKLOCKTIMEVERIFY) that enforces transaction
    lock times, and a double-threshold switchover mechanism (#535, #459,
    #455)
  - Implement BIP0111 which provides a new bloom filter service flag and
    hence provides support for protocol version 70011 (#499)
  - Add a new parameter --nopeerbloomfilters to allow disabling bloom
    filter support (#499)
  - Reject non-canonically encoded variable length integers (#507)
  - Add mainnet peer discovery DNS seed (seed.bitcoin.jonasschnelli.ch)
    (#496)
  - Correct reconnect handling for persistent peers (#463, #464)
  - Ignore requests for block headers if not fully synced (#444)
  - Add CLI support for specifying the zone id on IPv6 addresses (#538)
  - Fix a couple of issues where the initial block sync could stall (#518,
    #229, #486)
  - Fix an issue which prevented the --onion option from working as
    intended (#446)
- Transaction relay (memory pool) changes:
  - Require transactions to only include signatures encoded with the
    canonical 'low-s' encoding (#512; see the sketch after this section)
  - Add a new parameter --minrelaytxfee to allow the minimum transaction
    fee in BTC/kB to be overridden (#520)
  - Retain memory pool transactions when they redeem another one that is
    removed when a block is accepted (#539)
  - Do not send reject messages for a transaction if it is valid but
    causes an orphan transaction which depends on it to be determined
    as invalid (#546)
  - Refrain from attempting to add orphans to the memory pool multiple
    times when the transaction they redeem is added (#551)
  - Modify minimum transaction fee calculations to scale based on bytes
    instead of full kilobyte boundaries (#521, #537)
- Implement signature cache:
  - Provides a limited memory cache of validated signatures which is a
    huge optimization when verifying blocks for transactions that are
    already in the memory pool (#506)
  - Add a new parameter '--sigcachemaxsize' which allows the size of the
    new cache to be manually changed if desired (#506)
- Mining support changes:
  - Notify getblocktemplate long polling clients when a block is pushed
    via submitblock (#488)
  - Speed up getblocktemplate by making use of the new signature cache
    (#506)
- RPC changes:
  - Implement getmempoolinfo command (#453)
  - Implement getblockheader command (#461)
  - Modify createrawtransaction command to accept a new optional parameter
    'locktime' (#529)
  - Modify listunspent result to include the 'spendable' field (#440)
  - Modify getinfo command to include 'errors' field (#511)
  - Add timestamps to blockconnected and blockdisconnected notifications
    (#450)
  - Several modifications to the searchrawtransactions command:
    - Accept a new optional parameter 'vinextra' which causes the results
      to include information about the outputs referenced by a transaction's
      inputs (#485, #487)
    - Skip entries in the mempool too (#495)
    - Accept a new optional parameter 'reverse' to return the results in
      reverse order (most recent to oldest) (#497)
    - Accept a new optional parameter 'filteraddrs' which causes the
      results to only include inputs and outputs which involve the
      provided addresses (#516)
  - Change the notification order to notify clients about mined
    transactions (recvtx, redeemingtx) before the blockconnected
    notification (#449)
  - Update verifymessage RPC to use the standard algorithm so it is
    compatible with other implementations (#515)
  - Improve ping statistics by pinging on an interval (#517)
- Websocket changes:
  - Implement session command which returns a per-session unique id (#500,
    #503)
- btcctl utility changes:
  - Add getmempoolinfo command (#453)
  - Add getblockheader command (#461)
  - Add getwalletinfo command (#471)
- Notable developer-related package changes:
  - Introduce a new peer package which acts as a common base for creating
    and concurrently managing bitcoin network peers (#445)
  - Various cleanup of the new peer package (#528, #531, #524, #534,
    #549)
  - Block heights now consistently use int32 everywhere (#481)
  - The BlockHeader type in the wire package now provides the BtcDecode
    and BtcEncode methods (#467)
  - Update wire package to recognize BIP0064 (getutxo) service bit (#489)
  - Export LockTimeThreshold constant from txscript package (#454)
  - Export MaxDataCarrierSize constant from txscript package (#466)
  - Provide new IsUnspendable function from the txscript package (#478)
  - Export variable length string functions from the wire package (#514)
  - Export DNS Seeds for each network from the chaincfg package (#544)
  - Preliminary work towards separating the memory pool into a separate
    package (#525, #548)
- Misc changes:
  - Various documentation updates (#442, #462, #465, #460, #470, #473,
    #505, #530, #545)
  - Add installation instructions for gentoo (#542)
  - Ensure an error is shown if OS limits can't be set at startup (#498)
  - Tighten the standardness checks for multisig scripts (#526)
  - Test coverage improvement (#468, #494, #527, #543, #550)
  - Several optimizations (#457, #474, #475, #476, #508, #509)
  - Minor code cleanup and refactoring (#472, #479, #482, #519, #540)
- Contributors (alphabetical order):
  - Ben Echols
  - Bruno Clermont
  - danda
  - Daniel Krawisz
  - Dario Nieuwenhuis
  - Dave Collins
  - David Hill
  - Javed Khan
  - Jonathan Gillham
  - Joseph Becher
  - Josh Rickmar
  - Justus Ranvier
  - Mawuli Adzoe
  - Olaoluwa Osuntokun
  - Rune T. Aune

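The 'low-s' requirement above (BIP0062 rule 5, listed again under 0.11.0 below) reduces to a single big-integer comparison. Below is a minimal sketch of that check, assuming the DER signature has already been parsed into its S component; curveN is the well-known secp256k1 group order, which btcd obtains from its btcec package rather than hard-coding it.

```go
package main

import (
	"fmt"
	"math/big"
)

// curveN is the secp256k1 group order (a well-known constant).
var curveN, _ = new(big.Int).SetString(
	"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

// isLowS reports whether s <= N/2, i.e. whether the signature uses the
// canonical 'low-s' form required of relayed transactions.
func isLowS(s *big.Int) bool {
	halfOrder := new(big.Int).Rsh(curveN, 1) // N / 2
	return s.Cmp(halfOrder) <= 0
}

func main() {
	fmt.Println(isLowS(big.NewInt(1)))                            // true
	fmt.Println(isLowS(new(big.Int).Sub(curveN, big.NewInt(1)))) // false: high S
}
```

Because every valid signature (r, s) has a mirror (r, N - s) that also verifies, requiring the smaller of the two removes one source of transaction malleability, which is why the rule appears under the relay policy changes.
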
Changes in 0.11.1 (Wed May 27 2015)
- Protocol and network related changes:
  - Use correct sub-command in reject message for rejected transactions
    (#436, #437)
  - Add a new parameter --torisolation which forces new circuits for each
    connection when using tor (#430)
- Transaction relay (memory pool) changes:
  - Reduce the default maximum number of allowed orphan transactions
    to 1000 (#419)
  - Add a new parameter --maxorphantx which allows the maximum number of
    orphan transactions stored in the mempool to be specified (#419)
- RPC changes:
  - Modify listtransactions result to include the 'involveswatchonly' and
    'vout' fields (#427)
  - Update getrawtransaction result to omit the 'confirmations' field
    when it is 0 (#420, #422)
  - Update signrawtransaction result to include errors (#423)
- btcctl utility changes:
  - Add gettxoutproof command (#428)
  - Add verifytxoutproof command (#428)
- Notable developer-related package changes:
  - The btcec package now provides the ability to perform ECDH
    encryption and decryption (#375)
  - The block and header validation in the blockchain package has been
    split to help pave the way toward concurrent downloads (#386)
- Misc changes:
  - Minor peer optimization (#433)
- Contributors (alphabetical order):
  - Dave Collins
  - David Hill
  - Federico Bond
  - Ishbir Singh
  - Josh Rickmar

Changes in 0.11.0 (Wed May 06 2015)
- Protocol and network related changes:
  - **IMPORTANT: Update is required due to the following point**
    - Correct a few corner cases in script handling which could result in
      forking from the network on non-standard transactions (#425)
  - Add a new checkpoint at block height 352940 (#418)
  - Optimized script execution (#395, #400, #404, #409)
  - Fix a case that could lead to stalled syncs (#138, #296)
- Network address manager changes:
  - Implement eclipse attack countermeasures as proposed in
    http://cs-people.bu.edu/heilman/eclipse (#370, #373)
- Optional address indexing changes:
  - Fix an issue where a reorg could cause an orderly shutdown when the
    address index is active (#340, #357)
- Transaction relay (memory pool) changes:
  - Increase maximum allowed space for nulldata transactions to 80 bytes
    (#331)
  - Implement support for the following rules specified by BIP0062:
    - The S value in ECDSA signatures must be at most half the curve order
      (rule 5) (#349)
    - Script execution must result in a single non-zero value on the stack
      (rule 6) (#347)
    - NOTE: All 7 rules of BIP0062 are now implemented
  - Use network adjusted time in finalized transaction checks to improve
    consistency across nodes (#332) (see the median-offset sketch after
    this section)
  - Process orphan transactions on acceptance of new transactions (#345)
- RPC changes:
  - Add support for a limited RPC user which is not allowed admin level
    operations on the server (#363)
  - Implement node command for more unified control over connected peers
    (#79, #341)
  - Implement generate command for regtest/simnet to support
    deterministically mining a specified number of blocks (#362, #407)
  - Update searchrawtransactions to return the matching transactions in
    order (#354)
  - Correct an issue with searchrawtransactions where it could return
    duplicates (#346, #354)
  - Increase precision of 'difficulty' field in getblock result to 8
    (#414, #415)
  - Omit 'nextblockhash' field from getblock result when it is empty
    (#416, #417)
  - Add 'id' and 'timeoffset' fields to getpeerinfo result (#335)
- Websocket changes:
  - Implement new commands stopnotifyspent, stopnotifyreceived,
    stopnotifyblocks, and stopnotifynewtransactions to allow clients to
    cancel notification registrations (#122, #342)
- btcctl utility changes:
  - A single dash can now be used as an argument to cause that argument to
    be read from stdin (#348)
  - Add generate command
- Notable developer-related package changes:
  - The new version 2 btcjson package has now replaced the deprecated
    version 1 package (#368)
  - The btcec package now performs all signing using RFC6979 deterministic
    signatures (#358, #360)
  - The txscript package has been significantly cleaned up and had a few
    API changes (#387, #388, #389, #390, #391, #392, #393, #395, #396,
    #400, #403, #404, #405, #406, #408, #409, #410, #412)
  - A new PkScriptLocs function has been added to the wire package MsgTx
    type which provides callers that deal with scripts optimization
    opportunities (#343)
- Misc changes:
  - Minor wire hashing optimizations (#366, #367)
  - Other minor internal optimizations
- Contributors (alphabetical order):
  - Alex Akselrod
  - Arne Brutschy
  - Chris Jepson
  - Daniel Krawisz
  - Dave Collins
  - David Hill
  - Jimmy Song
  - Jonas Nick
  - Josh Rickmar
  - Olaoluwa Osuntokun
  - Oleg Andreev

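The network adjusted time mentioned above, and the MedianTimeSource interface introduced in 0.10.0 below, rest on the same idea: sample each peer's clock offset and trust the median. Here is a minimal sketch assuming one offset sample per peer; it illustrates the idea and is not btcd's actual implementation.

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// medianTime collects one clock-offset sample per peer (peer time
// minus local time) and reports local time shifted by the median.
type medianTime struct {
	offsets []time.Duration
}

// AddSample records a peer's reported clock offset.
func (m *medianTime) AddSample(offset time.Duration) {
	m.offsets = append(m.offsets, offset)
}

// AdjustedTime returns local time shifted by the median peer offset;
// the median resists a minority of peers with wildly wrong clocks.
func (m *medianTime) AdjustedTime() time.Time {
	if len(m.offsets) == 0 {
		return time.Now()
	}
	sorted := append([]time.Duration(nil), m.offsets...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	return time.Now().Add(sorted[len(sorted)/2])
}

func main() {
	var m medianTime
	m.AddSample(2 * time.Second)
	m.AddSample(-1 * time.Second)
	m.AddSample(90 * time.Second) // one badly skewed peer is outvoted
	fmt.Println(m.AdjustedTime().Format(time.RFC3339))
}
```
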
Changes in 0.10.0 (Sun Mar 01 2015)
- Protocol and network related changes:
  - Add a new checkpoint at block height 343185
  - Implement BIP0066 which includes support for version 3 blocks, a new
    consensus rule which prevents non-DER encoded signatures, and a
    double-threshold switchover mechanism
  - Rather than announcing all known addresses on getaddr requests which
    can possibly result in multiple messages, randomize the results and
    limit them to the max allowed by a single message (1000 addresses)
  - Add more reserved IP spaces to the address manager
- Transaction relay (memory pool) changes:
  - Make transactions which contain reserved opcodes nonstandard
  - No longer accept or relay free and low-fee transactions that have
    insufficient priority to be mined in the next block
  - Implement support for the following rules specified by BIP0062:
    - ECDSA signature must use strict DER encoding (rule 1)
    - The signature script must only contain push operations (rule 2)
    - All push operations must use the smallest possible encoding (rule 3)
    - All stack values interpreted as a number must be encoded using the
      shortest possible form (rule 4)
    - NOTE: Rule 1 was already enforced, however the entire script now
      evaluates to false rather than only the signature verification as
      required by BIP0062
  - Allow transactions with nulldata transaction outputs to be treated as
    standard
- Mining support changes:
  - Modify the getblocktemplate RPC to generate and return block templates
    for version 3 blocks which are compatible with BIP0066
  - Allow getblocktemplate to serve blocks when the current time is
    less than the minimum allowed time for a generated block template
    (https://github.com/btcsuite/btcd/issues/209)
- Crypto changes:
  - Optimize scalar multiplication by the base point by using a
    pre-computed table which results in approximately a 35% speedup
    (https://github.com/btcsuite/btcec/issues/2)
  - Optimize general scalar multiplication by using the secp256k1
    endomorphism which results in approximately a 17-20% speedup
    (https://github.com/btcsuite/btcec/issues/1)
  - Optimize general scalar multiplication by using non-adjacent form
    which results in approximately an additional 8% speedup
    (https://github.com/btcsuite/btcec/issues/3)
- Implement optional address indexing:
  - Add a new parameter --addrindex which will enable the creation of an
    address index which can be queried to determine all transactions which
    involve a given address
    (https://github.com/btcsuite/btcd/issues/190)
  - Add a new logging subsystem for address index related operations
  - Support new searchrawtransactions RPC
    (https://github.com/btcsuite/btcd/issues/185)
- RPC changes:
  - Require TLS version 1.2 as the minimum version for all TLS connections
    (see the TLS configuration sketch after this section)
  - Provide support for disabling TLS when only listening on localhost
    (https://github.com/btcsuite/btcd/pull/192)
  - Modify help output for all commands to provide much more consistent
    and detailed information
  - Correct case in getrawtransaction which would refuse to serve certain
    transactions with invalid scripts
    (https://github.com/btcsuite/btcd/issues/210)
  - Correct error handling in the getrawtransaction RPC which could lead
    to a crash in rare cases
    (https://github.com/btcsuite/btcd/issues/196)
  - Update getinfo RPC to include the appropriate 'timeoffset' calculated
    from the median network time
  - Modify listreceivedbyaddress result type to include txids field so it
    is compatible
  - Add 'iswatchonly' field to validateaddress result
  - Add 'startingpriority' and 'currentpriority' fields to getrawmempool
    (https://github.com/btcsuite/btcd/issues/178)
  - Don't omit the 'confirmations' field from getrawtransaction when it is
    zero
- Websocket changes:
  - Modify the behavior of the rescan command to automatically register
    for notifications about transactions paying to rescanned addresses
    or spending outputs from the final rescan utxo set when the rescan
    is through the best block in the chain
- btcctl utility changes:
  - Make the list of commands available via the -l option rather than
    dumping the entire list on usage errors
  - Alphabetize and categorize the list of commands by chain and wallet
  - Make the help option only show the help options instead of also
    dumping all of the commands
  - Make the usage syntax much more consistent and correct a few cases of
    misnamed fields
    (https://github.com/btcsuite/btcd/issues/305)
  - Improve usage errors to show the specific parameter number, reason,
    and error code
  - Only show the usage for a specific command when a valid command
    is provided with invalid parameters
  - Add support for a SOCKS5 proxy
  - Modify output for integer fields (such as timestamps) to display
    normally instead of in scientific notation
  - Add invalidateblock command
  - Add reconsiderblock command
  - Add createnewaccount command
  - Add renameaccount command
  - Add searchrawtransactions command
  - Add importaddress command
  - Add importpubkey command
- showblock utility changes:
  - Remove utility in favor of the RPC getblock method
- Notable developer-related package changes:
  - Many of the core packages have been relocated into the btcd repository
    (https://github.com/btcsuite/btcd/issues/214)
  - A new version of the btcjson package that has been completely
    redesigned from the ground up based upon how the project has
    evolved and lessons learned while using it since it was first written
    is now available in the btcjson/v2/btcjson directory
    - This will ultimately replace the current version so anyone making
      use of this package will need to update their code accordingly
  - The btcec package now provides better facilities for working directly
    with its public and private keys without having to mix elements from
    the ecdsa package
  - Update the script builder to ensure all rules specified by BIP0062 are
    adhered to when creating scripts
  - The blockchain package now provides a MedianTimeSource interface and
    concrete implementation for providing time samples from remote peers
    and using that data to calculate an offset against the local time
- Misc changes:
  - Fix a slow memory leak due to tickers not being stopped
    (https://github.com/btcsuite/btcd/issues/189)
  - Fix an issue where a mix of orphans and SPV clients could trigger a
    condition where peers would no longer be served
    (https://github.com/btcsuite/btcd/issues/231)
  - The RPC username and password can now contain symbols which previously
    conflicted with special symbols used in URLs
  - Improve handling of obtaining random nonces to prevent cases where it
    could error when not enough entropy was available
  - Improve handling of home directory creation errors such as in the case
    of unmounted symlinks (https://github.com/btcsuite/btcd/issues/193)
  - Improve the error reporting for rejected transactions to include the
    inputs which are missing and/or being double spent
  - Update sample config file with new options and correct a comment
    regarding the fact the RPC server only listens on localhost by default
    (https://github.com/btcsuite/btcd/issues/218)
  - Update the continuous integration builds to run several tools which
    help keep code quality high
  - Significant amount of internal code cleanup and improvements
  - Other minor internal optimizations
- Code Contributors (alphabetical order):
  - Beldur
  - Ben Holden-Crowther
  - Dave Collins
  - David Evans
  - David Hill
  - Guilherme Salgado
  - Javed Khan
  - Jimmy Song
  - John C. Vernaleo
  - Jonathan Gillham
  - Josh Rickmar
  - Michael Ford
  - Michail Kargakis
  - kac
  - Olaoluwa Osuntokun

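The TLS 1.2 floor above is a one-line setting in Go's standard library. Here is a minimal sketch of an RPC-style HTTPS listener with that floor applied; the address, handler body, and certificate paths are placeholders for the sketch, not btcd's actual wiring.

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	server := &http.Server{
		Addr: "127.0.0.1:8334",
		TLSConfig: &tls.Config{
			// Reject clients that cannot speak TLS 1.2 or newer.
			MinVersion: tls.VersionTLS12,
		},
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("rpc placeholder\n")) // hypothetical handler body
		}),
	}
	// Certificate and key paths are placeholders.
	log.Fatal(server.ListenAndServeTLS("rpc.cert", "rpc.key"))
}
```
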
Changes in 0.9.0 (Sat Sep 20 2014)
- Protocol and network related changes:
  - Add a new checkpoint at block height 319400
  - Add support for BIP0037 bloom filters
    (https://github.com/conformal/btcd/issues/132)
  - Implement BIP0061 reject handling and hence support for protocol
    version 70002 (https://github.com/conformal/btcd/issues/133)
  - Add testnet DNS seeds for peer discovery (testnet-seed.alexykot.me
    and testnet-seed.bitcoin.schildbach.de)
  - Add mainnet DNS seed for peer discovery (seeds.bitcoin.open-nodes.org)
  - Make multisig transactions with non-null dummy data nonstandard
    (https://github.com/conformal/btcd/issues/131)
  - Make transactions with an excessive number of signature operations
    nonstandard
  - Perform initial DNS lookups concurrently which allows connections
    to be made more quickly
  - Improve the address manager to significantly reduce memory usage and
    add tests
  - Remove orphan transactions when they appear in a mined block
    (https://github.com/conformal/btcd/issues/166)
  - Apply incremental back off on connection retries for persistent peers
    that give invalid replies to mirror the logic used for failed
    connections (https://github.com/conformal/btcd/issues/103) (see the
    back off sketch after this section)
  - Correct rate-limiting of free and low-fee transactions
- Mining support changes:
  - Implement getblocktemplate RPC with the following support:
    (https://github.com/conformal/btcd/issues/124)
    - BIP0022 Non-Optional Sections
    - BIP0022 Long Polling
    - BIP0023 Basic Pool Extensions
    - BIP0023 Mutation coinbase/append
    - BIP0023 Mutations time, time/increment, and time/decrement
    - BIP0023 Mutation transactions/add
    - BIP0023 Mutations prevblock, coinbase, and generation
    - BIP0023 Block Proposals
  - Implement built-in concurrent CPU miner
    (https://github.com/conformal/btcd/issues/137)
    NOTE: CPU mining on mainnet is pointless. This has been provided
    for testing purposes such as for the new simulation test network
    - Add --generate flag to enable CPU mining
  - Deprecate the --getworkkey flag in favor of --miningaddr which
    specifies which addresses generated blocks will choose from to pay
    the subsidy to
- RPC changes:
  - Implement gettxout command
    (https://github.com/conformal/btcd/issues/141)
  - Implement validateaddress command
  - Implement verifymessage command
  - Mark getunconfirmedbalance RPC as wallet-only
  - Mark getwalletinfo RPC as wallet-only
  - Update getgenerate, setgenerate, gethashespersec, and getmininginfo
    to return the appropriate information about new CPU mining status
  - Modify getpeerinfo pingtime and pingwait field types to float64 so
    they are compatible
  - Improve disconnect handling for normal HTTP clients
  - Make error code returns for invalid hex more consistent
- Websocket changes:
  - Switch to a new more efficient websocket package
    (https://github.com/conformal/btcd/issues/134)
  - Add rescanfinished notification
  - Modify the rescanprogress notification to include block hash as well
    as height (https://github.com/conformal/btcd/issues/151)
- btcctl utility changes:
  - Accept --simnet flag which automatically selects the appropriate port
    and TLS certificates needed to communicate with btcd and btcwallet on
    the simulation test network
  - Fix createrawtransaction command to send amounts denominated in BTC
  - Add estimatefee command
  - Add estimatepriority command
  - Add getmininginfo command
  - Add getnetworkinfo command
  - Add gettxout command
  - Add lockunspent command
  - Add signrawtransaction command
- addblock utility changes:
  - Accept --simnet flag which automatically selects the appropriate port
    and TLS certificates needed to communicate with btcd and btcwallet on
    the simulation test network
- Notable developer-related package changes:
  - Provide a new bloom package in btcutil which allows creating and
    working with BIP0037 bloom filters
  - Provide a new hdkeychain package in btcutil which allows working with
    BIP0032 hierarchical deterministic key chains
  - Introduce a new btcnet package which houses network parameters
  - Provide new simnet network (--simnet) which is useful for private
    simulation testing
  - Enforce low S values in serialized signatures as detailed in BIP0062
  - Return errors from all methods on the btcdb.Db interface
    (https://github.com/conformal/btcdb/issues/5)
  - Allow behavior flags to alter btcchain.ProcessBlock
    (https://github.com/conformal/btcchain/issues/5)
  - Provide a new SerializeSize API for blocks
    (https://github.com/conformal/btcwire/issues/19)
  - Several of the core packages now work with Google App Engine
- Misc changes:
  - Correct an issue where the database could corrupt under certain
    circumstances which would require a new chain download
  - Slightly optimize deserialization
  - Use the correct IP block for he.net
  - Fix an issue where it was possible the block manager could hang on
    shutdown
  - Update sample config file so the comments are on a separate line
    rather than the end of a line so they are not interpreted as settings
    (https://github.com/conformal/btcd/issues/135)
  - Correct an issue where getdata requests were not being properly
    throttled which could lead to larger than necessary memory usage
  - Always show help when given the help flag even when the config file
    contains invalid entries
  - General code cleanup and minor optimizations

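The incremental back off described above can be sketched in a few lines. This illustrates the policy only; the base delay and cap below are invented for the example and are not btcd's values.

```go
package main

import (
	"fmt"
	"time"
)

const (
	baseRetryDelay = 5 * time.Second // assumed base delay, example only
	maxRetryDelay  = 5 * time.Minute // assumed cap, example only
)

// retryDelay returns how long to wait before reconnect attempt n
// (n = 1 is the first retry). The delay grows with each consecutive
// failure and is capped so long outages never push it unboundedly high.
func retryDelay(n int) time.Duration {
	d := time.Duration(n) * baseRetryDelay
	if d > maxRetryDelay {
		return maxRetryDelay
	}
	return d
}

func main() {
	for n := 1; n <= 5; n++ {
		fmt.Printf("retry %d after %v\n", n, retryDelay(n))
	}
}
```
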
Changes in 0.8.0-beta (Sun May 25 2014)
- Btcd is now Beta (https://github.com/conformal/btcd/issues/130)
- Add a new checkpoint at block height 300255
- Protocol and network related changes:
  - Lower the minimum transaction relay fee to 1000 satoshi to match
    recent reference client changes
    (https://github.com/conformal/btcd/issues/100)
  - Raise the maximum signature script size to support standard 15-of-15
    multi-signature pay-to-script-hash transactions with compressed pubkeys
    to remain compatible with the reference client
    (https://github.com/conformal/btcd/issues/128)
  - Reduce max bytes allowed for a standard nulldata transaction to 40 for
    compatibility with the reference client
  - Introduce a new btcnet package which houses all of the network params
    for each network (mainnet, testnet, regtest) to ultimately enable
    easier addition and tweaking of networks without needing to change
    several packages
  - Fix several script discrepancies found by reference client test data
  - Add new DNS seed for peer discovery (seed.bitnodes.io)
  - Reduce the max known inventory cache from 20000 items to 1000 items
  - Fix an issue where unknown inventory types could lead to a hung peer
  - Implement inventory rebroadcast handler for sendrawtransaction
    (https://github.com/conformal/btcd/issues/99)
  - Update user agent to fully support BIP0014
    (https://github.com/conformal/btcwire/issues/10) (see the user agent
    sketch after this section)
  - Implement initial mining support:
    - Add a new logging subsystem for mining related operations
    - Implement infrastructure for creating block templates
    - Provide options to control block template creation settings
    - Support the getwork RPC
  - Allow address identifiers to apply to more than one network since both
    testnet and the regression test network unfortunately use the same
    identifier
- RPC changes:
  - Set the content type for HTTP POST RPC connections to application/json
    (https://github.com/conformal/btcd/issues/121)
  - Modified the RPC server startup so it only requires at least one valid
    listen interface
  - Correct an error path where it was possible certain errors would not
    be returned
  - Implement getwork command
    (https://github.com/conformal/btcd/issues/125)
  - Update sendrawtransaction command to reject orphans
  - Update sendrawtransaction command to include the reason a transaction
    was rejected
  - Update getinfo command to populate connection count field
  - Update getinfo command to include relay fee field
    (https://github.com/conformal/btcd/issues/107)
  - Allow transactions submitted with sendrawtransaction to bypass the
    rate limiter
  - Allow the getcurrentnet and getbestblock extensions to be accessed via
    HTTP POST in addition to Websockets
    (https://github.com/conformal/btcd/issues/127)
- Websocket changes:
  - Rework notifications to ensure they are delivered in the order they
    occur
  - Rename notifynewtxs command to notifyreceived (funds received)
  - Rename notifyallnewtxs command to notifynewtransactions
  - Rename alltx notification to txaccepted
  - Rename allverbosetx notification to txacceptedverbose
    (https://github.com/conformal/btcd/issues/98)
  - Add rescan progress notification
  - Add recvtx notification
  - Add redeemingtx notification
  - Modify notifyspent command to accept an array of outpoints
    (https://github.com/conformal/btcd/issues/123)
  - Significantly optimize the rescan command to yield up to a 60x speed
    increase
- btcctl utility changes:
  - Add createencryptedwallet command
  - Add getblockchaininfo command
  - Add importwallet command
  - Add addmultisigaddress command
  - Add setgenerate command
  - Accept --testnet and --wallet flags which automatically select
    the appropriate port and TLS certificates needed to communicate
    with btcd and btcwallet (https://github.com/conformal/btcd/issues/112)
  - Allow path expansion from config file entries
    (https://github.com/conformal/btcd/issues/113)
  - Minor refactoring to simplify handling of options
- addblock utility changes:
  - Improve logging by making it consistent with the logging provided by
    btcd (https://github.com/conformal/btcd/issues/90)
- Improve several package APIs for developers:
  - Add new amount type for consistently handling monetary values
  - Add new coin selector API
  - Add new WIF (Wallet Import Format) API
  - Add new crypto types for private keys and signatures
  - Add new API to sign transactions including script merging and hash
    types
  - Expose function to extract all pushed data from a script
    (https://github.com/conformal/btcscript/issues/8)
- Misc changes:
  - Optimize address manager shuffling to do 67% less work on average
  - Resolve a couple of benign data races found by the race detector
    (https://github.com/conformal/btcd/issues/101)
  - Add IP address to all peer related errors to clarify which peer is the
    cause (https://github.com/conformal/btcd/issues/102)
  - Fix a UPNP case issue that prevented the --upnp option from working
    with some UPNP servers
  - Update documentation in the sample config file regarding debug levels
  - Adjust some logging levels to improve debug messages
  - Improve the throughput of query messages to the block manager
  - Several minor optimizations to reduce GC churn and enhance speed
  - Other minor refactoring
  - General code cleanup

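BIP0014 user agents are "/name:version/" tokens stacked from the lowest software layer outward. A minimal sketch of rendering one follows; the package names and version numbers are examples only, not the values btcwire actually sends.

```go
package main

import "fmt"

// buildUserAgent renders name/version pairs as a BIP0014 user agent,
// e.g. buildUserAgent({"a","1"},{"b","2"}) -> "/a:1/b:2/".
func buildUserAgent(pairs ...[2]string) string {
	ua := ""
	for _, p := range pairs {
		ua += fmt.Sprintf("/%s:%s", p[0], p[1])
	}
	return ua + "/"
}

func main() {
	// Hypothetical layering: the wire library, then the node on top.
	fmt.Println(buildUserAgent(
		[2]string{"btcwire", "0.2.0"},
		[2]string{"btcd", "0.8.0"},
	))
	// Output: /btcwire:0.2.0/btcd:0.8.0/
}
```
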
Changes in 0.7.0 (Thu Feb 20 2014)
- Fix an issue when parsing scripts which contain a multi-signature script
  which requires zero signatures such as testnet block
  000000001881dccfeda317393c261f76d09e399e15e27d280e5368420f442632
  (https://github.com/conformal/btcscript/issues/7)
- Add check to ensure all transactions accepted to mempool only contain
  canonical data pushes (https://github.com/conformal/btcscript/issues/6)
- Fix an issue causing excessive memory consumption
- Significantly rework and improve the websocket notification system:
  - Each client is now independent so slow clients no longer limit the
    speed of other connected clients
  - Potentially long-running operations such as rescans are now run in
    their own handler and rate-limited to one operation at a time without
    preventing simultaneous requests from the same client for the faster
    requests or notifications
  - A couple of scenarios which could cause shutdown to hang have been
    resolved
  - Update notifynewtx notifications to support all address types instead
    of only pay-to-pubkey-hash
  - Provide a --rpcmaxwebsockets option to allow limiting the number of
    concurrent websocket clients (see the connection-limit sketch after
    this section)
  - Add a new websocket command notifyallnewtxs to request notifications
    (https://github.com/conformal/btcd/issues/86) (thanks @flammit)
- Improve btcctl utility in the following ways:
  - Add getnetworkhashps command
  - Add gettransaction command (wallet-specific)
  - Add signmessage command (wallet-specific)
  - Update getwork command to accept
- Continue cleanup and work on implementing the RPC API:
  - Implement getnettotals command
    (https://github.com/conformal/btcd/issues/84)
  - Implement networkhashps command
    (https://github.com/conformal/btcd/issues/87)
  - Update getpeerinfo to always include syncnode field even when false
  - Remove help addenda for getpeerinfo now that it supports all fields
  - Close standard RPC connections on auth failure
  - Provide a --rpcmaxclients option to allow limiting the number of
    concurrent RPC clients (https://github.com/conformal/btcd/issues/68)
  - Include IP address in RPC auth failure log messages
- Resolve a rather harmless data race found by the race detector
  (https://github.com/conformal/btcd/issues/94)
- Increase block priority size and max standard transaction size to 50k
  and 100k, respectively (https://github.com/conformal/btcd/issues/71)
- Add rate limiting of free transactions to the memory pool to prevent
  penny flooding (https://github.com/conformal/btcd/issues/40)
- Provide a --logdir option (https://github.com/conformal/btcd/issues/95)
- Change the default log file path to include the network
- Add a new ScriptBuilder interface to btcscript to support creation of
  custom scripts (https://github.com/conformal/btcscript/issues/5)
- General code cleanup

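Caps such as --rpcmaxwebsockets and --rpcmaxclients are commonly implemented as a counting semaphore around the accept loop. A minimal sketch under that assumption; btcd's actual server code differs.

```go
package main

import (
	"fmt"
	"net"
)

const maxClients = 8 // stand-in for the configured limit

func main() {
	slots := make(chan struct{}, maxClients) // counting semaphore

	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	fmt.Println("listening on", ln.Addr())

	for {
		conn, err := ln.Accept()
		if err != nil {
			return
		}
		select {
		case slots <- struct{}{}: // a slot is free: serve the client
			go func(c net.Conn) {
				defer func() { c.Close(); <-slots }() // release the slot
				// ... handle the websocket/RPC session here ...
			}(conn)
		default: // over the limit: refuse the connection
			conn.Close()
		}
	}
}
```
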
Changes in 0.6.0 (Tue Feb 04 2014)
- Fix an issue when parsing scripts which contain invalid signatures that
  caused a chain fork on block
  0000000000000001e4241fd0b3469a713f41c5682605451c05d3033288fb2244
- Correct an issue which could lead to an error in removeBlockNode
  (https://github.com/conformal/btcchain/issues/4)
- Improve addblock utility as follows:
  - Check imported blocks against all chain rules and checkpoints
  - Skip blocks which are already known so you can stop and restart the
    import or start the import after you have already downloaded a portion
    of the chain
  - Correct an issue where the utility did not shut down cleanly after
    processing all blocks
  - Add error on attempt to import orphan blocks
  - Improve error handling and reporting
  - Display statistics after input file has been fully processed
- Rework, optimize, and improve headers-first mode:
  - Resuming the chain sync from any point before the final checkpoint
    will now use headers-first mode
    (https://github.com/conformal/btcd/issues/69)
  - Verify all checkpoints as opposed to only the final one (see the
    checkpoint sketch after this section)
  - Reduce and bound memory usage
  - Rollback to the last known good point when a header does not match a
    checkpoint
  - Log information about what is happening with headers
- Improve btcctl utility in the following ways:
  - Add getaddednodeinfo command
  - Add getnettotals command
  - Add getblocktemplate command (wallet-specific)
  - Add getwork command (wallet-specific)
  - Add getnewaddress command (wallet-specific)
  - Add walletpassphrasechange command (wallet-specific)
  - Add walletlock command (wallet-specific)
  - Add sendfrom command (wallet-specific)
  - Add sendmany command (wallet-specific)
  - Add settxfee command (wallet-specific)
  - Add listsinceblock command (wallet-specific)
  - Add listaccounts command (wallet-specific)
  - Add keypoolrefill command (wallet-specific)
  - Add getreceivedbyaccount command (wallet-specific)
  - Add getrawchangeaddress command (wallet-specific)
  - Add gettxoutsetinfo command (wallet-specific)
  - Add listaddressgroupings command (wallet-specific)
  - Add listlockunspent command (wallet-specific)
  - Add listlock command (wallet-specific)
  - Add listreceivedbyaccount command (wallet-specific)
  - Add validateaddress command (wallet-specific)
  - Add verifymessage command (wallet-specific)
  - Add sendtoaddress command (wallet-specific)
- Continue cleanup and work on implementing the RPC API:
  - Implement submitblock command
    (https://github.com/conformal/btcd/issues/61)
  - Implement help command
  - Implement ping command
  - Implement getaddednodeinfo command
    (https://github.com/conformal/btcd/issues/78)
  - Implement getinfo command
  - Update getpeerinfo to support bytesrecv and bytessent
    (https://github.com/conformal/btcd/issues/83)
- Improve and correct several RPC server and websocket areas:
  - Change the connection endpoint for websockets from /wallet to /ws
    (https://github.com/conformal/btcd/issues/80)
  - Implement an alternative authentication for websockets so clients
    such as javascript from browsers that don't support setting HTTP
    headers can authenticate (https://github.com/conformal/btcd/issues/77)
  - Add an authentication deadline for RPC connections
    (https://github.com/conformal/btcd/issues/68)
  - Use standard authentication failure responses for RPC connections
  - Make automatically generated certificate more standard so it works
    from clients such as node.js and Firefox
  - Correct some minor issues which could prevent the RPC server from
    shutting down in an orderly fashion
  - Make all websocket notifications require registration
  - Change the data sent over websockets to text since it is JSON-RPC
  - Allow connections that do not have an Origin header set
  - Expose and track the number of bytes read and written per peer
    (https://github.com/conformal/btcwire/issues/6)
  - Correct an issue with sendrawtransaction when invoked via websockets
    which prevented a minedtx notification from being added
  - Rescan operations issued from remote wallets are now stopped when
    the wallet disconnects mid-operation
    (https://github.com/conformal/btcd/issues/66)
- Several optimizations related to fetching block information from the
  database
- General code cleanup

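Headers-first mode can validate headers in bulk because each checkpoint pins the hash the chain must have at a given height, and a mismatch rolls back to the last good point. A minimal sketch of that comparison, with placeholder heights and hashes rather than real checkpoint values:

```go
package main

import "fmt"

type header struct {
	height int
	hash   string
}

// checkpoints maps heights to the block hash the chain must have there.
// Entries here are placeholders, not real checkpoint data.
var checkpoints = map[int]string{
	100000: "hash-a",
	200000: "hash-b",
}

// verify walks the header chain in order and reports the last height
// that still matched a checkpoint, so a mismatch can roll back to it.
func verify(headers []header) (lastGood int, ok bool) {
	for _, h := range headers {
		want, isCheckpoint := checkpoints[h.height]
		if !isCheckpoint {
			continue
		}
		if h.hash != want {
			return lastGood, false // roll back to lastGood
		}
		lastGood = h.height
	}
	return lastGood, true
}

func main() {
	hs := []header{{100000, "hash-a"}, {200000, "hash-x"}}
	fmt.Println(verify(hs)) // 100000 false: mismatch at height 200000
}
```
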
Changes in 0.5.0 (Mon Jan 13 2014)
- Optimize initial block download by introducing a new mode which
  downloads the block headers first (up to the final checkpoint)
- Improve peer handling to remove the potential for slow peers to cause
  sluggishness amongst all peers
  (https://github.com/conformal/btcd/issues/63)
- Fix an issue where the initial block sync could stall when the sync peer
  disconnects (https://github.com/conformal/btcd/issues/62)
- Correct an issue where --externalip was doing a DNS lookup on the full
  host:port instead of just the host portion
  (https://github.com/conformal/btcd/issues/38)
- Fix an issue which could lead to a panic on chain switches
  (https://github.com/conformal/btcd/issues/70)
- Improve btcctl utility in the following ways:
  - Show getdifficulty output as floating point to 6 digits of precision
  - Show all JSON object replies formatted as standard JSON
  - Allow btcctl getblock to accept optional params
  - Add getaccount command (wallet-specific)
  - Add getaccountaddress command (wallet-specific)
  - Add sendrawtransaction command
- Continue cleanup and work on implementing RPC API calls
  - Update getrawmempool to support new optional verbose flag
  - Update getrawtransaction to match the reference client
  - Update getblock to support new optional verbose flag
  - Update raw transactions to fully match the reference client including
    support for all transaction types and address types
  - Correct getrawmempool fee field to return BTC instead of Satoshi
  - Correct getpeerinfo service flag to return 8 digit string so it
    matches the reference client
  - Correct verifychain to return a boolean
  - Implement decoderawtransaction command
  - Implement createrawtransaction command
  - Implement decodescript command
  - Implement gethashespersec command
  - Allow RPC handler overrides when invoked via a websocket versus
    legacy connection
- Add new DNS seed for peer discovery
- Display user agent on new valid peer log message
  (https://github.com/conformal/btcd/issues/64)
- Notify wallet when new transactions that pay to registered addresses
  show up in the mempool before being mined into a block
- Support a tor-specific proxy in addition to a normal proxy
  (https://github.com/conformal/btcd/issues/47)
- Remove deprecated sqlite3 imports from utilities
- Remove leftover profile write from addblock utility
- Quite a bit of code cleanup and refactoring to improve maintainability

Changes in 0.4.0 (Thu Dec 12 2013)
- Allow listen interfaces to be specified via --listen instead of only the
  port (https://github.com/conformal/btcd/issues/33)
- Allow listen interfaces for the RPC server to be specified via
  --rpclisten instead of only the port
  (https://github.com/conformal/btcd/issues/34)
- Only disable listening when --connect or --proxy are used when no
  --listen interfaces are specified
  (https://github.com/conformal/btcd/issues/10)
- Add several new standard transaction checks to transaction memory pool:
  - Support nulldata scripts as standard
  - Only allow a max of one nulldata output per transaction
  - Enforce a maximum of 3 public keys in multi-signature transactions
  - The number of signatures in multi-signature transactions must not
    exceed the number of public keys
  - The number of inputs to a signature script must match the expected
    number of inputs for the script type
  - The number of inputs pushed onto the stack by a redeeming signature
    script must match the number of inputs consumed by the referenced
    public key script
- When a block is connected, remove any transactions from the memory pool
  which are now double spends as a result of the newly connected
  transactions
- Don't relay transactions resurrected during a chain switch since
  other peers will also be switching chains and therefore already know
  about them
- Cleanup a few cases where rejected transactions showed as an error
  rather than as a rejected transaction
- Ignore the default configuration file when --regtest (regression test
  mode) is specified
- Implement TLS support for RPC including automatic certificate generation
- Support HTTP authentication headers for web sockets
- Update address manager to recognize and properly work with Tor
  addresses (https://github.com/conformal/btcd/issues/36) and
  (https://github.com/conformal/btcd/issues/37)
- Improve btcctl utility in the following ways:
  - Add the ability to specify a configuration file
  - Add a default entry for the RPC cert to point to the location
    it will likely be in the btcd home directory
  - Implement --version flag
  - Provide a --notls option to support non-TLS configurations
- Fix a couple of minor races found by the Go race detector
- Improve logging
  - Allow logging level to be specified on a per subsystem basis
    (https://github.com/conformal/btcd/issues/48)
  - Allow logging levels to be dynamically changed via RPC
    (https://github.com/conformal/btcd/issues/15)
  - Implement a rolling log file with a max of 10MB per file and a
    rotation size of 3 which results in a max logging size of 30 MB
- Correct a minor issue with the rescanning websocket call
  (https://github.com/conformal/btcd/issues/54)
- Fix a race with pushing address messages that could lead to a panic
  (https://github.com/conformal/btcd/issues/58)
- Improve which external IP address is reported to peers based on which
  interface they are connected through
  (https://github.com/conformal/btcd/issues/35)
- Add --externalip option to allow an external IP address to be specified
  for cases such as tor hidden services or advanced network configurations
  (https://github.com/conformal/btcd/issues/38)
- Add --upnp option to support automatic port mapping via UPnP
  (https://github.com/conformal/btcd/issues/51)
- Update Ctrl+C interrupt handler to properly sync address manager and
  remove the UPnP port mapping (if needed)
- Continue cleanup and work on implementing RPC API calls
  - Add importprivkey (import private key) command to btcctl
  - Update getrawtransaction to provide addresses properly, support
    new verbose param, and match the reference implementation with the
    exception of MULTISIG (thanks @flammit)
  - Update getblock with new verbose flag (thanks @flammit)
  - Add listtransactions command to btcctl
  - Add getbalance command to btcctl
- Add basic support for btcd to run as a native Windows service
  (https://github.com/conformal/btcd/issues/42)
- Package addblock utility with Windows MSIs
- Add support for TravisCI (continuous build integration)
- Cleanup some documentation and usage
- Several other minor bug fixes and general code cleanup

Changes in 0.3.3 (Wed Nov 13 2013)
- Significantly improve initial block chain download speed
  (https://github.com/conformal/btcd/issues/20)
- Add a new checkpoint at block height 267300
- Optimize most recently used inventory handling
  (https://github.com/conformal/btcd/issues/21)
- Optimize duplicate transaction input check
  (https://github.com/conformal/btcchain/issues/2)
- Optimize transaction hashing
  (https://github.com/conformal/btcd/issues/25)
- Rework and optimize wallet listener notifications
  (https://github.com/conformal/btcd/issues/22)
- Optimize serialization and deserialization
  (https://github.com/conformal/btcd/issues/27)
- Add support for minimum transaction fee to memory pool acceptance
  (https://github.com/conformal/btcd/issues/29)
- Improve leveldb database performance by removing explicit GC call
- Fix an issue where Ctrl+C was not always finishing orderly database
  shutdown
- Fix an issue in the script handling for OP_CHECKSIG
- Impose max limits on all variable length protocol entries to prevent
  abuse from malicious peers
- Enforce DER signatures for transactions allowed into the memory pool
- Separate the debug profile http server from the RPC server
- Rework of the RPC code to improve performance and make the code cleaner
- The getrawtransaction RPC call now properly checks the memory pool
  before consulting the db (https://github.com/conformal/btcd/issues/26)
- Add support for the following RPC calls: getpeerinfo, getconnectioncount,
  addnode, verifychain
  (https://github.com/conformal/btcd/issues/13)
  (https://github.com/conformal/btcd/issues/17)
- Implement rescan websocket extension to allow wallet rescans
- Use correct paths for application data storage for all supported
  operating systems (https://github.com/conformal/btcd/issues/30)
- Add a default redirect to the http profiling page when accessing the
  http profile server
- Add a new --cpuprofile option which can be used to generate CPU
  profiling data on platforms that support it
- Several other minor performance optimizations
- Other minor bug fixes and general code cleanup

Changes in 0.3.2 (Tue Oct 22 2013)
- Fix an issue that could cause the download of the block chain to stall
  (https://github.com/conformal/btcd/issues/12)
- Remove deprecated sqlite as an available database backend
- Close sqlite compile issue as sqlite has now been removed
  (https://github.com/conformal/btcd/issues/11)
- Change default RPC ports to 8334 (mainnet) and 18334 (testnet)
- Continue cleanup and work on implementing RPC API calls
  - Add support for the following RPC calls: getrawmempool,
    getbestblockhash, decoderawtransaction, getdifficulty,
    getconnectioncount, getpeerinfo, and addnode
  - Improve the btcctl utility that is used to issue JSON-RPC commands
- Fix an issue preventing btcd from cleanly shutting down with the RPC
  stop command
- Add a number of database interface tests to ensure backends implement
  the expected interface
- Expose some additional information from btcscript to be used for
  identifying "standard" transactions
- Add support for plan9 - thanks @mischief
  (https://github.com/conformal/btcd/pull/19)
- Other minor bug fixes and general code cleanup

Changes in 0.3.1-alpha (Tue Oct 15 2013)
- Change default database to leveldb
  NOTE: This does mean you will have to redownload the block chain. Since we
  are still in alpha, we didn't feel writing a converter was worth the time as
  it would take away from more important issues at this stage
- Add a warning if there are multiple block chain databases of different types
- Fix issue with unexpected EOF in leveldb -- https://github.com/conformal/btcd/issues/18
- Fix issue preventing block 21066 on testnet -- https://github.com/conformal/btcchain/issues/1
- Fix issue preventing block 96464 on testnet -- https://github.com/conformal/btcscript/issues/1
- Optimize transaction lookups
- Correct a few cases of list removal that could result in improper cleanup
  of no longer needed orphans
- Add functionality to increase ulimits on non-Windows platforms
- Add support for mempool command which allows remote peers to query the
  transaction memory pool via the bitcoin protocol
- Clean up logging a bit
- Add a flag to disable checkpoints for developers
- Add a lot of useful debug logging such as message summaries
- Other minor bug fixes and general code cleanup

Initial Release 0.3.0-alpha (Sat Oct 05 2013):
- Initial release
10  Jenkinsfile  vendored
@@ -1,10 +0,0 @@
node {
    stage 'Checkout'
    checkout scm

    stage 'Version'
    sh './deploy.sh version'

    stage 'Build'
    sh "./deploy.sh build"
}
3  LICENSE
@@ -1,8 +1,9 @@
ISC License

Copyright (c) 2018-2019 DAGLabs
Copyright (c) 2018-2019 The kaspanet developers
Copyright (c) 2013-2018 The btcsuite developers
Copyright (c) 2015-2016 The Decred developers
Copyright (c) 2013-2014 Conformal Systems LLC.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
125  README.md
@@ -1,49 +1,24 @@
btcd

Kaspad
====
Warning: This is pre-alpha software. There's no guarantee anything works.
====

[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/daglabs/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad)

btcd is an alternative full node bitcoin implementation written in Go (golang).
Kaspad is the reference full node Kaspa implementation written in Go (golang).

This project is currently under active development and is in a Beta state. It
is extremely stable and has been in production use since October 2013.

It properly downloads, validates, and serves the block chain using the exact
rules (including consensus bugs) for block acceptance as Bitcoin Core. We have
taken great care to avoid btcd causing a fork to the block chain. It includes a
full block validation testing framework which contains all of the 'official'
block acceptance tests (and some additional ones) that is run on every pull
request to help ensure it properly follows consensus. Also, it passes all of
the JSON test data in the Bitcoin Core code.

It also properly relays newly mined blocks, maintains a transaction pool, and
relays individual transactions that have not yet made it into a block. It
ensures all individual transactions admitted to the pool follow the rules
required by the block chain and also includes more strict checks which filter
transactions based on miner requirements ("standard" transactions).

One key difference between btcd and Bitcoin Core is that btcd does *NOT* include
wallet functionality and this was a very intentional design decision. See the
blog entry [here](https://blog.conformal.com/btcd-not-your-moms-bitcoin-daemon)
for more details. This means you can't actually make or receive payments
directly with btcd. That functionality is provided by the
[btcwallet](https://github.com/btcsuite/btcwallet) and
[Paymetheus](https://github.com/btcsuite/Paymetheus) (Windows-only) projects
which are both under active development.
This project is currently under active development and is in a pre-Alpha state.
Some things still don't work and APIs are far from finalized. The code is provided for reference only.

## Requirements

[Go](http://golang.org) 1.8 or newer.
Latest version of [Go](http://golang.org) (currently 1.13).

## Installation

#### Windows - MSI Available

https://github.com/daglabs/btcd/releases

#### Linux/BSD/MacOSX/POSIX - Build from Source
#### Build from Source

- Install Go according to the installation instructions here:
  http://golang.org/doc/install
@@ -55,92 +30,50 @@ $ go version
$ go env GOROOT GOPATH
```

NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
recommended that `GOPATH` is set to a directory in your home directory such as
`~/goprojects` to avoid write permission issues. It is also recommended to add
`~/dev/go` to avoid write permission issues. It is also recommended to add
`$GOPATH/bin` to your `PATH` at this point.

- Run the following commands to obtain btcd, all dependencies, and install it:
- Run the following commands to obtain and install kaspad including all dependencies:

```bash
$ # Install dep: https://golang.github.io/dep/docs/installation.html
$ git clone https://github.com/daglabs/btcd $GOPATH/src/github.com/daglabs/btcd
$ cd $GOPATH/src/github.com/daglabs/btcd
$ dep ensure
$ git clone https://github.com/kaspanet/kaspad $GOPATH/src/github.com/kaspanet/kaspad
$ cd $GOPATH/src/github.com/kaspanet/kaspad
$ ./test.sh
$ go install . ./cmd/...
```
`./test.sh` tests can be skipped, but some things might not run correctly on your system if tests fail.

- btcd (and utilities) will now be installed in ```$GOPATH/bin```. If you did
- Kaspad (and utilities) should now be installed in `$GOPATH/bin`. If you did
  not already add the bin directory to your system path during Go installation,
  we recommend you do so now.
  you are encouraged to do so now.

## Updating

#### Windows

Install a newer MSI

#### Linux/BSD/MacOSX/POSIX - Build from Source

- Run the following commands to update btcd, all dependencies, and install it:

```bash
$ cd $GOPATH/src/github.com/daglabs/btcd
$ git pull && dep ensure
$ go install . ./cmd/...
```

## Getting Started

btcd has several configuration options available to tweak how it runs, but all
of the basic operations described in the intro section work with zero
configuration.

#### Windows (Installed from MSI)

Launch btcd from your Start menu.
Kaspad has several configuration options available to tweak how it runs, but all
of the basic operations work with zero configuration.

#### Linux/BSD/POSIX/Source

```bash
$ ./btcd
$ ./kaspad
```

## IRC

- irc.freenode.net
- channel #btcd
- [webchat](https://webchat.freenode.net/?channels=btcd)
## Discord
Join our discord server using the following link: https://discord.gg/WmGhhzk

## Issue Tracker

The [integrated github issue tracker](https://github.com/daglabs/btcd/issues)
The [integrated github issue tracker](https://github.com/kaspanet/kaspad/issues)
is used for this project.

## Documentation

The documentation is a work-in-progress. It is located in the [docs](https://github.com/daglabs/btcd/tree/master/docs) folder.

## GPG Verification Key

All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:

- Download the Conformal public key:
  https://raw.githubusercontent.com/btcsuite/btcd/master/release/GIT-GPG-KEY-conformal.txt

- Import the public key into your GPG keyring:
```bash
gpg --import GIT-GPG-KEY-conformal.txt
```

- Verify the release tag with the following command where `TAG_NAME` is a
  placeholder for the specific tag:
```bash
git tag -v TAG_NAME
```
The documentation is a work-in-progress. It is located in the [docs](https://github.com/kaspanet/kaspad/tree/master/docs) folder.

## License

btcd is licensed under the [copyfree](http://copyfree.org) ISC License.
Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).

1421  addressmanager/addressmanager.go  Normal file
File diff suppressed because it is too large
@@ -2,26 +2,31 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr
package addressmanager

import (
    "errors"
    "fmt"
    "io/ioutil"
    "net"
    "reflect"
    "testing"
    "time"

    "github.com/daglabs/btcd/util/subnetworkid"
    "github.com/kaspanet/kaspad/config"
    "github.com/kaspanet/kaspad/dbaccess"
    "github.com/kaspanet/kaspad/util/mstime"
    "github.com/kaspanet/kaspad/util/subnetworkid"

    "github.com/daglabs/btcd/wire"
    "github.com/pkg/errors"

    "github.com/kaspanet/kaspad/wire"
)

// naTest is used to describe a test to be performed against the NetAddressKey
// method.
type naTest struct {
    in   wire.NetAddress
    want string
    want AddressKey
}

// naTests houses all of the tests to be performed against the NetAddressKey
@@ -35,92 +40,122 @@ var someIP = "173.194.115.66"
func addNaTests() {
    // IPv4
    // Localhost
    addNaTest("127.0.0.1", 8333, "127.0.0.1:8333")
    addNaTest("127.0.0.1", 8334, "127.0.0.1:8334")
    addNaTest("127.0.0.1", 16111, "127.0.0.1:16111")
    addNaTest("127.0.0.1", 16110, "127.0.0.1:16110")

    // Class A
    addNaTest("1.0.0.1", 8333, "1.0.0.1:8333")
    addNaTest("2.2.2.2", 8334, "2.2.2.2:8334")
    addNaTest("1.0.0.1", 16111, "1.0.0.1:16111")
    addNaTest("2.2.2.2", 16110, "2.2.2.2:16110")
    addNaTest("27.253.252.251", 8335, "27.253.252.251:8335")
    addNaTest("123.3.2.1", 8336, "123.3.2.1:8336")

    // Private Class A
    addNaTest("10.0.0.1", 8333, "10.0.0.1:8333")
    addNaTest("10.1.1.1", 8334, "10.1.1.1:8334")
    addNaTest("10.0.0.1", 16111, "10.0.0.1:16111")
    addNaTest("10.1.1.1", 16110, "10.1.1.1:16110")
    addNaTest("10.2.2.2", 8335, "10.2.2.2:8335")
    addNaTest("10.10.10.10", 8336, "10.10.10.10:8336")

    // Class B
    addNaTest("128.0.0.1", 8333, "128.0.0.1:8333")
    addNaTest("129.1.1.1", 8334, "129.1.1.1:8334")
    addNaTest("128.0.0.1", 16111, "128.0.0.1:16111")
    addNaTest("129.1.1.1", 16110, "129.1.1.1:16110")
    addNaTest("180.2.2.2", 8335, "180.2.2.2:8335")
    addNaTest("191.10.10.10", 8336, "191.10.10.10:8336")

    // Private Class B
    addNaTest("172.16.0.1", 8333, "172.16.0.1:8333")
    addNaTest("172.16.1.1", 8334, "172.16.1.1:8334")
    addNaTest("172.16.0.1", 16111, "172.16.0.1:16111")
    addNaTest("172.16.1.1", 16110, "172.16.1.1:16110")
    addNaTest("172.16.2.2", 8335, "172.16.2.2:8335")
    addNaTest("172.16.172.172", 8336, "172.16.172.172:8336")

    // Class C
    addNaTest("193.0.0.1", 8333, "193.0.0.1:8333")
    addNaTest("200.1.1.1", 8334, "200.1.1.1:8334")
    addNaTest("193.0.0.1", 16111, "193.0.0.1:16111")
    addNaTest("200.1.1.1", 16110, "200.1.1.1:16110")
    addNaTest("205.2.2.2", 8335, "205.2.2.2:8335")
    addNaTest("223.10.10.10", 8336, "223.10.10.10:8336")

    // Private Class C
    addNaTest("192.168.0.1", 8333, "192.168.0.1:8333")
    addNaTest("192.168.1.1", 8334, "192.168.1.1:8334")
    addNaTest("192.168.0.1", 16111, "192.168.0.1:16111")
    addNaTest("192.168.1.1", 16110, "192.168.1.1:16110")
    addNaTest("192.168.2.2", 8335, "192.168.2.2:8335")
    addNaTest("192.168.192.192", 8336, "192.168.192.192:8336")

    // IPv6
    // Localhost
    addNaTest("::1", 8333, "[::1]:8333")
    addNaTest("fe80::1", 8334, "[fe80::1]:8334")
    addNaTest("::1", 16111, "[::1]:16111")
    addNaTest("fe80::1", 16110, "[fe80::1]:16110")

    // Link-local
    addNaTest("fe80::1:1", 8333, "[fe80::1:1]:8333")
    addNaTest("fe91::2:2", 8334, "[fe91::2:2]:8334")
    addNaTest("fe80::1:1", 16111, "[fe80::1:1]:16111")
    addNaTest("fe91::2:2", 16110, "[fe91::2:2]:16110")
    addNaTest("fea2::3:3", 8335, "[fea2::3:3]:8335")
    addNaTest("feb3::4:4", 8336, "[feb3::4:4]:8336")

    // Site-local
    addNaTest("fec0::1:1", 8333, "[fec0::1:1]:8333")
    addNaTest("fed1::2:2", 8334, "[fed1::2:2]:8334")
    addNaTest("fec0::1:1", 16111, "[fec0::1:1]:16111")
    addNaTest("fed1::2:2", 16110, "[fed1::2:2]:16110")
    addNaTest("fee2::3:3", 8335, "[fee2::3:3]:8335")
    addNaTest("fef3::4:4", 8336, "[fef3::4:4]:8336")
}

func addNaTest(ip string, port uint16, want string) {
func addNaTest(ip string, port uint16, want AddressKey) {
    nip := net.ParseIP(ip)
    na := *wire.NewNetAddressIPPort(nip, port, wire.SFNodeNetwork)
    test := naTest{na, want}
    naTests = append(naTests, test)
}

func lookupFunc(host string) ([]net.IP, error) {
func lookupFuncForTest(host string) ([]net.IP, error) {
    return nil, errors.New("not implemented")
}

func newAddrManagerForTest(t *testing.T, testName string,
    localSubnetworkID *subnetworkid.SubnetworkID) (addressManager *AddressManager, teardown func()) {

    cfg := config.DefaultConfig()
    cfg.SubnetworkID = localSubnetworkID

    dbPath, err := ioutil.TempDir("", testName)
    if err != nil {
        t.Fatalf("Error creating temporary directory: %s", err)
    }

    databaseContext, err := dbaccess.New(dbPath)
    if err != nil {
        t.Fatalf("error creating db: %s", err)
    }

    addressManager = New(cfg, databaseContext)

    return addressManager, func() {
        err := databaseContext.Close()
        if err != nil {
            t.Fatalf("error closing the database: %s", err)
        }
    }
}

func TestStartStop(t *testing.T) {
    n := New("teststartstop", lookupFunc, nil)
    n.Start()
    err := n.Stop()
    amgr, teardown := newAddrManagerForTest(t, "TestStartStop", nil)
    defer teardown()
    err := amgr.Start()
    if err != nil {
        t.Fatalf("Address Manager failed to start: %v", err)
    }
    err = amgr.Stop()
    if err != nil {
        t.Fatalf("Address Manager failed to stop: %v", err)
    }
}

func TestAddAddressByIP(t *testing.T) {
    fmtErr := fmt.Errorf("")
    fmtErr := errors.Errorf("")
    addrErr := &net.AddrError{}
    var tests = []struct {
        addrIP string
        err    error
    }{
        {
            someIP + ":8333",
            someIP + ":16111",
            nil,
        },
        {
@@ -137,19 +172,20 @@ func TestAddAddressByIP(t *testing.T) {
        },
    }

    amgr := New("testaddressbyip", nil, nil)
    amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil)
    defer teardown()
    for i, test := range tests {
        err := amgr.AddAddressByIP(test.addrIP, nil)
        if test.err != nil && err == nil {
            t.Errorf("TestGood test %d failed expected an error and got none", i)
            t.Errorf("TestAddAddressByIP test %d failed expected an error and got none", i)
            continue
        }
        if test.err == nil && err != nil {
            t.Errorf("TestGood test %d failed expected no error and got one", i)
            t.Errorf("TestAddAddressByIP test %d failed expected no error and got one", i)
            continue
        }
        if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
            t.Errorf("TestGood test %d failed got %v, want %v", i,
            t.Errorf("TestAddAddressByIP test %d failed got %v, want %v", i,
                reflect.TypeOf(err), reflect.TypeOf(test.err))
            continue
        }
@@ -193,7 +229,8 @@ func TestAddLocalAddress(t *testing.T) {
            true,
        },
    }
    amgr := New("testaddlocaladdress", nil, nil)
    amgr, teardown := newAddrManagerForTest(t, "TestAddLocalAddress", nil)
    defer teardown()
    for x, test := range tests {
        result := amgr.AddLocalAddress(&test.address, test.priority)
        if result == nil && !test.valid {
@@ -210,21 +247,22 @@
}

func TestAttempt(t *testing.T) {
    n := New("testattempt", lookupFunc, nil)
    amgr, teardown := newAddrManagerForTest(t, "TestAttempt", nil)
    defer teardown()

    // Add a new address and get it
    err := n.AddAddressByIP(someIP+":8333", nil)
    err := amgr.AddAddressByIP(someIP+":8333", nil)
    if err != nil {
        t.Fatalf("Adding address failed: %v", err)
    }
    ka := n.GetAddress()
    ka := amgr.GetAddress()

    if !ka.LastAttempt().IsZero() {
        t.Errorf("Address should not have attempts, but does")
    }

    na := ka.NetAddress()
    n.Attempt(na)
    amgr.Attempt(na)

    if ka.LastAttempt().IsZero() {
        t.Errorf("Address should have an attempt, but does not")
@@ -232,19 +270,20 @@
}

func TestConnected(t *testing.T) {
    n := New("testconnected", lookupFunc, nil)
    amgr, teardown := newAddrManagerForTest(t, "TestConnected", nil)
    defer teardown()

    // Add a new address and get it
    err := n.AddAddressByIP(someIP+":8333", nil)
    err := amgr.AddAddressByIP(someIP+":8333", nil)
    if err != nil {
        t.Fatalf("Adding address failed: %v", err)
    }
    ka := n.GetAddress()
    ka := amgr.GetAddress()
    na := ka.NetAddress()
    // make it an hour ago
    na.Timestamp = time.Unix(time.Now().Add(time.Hour*-1).Unix(), 0)
    na.Timestamp = mstime.Now().Add(time.Hour * -1)

    n.Connected(na)
    amgr.Connected(na)

    if !ka.NetAddress().Timestamp.After(na.Timestamp) {
        t.Errorf("Address should have a new timestamp, but does not")
@@ -252,9 +291,10 @@
}

func TestNeedMoreAddresses(t *testing.T) {
    n := New("testneedmoreaddresses", lookupFunc, nil)
    amgr, teardown := newAddrManagerForTest(t, "TestNeedMoreAddresses", nil)
    defer teardown()
    addrsToAdd := 1500
    b := n.NeedMoreAddresses()
    b := amgr.NeedMoreAddresses()
    if !b {
        t.Errorf("Expected that we need more addresses")
    }
@@ -262,8 +302,8 @@

    var err error
    for i := 0; i < addrsToAdd; i++ {
        s := fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60)
        addrs[i], err = n.DeserializeNetAddress(s)
        s := AddressKey(fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60))
        addrs[i], err = amgr.DeserializeNetAddress(s)
        if err != nil {
            t.Errorf("Failed to turn %s into an address: %v", s, err)
        }
@@ -271,20 +311,21 @@

    srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)

    n.AddAddresses(addrs, srcAddr, nil)
    numAddrs := n.TotalNumAddresses()
    amgr.AddAddresses(addrs, srcAddr, nil)
    numAddrs := amgr.TotalNumAddresses()
    if numAddrs > addrsToAdd {
        t.Errorf("Number of addresses is too many %d vs %d", numAddrs, addrsToAdd)
    }

    b = n.NeedMoreAddresses()
    b = amgr.NeedMoreAddresses()
    if b {
        t.Errorf("Expected that we don't need more addresses")
    }
}

func TestGood(t *testing.T) {
    n := New("testgood", lookupFunc, nil)
    amgr, teardown := newAddrManagerForTest(t, "TestGood", nil)
    defer teardown()
    addrsToAdd := 64 * 64
    addrs := make([]*wire.NetAddress, addrsToAdd)
    subnetworkCount := 32
@@ -292,8 +333,8 @@ func TestGood(t *testing.T) {

    var err error
    for i := 0; i < addrsToAdd; i++ {
        s := fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60)
        addrs[i], err = n.DeserializeNetAddress(s)
        s := AddressKey(fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60))
        addrs[i], err = amgr.DeserializeNetAddress(s)
        if err != nil {
            t.Errorf("Failed to turn %s into an address: %v", s, err)
        }
@@ -305,24 +346,24 @@

    srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)

    n.AddAddresses(addrs, srcAddr, nil)
    amgr.AddAddresses(addrs, srcAddr, nil)
    for i, addr := range addrs {
        n.Good(addr, subnetworkIDs[i%subnetworkCount])
        amgr.Good(addr, subnetworkIDs[i%subnetworkCount])
    }

    numAddrs := n.TotalNumAddresses()
    numAddrs := amgr.TotalNumAddresses()
    if numAddrs >= addrsToAdd {
        t.Errorf("Number of addresses is too many: %d vs %d", numAddrs, addrsToAdd)
    }

    numCache := len(n.AddressCache(true, nil))
    numCache := len(amgr.AddressCache(true, nil))
    if numCache == 0 || numCache >= numAddrs/4 {
        t.Errorf("Number of addresses in cache: got %d, want positive and less than %d",
            numCache, numAddrs/4)
    }

    for i := 0; i < subnetworkCount; i++ {
        numCache = len(n.AddressCache(false, subnetworkIDs[i]))
        numCache = len(amgr.AddressCache(false, subnetworkIDs[i]))
        if numCache == 0 || numCache >= numAddrs/subnetworkCount {
            t.Errorf("Number of addresses in subnetwork cache: got %d, want positive and less than %d",
                numCache, numAddrs/4/subnetworkCount)
@@ -331,17 +372,18 @@
}

func TestGoodChangeSubnetworkID(t *testing.T) {
    n := New("test_good_change_subnetwork_id", lookupFunc, nil)
    amgr, teardown := newAddrManagerForTest(t, "TestGoodChangeSubnetworkID", nil)
    defer teardown()
    addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
    addrKey := NetAddressKey(addr)
    srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)

    oldSubnetwork := subnetworkid.SubnetworkIDNative
    n.AddAddress(addr, srcAddr, oldSubnetwork)
    n.Good(addr, oldSubnetwork)
    amgr.AddAddress(addr, srcAddr, oldSubnetwork)
    amgr.Good(addr, oldSubnetwork)

    // make sure address was saved to addrIndex under oldSubnetwork
    ka := n.find(addr)
    // make sure address was saved to addressIndex under oldSubnetwork
    ka := amgr.knownAddress(addr)
    if ka == nil {
        t.Fatalf("Address was not found after first time .Good called")
    }
@@ -350,10 +392,10 @@
    }

    // make sure address was added to correct bucket under oldSubnetwork
    bucket := n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)]
    bucket := amgr.subnetworkTriedAddresBucketArrays[*oldSubnetwork][amgr.triedAddressBucketIndex(addr)]
    wasFound := false
    for e := bucket.Front(); e != nil; e = e.Next() {
        if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
    for _, ka := range bucket {
        if NetAddressKey(ka.NetAddress()) == addrKey {
            wasFound = true
        }
    }
@@ -363,10 +405,10 @@

    // now call .Good again with a different subnetwork
    newSubnetwork := subnetworkid.SubnetworkIDRegistry
    n.Good(addr, newSubnetwork)
    amgr.Good(addr, newSubnetwork)

    // make sure address was updated in addrIndex under newSubnetwork
    ka = n.find(addr)
    // make sure address was updated in addressIndex under newSubnetwork
    ka = amgr.knownAddress(addr)
    if ka == nil {
        t.Fatalf("Address was not found after second time .Good called")
    }
@@ -375,10 +417,10 @@
    }

    // make sure address was removed from bucket under oldSubnetwork
    bucket = n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)]
    bucket = amgr.subnetworkTriedAddresBucketArrays[*oldSubnetwork][amgr.triedAddressBucketIndex(addr)]
    wasFound = false
    for e := bucket.Front(); e != nil; e = e.Next() {
        if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
    for _, ka := range bucket {
        if NetAddressKey(ka.NetAddress()) == addrKey {
            wasFound = true
        }
    }
@@ -387,10 +429,10 @@
    }

    // make sure address was added to correct bucket under newSubnetwork
    bucket = n.addrTried[*newSubnetwork][n.getTriedBucket(addr)]
    bucket = amgr.subnetworkTriedAddresBucketArrays[*newSubnetwork][amgr.triedAddressBucketIndex(addr)]
    wasFound = false
    for e := bucket.Front(); e != nil; e = e.Next() {
        if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
    for _, ka := range bucket {
        if NetAddressKey(ka.NetAddress()) == addrKey {
            wasFound = true
        }
    }
@@ -401,34 +443,35 @@

func TestGetAddress(t *testing.T) {
    localSubnetworkID := &subnetworkid.SubnetworkID{0xff}
    n := New("testgetaddress", lookupFunc, localSubnetworkID)
    amgr, teardown := newAddrManagerForTest(t, "TestGetAddress", localSubnetworkID)
    defer teardown()

    // Get an address from an empty set (should error)
    if rv := n.GetAddress(); rv != nil {
    if rv := amgr.GetAddress(); rv != nil {
        t.Errorf("GetAddress failed: got: %v want: %v\n", rv, nil)
    }

    // Add a new address and get it
    err := n.AddAddressByIP(someIP+":8332", localSubnetworkID)
    err := amgr.AddAddressByIP(someIP+":8332", localSubnetworkID)
    if err != nil {
        t.Fatalf("Adding address failed: %v", err)
    }
    ka := n.GetAddress()
    ka := amgr.GetAddress()
    if ka == nil {
        t.Fatalf("Did not get an address where there is one in the pool")
    }
    n.Attempt(ka.NetAddress())
    amgr.Attempt(ka.NetAddress())

    // Checks that we don't get it if we find that it has other subnetwork ID than expected.
    actualSubnetworkID := &subnetworkid.SubnetworkID{0xfe}
    n.Good(ka.NetAddress(), actualSubnetworkID)
    ka = n.GetAddress()
    amgr.Good(ka.NetAddress(), actualSubnetworkID)
    ka = amgr.GetAddress()
    if ka != nil {
        t.Errorf("Didn't expect to get an address because there shouldn't be any address from subnetwork ID %s or nil", localSubnetworkID)
    }

    // Checks that the total number of addresses incremented although the new address is not full node or a partial node of the same subnetwork as the local node.
    numAddrs := n.TotalNumAddresses()
    numAddrs := amgr.TotalNumAddresses()
    if numAddrs != 1 {
        t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1)
    }
@@ -436,11 +479,11 @@ func TestGetAddress(t *testing.T) {
    // Now we repeat the same process, but now the address has the expected subnetwork ID.

    // Add a new address and get it
    err = n.AddAddressByIP(someIP+":8333", localSubnetworkID)
    err = amgr.AddAddressByIP(someIP+":8333", localSubnetworkID)
    if err != nil {
        t.Fatalf("Adding address failed: %v", err)
    }
    ka = n.GetAddress()
    ka = amgr.GetAddress()
    if ka == nil {
        t.Fatalf("Did not get an address where there is one in the pool")
    }
@@ -450,11 +493,11 @@ func TestGetAddress(t *testing.T) {
    if !ka.SubnetworkID().IsEqual(localSubnetworkID) {
        t.Errorf("Wrong Subnetwork ID: got %v, want %v", *ka.SubnetworkID(), localSubnetworkID)
    }
    n.Attempt(ka.NetAddress())
    amgr.Attempt(ka.NetAddress())

    // Mark this as a good address and get it
    n.Good(ka.NetAddress(), localSubnetworkID)
    ka = n.GetAddress()
    amgr.Good(ka.NetAddress(), localSubnetworkID)
    ka = amgr.GetAddress()
    if ka == nil {
        t.Fatalf("Did not get an address where there is one in the pool")
    }
@@ -465,7 +508,7 @@ func TestGetAddress(t *testing.T) {
        t.Errorf("Wrong Subnetwork ID: got %v, want %v", ka.SubnetworkID(), localSubnetworkID)
    }

    numAddrs = n.TotalNumAddresses()
    numAddrs = amgr.TotalNumAddresses()
    if numAddrs != 2 {
        t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1)
    }
@@ -521,7 +564,8 @@ func TestGetBestLocalAddress(t *testing.T) {
    */
    }

    amgr := New("testgetbestlocaladdress", nil, nil)
    amgr, teardown := newAddrManagerForTest(t, "TestGetBestLocalAddress", nil)
    defer teardown()

    // Test against default when there's no address
    for x, test := range tests {
@@ -564,7 +608,6 @@ func TestGetBestLocalAddress(t *testing.T) {
    // Add a Tor generated IP address
    localAddr = wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")}
    amgr.AddLocalAddress(&localAddr, ManualPrio)

    // Test against want3
    for x, test := range tests {
        got := amgr.GetBestLocalAddress(&test.remoteAddr)
34  addressmanager/doc.go  Normal file
@@ -0,0 +1,34 @@
/*
Package addressmanager implements concurrency safe Kaspa address manager.

Address Manager Overview

In order to maintain the peer-to-peer Kaspa network, there needs to be a source
of addresses to connect to as nodes come and go. The Kaspa protocol provides
the getaddr and addr messages to allow peers to communicate known addresses with
each other. However, there needs to be a mechanism to store those results and
select peers from them. It is also important to note that remote peers can't
be trusted to send valid peers nor attempt to provide you with only peers they
control with malicious intent.

With that in mind, this package provides a concurrency safe address manager for
caching and selecting peers in a non-deterministic manner. The general idea is
the caller adds addresses to the address manager and notifies it when addresses
are connected, known good, and attempted. The caller also requests addresses as
it needs them.

The address manager internally segregates the addresses into groups and
non-deterministically selects groups in a cryptographically random manner. This
reduces the chances multiple addresses from the same nets are selected which
generally helps provide greater peer diversity, and perhaps more importantly,
drastically reduces the chances an attacker is able to coerce your peer into
only connecting to nodes they control.

The address manager also understands routability and tries hard to only return
routable addresses. In addition, it uses the information provided by the caller
about connected, known good, and attempted addresses to periodically purge
peers which no longer appear to be good peers as well as bias the selection
toward known good peers. The general idea is to make a best effort at only
providing usable addresses.
*/
package addressmanager
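The tests earlier in this diff exercise exactly the caller workflow this overview describes: add addresses, record attempts, mark good ones, and request candidates. As a minimal sketch of that workflow, assuming only the API visible in this diff (New, Start, Stop, AddAddressByIP, GetAddress, Attempt, Good) with error handling elided; the scratch directory and peer address below are hypothetical:

```go
package main

import (
	"io/ioutil"

	"github.com/kaspanet/kaspad/addressmanager"
	"github.com/kaspanet/kaspad/config"
	"github.com/kaspanet/kaspad/dbaccess"
)

func main() {
	// Same construction as newAddrManagerForTest above: a default
	// config plus a database context backed by a scratch directory.
	cfg := config.DefaultConfig()
	dbPath, _ := ioutil.TempDir("", "amgr-example") // hypothetical path
	databaseContext, _ := dbaccess.New(dbPath)
	defer databaseContext.Close()

	amgr := addressmanager.New(cfg, databaseContext)
	amgr.Start()
	defer amgr.Stop()

	// Feed in an address learned from a peer (hypothetical IP/port).
	amgr.AddAddressByIP("203.0.113.5:16111", nil)

	// Select a candidate, record the dial attempt, and mark the
	// address good after a successful handshake so that selection
	// is biased toward it in the future.
	if ka := amgr.GetAddress(); ka != nil {
		na := ka.NetAddress()
		amgr.Attempt(na)
		// ... dial and handshake here ...
		amgr.Good(na, nil)
	}
}
```

Good is also what migrates an address into the tried buckets, which is why TestGoodChangeSubnetworkID above checks bucket membership right after calling it.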
@@ -2,12 +2,11 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr
package addressmanager

import (
    "time"

    "github.com/daglabs/btcd/wire"
    "github.com/kaspanet/kaspad/util/mstime"
    "github.com/kaspanet/kaspad/wire"
)

func TstKnownAddressIsBad(ka *KnownAddress) bool {
@@ -19,7 +18,7 @@ func TstKnownAddressChance(ka *KnownAddress) float64 {
}

func TstNewKnownAddress(na *wire.NetAddress, attempts int,
    lastattempt, lastsuccess time.Time, tried bool, refs int) *KnownAddress {
    return &KnownAddress{na: na, attempts: attempts, lastattempt: lastattempt,
        lastsuccess: lastsuccess, tried: tried, refs: refs}
    lastattempt, lastsuccess mstime.Time, tried bool, refs int) *KnownAddress {
    return &KnownAddress{netAddress: na, attempts: attempts, lastAttempt: lastattempt,
        lastSuccess: lastsuccess, tried: tried, referenceCount: refs}
}
@@ -2,33 +2,36 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr
package addressmanager

import (
    "github.com/kaspanet/kaspad/util/mstime"
    "time"

    "github.com/daglabs/btcd/util/subnetworkid"
    "github.com/kaspanet/kaspad/util/subnetworkid"

    "github.com/daglabs/btcd/wire"
    "github.com/kaspanet/kaspad/wire"
)

// KnownAddress tracks information about a known network address that is used
// to determine how viable an address is.
type KnownAddress struct {
    na           *wire.NetAddress
    srcAddr      *wire.NetAddress
    attempts     int
    lastattempt  time.Time
    lastsuccess  time.Time
    tried        bool
    refs         int // reference count of new buckets
    subnetworkID *subnetworkid.SubnetworkID
    netAddress     *wire.NetAddress
    sourceAddress  *wire.NetAddress
    attempts       int
    lastAttempt    mstime.Time
    lastSuccess    mstime.Time
    tried          bool
    referenceCount int // reference count of new buckets
    subnetworkID   *subnetworkid.SubnetworkID
    isBanned       bool
    bannedTime     mstime.Time
}

// NetAddress returns the underlying wire.NetAddress associated with the
// known address.
func (ka *KnownAddress) NetAddress() *wire.NetAddress {
    return ka.na
    return ka.netAddress
}

// SubnetworkID returns the subnetwork ID of the known address.
@@ -37,16 +40,16 @@ func (ka *KnownAddress) SubnetworkID() *subnetworkid.SubnetworkID {
}

// LastAttempt returns the last time the known address was attempted.
func (ka *KnownAddress) LastAttempt() time.Time {
    return ka.lastattempt
func (ka *KnownAddress) LastAttempt() mstime.Time {
    return ka.lastAttempt
}

// chance returns the selection probability for a known address. The priority
// chance returns the selection probability for a known address. The priority
// depends upon how recently the address has been seen, how recently it was last
// attempted and how often attempts to connect to it have failed.
func (ka *KnownAddress) chance() float64 {
    now := time.Now()
    lastAttempt := now.Sub(ka.lastattempt)
    now := mstime.Now()
    lastAttempt := now.Sub(ka.lastAttempt)

    if lastAttempt < 0 {
        lastAttempt = 0
@@ -76,27 +79,27 @@ func (ka *KnownAddress) chance() float64 {
// All addresses that meet these criteria are assumed to be worthless and not
// worth keeping hold of.
func (ka *KnownAddress) isBad() bool {
    if ka.lastattempt.After(time.Now().Add(-1 * time.Minute)) {
    if ka.lastAttempt.After(mstime.Now().Add(-1 * time.Minute)) {
        return false
    }

    // From the future?
    if ka.na.Timestamp.After(time.Now().Add(10 * time.Minute)) {
    if ka.netAddress.Timestamp.After(mstime.Now().Add(10 * time.Minute)) {
        return true
    }

    // Over a month old?
    if ka.na.Timestamp.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
    if ka.netAddress.Timestamp.Before(mstime.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
        return true
    }

    // Never succeeded?
    if ka.lastsuccess.IsZero() && ka.attempts >= numRetries {
    if ka.lastSuccess.IsZero() && ka.attempts >= numRetries {
        return true
    }

    // Hasn't succeeded in too long?
    if !ka.lastsuccess.After(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
    if !ka.lastSuccess.After(mstime.Now().Add(-1*minBadDays*time.Hour*24)) &&
        ka.attempts >= maxFailures {
        return true
    }
115  addressmanager/knownaddress_test.go  Normal file
@@ -0,0 +1,115 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addressmanager_test

import (
    "github.com/kaspanet/kaspad/util/mstime"
    "math"
    "testing"
    "time"

    "github.com/kaspanet/kaspad/addressmanager"
    "github.com/kaspanet/kaspad/wire"
)

func TestChance(t *testing.T) {
    now := mstime.Now()
    var tests = []struct {
        addr     *addressmanager.KnownAddress
        expected float64
    }{
        {
            //Test normal case
            addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
                0, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0),
            1.0,
        }, {
            //Test case in which lastseen < 0
            addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)},
                0, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0),
            1.0,
        }, {
            //Test case in which lastAttempt < 0
            addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
                0, mstime.Now().Add(30*time.Minute), mstime.Now(), false, 0),
            1.0 * .01,
        }, {
            //Test case in which lastAttempt < ten minutes
            addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
                0, mstime.Now().Add(-5*time.Minute), mstime.Now(), false, 0),
            1.0 * .01,
        }, {
            //Test case with several failed attempts.
            addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
                2, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0),
            1 / 1.5 / 1.5,
        },
    }

    err := .0001
    for i, test := range tests {
        chance := addressmanager.TstKnownAddressChance(test.addr)
        if math.Abs(test.expected-chance) >= err {
            t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
        }
    }
}

func TestIsBad(t *testing.T) {
    now := mstime.Now()
    future := now.Add(35 * time.Minute)
    monthOld := now.Add(-43 * time.Hour * 24)
    secondsOld := now.Add(-2 * time.Second)
    minutesOld := now.Add(-27 * time.Minute)
    hoursOld := now.Add(-5 * time.Hour)
    zeroTime := mstime.Time{}

    futureNa := &wire.NetAddress{Timestamp: future}
    minutesOldNa := &wire.NetAddress{Timestamp: minutesOld}
    monthOldNa := &wire.NetAddress{Timestamp: monthOld}
    currentNa := &wire.NetAddress{Timestamp: secondsOld}

    //Test addresses that have been tried in the last minute.
    if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
        t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
    }
    if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
        t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
    }
    if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
        t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
    }
    if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
        t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
    }
    if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
        t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
    }

    //Test address that claims to be from the future.
    if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
        t.Errorf("test case 6: addresses that claim to be from the future are bad.")
    }

    //Test address that has not been seen in over a month.
    if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
        t.Errorf("test case 7: addresses more than a month old are bad.")
    }

    //It has failed at least three times and never succeeded.
    if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
        t.Errorf("test case 8: addresses that have never succeeded are bad.")
    }

    //It has failed ten times in the last week
    if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
        t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
    }

    //Test an address that should work.
    if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
        t.Errorf("test case 10: This should be a valid address.")
    }
}
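TestChance above pins the selection probability down to concrete values: a fresh address scores 1.0, anything attempted within roughly the last ten minutes is scaled by 0.01, and each failed connection attempt divides the score by 1.5. The diff elides most of the chance body, so the following is only a reconstruction consistent with those expectations and with the fragment visible in the knownaddress.go hunk above, not necessarily the exact kaspad source:

```go
// Reconstruction of the package-internal chance(); matches the
// behaviour asserted by TestChance but is a sketch, not the diff.
func (ka *KnownAddress) chance() float64 {
	now := mstime.Now()
	lastAttempt := now.Sub(ka.lastAttempt)
	if lastAttempt < 0 {
		lastAttempt = 0
	}

	c := 1.0

	// Addresses attempted very recently are unlikely candidates:
	// scale their probability down to 1%.
	if lastAttempt < 10*time.Minute {
		c *= 0.01
	}

	// Each failed attempt reduces the probability by a factor of 1.5,
	// so two failures yield 1/1.5/1.5 as asserted in test case 5.
	for i := ka.attempts; i > 0; i-- {
		c /= 1.5
	}
	return c
}
```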
@@ -2,12 +2,12 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr
package addressmanager

import (
    "github.com/daglabs/btcd/logger"
    "github.com/daglabs/btcd/util/panics"
    "github.com/kaspanet/kaspad/logger"
    "github.com/kaspanet/kaspad/util/panics"
)

var log, _ = logger.Get(logger.SubsystemTags.ADXR)
var spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
var spawn = panics.GoroutineWrapperFunc(log)
@@ -2,15 +2,12 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr
package addressmanager

import (
    "fmt"
    "net"

    "github.com/daglabs/btcd/config"

    "github.com/daglabs/btcd/wire"
    "github.com/kaspanet/kaspad/wire"
)

var (
@@ -73,19 +70,6 @@ var (
    // rfc6598Net specifies the IPv4 block as defined by RFC6598 (100.64.0.0/10)
    rfc6598Net = ipNet("100.64.0.0", 10, 32)

    // onionCatNet defines the IPv6 address block used to support Tor.
    // bitcoind encodes a .onion address as a 16 byte number by decoding the
    // address prior to the .onion (i.e. the key hash) base32 into a ten
    // byte number. It then stores the first 6 bytes of the address as
    // 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43.
    //
    // This is the same range used by OnionCat, which is part of the
    // RFC4193 unique local IPv6 range.
    //
    // In summary the format is:
    // { magic 6 bytes, 10 bytes base32 decode of key hash }
    onionCatNet = ipNet("fd87:d87e:eb43::", 48, 128)

    // zero4Net defines the IPv4 address block for addresses starting with 0
    // (0.0.0.0/8).
    zero4Net = ipNet("0.0.0.0", 8, 32)
@@ -111,14 +95,6 @@ func IsLocal(na *wire.NetAddress) bool {
    return na.IP.IsLoopback() || zero4Net.Contains(na.IP)
}

// IsOnionCatTor returns whether or not the passed address is in the IPv6 range
// used by bitcoin to support Tor (fd87:d87e:eb43::/48). Note that this range
// is the same range used by OnionCat, which is part of the RFC4193 unique local
// IPv6 range.
func IsOnionCatTor(na *wire.NetAddress) bool {
    return onionCatNet.Contains(na.IP)
}

// IsRFC1918 returns whether or not the passed address is part of the IPv4
// private network address space as defined by RFC1918 (10.0.0.0/8,
// 172.16.0.0/12, or 192.168.0.0/16).
@@ -210,7 +186,7 @@ func IsRFC6598(na *wire.NetAddress) bool {
    return rfc6598Net.Contains(na.IP)
}

// IsValid returns whether or not the passed address is valid. The address is
// IsValid returns whether or not the passed address is valid. The address is
// considered invalid under the following circumstances:
// IPv4: It is either a zero or all bits set address.
// IPv6: It is either a zero or RFC3849 documentation address.
@@ -222,29 +198,28 @@ func IsValid(na *wire.NetAddress) bool {
}

// IsRoutable returns whether or not the passed address is routable over
// the public internet. This is true as long as the address is valid and is not
// the public internet. This is true as long as the address is valid and is not
// in any reserved ranges.
func IsRoutable(na *wire.NetAddress) bool {
    if config.ActiveNetParams().AcceptUnroutable {
func (am *AddressManager) IsRoutable(na *wire.NetAddress) bool {
    if am.cfg.NetParams().AcceptUnroutable {
        return !IsLocal(na)
    }

    return IsValid(na) && !(IsRFC1918(na) || IsRFC2544(na) ||
        IsRFC3927(na) || IsRFC4862(na) || IsRFC3849(na) ||
        IsRFC4843(na) || IsRFC5737(na) || IsRFC6598(na) ||
        IsLocal(na) || (IsRFC4193(na) && !IsOnionCatTor(na)))
        IsLocal(na) || (IsRFC4193(na)))
}

// GroupKey returns a string representing the network group an address is part
// of. This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
// "local" for a local address, the string "tor:key" where key is the /4 of the
// onion address for Tor address, and the string "unroutable" for an unroutable
// of. This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
// "local" for a local address, and the string "unroutable" for an unroutable
// address.
func GroupKey(na *wire.NetAddress) string {
func (am *AddressManager) GroupKey(na *wire.NetAddress) string {
    if IsLocal(na) {
        return "local"
    }
    if !IsRoutable(na) {
    if !am.IsRoutable(na) {
        return "unroutable"
    }
    if IsIPv4(na) {
@@ -270,14 +245,10 @@ func GroupKey(na *wire.NetAddress) string {
    }
    return ip.Mask(net.CIDRMask(16, 32)).String()
}
    if IsOnionCatTor(na) {
        // group is keyed off the first 4 bits of the actual onion key.
        return fmt.Sprintf("tor:%d", na.IP[6]&((1<<4)-1))
    }

    // OK, so now we know ourselves to be a IPv6 address.
    // bitcoind uses /32 for everything, except for Hurricane Electric's
    // (he.net) IP range, which it uses /36 for.
    // We use /32 for everything, except for Hurricane Electric's
    // (he.net) IP range, which we use /36 for.
    bits := 32
    if heNet.Contains(na.IP) {
        bits = 36
@@ -2,19 +2,20 @@
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package addrmgr_test
|
||||
package addressmanager
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/daglabs/btcd/addrmgr"
|
||||
"github.com/daglabs/btcd/wire"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// TestIPTypes ensures the various functions which determine the type of an IP
|
||||
// address based on RFCs work as intended.
|
||||
func TestIPTypes(t *testing.T) {
|
||||
amgr, teardown := newAddrManagerForTest(t, "TestAddAddressByIP", nil)
|
||||
defer teardown()
|
||||
type ipTest struct {
|
||||
in wire.NetAddress
|
||||
rfc1918 bool
|
||||
@@ -39,7 +40,7 @@ func TestIPTypes(t *testing.T) {
|
||||
rfc4193, rfc4380, rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598,
|
||||
local, valid, routable bool) ipTest {
|
||||
nip := net.ParseIP(ip)
|
||||
na := *wire.NewNetAddressIPPort(nip, 8333, wire.SFNodeNetwork)
|
||||
na := *wire.NewNetAddressIPPort(nip, 16111, wire.SFNodeNetwork)
|
||||
test := ipTest{na, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964, rfc4193, rfc4380,
|
||||
rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598, local, valid, routable}
|
||||
return test
|
||||
@@ -88,55 +89,55 @@ func TestIPTypes(t *testing.T) {
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for _, test := range tests {
|
||||
if rv := addrmgr.IsRFC1918(&test.in); rv != test.rfc1918 {
|
||||
if rv := IsRFC1918(&test.in); rv != test.rfc1918 {
|
||||
t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc1918)
|
||||
}
|
||||
|
||||
if rv := addrmgr.IsRFC3849(&test.in); rv != test.rfc3849 {
|
||||
if rv := IsRFC3849(&test.in); rv != test.rfc3849 {
|
||||
t.Errorf("IsRFC3849 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3849)
|
||||
}
|
||||
|
||||
if rv := addrmgr.IsRFC3927(&test.in); rv != test.rfc3927 {
|
||||
if rv := IsRFC3927(&test.in); rv != test.rfc3927 {
|
||||
t.Errorf("IsRFC3927 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3927)
|
||||
}
|
||||
|
||||
if rv := addrmgr.IsRFC3964(&test.in); rv != test.rfc3964 {
|
||||
if rv := IsRFC3964(&test.in); rv != test.rfc3964 {
|
||||
t.Errorf("IsRFC3964 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3964)
|
||||
}
|
||||
|
||||
if rv := addrmgr.IsRFC4193(&test.in); rv != test.rfc4193 {
|
||||
if rv := IsRFC4193(&test.in); rv != test.rfc4193 {
|
||||
t.Errorf("IsRFC4193 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4193)
|
||||
}
|
||||
|
||||
if rv := addrmgr.IsRFC4380(&test.in); rv != test.rfc4380 {
|
||||
if rv := IsRFC4380(&test.in); rv != test.rfc4380 {
|
||||
t.Errorf("IsRFC4380 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4380)
|
||||
}
|
||||
|
||||
if rv := addrmgr.IsRFC4843(&test.in); rv != test.rfc4843 {
|
||||
if rv := IsRFC4843(&test.in); rv != test.rfc4843 {
|
||||
t.Errorf("IsRFC4843 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4843)
|
||||
}
|
||||
|
||||
if rv := addrmgr.IsRFC4862(&test.in); rv != test.rfc4862 {
|
||||
if rv := IsRFC4862(&test.in); rv != test.rfc4862 {
|
||||
t.Errorf("IsRFC4862 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4862)
|
||||
}
|
||||
|
||||
if rv := addrmgr.IsRFC6052(&test.in); rv != test.rfc6052 {
|
||||
if rv := IsRFC6052(&test.in); rv != test.rfc6052 {
|
||||
t.Errorf("isRFC6052 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6052)
|
||||
}
|
||||
|
||||
if rv := addrmgr.IsRFC6145(&test.in); rv != test.rfc6145 {
|
||||
if rv := IsRFC6145(&test.in); rv != test.rfc6145 {
|
||||
t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6145)
|
||||
}
|
||||
|
||||
if rv := addrmgr.IsLocal(&test.in); rv != test.local {
|
||||
if rv := IsLocal(&test.in); rv != test.local {
|
||||
t.Errorf("IsLocal %s\n got: %v want: %v", test.in.IP, rv, test.local)
|
||||
}
|
||||
|
||||
if rv := addrmgr.IsValid(&test.in); rv != test.valid {
|
||||
if rv := IsValid(&test.in); rv != test.valid {
|
||||
t.Errorf("IsValid %s\n got: %v want: %v", test.in.IP, rv, test.valid)
|
||||
}
|
||||
|
||||
if rv := addrmgr.IsRoutable(&test.in); rv != test.routable {
|
||||
if rv := amgr.IsRoutable(&test.in); rv != test.routable {
|
||||
t.Errorf("IsRoutable %s\n got: %v want: %v", test.in.IP, rv, test.routable)
|
||||
}
|
||||
}
|
||||
@@ -145,6 +146,9 @@ func TestIPTypes(t *testing.T) {
// TestGroupKey tests the GroupKey function to ensure it properly groups various
// IP addresses.
func TestGroupKey(t *testing.T) {
amgr, teardown := newAddrManagerForTest(t, "TestGroupKey", nil)
defer teardown()

tests := []struct {
name string
ip string
@@ -179,9 +183,9 @@ func TestGroupKey(t *testing.T) {
{name: "ipv6 rfc6145 translated ipv4", ip: "::ffff:0:0c01:0203", expected: "12.1.0.0"},

// Tor.
{name: "ipv6 tor onioncat", ip: "fd87:d87e:eb43:1234::5678", expected: "tor:2"},
{name: "ipv6 tor onioncat 2", ip: "fd87:d87e:eb43:1245::6789", expected: "tor:2"},
{name: "ipv6 tor onioncat 3", ip: "fd87:d87e:eb43:1345::6789", expected: "tor:3"},
{name: "ipv6 tor onioncat", ip: "fd87:d87e:eb43:1234::5678", expected: "unroutable"},
{name: "ipv6 tor onioncat 2", ip: "fd87:d87e:eb43:1245::6789", expected: "unroutable"},
{name: "ipv6 tor onioncat 3", ip: "fd87:d87e:eb43:1345::6789", expected: "unroutable"},

// IPv6 normal.
{name: "ipv6 normal", ip: "2602:100::1", expected: "2602:100::"},
@@ -193,7 +197,7 @@ func TestGroupKey(t *testing.T) {
for i, test := range tests {
nip := net.ParseIP(test.ip)
na := *wire.NewNetAddressIPPort(nip, 8333, wire.SFNodeNetwork)
if key := addrmgr.GroupKey(&na); key != test.expected {
if key := amgr.GroupKey(&na); key != test.expected {
t.Errorf("TestGroupKey #%d (%s): unexpected group key "+
"- got '%s', want '%s'", i, test.name,
key, test.expected)
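The expectations in this table pin down GroupKey's behavior: IPv4-like addresses group by their /16 prefix, OnionCat Tor addresses used to get a "tor:" bucket derived from the bits just after the fd87:d87e:eb43::/48 prefix (this change drops them to "unroutable"), and ordinary IPv6 groups by /32. A minimal sketch of that grouping logic, using only the standard library; the helper names here are hypothetical, not the package's actual internals:

```go
package main

import (
	"fmt"
	"net"
)

// onionCatNet is the fd87:d87e:eb43::/48 prefix used by OnionCat-encoded
// Tor addresses.
var onionCatNet = net.IPNet{
	IP:   net.ParseIP("fd87:d87e:eb43::"),
	Mask: net.CIDRMask(48, 128),
}

// groupKeySketch illustrates the grouping rules the test table implies.
// It is a simplification, not the real addrmgr.GroupKey.
func groupKeySketch(ip net.IP) string {
	if ip4 := ip.To4(); ip4 != nil {
		// IPv4 (and IPv4-mapped) addresses group by /16.
		return ip4.Mask(net.CIDRMask(16, 32)).String()
	}
	if onionCatNet.Contains(ip) {
		// Historically: bucket by the 4 bits following the /48 prefix,
		// e.g. fd87:d87e:eb43:12xx -> "tor:2".
		return fmt.Sprintf("tor:%d", ip[6]&0x0f)
	}
	// Other IPv6 groups by /32 (e.g. "2602:100::").
	return ip.Mask(net.CIDRMask(32, 128)).String()
}

func main() {
	fmt.Println(groupKeySketch(net.ParseIP("12.1.2.3")))                  // 12.1.0.0
	fmt.Println(groupKeySketch(net.ParseIP("fd87:d87e:eb43:1234::5678"))) // tor:2
	fmt.Println(groupKeySketch(net.ParseIP("2602:100::1")))               // 2602:100::
}
```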
File diff suppressed because it is too large
@@ -1,17 +0,0 @@
#!/bin/sh

# This script uses gocov to generate a test coverage report.
# The gocov tool may be obtained with the following command:
# go get github.com/axw/gocov/gocov
#
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.

# Check for gocov.
type gocov >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo >&2 "This script requires the gocov tool."
echo >&2 "You may obtain it with the following command:"
echo >&2 "go get github.com/axw/gocov/gocov"
exit 1
fi
gocov test | gocov report
@@ -1,38 +0,0 @@
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package addrmgr implements a concurrency safe Bitcoin address manager.

Address Manager Overview

In order to maintain the peer-to-peer Bitcoin network, there needs to be a source
of addresses to connect to as nodes come and go. The Bitcoin protocol provides
the getaddr and addr messages to allow peers to communicate known addresses with
each other. However, there needs to be a mechanism to store those results and
select peers from them. It is also important to note that remote peers can't
be trusted to send valid peers nor attempt to provide you with only peers they
control with malicious intent.

With that in mind, this package provides a concurrency safe address manager for
caching and selecting peers in a non-deterministic manner. The general idea is
the caller adds addresses to the address manager and notifies it when addresses
are connected, known good, and attempted. The caller also requests addresses as
it needs them.

The address manager internally segregates the addresses into groups and
non-deterministically selects groups in a cryptographically random manner. This
reduces the chances that multiple addresses from the same nets are selected, which
generally helps provide greater peer diversity, and perhaps more importantly,
drastically reduces the chances an attacker is able to coerce your peer into
only connecting to nodes they control.

The address manager also understands routability and Tor addresses and tries
hard to only return routable addresses. In addition, it uses the information
provided by the caller about connected, known good, and attempted addresses to
periodically purge peers which no longer appear to be good peers as well as
bias the selection toward known good peers. The general idea is to make a best
effort at only providing usable addresses.
*/
package addrmgr
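The workflow this overview describes — add addresses, report attempts and successes, ask for candidates — maps onto a small API surface. A minimal, hedged sketch of a caller's loop; the constructor signature and method names follow upstream btcd and may differ slightly in this codebase:

```go
package main

import (
	"fmt"
	"net"

	"github.com/daglabs/btcd/addrmgr"
	"github.com/daglabs/btcd/wire"
)

func main() {
	// New takes a data directory for peers.json and a DNS lookup function.
	// (Signature as in upstream btcd; assumed here.)
	amgr := addrmgr.New("/tmp/addrmgr-example", net.LookupIP)
	amgr.Start()
	defer amgr.Stop()

	// The caller feeds in addresses learned from addr messages...
	addr := wire.NewNetAddressIPPort(net.ParseIP("203.0.113.5"), 8333, wire.SFNodeNetwork)
	src := wire.NewNetAddressIPPort(net.ParseIP("198.51.100.7"), 8333, wire.SFNodeNetwork)
	amgr.AddAddress(addr, src)

	// ...asks for a candidate to dial, and reports the outcome so the
	// manager can bias future selection toward known-good peers.
	if ka := amgr.GetAddress(); ka != nil {
		na := ka.NetAddress()
		amgr.Attempt(na)
		// On a successful handshake the caller would mark it good:
		amgr.Good(na)
		fmt.Println("dialed", na.IP)
	}
}
```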
@@ -1,114 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr_test

import (
"math"
"testing"
"time"

"github.com/daglabs/btcd/addrmgr"
"github.com/daglabs/btcd/wire"
)

func TestChance(t *testing.T) {
now := time.Unix(time.Now().Unix(), 0)
var tests = []struct {
addr *addrmgr.KnownAddress
expected float64
}{
{
//Test normal case
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1.0,
}, {
//Test case in which lastseen < 0
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)},
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1.0,
}, {
//Test case in which lastattempt < 0
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(30*time.Minute), time.Now(), false, 0),
1.0 * .01,
}, {
//Test case in which lastattempt < ten minutes
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(-5*time.Minute), time.Now(), false, 0),
1.0 * .01,
}, {
//Test case with several failed attempts.
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
2, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1 / 1.5 / 1.5,
},
}

err := .0001
for i, test := range tests {
chance := addrmgr.TstKnownAddressChance(test.addr)
if math.Abs(test.expected-chance) >= err {
t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
}
}
}

func TestIsBad(t *testing.T) {
now := time.Unix(time.Now().Unix(), 0)
future := now.Add(35 * time.Minute)
monthOld := now.Add(-43 * time.Hour * 24)
secondsOld := now.Add(-2 * time.Second)
minutesOld := now.Add(-27 * time.Minute)
hoursOld := now.Add(-5 * time.Hour)
zeroTime := time.Time{}

futureNa := &wire.NetAddress{Timestamp: future}
minutesOldNa := &wire.NetAddress{Timestamp: minutesOld}
monthOldNa := &wire.NetAddress{Timestamp: monthOld}
currentNa := &wire.NetAddress{Timestamp: secondsOld}

//Test addresses that have been tried in the last minute.
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
}

//Test address that claims to be from the future.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 6: addresses that claim to be from the future are bad.")
}

//Test address that has not been seen in over a month.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 7: addresses more than a month old are bad.")
}

//It has failed at least three times and never succeeded.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
t.Errorf("test case 8: addresses that have never succeeded are bad.")
}

//It has failed ten times in the last week
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
}

//Test an address that should work.
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 10: This should be a valid address.")
}
}
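Taken together, these tables document the selection and staleness heuristics the known-address code applies: each failed attempt scales the selection chance by 1/1.5, an attempt within roughly the last ten minutes scales it by 0.01, and an address is considered bad once it advertises a future timestamp, has not been seen for over a month, has failed three or more times without ever succeeding, or has failed ten or more times since its last success over a week ago. A hedged sketch of those rules; field names and thresholds are illustrative reconstructions from the tests, not the package's actual internals:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// knownAddrSketch mirrors the fields the tests above exercise.
type knownAddrSketch struct {
	timestamp   time.Time // last time the address was seen advertised
	lastAttempt time.Time
	lastSuccess time.Time
	attempts    int
}

// chance reproduces the selection-probability rules implied by TestChance.
func (ka *knownAddrSketch) chance() float64 {
	c := 1.0
	// Deprioritize addresses attempted within the last ten minutes.
	if time.Since(ka.lastAttempt) < 10*time.Minute {
		c *= 0.01
	}
	// Each failed attempt scales the chance down by 1/1.5.
	c *= math.Pow(1.0/1.5, float64(ka.attempts))
	return c
}

// isBad reproduces the staleness rules implied by TestIsBad.
func (ka *knownAddrSketch) isBad() bool {
	// Anything tried in the last minute gets the benefit of the doubt.
	if time.Since(ka.lastAttempt) < time.Minute {
		return false
	}
	switch {
	case ka.timestamp.After(time.Now().Add(10 * time.Minute)): // from the future
		return true
	case time.Since(ka.timestamp) > 30*24*time.Hour: // not seen in over a month
		return true
	case ka.lastSuccess.IsZero() && ka.attempts >= 3: // never succeeded
		return true
	case time.Since(ka.lastSuccess) > 7*24*time.Hour && ka.attempts >= 10: // keeps failing
		return true
	}
	return false
}

func main() {
	ka := &knownAddrSketch{
		timestamp:   time.Now().Add(-35 * time.Second),
		lastAttempt: time.Now().Add(-30 * time.Minute),
		attempts:    2,
	}
	fmt.Printf("chance=%.4f bad=%v\n", ka.chance(), ka.isBad())
}
```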
@@ -1,62 +0,0 @@

github.com/conformal/btcd/addrmgr/network.go GroupKey 100.00% (23/23)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.reset 100.00% (6/6)
github.com/conformal/btcd/addrmgr/network.go IsRFC5737 100.00% (4/4)
github.com/conformal/btcd/addrmgr/network.go IsRFC1918 100.00% (4/4)
github.com/conformal/btcd/addrmgr/addrmanager.go New 100.00% (3/3)
github.com/conformal/btcd/addrmgr/addrmanager.go NetAddressKey 100.00% (2/2)
github.com/conformal/btcd/addrmgr/network.go IsRFC4862 100.00% (1/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.numAddresses 100.00% (1/1)
github.com/conformal/btcd/addrmgr/log.go init 100.00% (1/1)
github.com/conformal/btcd/addrmgr/log.go DisableLog 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go ipNet 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsIPv4 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsLocal 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsOnionCatTor 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC2544 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3849 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3927 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3964 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4193 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4380 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4843 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6052 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6145 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6598 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsValid 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRoutable 100.00% (1/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetBestLocalAddress 94.74% (18/19)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddLocalAddress 90.91% (10/11)
github.com/conformal/btcd/addrmgr/addrmanager.go getReachabilityFrom 51.52% (17/33)
github.com/conformal/btcd/addrmgr/addrmanager.go ipString 50.00% (2/4)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetAddress 9.30% (4/43)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.deserializePeers 0.00% (0/50)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Good 0.00% (0/44)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.savePeers 0.00% (0/39)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.updateAddress 0.00% (0/30)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.expireNew 0.00% (0/22)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddressCache 0.00% (0/16)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.HostToNetAddress 0.00% (0/15)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getNewBucket 0.00% (0/15)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddressByIP 0.00% (0/14)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getTriedBucket 0.00% (0/14)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.chance 0.00% (0/13)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.loadPeers 0.00% (0/11)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.isBad 0.00% (0/11)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Connected 0.00% (0/10)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.addressHandler 0.00% (0/9)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.pickTried 0.00% (0/8)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.DeserializeNetAddress 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Stop 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Attempt 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Start 0.00% (0/6)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddresses 0.00% (0/4)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NeedMoreAddresses 0.00% (0/3)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NumAddresses 0.00% (0/3)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddress 0.00% (0/3)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.LastAttempt 0.00% (0/1)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.NetAddress 0.00% (0/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.find 0.00% (0/1)
github.com/conformal/btcd/addrmgr/log.go UseLogger 0.00% (0/1)
github.com/conformal/btcd/addrmgr --------------------------------- 21.04% (113/537)
@@ -1,126 +0,0 @@
package config

import (
"errors"
"github.com/daglabs/btcd/apiserver/logger"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/util"
"github.com/jessevdk/go-flags"
"path/filepath"
)

const (
defaultLogFilename = "apiserver.log"
defaultErrLogFilename = "apiserver_err.log"
)

var (
// activeNetParams are the currently active net params
activeNetParams dagconfig.Params
)

var (
// Default configuration options
defaultLogDir = util.AppDataDir("apiserver", false)
defaultDBAddress = "localhost:3306"
defaultHTTPListen = "0.0.0.0:8080"
)

// Config defines the configuration options for the API server.
type Config struct {
LogDir string `long:"logdir" description:"Directory to log output."`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
DBAddress string `long:"dbaddress" description:"Database address"`
DBUser string `long:"dbuser" description:"Database user" required:"true"`
DBPassword string `long:"dbpass" description:"Database password" required:"true"`
DBName string `long:"dbname" description:"Database name" required:"true"`
HTTPListen string `long:"listen" description:"HTTP address to listen on (default: 0.0.0.0:8080)"`
Migrate bool `long:"migrate" description:"Migrate the database to the latest version. The server will not start when using this flag."`
TestNet bool `long:"testnet" description:"Connect to testnet"`
SimNet bool `long:"simnet" description:"Connect to the simulation test network"`
DevNet bool `long:"devnet" description:"Connect to the development test network"`
}

// Parse parses the CLI arguments and returns a config struct.
func Parse() (*Config, error) {
cfg := &Config{
LogDir: defaultLogDir,
DBAddress: defaultDBAddress,
HTTPListen: defaultHTTPListen,
}
parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
_, err := parser.Parse()
if err != nil {
return nil, err
}

if !cfg.Migrate {
if cfg.RPCUser == "" {
return nil, errors.New("--rpcuser is required if --migrate flag is not used")
}
if cfg.RPCPassword == "" {
return nil, errors.New("--rpcpass is required if --migrate flag is not used")
}
if cfg.RPCServer == "" {
return nil, errors.New("--rpcserver is required if --migrate flag is not used")
}
}

if cfg.RPCCert == "" && !cfg.DisableTLS {
return nil, errors.New("--rpccert is required if --notls is not used")
}

if cfg.RPCCert != "" && cfg.DisableTLS {
return nil, errors.New("--rpccert should be omitted if --notls is used")
}

err = resolveNetwork(cfg)
if err != nil {
return nil, err
}

logFile := filepath.Join(cfg.LogDir, defaultLogFilename)
errLogFile := filepath.Join(cfg.LogDir, defaultErrLogFilename)
logger.InitLog(logFile, errLogFile)

return cfg, nil
}

func resolveNetwork(cfg *Config) error {
// Multiple networks can't be selected simultaneously.
numNets := 0
if cfg.TestNet {
numNets++
}
if cfg.SimNet {
numNets++
}
if cfg.DevNet {
numNets++
}
if numNets > 1 {
return errors.New("multiple net params (testnet, simnet, devnet, etc.) can't be used " +
"together -- choose one of them")
}

activeNetParams = dagconfig.MainNetParams
switch {
case cfg.TestNet:
activeNetParams = dagconfig.TestNetParams
case cfg.SimNet:
activeNetParams = dagconfig.SimNetParams
case cfg.DevNet:
activeNetParams = dagconfig.DevNetParams
}

return nil
}

// ActiveNetParams returns the currently active net params
func ActiveNetParams() *dagconfig.Params {
return &activeNetParams
}
@@ -1,79 +0,0 @@
package controllers

import (
"encoding/hex"
"fmt"
"net/http"

"github.com/daglabs/btcd/apiserver/database"
"github.com/daglabs/btcd/apiserver/models"
"github.com/daglabs/btcd/apiserver/utils"
"github.com/daglabs/btcd/util/daghash"
)

const (
// OrderAscending is a parameter that can be used
// in a get list handler to get a list ordered
// in an ascending order.
OrderAscending = "asc"

// OrderDescending is a parameter that can be used
// in a get list handler to get a list ordered
// in a descending order.
OrderDescending = "desc"
)

const maxGetBlocksLimit = 100

// GetBlockByHashHandler returns a block by a given hash.
func GetBlockByHashHandler(blockHash string) (interface{}, *utils.HandlerError) {
if bytes, err := hex.DecodeString(blockHash); err != nil || len(bytes) != daghash.HashSize {
return nil, utils.NewHandlerError(http.StatusUnprocessableEntity,
fmt.Sprintf("The given block hash is not a hex-encoded %d-byte hash.", daghash.HashSize))
}

db, err := database.DB()
if err != nil {
return nil, utils.NewInternalServerHandlerError(err.Error())
}

block := &models.Block{}
dbResult := db.Where(&models.Block{BlockHash: blockHash}).Preload("AcceptingBlock").First(block)
dbErrors := dbResult.GetErrors()
if utils.IsDBRecordNotFoundError(dbErrors) {
return nil, utils.NewHandlerError(http.StatusNotFound, "No block with the given block hash was found.")
}
if utils.HasDBError(dbErrors) {
return nil, utils.NewHandlerErrorFromDBErrors("Some errors were encountered when loading transactions from the database:", dbResult.GetErrors())
}
return convertBlockModelToBlockResponse(block), nil
}

// GetBlocksHandler searches for all blocks
func GetBlocksHandler(order string, skip uint64, limit uint64) (interface{}, *utils.HandlerError) {
if limit > maxGetBlocksLimit {
return nil, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("The maximum allowed value for the limit is %d", maxGetBlocksLimit))
}
blocks := []*models.Block{}
db, err := database.DB()
if err != nil {
return nil, utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
}
query := db.
Limit(limit).
Offset(skip).
Preload("AcceptingBlock")
if order == OrderAscending {
query = query.Order("`id` ASC")
} else if order == OrderDescending {
query = query.Order("`id` DESC")
} else {
return nil, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("'%s' is not a valid order", order))
}
query.Find(&blocks)
blockResponses := make([]*blockResponse, len(blocks))
for i, block := range blocks {
blockResponses[i] = convertBlockModelToBlockResponse(block)
}
return blockResponses, nil
}
@@ -1,13 +0,0 @@
package controllers

import "github.com/daglabs/btcd/apiserver/utils"

// GetFeeEstimatesHandler returns the fee estimates for different priorities
// for accepting a transaction in the DAG.
func GetFeeEstimatesHandler() (interface{}, *utils.HandlerError) {
return &feeEstimateResponse{
HighPriority: 3,
NormalPriority: 2,
LowPriority: 1,
}, nil
}
@@ -1,6 +0,0 @@
package controllers

// RawTransaction represents a raw transaction posted to the API server
type RawTransaction struct {
RawTransaction string `json:"rawTransaction"`
}
@@ -1,113 +0,0 @@
package controllers

import (
"encoding/hex"
"github.com/daglabs/btcd/apiserver/models"
"github.com/daglabs/btcd/btcjson"
)

type transactionResponse struct {
TransactionHash string `json:"transactionHash"`
TransactionID string `json:"transactionId"`
AcceptingBlockHash string `json:"acceptingBlockHash,omitempty"`
AcceptingBlockBlueScore uint64 `json:"acceptingBlockBlueScore,omitempty"`
SubnetworkID string `json:"subnetworkId"`
LockTime uint64 `json:"lockTime"`
Gas uint64 `json:"gas,omitempty"`
PayloadHash string `json:"payloadHash,omitempty"`
Payload string `json:"payload,omitempty"`
Inputs []*transactionInputResponse `json:"inputs"`
Outputs []*transactionOutputResponse `json:"outputs"`
Mass uint64 `json:"mass"`
}

type transactionOutputResponse struct {
TransactionID string `json:"transactionId,omitempty"`
Value uint64 `json:"value"`
ScriptPubKey string `json:"scriptPubKey"`
Address string `json:"address,omitempty"`
AcceptingBlockHash string `json:"acceptingBlockHash,omitempty"`
AcceptingBlockBlueScore uint64 `json:"acceptingBlockBlueScore,omitempty"`
}

type transactionInputResponse struct {
TransactionID string `json:"transactionId,omitempty"`
PreviousTransactionID string `json:"previousTransactionId"`
PreviousTransactionOutputIndex uint32 `json:"previousTransactionOutputIndex"`
SignatureScript string `json:"signatureScript"`
Sequence uint64 `json:"sequence"`
Address string `json:"address"`
}

type blockResponse struct {
BlockHash string
Version int32
HashMerkleRoot string
AcceptedIDMerkleRoot string
UTXOCommitment string
Timestamp uint64
Bits uint32
Nonce uint64
AcceptingBlockHash *string
BlueScore uint64
IsChainBlock bool
Mass uint64
}

type feeEstimateResponse struct {
HighPriority, NormalPriority, LowPriority float64
}

func convertTxModelToTxResponse(tx *models.Transaction) *transactionResponse {
txRes := &transactionResponse{
TransactionHash: tx.TransactionHash,
TransactionID: tx.TransactionID,
AcceptingBlockHash: tx.AcceptingBlock.BlockHash,
AcceptingBlockBlueScore: tx.AcceptingBlock.BlueScore,
SubnetworkID: tx.Subnetwork.SubnetworkID,
LockTime: tx.LockTime,
Gas: tx.Gas,
PayloadHash: tx.PayloadHash,
Payload: hex.EncodeToString(tx.Payload),
Inputs: make([]*transactionInputResponse, len(tx.TransactionInputs)),
Outputs: make([]*transactionOutputResponse, len(tx.TransactionOutputs)),
Mass: tx.Mass,
}
for i, txOut := range tx.TransactionOutputs {
txRes.Outputs[i] = &transactionOutputResponse{
Value: txOut.Value,
ScriptPubKey: hex.EncodeToString(txOut.ScriptPubKey),
Address: txOut.Address.Address,
}
}
for i, txIn := range tx.TransactionInputs {
txRes.Inputs[i] = &transactionInputResponse{
PreviousTransactionID: txIn.PreviousTransactionOutput.Transaction.TransactionID,
PreviousTransactionOutputIndex: txIn.PreviousTransactionOutput.Index,
SignatureScript: hex.EncodeToString(txIn.SignatureScript),
Sequence: txIn.Sequence,
Address: txIn.PreviousTransactionOutput.Address.Address,
}
}
return txRes
}

func convertBlockModelToBlockResponse(block *models.Block) *blockResponse {
blockRes := &blockResponse{
BlockHash: block.BlockHash,
Version: block.Version,
HashMerkleRoot: block.HashMerkleRoot,
AcceptedIDMerkleRoot: block.AcceptedIDMerkleRoot,
UTXOCommitment: block.UTXOCommitment,
Timestamp: uint64(block.Timestamp.Unix()),
Bits: block.Bits,
Nonce: block.Nonce,
BlueScore: block.BlueScore,
IsChainBlock: block.IsChainBlock,
Mass: block.Mass,
}
if block.AcceptingBlock != nil {
blockRes.AcceptingBlockHash = btcjson.String(block.AcceptingBlock.BlockHash)
}
return blockRes
}
@@ -1,187 +0,0 @@
package controllers

import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"net/http"

"github.com/daglabs/btcd/apiserver/database"
"github.com/daglabs/btcd/apiserver/jsonrpc"
"github.com/daglabs/btcd/apiserver/models"
"github.com/daglabs/btcd/apiserver/utils"
"github.com/daglabs/btcd/btcjson"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/jinzhu/gorm"
)

const maxGetTransactionsLimit = 1000

// GetTransactionByIDHandler returns a transaction by a given transaction ID.
func GetTransactionByIDHandler(txID string) (interface{}, *utils.HandlerError) {
if bytes, err := hex.DecodeString(txID); err != nil || len(bytes) != daghash.TxIDSize {
return nil, utils.NewHandlerError(http.StatusUnprocessableEntity,
fmt.Sprintf("The given txid is not a hex-encoded %d-byte hash.", daghash.TxIDSize))
}

db, err := database.DB()
if err != nil {
return nil, utils.NewInternalServerHandlerError(err.Error())
}

tx := &models.Transaction{}
query := db.Where(&models.Transaction{TransactionID: txID})
dbResult := addTxPreloadedFields(query).First(&tx)
dbErrors := dbResult.GetErrors()
if utils.IsDBRecordNotFoundError(dbErrors) {
return nil, utils.NewHandlerError(http.StatusNotFound, "No transaction with the given txid was found.")
}
if utils.HasDBError(dbErrors) {
return nil, utils.NewHandlerErrorFromDBErrors("Some errors were encountered when loading transaction from the database:", dbErrors)
}
return convertTxModelToTxResponse(tx), nil
}

// GetTransactionByHashHandler returns a transaction by a given transaction hash.
func GetTransactionByHashHandler(txHash string) (interface{}, *utils.HandlerError) {
if bytes, err := hex.DecodeString(txHash); err != nil || len(bytes) != daghash.HashSize {
return nil, utils.NewHandlerError(http.StatusUnprocessableEntity,
fmt.Sprintf("The given txhash is not a hex-encoded %d-byte hash.", daghash.HashSize))
}

db, err := database.DB()
if err != nil {
return nil, utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
}

tx := &models.Transaction{}
query := db.Where(&models.Transaction{TransactionHash: txHash})
dbResult := addTxPreloadedFields(query).First(&tx)
dbErrors := dbResult.GetErrors()
if utils.IsDBRecordNotFoundError(dbErrors) {
return nil, utils.NewHandlerError(http.StatusNotFound, "No transaction with the given txhash was found.")
}
if utils.HasDBError(dbErrors) {
return nil, utils.NewHandlerErrorFromDBErrors("Some errors were encountered when loading transaction from the database:", dbErrors)
}
return convertTxModelToTxResponse(tx), nil
}

// GetTransactionsByAddressHandler searches for all transactions
// where the given address is either an input or an output.
func GetTransactionsByAddressHandler(address string, skip uint64, limit uint64) (interface{}, *utils.HandlerError) {
if limit > maxGetTransactionsLimit {
return nil, utils.NewHandlerError(http.StatusUnprocessableEntity,
fmt.Sprintf("The maximum allowed value for the limit is %d", maxGetTransactionsLimit))
}

db, err := database.DB()
if err != nil {
return nil, utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
}

txs := []*models.Transaction{}
query := db.
Joins("LEFT JOIN `transaction_outputs` ON `transaction_outputs`.`transaction_id` = `transactions`.`id`").
Joins("LEFT JOIN `addresses` AS `out_addresses` ON `out_addresses`.`id` = `transaction_outputs`.`address_id`").
Joins("LEFT JOIN `transaction_inputs` ON `transaction_inputs`.`transaction_id` = `transactions`.`id`").
Joins("LEFT JOIN `transaction_outputs` AS `inputs_outs` ON `inputs_outs`.`id` = `transaction_inputs`.`transaction_output_id`").
Joins("LEFT JOIN `addresses` AS `in_addresses` ON `in_addresses`.`id` = `inputs_outs`.`address_id`").
Where("`out_addresses`.`address` = ?", address).
Or("`in_addresses`.`address` = ?", address).
Limit(limit).
Offset(skip).
Order("`transactions`.`id` ASC")
dbResult := addTxPreloadedFields(query).Find(&txs)
dbErrors := dbResult.GetErrors()
if utils.HasDBError(dbErrors) {
return nil, utils.NewHandlerErrorFromDBErrors("Some errors were encountered when loading transactions from the database:", dbErrors)
}
txResponses := make([]*transactionResponse, len(txs))
for i, tx := range txs {
txResponses[i] = convertTxModelToTxResponse(tx)
}
return txResponses, nil
}

// GetUTXOsByAddressHandler searches for all UTXOs that belong to a certain address.
func GetUTXOsByAddressHandler(address string) (interface{}, *utils.HandlerError) {
db, err := database.DB()
if err != nil {
return nil, utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
}

var transactionOutputs []*models.TransactionOutput
dbErrors := db.
Joins("LEFT JOIN `addresses` ON `addresses`.`id` = `transaction_outputs`.`address_id`").
Where("`addresses`.`address` = ? AND `transaction_outputs`.`is_spent` = 0", address).
Preload("Transaction.AcceptingBlock").
Find(&transactionOutputs).GetErrors()
if len(dbErrors) > 0 {
return nil, utils.NewHandlerErrorFromDBErrors("Some errors were encountered when loading UTXOs from the database:", dbErrors)
}

UTXOsResponses := make([]*transactionOutputResponse, len(transactionOutputs))
for i, transactionOutput := range transactionOutputs {
UTXOsResponses[i] = &transactionOutputResponse{
Value: transactionOutput.Value,
ScriptPubKey: hex.EncodeToString(transactionOutput.ScriptPubKey),
AcceptingBlockHash: transactionOutput.Transaction.AcceptingBlock.BlockHash,
AcceptingBlockBlueScore: transactionOutput.Transaction.AcceptingBlock.BlueScore,
}
}
return UTXOsResponses, nil
}

func addTxPreloadedFields(query *gorm.DB) *gorm.DB {
return query.Preload("AcceptingBlock").
Preload("Subnetwork").
Preload("TransactionOutputs").
Preload("TransactionOutputs.Address").
Preload("TransactionInputs.PreviousTransactionOutput.Transaction").
Preload("TransactionInputs.PreviousTransactionOutput.Address")
}

// PostTransaction forwards a raw transaction to the JSON-RPC API server
func PostTransaction(requestBody []byte) *utils.HandlerError {
client, err := jsonrpc.GetClient()
if err != nil {
return utils.NewInternalServerHandlerError(err.Error())
}

rawTx := &RawTransaction{}
err = json.Unmarshal(requestBody, rawTx)
if err != nil {
return utils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
fmt.Sprintf("Error unmarshalling request body: %s", err),
"The request body is not json-formatted")
}

txBytes, err := hex.DecodeString(rawTx.RawTransaction)
if err != nil {
return utils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
fmt.Sprintf("Error decoding hex raw transaction: %s", err),
"The raw transaction is not a hex-encoded transaction")
}

txReader := bytes.NewReader(txBytes)
tx := &wire.MsgTx{}
err = tx.BtcDecode(txReader, 0)
if err != nil {
return utils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
fmt.Sprintf("Error decoding raw transaction: %s", err),
"Error decoding raw transaction")
}

_, err = client.SendRawTransaction(tx, true)
if err != nil {
if rpcErr, ok := err.(btcjson.RPCError); ok && rpcErr.Code == btcjson.ErrRPCVerify {
return utils.NewHandlerError(http.StatusInternalServerError, rpcErr.Message)
}
return utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
}

return nil
}
@@ -1,140 +0,0 @@
package database

import (
"errors"
"fmt"
"os"

"github.com/daglabs/btcd/apiserver/config"
"github.com/golang-migrate/migrate/v4/source"
"github.com/jinzhu/gorm"

"github.com/golang-migrate/migrate/v4"
)

// db is the API server database.
var db *gorm.DB

// DB returns a reference to the database connection
func DB() (*gorm.DB, error) {
if db == nil {
return nil, errors.New("Database is not connected")
}
return db, nil
}

type gormLogger struct{}

func (l gormLogger) Print(v ...interface{}) {
str := fmt.Sprint(v...)
log.Errorf(str)
}

// Connect connects to the database specified in the
// config variable.
func Connect(cfg *config.Config) error {
connectionString := buildConnectionString(cfg)
migrator, driver, err := openMigrator(connectionString)
if err != nil {
return err
}
isCurrent, version, err := isCurrent(migrator, driver)
if err != nil {
return fmt.Errorf("Error checking whether the database is current: %s", err)
}
if !isCurrent {
return fmt.Errorf("Database is not current (version %d). Please migrate"+
" the database by running the server with the --migrate flag and then run it again.", version)
}

db, err = gorm.Open("mysql", connectionString)
if err != nil {
return err
}

db.SetLogger(gormLogger{})
return nil
}

// Close closes the connection to the database
func Close() error {
if db == nil {
return nil
}
err := db.Close()
db = nil
return err
}

func buildConnectionString(cfg *config.Config) string {
return fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True",
cfg.DBUser, cfg.DBPassword, cfg.DBAddress, cfg.DBName)
}

// isCurrent resolves whether the database is on the latest
// version of the schema.
func isCurrent(migrator *migrate.Migrate, driver source.Driver) (bool, uint, error) {
// Get the current version
version, isDirty, err := migrator.Version()
if err == migrate.ErrNilVersion {
return false, 0, nil
}
if err != nil {
return false, 0, err
}
if isDirty {
return false, 0, fmt.Errorf("Database is dirty")
}

// The database is current if Next returns ErrNotExist
_, err = driver.Next(version)
if pathErr, ok := err.(*os.PathError); ok {
if pathErr.Err == os.ErrNotExist {
return true, version, nil
}
}
return false, version, err
}

func openMigrator(connectionString string) (*migrate.Migrate, source.Driver, error) {
driver, err := source.Open("file://migrations")
if err != nil {
return nil, nil, err
}
migrator, err := migrate.NewWithSourceInstance(
"migrations", driver, "mysql://"+connectionString)
if err != nil {
return nil, nil, err
}
return migrator, driver, nil
}

// Migrate migrates the database to the latest version.
func Migrate(cfg *config.Config) error {
connectionString := buildConnectionString(cfg)
migrator, driver, err := openMigrator(connectionString)
if err != nil {
return err
}
isCurrent, version, err := isCurrent(migrator, driver)
if err != nil {
return fmt.Errorf("Error checking whether the database is current: %s", err)
}
if isCurrent {
log.Infof("Database is already up-to-date (version %d)", version)
return nil
}
err = migrator.Up()
if err != nil {
return err
}
version, isDirty, err := migrator.Version()
if err != nil {
return err
}
if isDirty {
return fmt.Errorf("error migrating database: database is dirty")
}
log.Infof("Migrated database to the latest version (version %d)", version)
return nil
}
@@ -1,9 +0,0 @@
package database

import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"

var (
log = logger.BackendLog.Logger("DTBS")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)
@@ -1,28 +0,0 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build

RUN mkdir -p /go/src/github.com/daglabs/btcd

WORKDIR /go/src/github.com/daglabs/btcd

RUN apk add --no-cache curl git

COPY go.mod .
COPY go.sum .

RUN go mod download

COPY . .

RUN cd apiserver && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o apiserver .

# --- multistage docker build: stage #2: runtime image
FROM alpine
WORKDIR /app

RUN apk add --no-cache tini

COPY --from=build /go/src/github.com/daglabs/btcd/apiserver/ /app/

ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/app/apiserver"]
@@ -1,125 +0,0 @@
package jsonrpc

import (
"errors"
"fmt"
"io/ioutil"
"time"

"github.com/daglabs/btcd/apiserver/config"
"github.com/daglabs/btcd/util/daghash"

"github.com/daglabs/btcd/rpcclient"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
)

// Client represents a connection to the JSON-RPC API of a full node
type Client struct {
*rpcclient.Client
OnBlockAdded chan *BlockAddedMsg
OnChainChanged chan *ChainChangedMsg
}

var client *Client

// GetClient returns an instance of the JSON-RPC client if an active connection exists
func GetClient() (*Client, error) {
if client == nil {
return nil, errors.New("JSON-RPC is not connected")
}

return client, nil
}

// BlockAddedMsg defines the message received in onBlockAdded
type BlockAddedMsg struct {
ChainHeight uint64
Header *wire.BlockHeader
}

// ChainChangedMsg defines the message received in onChainChanged
type ChainChangedMsg struct {
RemovedChainBlockHashes []*daghash.Hash
AddedChainBlocks []*rpcclient.ChainBlock
}

// Close closes the connection to the JSON-RPC API server
func Close() {
if client == nil {
return
}

client.Disconnect()
client = nil
}

// Connect initiates a connection to the JSON-RPC API Server
func Connect(cfg *config.Config) error {
var cert []byte
if !cfg.DisableTLS {
var err error
cert, err = ioutil.ReadFile(cfg.RPCCert)
if err != nil {
return fmt.Errorf("Error reading certificates file: %s", err)
}
}

connCfg := &rpcclient.ConnConfig{
Host: cfg.RPCServer,
Endpoint: "ws",
User: cfg.RPCUser,
Pass: cfg.RPCPassword,
DisableTLS: cfg.DisableTLS,
RequestTimeout: time.Second * 5,
}

if !cfg.DisableTLS {
connCfg.Certificates = cert
}

var err error
client, err = newClient(connCfg)
if err != nil {
return fmt.Errorf("Error connecting to address %s: %s", cfg.RPCServer, err)
}

return nil
}

func newClient(connCfg *rpcclient.ConnConfig) (*Client, error) {
client = &Client{
OnBlockAdded: make(chan *BlockAddedMsg),
OnChainChanged: make(chan *ChainChangedMsg),
}
notificationHandlers := &rpcclient.NotificationHandlers{
OnFilteredBlockAdded: func(height uint64, header *wire.BlockHeader,
txs []*util.Tx) {
client.OnBlockAdded <- &BlockAddedMsg{
ChainHeight: height,
Header: header,
}
},
OnChainChanged: func(removedChainBlockHashes []*daghash.Hash,
addedChainBlocks []*rpcclient.ChainBlock) {
client.OnChainChanged <- &ChainChangedMsg{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
}
},
}
var err error
client.Client, err = rpcclient.New(connCfg, notificationHandlers)
if err != nil {
return nil, fmt.Errorf("Error connecting to address %s: %s", connCfg.Host, err)
}

if err = client.NotifyBlocks(); err != nil {
return nil, fmt.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err)
}
if err = client.NotifyChainChanges(); err != nil {
return nil, fmt.Errorf("Error while registering client %s for chain changes notifications: %s", client.Host(), err)
}

return client, nil
}
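Since OnBlockAdded and OnChainChanged are unbuffered channels fed from the notification callbacks, some goroutine must drain them or the callbacks block. A hedged sketch of a consumer loop; the sync logic it stands in for lives elsewhere in the apiserver and is assumed here, not shown in this diff:

```go
package main

import (
	"log"

	"github.com/daglabs/btcd/apiserver/jsonrpc"
)

// drainNotifications consumes node notifications until doneChan closes.
// It is illustrative only, not the apiserver's real sync loop.
func drainNotifications(client *jsonrpc.Client, doneChan chan struct{}) {
	for {
		select {
		case msg := <-client.OnBlockAdded:
			log.Printf("block added at chain height %d", msg.ChainHeight)
		case msg := <-client.OnChainChanged:
			log.Printf("chain changed: %d removed, %d added",
				len(msg.RemovedChainBlockHashes), len(msg.AddedChainBlocks))
		case <-doneChan:
			return
		}
	}
}

func main() {
	client, err := jsonrpc.GetClient()
	if err != nil {
		log.Fatal(err)
	}
	done := make(chan struct{})
	go drainNotifications(client, done)
	// ... run until shutdown, then:
	close(done)
}
```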
@@ -1,11 +0,0 @@
package main

import (
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/util/panics"
)

var (
log = logger.BackendLog.Logger("APIS")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)
@@ -1,24 +0,0 @@
package logger

import (
"fmt"
"github.com/daglabs/btcd/logs"
"os"
)

// BackendLog is the logging backend used to create all subsystem loggers.
var BackendLog = logs.NewBackend()

// InitLog attaches log file and error log file to the backend log.
func InitLog(logFile, errLogFile string) {
err := BackendLog.AddLogFile(logFile, logs.LevelTrace)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logs.LevelTrace, err)
os.Exit(1)
}
err = BackendLog.AddLogFile(errLogFile, logs.LevelWarn)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logs.LevelWarn, err)
os.Exit(1)
}
}
@@ -1,73 +0,0 @@
package main

import (
"fmt"
"os"

"github.com/daglabs/btcd/apiserver/config"
"github.com/daglabs/btcd/apiserver/database"
"github.com/daglabs/btcd/apiserver/jsonrpc"
"github.com/daglabs/btcd/apiserver/server"
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/signal"
"github.com/daglabs/btcd/util/panics"
_ "github.com/golang-migrate/migrate/v4/database/mysql"
_ "github.com/golang-migrate/migrate/v4/source/file"
_ "github.com/jinzhu/gorm/dialects/mysql"
)

func main() {
defer panics.HandlePanic(log, logger.BackendLog)

cfg, err := config.Parse()
if err != nil {
errString := fmt.Sprintf("Error parsing command-line arguments: %s", err)
_, fErr := fmt.Fprint(os.Stderr, errString)
if fErr != nil {
panic(errString)
}
return
}

if cfg.Migrate {
err := database.Migrate(cfg)
if err != nil {
panic(fmt.Errorf("Error migrating database: %s", err))
}
return
}

err = database.Connect(cfg)
if err != nil {
panic(fmt.Errorf("Error connecting to database: %s", err))
}
defer func() {
err := database.Close()
if err != nil {
panic(fmt.Errorf("Error closing the database: %s", err))
}
}()

err = jsonrpc.Connect(cfg)
if err != nil {
panic(fmt.Errorf("Error connecting to servers: %s", err))
}
defer jsonrpc.Close()

shutdownServer := server.Start(cfg.HTTPListen)
defer shutdownServer()

doneChan := make(chan struct{}, 1)
spawn(func() {
err := startSync(doneChan)
if err != nil {
panic(err)
}
})

interrupt := signal.InterruptListener()
<-interrupt

// Gracefully stop syncing
doneChan <- struct{}{}
}
@@ -1 +0,0 @@
DROP TABLE `blocks`;
@@ -1,23 +0,0 @@
CREATE TABLE `blocks`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`block_hash` CHAR(64) NOT NULL,
`accepting_block_id` BIGINT UNSIGNED NULL,
`version` INT NOT NULL,
`hash_merkle_root` CHAR(64) NOT NULL,
`accepted_id_merkle_root` CHAR(64) NOT NULL,
`utxo_commitment` CHAR(64) NOT NULL,
`timestamp` DATETIME NOT NULL,
`bits` INT UNSIGNED NOT NULL,
`nonce` BIGINT UNSIGNED NOT NULL,
`blue_score` BIGINT UNSIGNED NOT NULL,
`is_chain_block` TINYINT NOT NULL,
`mass` BIGINT NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `idx_blocks_block_hash` (`block_hash`),
INDEX `idx_blocks_timestamp` (`timestamp`),
INDEX `idx_blocks_is_chain_block` (`is_chain_block`),
CONSTRAINT `fk_blocks_accepting_block_id`
FOREIGN KEY (`accepting_block_id`)
REFERENCES `blocks` (`id`)
);
@@ -1 +0,0 @@
DROP TABLE `parent_blocks`;
@@ -1,12 +0,0 @@
CREATE TABLE `parent_blocks`
(
`block_id` BIGINT UNSIGNED NOT NULL,
`parent_block_id` BIGINT UNSIGNED NOT NULL,
PRIMARY KEY (`block_id`, `parent_block_id`),
CONSTRAINT `fk_parent_blocks_block_id`
FOREIGN KEY (`block_id`)
REFERENCES `blocks` (`id`),
CONSTRAINT `fk_parent_blocks_parent_block_id`
FOREIGN KEY (`parent_block_id`)
REFERENCES `blocks` (`id`)
);
@@ -1 +0,0 @@
DROP TABLE `raw_blocks`;
@@ -1,9 +0,0 @@
CREATE TABLE `raw_blocks`
(
`block_id` BIGINT UNSIGNED NOT NULL,
`block_data` BLOB NOT NULL,
PRIMARY KEY (`block_id`),
CONSTRAINT `fk_raw_blocks_block_id`
FOREIGN KEY (`block_id`)
REFERENCES `blocks` (`id`)
);
@@ -1 +0,0 @@
DROP TABLE `subnetworks`;
@@ -1,8 +0,0 @@
CREATE TABLE `subnetworks`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`subnetwork_id` CHAR(64) NOT NULL,
`gas_limit` BIGINT UNSIGNED NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `idx_subnetworks_subnetwork_id` (`subnetwork_id`)
);
@@ -1 +0,0 @@
DROP TABLE `transactions`;
@@ -1,19 +0,0 @@
CREATE TABLE `transactions`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`accepting_block_id` BIGINT UNSIGNED NULL,
`transaction_hash` CHAR(64) NOT NULL,
`transaction_id` CHAR(64) NOT NULL,
`lock_time` BIGINT UNSIGNED NOT NULL,
`subnetwork_id` BIGINT UNSIGNED NOT NULL,
`gas` BIGINT UNSIGNED NOT NULL,
`payload_hash` CHAR(64) NOT NULL,
`payload` BLOB NOT NULL,
`mass` BIGINT NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `idx_transactions_transaction_hash` (`transaction_hash`),
INDEX `idx_transactions_transaction_id` (`transaction_id`),
CONSTRAINT `fk_transactions_accepting_block_id`
FOREIGN KEY (`accepting_block_id`)
REFERENCES `blocks` (`id`)
);
@@ -1 +0,0 @@
DROP TABLE `transactions_to_blocks`;
@@ -1,14 +0,0 @@
CREATE TABLE `transactions_to_blocks`
(
`transaction_id` BIGINT UNSIGNED NOT NULL,
`block_id` BIGINT UNSIGNED NOT NULL,
`index` INT UNSIGNED NOT NULL,
PRIMARY KEY (`transaction_id`, `block_id`),
INDEX `idx_transactions_to_blocks_index` (`index`),
CONSTRAINT `fk_transactions_to_blocks_block_id`
FOREIGN KEY (`block_id`)
REFERENCES `blocks` (`id`),
CONSTRAINT `fk_transactions_to_blocks_transaction_id`
FOREIGN KEY (`transaction_id`)
REFERENCES `transactions` (`id`)
);
@@ -1 +0,0 @@
DROP TABLE `addresses`;
@@ -1,7 +0,0 @@
CREATE TABLE `addresses`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`address` CHAR(50) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `idx_addresses_address` (`address`)
)
@@ -1 +0,0 @@
DROP TABLE `transaction_outputs`;
@@ -1,18 +0,0 @@
CREATE TABLE `transaction_outputs`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`transaction_id` BIGINT UNSIGNED NOT NULL,
`index` INT UNSIGNED NOT NULL,
`value` BIGINT UNSIGNED NOT NULL,
`script_pub_key` BLOB NOT NULL,
`is_spent` TINYINT NOT NULL,
`address_id` BIGINT UNSIGNED NOT NULL,
PRIMARY KEY (`id`),
INDEX `idx_transaction_outputs_transaction_id` (`transaction_id`),
CONSTRAINT `fk_transaction_outputs_transaction_id`
FOREIGN KEY (`transaction_id`)
REFERENCES `transactions` (`id`),
CONSTRAINT `fk_transaction_outputs_address_id`
FOREIGN KEY (`address_id`)
REFERENCES `addresses` (`id`)
);
@@ -1 +0,0 @@
DROP TABLE `transaction_inputs`;
@@ -1,18 +0,0 @@
CREATE TABLE `transaction_inputs`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`transaction_id` BIGINT UNSIGNED NULL,
`previous_transaction_output_id` BIGINT UNSIGNED NOT NULL,
`index` INT UNSIGNED NOT NULL,
`signature_script` BLOB NOT NULL,
`sequence` BIGINT UNSIGNED NOT NULL,
PRIMARY KEY (`id`),
INDEX `idx_transaction_inputs_transaction_id` (`transaction_id`),
INDEX `idx_transaction_inputs_previous_transaction_output_id` (`previous_transaction_output_id`),
CONSTRAINT `fk_transaction_inputs_transaction_id`
FOREIGN KEY (`transaction_id`)
REFERENCES `transactions` (`id`),
CONSTRAINT `fk_transaction_inputs_previous_transaction_output_id`
FOREIGN KEY (`previous_transaction_output_id`)
REFERENCES `transaction_outputs` (`id`)
);
@@ -1,111 +0,0 @@
package models

import (
	"time"
)

// Block is the gorm model for the 'blocks' table
type Block struct {
	ID                   uint64 `gorm:"primary_key"`
	BlockHash            string
	AcceptingBlockID     *uint64
	AcceptingBlock       *Block
	Version              int32
	HashMerkleRoot       string
	AcceptedIDMerkleRoot string
	UTXOCommitment       string
	Timestamp            time.Time
	Bits                 uint32
	Nonce                uint64
	BlueScore            uint64
	IsChainBlock         bool
	Mass                 uint64
	ParentBlocks         []Block `gorm:"many2many:parent_blocks;"`
}

// ParentBlock is the gorm model for the 'parent_blocks' table
type ParentBlock struct {
	BlockID       uint64
	Block         Block
	ParentBlockID uint64
	ParentBlock   Block
}

// RawBlock is the gorm model for the 'raw_blocks' table
type RawBlock struct {
	BlockID   uint64
	Block     Block
	BlockData []byte
}

// Subnetwork is the gorm model for the 'subnetworks' table
type Subnetwork struct {
	ID           uint64 `gorm:"primary_key"`
	SubnetworkID string
	GasLimit     *uint64
}

// Transaction is the gorm model for the 'transactions' table
type Transaction struct {
	ID                 uint64 `gorm:"primary_key"`
	AcceptingBlockID   *uint64
	AcceptingBlock     *Block
	TransactionHash    string
	TransactionID      string
	LockTime           uint64
	SubnetworkID       uint64
	Subnetwork         Subnetwork
	Gas                uint64
	PayloadHash        string
	Payload            []byte
	Mass               uint64
	Blocks             []Block `gorm:"many2many:transactions_to_blocks;"`
	TransactionOutputs []TransactionOutput
	TransactionInputs  []TransactionInput
}

// TransactionBlock is the gorm model for the 'transactions_to_blocks' table
type TransactionBlock struct {
	TransactionID uint64
	Transaction   Transaction
	BlockID       uint64
	Block         Block
	Index         uint32
}

// TableName returns the table name associated with the
// TransactionBlock gorm model
func (TransactionBlock) TableName() string {
	return "transactions_to_blocks"
}

// TransactionOutput is the gorm model for the 'transaction_outputs' table
type TransactionOutput struct {
	ID            uint64 `gorm:"primary_key"`
	TransactionID uint64
	Transaction   Transaction
	Index         uint32
	Value         uint64
	ScriptPubKey  []byte
	IsSpent       bool
	AddressID     uint64
	Address       Address
}

// TransactionInput is the gorm model for the 'transaction_inputs' table
type TransactionInput struct {
	ID                          uint64 `gorm:"primary_key"`
	TransactionID               uint64
	Transaction                 Transaction
	PreviousTransactionOutputID uint64
	PreviousTransactionOutput   TransactionOutput
	Index                       uint32
	SignatureScript             []byte
	Sequence                    uint64
}

// Address is the gorm model for the 'addresses' table
type Address struct {
	ID      uint64 `gorm:"primary_key"`
	Address string
}
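The `many2many` tags above are what let gorm resolve the join tables (`parent_blocks`, `transactions_to_blocks`) without handwritten JOINs for simple traversals. A minimal sketch of loading a transaction with its relations via `Preload`; the DSN and the transaction ID are placeholders, not values from this codebase:

```go
package main

import (
	"github.com/daglabs/btcd/apiserver/models"
	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/mysql"
)

func main() {
	// Placeholder DSN; any gorm v1 dialect works the same way.
	db, err := gorm.Open("mysql", "user:pass@/kaspa?parseTime=true")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Preload walks the has-many and many2many relations declared in the
	// model tags, so the result arrives with inputs, outputs and the
	// containing blocks already populated.
	var tx models.Transaction
	err = db.Preload("TransactionInputs").
		Preload("TransactionOutputs").
		Preload("Blocks").
		Where(&models.Transaction{TransactionID: "someTransactionID"}).
		First(&tx).Error
	if err != nil {
		panic(err)
	}
}
```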
@@ -1,9 +0,0 @@
package server

import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"

var (
	log   = logger.BackendLog.Logger("REST")
	spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)
@@ -1,50 +0,0 @@
package server

import (
	"fmt"
	"github.com/daglabs/btcd/apiserver/utils"
	"net/http"
	"runtime/debug"
	"sync/atomic"
)

var nextRequestID uint64

func addRequestMetadataMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Handlers run concurrently, so request IDs are handed out atomically.
		rCtx := utils.ToAPIServerContext(r.Context()).SetRequestID(atomic.AddUint64(&nextRequestID, 1))
		// WithContext returns a shallow copy of the request; that copy must be
		// the one passed down the chain for the request ID to be visible.
		r = r.WithContext(rCtx)
		next.ServeHTTP(w, r)
	})
}

func loggingMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := utils.ToAPIServerContext(r.Context())
		ctx.Infof("Method: %s URI: %s", r.Method, r.RequestURI)
		next.ServeHTTP(w, r)
	})
}

func recoveryMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := utils.ToAPIServerContext(r.Context())
		defer func() {
			recoveryErr := recover()
			if recoveryErr != nil {
				recoveryErrStr := fmt.Sprintf("%s", recoveryErr)
				log.Criticalf("Fatal error: %s", recoveryErrStr)
				log.Criticalf("Stack trace: %s", debug.Stack())
				sendErr(ctx, w, utils.NewInternalServerHandlerError(recoveryErrStr))
			}
		}()
		h.ServeHTTP(w, r)
	})
}

func setJSONMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		h.ServeHTTP(w, r)
	})
}
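Registration order matters for these middlewares: the request-metadata middleware has to run first so later middlewares see the request ID, and recovery has to wrap everything below it that can panic. A standalone sketch of the same composition with plain `net/http`, no mux required; every name here is local to the sketch:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func withRecovery(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if recovered := recover(); recovered != nil {
				http.Error(w, "internal server error", http.StatusInternalServerError)
			}
		}()
		next.ServeHTTP(w, r)
	})
}

func withLogging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Printf("Method: %s URI: %s", r.Method, r.RequestURI)
		next.ServeHTTP(w, r)
	})
}

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, `{"message": "ok"}`)
	})
	// Equivalent to router.Use(recovery) followed by router.Use(logging):
	// recovery is outermost and sees panics raised anywhere below it.
	log.Fatal(http.ListenAndServe(":8080", withRecovery(withLogging(handler))))
}
```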
@@ -1,244 +0,0 @@
package server

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"strconv"

	"github.com/daglabs/btcd/apiserver/controllers"
	"github.com/daglabs/btcd/apiserver/utils"
	"github.com/gorilla/mux"
)

const (
	routeParamTxID      = "txID"
	routeParamTxHash    = "txHash"
	routeParamAddress   = "address"
	routeParamBlockHash = "blockHash"
)

const (
	queryParamSkip  = "skip"
	queryParamLimit = "limit"
	queryParamOrder = "order"
)

const (
	defaultGetTransactionsLimit = 100
	defaultGetBlocksLimit       = 25
	defaultGetBlocksOrder       = controllers.OrderAscending
)

type handlerFunc func(ctx *utils.APIServerContext, routeParams map[string]string, queryParams map[string]string, requestBody []byte) (
	interface{}, *utils.HandlerError)

func makeHandler(handler handlerFunc) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := utils.ToAPIServerContext(r.Context())

		var requestBody []byte
		if r.Method == "POST" {
			var err error
			requestBody, err = ioutil.ReadAll(r.Body)
			if err != nil {
				sendErr(ctx, w, utils.NewHandlerError(http.StatusInternalServerError, "Internal server error occurred"))
				return
			}
		}

		flattenedQueryParams, hErr := flattenQueryParams(r.URL.Query())
		if hErr != nil {
			sendErr(ctx, w, hErr)
			return
		}

		response, hErr := handler(ctx, mux.Vars(r), flattenedQueryParams, requestBody)
		if hErr != nil {
			sendErr(ctx, w, hErr)
			return
		}
		if response != nil {
			sendJSONResponse(w, response)
		}
	}
}

func flattenQueryParams(queryParams map[string][]string) (map[string]string, *utils.HandlerError) {
	flattenedMap := make(map[string]string)
	for param, valuesSlice := range queryParams {
		if len(valuesSlice) > 1 {
			return nil, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("Couldn't parse the '%s' query parameter:"+
				" expected a single value but got multiple values", param))
		}
		flattenedMap[param] = valuesSlice[0]
	}
	return flattenedMap, nil
}

type clientError struct {
	ErrorCode    int    `json:"errorCode"`
	ErrorMessage string `json:"errorMessage"`
}

func sendErr(ctx *utils.APIServerContext, w http.ResponseWriter, hErr *utils.HandlerError) {
	errMsg := fmt.Sprintf("got error: %s", hErr)
	ctx.Warnf(errMsg)
	w.WriteHeader(hErr.Code)
	sendJSONResponse(w, &clientError{
		ErrorCode:    hErr.Code,
		ErrorMessage: hErr.ClientMessage,
	})
}

func sendJSONResponse(w http.ResponseWriter, response interface{}) {
	b, err := json.Marshal(response)
	if err != nil {
		panic(err)
	}
	// Write the marshaled bytes directly; using them as a format string
	// would misinterpret any '%' characters in the payload.
	_, err = w.Write(b)
	if err != nil {
		panic(err)
	}
}

func mainHandler(_ *utils.APIServerContext, _ map[string]string, _ map[string]string, _ []byte) (interface{}, *utils.HandlerError) {
	return struct {
		Message string `json:"message"`
	}{
		Message: "API server is running",
	}, nil
}

func addRoutes(router *mux.Router) {
	router.HandleFunc("/", makeHandler(mainHandler))

	router.HandleFunc(
		fmt.Sprintf("/transaction/id/{%s}", routeParamTxID),
		makeHandler(getTransactionByIDHandler)).
		Methods("GET")

	router.HandleFunc(
		fmt.Sprintf("/transaction/hash/{%s}", routeParamTxHash),
		makeHandler(getTransactionByHashHandler)).
		Methods("GET")

	router.HandleFunc(
		fmt.Sprintf("/transactions/address/{%s}", routeParamAddress),
		makeHandler(getTransactionsByAddressHandler)).
		Methods("GET")

	router.HandleFunc(
		fmt.Sprintf("/utxos/address/{%s}", routeParamAddress),
		makeHandler(getUTXOsByAddressHandler)).
		Methods("GET")

	router.HandleFunc(
		fmt.Sprintf("/block/{%s}", routeParamBlockHash),
		makeHandler(getBlockByHashHandler)).
		Methods("GET")

	router.HandleFunc(
		"/blocks",
		makeHandler(getBlocksHandler)).
		Methods("GET")

	router.HandleFunc(
		"/fee-estimates",
		makeHandler(getFeeEstimatesHandler)).
		Methods("GET")

	router.HandleFunc(
		"/transaction",
		makeHandler(postTransactionHandler)).
		Methods("POST")
}

func convertQueryParamToInt(queryParams map[string]string, param string, defaultValue int) (int, *utils.HandlerError) {
	if _, ok := queryParams[param]; ok {
		intValue, err := strconv.Atoi(queryParams[param])
		if err != nil {
			return 0, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("Couldn't parse the '%s' query parameter: %s", param, err))
		}
		return intValue, nil
	}
	return defaultValue, nil
}

func getTransactionByIDHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	return controllers.GetTransactionByIDHandler(routeParams[routeParamTxID])
}

func getTransactionByHashHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	return controllers.GetTransactionByHashHandler(routeParams[routeParamTxHash])
}

func getTransactionsByAddressHandler(_ *utils.APIServerContext, routeParams map[string]string, queryParams map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	skip, hErr := convertQueryParamToInt(queryParams, queryParamSkip, 0)
	if hErr != nil {
		return nil, hErr
	}
	limit, hErr := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetTransactionsLimit)
	if hErr != nil {
		return nil, hErr
	}
	return controllers.GetTransactionsByAddressHandler(routeParams[routeParamAddress], uint64(skip), uint64(limit))
}

func getUTXOsByAddressHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	return controllers.GetUTXOsByAddressHandler(routeParams[routeParamAddress])
}

func getBlockByHashHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	return controllers.GetBlockByHashHandler(routeParams[routeParamBlockHash])
}

func getFeeEstimatesHandler(_ *utils.APIServerContext, _ map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	return controllers.GetFeeEstimatesHandler()
}

func getBlocksHandler(_ *utils.APIServerContext, _ map[string]string, queryParams map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	skip, hErr := convertQueryParamToInt(queryParams, queryParamSkip, 0)
	if hErr != nil {
		return nil, hErr
	}
	limit, hErr := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetBlocksLimit)
	if hErr != nil {
		return nil, hErr
	}
	order := defaultGetBlocksOrder
	if orderParamValue, ok := queryParams[queryParamOrder]; ok {
		if orderParamValue != controllers.OrderAscending && orderParamValue != controllers.OrderDescending {
			return nil, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("'%s' is not a valid value for the '%s' query parameter", orderParamValue, queryParamOrder))
		}
		order = orderParamValue
	}
	return controllers.GetBlocksHandler(order, uint64(skip), uint64(limit))
}

func postTransactionHandler(_ *utils.APIServerContext, _ map[string]string, _ map[string]string,
	requestBody []byte) (interface{}, *utils.HandlerError) {
	return nil, controllers.PostTransaction(requestBody)
}
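makeHandler keeps the route handlers free of HTTP plumbing: each one receives parsed route and query parameters and returns either a response value or a *utils.HandlerError, never touching the ResponseWriter. Adding an endpoint is then one function plus one registration. A sketch of what a hypothetical /subnetwork/{subnetworkID} endpoint would look like inside this file; routeParamSubnetworkID and controllers.GetSubnetworkHandler are invented for the illustration and do not exist in this codebase:

```go
const routeParamSubnetworkID = "subnetworkID" // hypothetical route parameter

// getSubnetworkHandler is hypothetical; controllers.GetSubnetworkHandler
// stands in for a real controller function.
func getSubnetworkHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {

	return controllers.GetSubnetworkHandler(routeParams[routeParamSubnetworkID])
}
```

It would be registered inside addRoutes exactly like the existing endpoints:

```go
router.HandleFunc(
	fmt.Sprintf("/subnetwork/{%s}", routeParamSubnetworkID),
	makeHandler(getSubnetworkHandler)).
	Methods("GET")
```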
@@ -1,39 +0,0 @@
package server

import (
	"context"
	"net/http"
	"time"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)

const gracefulShutdownTimeout = 30 * time.Second

// Start starts the HTTP REST server and returns a
// function to gracefully shut it down.
func Start(listenAddr string) func() {
	router := mux.NewRouter()
	router.Use(addRequestMetadataMiddleware)
	router.Use(recoveryMiddleware)
	router.Use(loggingMiddleware)
	router.Use(setJSONMiddleware)
	addRoutes(router)
	httpServer := &http.Server{
		Addr:    listenAddr,
		Handler: handlers.CORS()(router),
	}
	spawn(func() {
		log.Errorf("%s", httpServer.ListenAndServe())
	})

	return func() {
		ctx, cancel := context.WithTimeout(context.Background(), gracefulShutdownTimeout)
		defer cancel()
		err := httpServer.Shutdown(ctx)
		if err != nil {
			log.Errorf("Error shutting down HTTP server: %s", err)
		}
	}
}
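Start returns a closure instead of exposing the *http.Server, so the caller can shut the server down without holding a reference to it. A sketch of the intended call site; the listen address is a placeholder:

```go
package main

import (
	"os"
	"os/signal"

	"github.com/daglabs/btcd/apiserver/server"
)

func main() {
	shutdownServer := server.Start("localhost:8080")
	defer shutdownServer() // blocks for up to gracefulShutdownTimeout

	// Wait for Ctrl+C, then let the deferred shutdown run.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)
	<-interrupt
}
```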
@@ -1,877 +0,0 @@
package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"github.com/daglabs/btcd/apiserver/config"
	"github.com/daglabs/btcd/apiserver/database"
	"github.com/daglabs/btcd/apiserver/jsonrpc"
	"github.com/daglabs/btcd/apiserver/models"
	"github.com/daglabs/btcd/apiserver/utils"
	"github.com/daglabs/btcd/btcjson"
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/util/subnetworkid"
	"github.com/jinzhu/gorm"
	"strconv"
	"time"
)

// startSync keeps the node and the API server in sync. On start, it downloads
// all data that's missing from the API server, and once that's done it stays
// in sync with the node via notifications.
func startSync(doneChan chan struct{}) error {
	client, err := jsonrpc.GetClient()
	if err != nil {
		return err
	}

	// Mass download missing data
	err = fetchInitialData(client)
	if err != nil {
		return err
	}

	// Keep the node and the API server in sync
	sync(client, doneChan)
	return nil
}

// fetchInitialData downloads all data that's currently missing from
// the database.
func fetchInitialData(client *jsonrpc.Client) error {
	err := syncBlocks(client)
	if err != nil {
		return err
	}
	err = syncSelectedParentChain(client)
	if err != nil {
		return err
	}
	return nil
}

// sync keeps the API server in sync with the node via notifications
func sync(client *jsonrpc.Client, doneChan chan struct{}) {
	// ChainChangedMsgs must be processed in order, and there may be times
	// when we are not yet able to process them (e.g. the appropriate
	// BlockAddedMsgs haven't arrived yet). As such, we pop messages from
	// client.OnChainChanged, make sure we're able to handle them, and
	// only then push them into nextChainChangedChan for them to be
	// actually handled.
	blockAddedMsgHandledChan := make(chan struct{})
	nextChainChangedChan := make(chan *jsonrpc.ChainChangedMsg)
	spawn(func() {
		for chainChanged := range client.OnChainChanged {
			for {
				<-blockAddedMsgHandledChan
				canHandle, err := canHandleChainChangedMsg(chainChanged)
				if err != nil {
					panic(err)
				}
				if canHandle {
					break
				}
			}
			nextChainChangedChan <- chainChanged
		}
	})

	// Handle client notifications until we're told to stop
loop:
	for {
		select {
		case blockAdded := <-client.OnBlockAdded:
			handleBlockAddedMsg(client, blockAdded)
			blockAddedMsgHandledChan <- struct{}{}
		case chainChanged := <-nextChainChangedChan:
			handleChainChangedMsg(chainChanged)
		case <-doneChan:
			log.Infof("startSync stopped")
			break loop
		}
	}
}

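The gate described in the comment above is a small, reusable channel pattern: park each incoming message, wait for a progress tick, re-check a precondition, and only then forward it. A self-contained sketch of the same shape with toy message types; nothing here is apiserver API:

```go
package main

import "fmt"

func main() {
	incoming := make(chan int)      // plays the role of client.OnChainChanged
	progress := make(chan struct{}) // plays the role of blockAddedMsgHandledChan
	next := make(chan int)          // plays the role of nextChainChangedChan

	// Toy precondition: message m is handleable once at least m progress
	// ticks have been observed (stands in for canHandleChainChangedMsg).
	seen := 0
	canHandle := func(m int) bool { return seen >= m }

	go func() { // producer of messages
		for m := 1; m <= 3; m++ {
			incoming <- m
		}
		close(incoming)
	}()
	go func() { // producer of progress ticks
		for i := 0; i < 3; i++ {
			progress <- struct{}{}
		}
	}()

	go func() { // the gate: block, tick, re-check, forward
		for m := range incoming {
			for {
				<-progress
				seen++
				if canHandle(m) {
					break
				}
			}
			next <- m
		}
		close(next)
	}()

	for m := range next {
		fmt.Println("handling message", m)
	}
}
```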
// syncBlocks attempts to download all DAG blocks starting with
// the bluest block, and then inserts them into the database.
func syncBlocks(client *jsonrpc.Client) error {
	// Start syncing from the bluest block hash. We use blue score to
	// simulate the "last" block we have because blue-block order is
	// the order that the node uses in the various JSONRPC calls.
	startHash, err := findHashOfBluestBlock(false)
	if err != nil {
		return err
	}

	var blocks []string
	var rawBlocks []btcjson.GetBlockVerboseResult
	for {
		blocksResult, err := client.GetBlocks(true, false, startHash)
		if err != nil {
			return err
		}
		if len(blocksResult.Hashes) == 0 {
			break
		}

		rawBlocksResult, err := client.GetBlocks(true, true, startHash)
		if err != nil {
			return err
		}

		startHash = &blocksResult.Hashes[len(blocksResult.Hashes)-1]
		blocks = append(blocks, blocksResult.Blocks...)
		rawBlocks = append(rawBlocks, rawBlocksResult.RawBlocks...)
	}

	return addBlocks(client, blocks, rawBlocks)
}

// syncSelectedParentChain attempts to download the selected parent
// chain starting with the bluest chain-block, and then updates the
// database accordingly.
func syncSelectedParentChain(client *jsonrpc.Client) error {
	// Start syncing from the bluest chain-block hash. We use blue
	// score to simulate the "last" block we have because blue-block
	// order is the order that the node uses in the various JSONRPC
	// calls.
	startHash, err := findHashOfBluestBlock(true)
	if err != nil {
		return err
	}

	for {
		chainFromBlockResult, err := client.GetChainFromBlock(false, startHash)
		if err != nil {
			return err
		}
		if len(chainFromBlockResult.AddedChainBlocks) == 0 {
			break
		}

		startHash = &chainFromBlockResult.AddedChainBlocks[len(chainFromBlockResult.AddedChainBlocks)-1].Hash
		err = updateSelectedParentChain(chainFromBlockResult.RemovedChainBlockHashes,
			chainFromBlockResult.AddedChainBlocks)
		if err != nil {
			return err
		}
	}
	return nil
}

// findHashOfBluestBlock finds the block with the highest
// blue score in the database. If the database is empty,
// it returns nil.
func findHashOfBluestBlock(mustBeChainBlock bool) (*string, error) {
	dbTx, err := database.DB()
	if err != nil {
		return nil, err
	}

	var block models.Block
	dbQuery := dbTx.Order("blue_score DESC")
	if mustBeChainBlock {
		dbQuery = dbQuery.Where(&models.Block{IsChainBlock: true})
	}
	dbResult := dbQuery.First(&block)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to find hash of bluest block: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		return nil, nil
	}
	return &block.BlockHash, nil
}

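syncBlocks and syncSelectedParentChain share one resumable-pagination shape: start from the last hash the database already has, request what comes after it, advance the cursor to the last returned item, and stop on an empty page. The same loop in isolation; fetchPage is a made-up stand-in for the GetBlocks/GetChainFromBlock calls:

```go
package main

import "fmt"

// fetchPage is a hypothetical stand-in for the node RPC: it returns the
// items that come after the given cursor, or an empty slice when the
// caller has caught up.
func fetchPage(cursor *string) ([]string, error) {
	all := []string{"a", "b", "c", "d", "e"}
	start := 0
	if cursor != nil {
		for i, item := range all {
			if item == *cursor {
				start = i + 1
			}
		}
	}
	end := start + 2 // page size of 2
	if end > len(all) {
		end = len(all)
	}
	return all[start:end], nil
}

func main() {
	var cursor *string // nil means "the database is empty"
	for {
		page, err := fetchPage(cursor)
		if err != nil {
			panic(err)
		}
		if len(page) == 0 {
			break // caught up with the node
		}
		fmt.Println("got page:", page)
		// Advance the cursor to the last item, exactly like syncBlocks
		// advances startHash to the last returned block hash.
		cursor = &page[len(page)-1]
	}
}
```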
// fetchBlock downloads the serialized block and raw block data of
// the block with hash blockHash.
func fetchBlock(client *jsonrpc.Client, blockHash *daghash.Hash) (
	block string, rawBlock *btcjson.GetBlockVerboseResult, err error) {
	msgBlock, err := client.GetBlock(blockHash, nil)
	if err != nil {
		return "", nil, err
	}
	writer := bytes.NewBuffer(make([]byte, 0, msgBlock.SerializeSize()))
	err = msgBlock.Serialize(writer)
	if err != nil {
		return "", nil, err
	}
	block = hex.EncodeToString(writer.Bytes())

	rawBlock, err = client.GetBlockVerboseTx(blockHash, nil)
	if err != nil {
		return "", nil, err
	}
	return block, rawBlock, nil
}

// addBlocks inserts data in the given blocks and rawBlocks pairwise
// into the database. See addBlock for further details.
func addBlocks(client *jsonrpc.Client, blocks []string, rawBlocks []btcjson.GetBlockVerboseResult) error {
	for i, rawBlock := range rawBlocks {
		block := blocks[i]
		err := addBlock(client, block, rawBlock)
		if err != nil {
			return err
		}
	}
	return nil
}

func doesBlockExist(dbTx *gorm.DB, blockHash string) (bool, error) {
	var dbBlock models.Block
	dbResult := dbTx.
		Where(&models.Block{BlockHash: blockHash}).
		First(&dbBlock)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return false, utils.NewErrorFromDBErrors("failed to find block: ", dbErrors)
	}
	return !utils.IsDBRecordNotFoundError(dbErrors), nil
}

// addBlock inserts all the data that could be gleaned out of the serialized
// block and raw block data into the database. This includes transactions,
// subnetworks, and addresses.
// Note that this function starts a database transaction of its own and
// commits it before returning.
func addBlock(client *jsonrpc.Client, block string, rawBlock btcjson.GetBlockVerboseResult) error {
	db, err := database.DB()
	if err != nil {
		return err
	}
	dbTx := db.Begin()

	// Skip this block if it already exists.
	blockExists, err := doesBlockExist(dbTx, rawBlock.Hash)
	if err != nil {
		return err
	}
	if blockExists {
		dbTx.Commit()
		return nil
	}

	dbBlock, err := insertBlock(dbTx, rawBlock)
	if err != nil {
		return err
	}
	err = insertBlockParents(dbTx, rawBlock, dbBlock)
	if err != nil {
		return err
	}
	err = insertBlockData(dbTx, block, dbBlock)
	if err != nil {
		return err
	}

	for i, transaction := range rawBlock.RawTx {
		dbSubnetwork, err := insertSubnetwork(dbTx, &transaction, client)
		if err != nil {
			return err
		}
		dbTransaction, err := insertTransaction(dbTx, &transaction, dbSubnetwork)
		if err != nil {
			return err
		}
		err = insertTransactionBlock(dbTx, dbBlock, dbTransaction, uint32(i))
		if err != nil {
			return err
		}
		err = insertTransactionInputs(dbTx, &transaction, dbTransaction)
		if err != nil {
			return err
		}
		err = insertTransactionOutputs(dbTx, &transaction, dbTransaction)
		if err != nil {
			return err
		}
	}

	dbTx.Commit()
	return nil
}

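addBlock brackets all of its inserts in a single gorm transaction so a block lands in the database atomically. A compact sketch of that bracket; the Rollback call on error is an addition for the sketch, not something addBlock itself does:

```go
package main

import "github.com/jinzhu/gorm"

// insertAtomically shows the transaction shape addBlock uses: Begin, run
// every write against the returned *gorm.DB, Commit at the end.
func insertAtomically(db *gorm.DB, records []interface{}) error {
	dbTx := db.Begin()
	for _, record := range records {
		if err := dbTx.Create(record).Error; err != nil {
			dbTx.Rollback() // undo everything written so far
			return err
		}
	}
	return dbTx.Commit().Error
}
```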
func insertBlock(dbTx *gorm.DB, rawBlock btcjson.GetBlockVerboseResult) (*models.Block, error) {
	bits, err := strconv.ParseUint(rawBlock.Bits, 16, 32)
	if err != nil {
		return nil, err
	}
	dbBlock := models.Block{
		BlockHash:            rawBlock.Hash,
		Version:              rawBlock.Version,
		HashMerkleRoot:       rawBlock.HashMerkleRoot,
		AcceptedIDMerkleRoot: rawBlock.AcceptedIDMerkleRoot,
		UTXOCommitment:       rawBlock.UTXOCommitment,
		Timestamp:            time.Unix(rawBlock.Time, 0),
		Bits:                 uint32(bits),
		Nonce:                rawBlock.Nonce,
		BlueScore:            rawBlock.BlueScore,
		IsChainBlock:         false, // This must be false for updateSelectedParentChain to work properly
		Mass:                 rawBlock.Mass,
	}
	dbResult := dbTx.Create(&dbBlock)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to insert block: ", dbErrors)
	}
	return &dbBlock, nil
}

func insertBlockParents(dbTx *gorm.DB, rawBlock btcjson.GetBlockVerboseResult, dbBlock *models.Block) error {
	// Exit early if this is the genesis block
	if len(rawBlock.ParentHashes) == 0 {
		return nil
	}

	dbWhereBlockIDsIn := make([]*models.Block, len(rawBlock.ParentHashes))
	for i, parentHash := range rawBlock.ParentHashes {
		dbWhereBlockIDsIn[i] = &models.Block{BlockHash: parentHash}
	}
	var dbParents []models.Block
	dbResult := dbTx.
		Where(dbWhereBlockIDsIn).
		Find(&dbParents)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find blocks: ", dbErrors)
	}
	if len(dbParents) != len(rawBlock.ParentHashes) {
		return fmt.Errorf("some parents are missing for block: %s", rawBlock.Hash)
	}

	for _, dbParent := range dbParents {
		dbParentBlock := models.ParentBlock{
			BlockID:       dbBlock.ID,
			ParentBlockID: dbParent.ID,
		}
		dbResult := dbTx.Create(&dbParentBlock)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to insert parentBlock: ", dbErrors)
		}
	}
	return nil
}

func insertBlockData(dbTx *gorm.DB, block string, dbBlock *models.Block) error {
	blockData, err := hex.DecodeString(block)
	if err != nil {
		return err
	}
	dbRawBlock := models.RawBlock{
		BlockID:   dbBlock.ID,
		BlockData: blockData,
	}
	dbResult := dbTx.Create(&dbRawBlock)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to insert rawBlock: ", dbErrors)
	}
	return nil
}

func insertSubnetwork(dbTx *gorm.DB, transaction *btcjson.TxRawResult, client *jsonrpc.Client) (*models.Subnetwork, error) {
	var dbSubnetwork models.Subnetwork
	dbResult := dbTx.
		Where(&models.Subnetwork{SubnetworkID: transaction.Subnetwork}).
		First(&dbSubnetwork)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to find subnetwork: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		subnetwork, err := client.GetSubnetwork(transaction.Subnetwork)
		if err != nil {
			return nil, err
		}
		dbSubnetwork = models.Subnetwork{
			SubnetworkID: transaction.Subnetwork,
			GasLimit:     subnetwork.GasLimit,
		}
		dbResult := dbTx.Create(&dbSubnetwork)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return nil, utils.NewErrorFromDBErrors("failed to insert subnetwork: ", dbErrors)
		}
	}
	return &dbSubnetwork, nil
}

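insertSubnetwork, insertTransaction, insertTransactionBlock and insertAddress all repeat the same find-then-create dance. gorm v1's FirstOrCreate collapses it when the create path needs no extra data; insertSubnetwork can't use it as-is because the gas limit must first be fetched from the node. A sketch with a local model type:

```go
package main

import "github.com/jinzhu/gorm"

// Subnetwork mirrors the models.Subnetwork shape for this sketch.
type Subnetwork struct {
	ID           uint64 `gorm:"primary_key"`
	SubnetworkID string
	GasLimit     *uint64
}

// findOrCreateSubnetwork collapses the two-step lookup with FirstOrCreate:
// the row is created from the Where attributes only if it doesn't exist.
func findOrCreateSubnetwork(dbTx *gorm.DB, subnetworkID string) (*Subnetwork, error) {
	var dbSubnetwork Subnetwork
	dbResult := dbTx.
		Where(&Subnetwork{SubnetworkID: subnetworkID}).
		FirstOrCreate(&dbSubnetwork)
	if len(dbResult.GetErrors()) > 0 {
		return nil, dbResult.GetErrors()[0]
	}
	return &dbSubnetwork, nil
}
```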
func insertTransaction(dbTx *gorm.DB, transaction *btcjson.TxRawResult, dbSubnetwork *models.Subnetwork) (*models.Transaction, error) {
	var dbTransaction models.Transaction
	dbResult := dbTx.
		Where(&models.Transaction{TransactionID: transaction.TxID}).
		First(&dbTransaction)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to find transaction: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		payload, err := hex.DecodeString(transaction.Payload)
		if err != nil {
			return nil, err
		}
		dbTransaction = models.Transaction{
			TransactionHash: transaction.Hash,
			TransactionID:   transaction.TxID,
			LockTime:        transaction.LockTime,
			SubnetworkID:    dbSubnetwork.ID,
			Gas:             transaction.Gas,
			Mass:            transaction.Mass,
			PayloadHash:     transaction.PayloadHash,
			Payload:         payload,
		}
		dbResult := dbTx.Create(&dbTransaction)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return nil, utils.NewErrorFromDBErrors("failed to insert transaction: ", dbErrors)
		}
	}
	return &dbTransaction, nil
}

func insertTransactionBlock(dbTx *gorm.DB, dbBlock *models.Block, dbTransaction *models.Transaction, index uint32) error {
	var dbTransactionBlock models.TransactionBlock
	dbResult := dbTx.
		Where(&models.TransactionBlock{TransactionID: dbTransaction.ID, BlockID: dbBlock.ID}).
		First(&dbTransactionBlock)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find transactionBlock: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		dbTransactionBlock = models.TransactionBlock{
			TransactionID: dbTransaction.ID,
			BlockID:       dbBlock.ID,
			Index:         index,
		}
		dbResult := dbTx.Create(&dbTransactionBlock)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to insert transactionBlock: ", dbErrors)
		}
	}
	return nil
}

func insertTransactionInputs(dbTx *gorm.DB, transaction *btcjson.TxRawResult, dbTransaction *models.Transaction) error {
	isCoinbase, err := isTransactionCoinbase(transaction)
	if err != nil {
		return err
	}

	if !isCoinbase {
		for _, input := range transaction.Vin {
			err := insertTransactionInput(dbTx, dbTransaction, &input)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

func isTransactionCoinbase(transaction *btcjson.TxRawResult) (bool, error) {
	subnetwork, err := subnetworkid.NewFromStr(transaction.Subnetwork)
	if err != nil {
		return false, err
	}
	return subnetwork.IsEqual(subnetworkid.SubnetworkIDCoinbase), nil
}

func insertTransactionInput(dbTx *gorm.DB, dbTransaction *models.Transaction, input *btcjson.Vin) error {
	var dbPreviousTransactionOutput models.TransactionOutput
	dbResult := dbTx.
		Joins("LEFT JOIN `transactions` ON `transactions`.`id` = `transaction_outputs`.`transaction_id`").
		Where("`transactions`.`transaction_id` = ? AND `transaction_outputs`.`index` = ?", input.TxID, input.Vout).
		First(&dbPreviousTransactionOutput)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find previous transactionOutput: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		return fmt.Errorf("missing transaction output for txID: %s and index: %d", input.TxID, input.Vout)
	}

	var dbTransactionInputCount int
	dbResult = dbTx.
		Model(&models.TransactionInput{}).
		Where(&models.TransactionInput{TransactionID: dbTransaction.ID, PreviousTransactionOutputID: dbPreviousTransactionOutput.ID}).
		Count(&dbTransactionInputCount)
	dbErrors = dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find transactionInput: ", dbErrors)
	}
	if dbTransactionInputCount == 0 {
		scriptSig, err := hex.DecodeString(input.ScriptSig.Hex)
		if err != nil {
			return err
		}
		dbTransactionInput := models.TransactionInput{
			TransactionID:               dbTransaction.ID,
			PreviousTransactionOutputID: dbPreviousTransactionOutput.ID,
			Index:                       input.Vout,
			SignatureScript:             scriptSig,
			Sequence:                    input.Sequence,
		}
		dbResult := dbTx.Create(&dbTransactionInput)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to insert transactionInput: ", dbErrors)
		}
	}

	return nil
}

func insertTransactionOutputs(dbTx *gorm.DB, transaction *btcjson.TxRawResult, dbTransaction *models.Transaction) error {
	for _, output := range transaction.Vout {
		scriptPubKey, err := hex.DecodeString(output.ScriptPubKey.Hex)
		if err != nil {
			return err
		}
		dbAddress, err := insertAddress(dbTx, scriptPubKey)
		if err != nil {
			return err
		}
		err = insertTransactionOutput(dbTx, dbTransaction, &output, scriptPubKey, dbAddress)
		if err != nil {
			return err
		}
	}
	return nil
}

func insertAddress(dbTx *gorm.DB, scriptPubKey []byte) (*models.Address, error) {
	_, addr, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey, config.ActiveNetParams())
	if err != nil {
		return nil, err
	}
	hexAddress := addr.EncodeAddress()

	var dbAddress models.Address
	dbResult := dbTx.
		Where(&models.Address{Address: hexAddress}).
		First(&dbAddress)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to find address: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		dbAddress = models.Address{
			Address: hexAddress,
		}
		dbResult := dbTx.Create(&dbAddress)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return nil, utils.NewErrorFromDBErrors("failed to insert address: ", dbErrors)
		}
	}
	return &dbAddress, nil
}

func insertTransactionOutput(dbTx *gorm.DB, dbTransaction *models.Transaction,
	output *btcjson.Vout, scriptPubKey []byte, dbAddress *models.Address) error {
	var dbTransactionOutputCount int
	dbResult := dbTx.
		Model(&models.TransactionOutput{}).
		Where(&models.TransactionOutput{TransactionID: dbTransaction.ID, Index: output.N}).
		Count(&dbTransactionOutputCount)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find transactionOutput: ", dbErrors)
	}
	if dbTransactionOutputCount == 0 {
		dbTransactionOutput := models.TransactionOutput{
			TransactionID: dbTransaction.ID,
			Index:         output.N,
			Value:         output.Value,
			IsSpent:       false, // This must be false for updateSelectedParentChain to work properly
			ScriptPubKey:  scriptPubKey,
			AddressID:     dbAddress.ID,
		}
		dbResult := dbTx.Create(&dbTransactionOutput)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to insert transactionOutput: ", dbErrors)
		}
	}
	return nil
}

// updateSelectedParentChain updates the database to reflect the current selected
// parent chain. First it "unaccepts" all removedChainHashes and then it "accepts"
// all addedChainBlocks.
// Note that this function starts a database transaction of its own and
// commits it before returning.
func updateSelectedParentChain(removedChainHashes []string, addedChainBlocks []btcjson.ChainBlock) error {
	db, err := database.DB()
	if err != nil {
		return err
	}
	dbTx := db.Begin()

	for _, removedHash := range removedChainHashes {
		err := updateRemovedChainHashes(dbTx, removedHash)
		if err != nil {
			return err
		}
	}
	for _, addedBlock := range addedChainBlocks {
		err := updateAddedChainBlocks(dbTx, &addedBlock)
		if err != nil {
			return err
		}
	}

	dbTx.Commit()
	return nil
}

// updateRemovedChainHashes "unaccepts" the block of the given removedHash.
// That is to say, it marks it as not in the selected parent chain in the
// following ways:
// * All its TransactionInputs.PreviousTransactionOutputs are set IsSpent = false
// * All its Transactions are set AcceptingBlockID = nil
// * The block is set IsChainBlock = false
// This function will return an error if any of the above are in an unexpected state
func updateRemovedChainHashes(dbTx *gorm.DB, removedHash string) error {
	var dbBlock models.Block
	dbResult := dbTx.
		Where(&models.Block{BlockHash: removedHash}).
		First(&dbBlock)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find block: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		return fmt.Errorf("missing block for hash: %s", removedHash)
	}
	if !dbBlock.IsChainBlock {
		return fmt.Errorf("block erroneously marked as not a chain block: %s", removedHash)
	}

	var dbTransactions []models.Transaction
	dbResult = dbTx.
		Where(&models.Transaction{AcceptingBlockID: &dbBlock.ID}).
		Preload("TransactionInputs.PreviousTransactionOutput").
		Find(&dbTransactions)
	dbErrors = dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find transactions: ", dbErrors)
	}
	for _, dbTransaction := range dbTransactions {
		for _, dbTransactionInput := range dbTransaction.TransactionInputs {
			dbPreviousTransactionOutput := dbTransactionInput.PreviousTransactionOutput
			if !dbPreviousTransactionOutput.IsSpent {
				return fmt.Errorf("cannot de-spend an unspent transaction output: %s index: %d",
					dbTransaction.TransactionID, dbTransactionInput.Index)
			}

			dbPreviousTransactionOutput.IsSpent = false
			dbResult = dbTx.Save(&dbPreviousTransactionOutput)
			dbErrors = dbResult.GetErrors()
			if utils.HasDBError(dbErrors) {
				return utils.NewErrorFromDBErrors("failed to update transactionOutput: ", dbErrors)
			}
		}

		dbTransaction.AcceptingBlockID = nil
		dbResult := dbTx.Save(&dbTransaction)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to update transaction: ", dbErrors)
		}
	}

	dbBlock.IsChainBlock = false
	dbResult = dbTx.Save(&dbBlock)
	dbErrors = dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to update block: ", dbErrors)
	}

	return nil
}

// updateAddedChainBlocks "accepts" the given addedBlock. That is to say,
// it marks it as in the selected parent chain in the following ways:
// * All its TransactionInputs.PreviousTransactionOutputs are set IsSpent = true
// * All its Transactions are set AcceptingBlockID = addedBlock
// * The block is set IsChainBlock = true
// This function will return an error if any of the above are in an unexpected state
func updateAddedChainBlocks(dbTx *gorm.DB, addedBlock *btcjson.ChainBlock) error {
	for _, acceptedBlock := range addedBlock.AcceptedBlocks {
		var dbAcceptedBlock models.Block
		dbResult := dbTx.
			Where(&models.Block{BlockHash: acceptedBlock.Hash}).
			First(&dbAcceptedBlock)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to find block: ", dbErrors)
		}
		if utils.IsDBRecordNotFoundError(dbErrors) {
			return fmt.Errorf("missing block for hash: %s", acceptedBlock.Hash)
		}
		if dbAcceptedBlock.IsChainBlock {
			return fmt.Errorf("block erroneously marked as a chain block: %s", acceptedBlock.Hash)
		}

		dbWhereTransactionIDsIn := make([]*models.Transaction, len(acceptedBlock.AcceptedTxIDs))
		for i, acceptedTxID := range acceptedBlock.AcceptedTxIDs {
			dbWhereTransactionIDsIn[i] = &models.Transaction{TransactionID: acceptedTxID}
		}
		var dbAcceptedTransactions []models.Transaction
		dbResult = dbTx.
			Where(dbWhereTransactionIDsIn).
			Preload("TransactionInputs.PreviousTransactionOutput").
			Find(&dbAcceptedTransactions)
		dbErrors = dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to find transactions: ", dbErrors)
		}
		if len(dbAcceptedTransactions) != len(acceptedBlock.AcceptedTxIDs) {
			return fmt.Errorf("some transactions are missing for block: %s", acceptedBlock.Hash)
		}

		for _, dbAcceptedTransaction := range dbAcceptedTransactions {
			for _, dbTransactionInput := range dbAcceptedTransaction.TransactionInputs {
				dbPreviousTransactionOutput := dbTransactionInput.PreviousTransactionOutput
				if dbPreviousTransactionOutput.IsSpent {
					return fmt.Errorf("cannot spend an already spent transaction output: %s index: %d",
						dbAcceptedTransaction.TransactionID, dbTransactionInput.Index)
				}

				dbPreviousTransactionOutput.IsSpent = true
				dbResult = dbTx.Save(&dbPreviousTransactionOutput)
				dbErrors = dbResult.GetErrors()
				if utils.HasDBError(dbErrors) {
					return utils.NewErrorFromDBErrors("failed to update transactionOutput: ", dbErrors)
				}
			}

			dbAcceptedTransaction.AcceptingBlockID = &dbAcceptedBlock.ID
			dbResult = dbTx.Save(&dbAcceptedTransaction)
			dbErrors = dbResult.GetErrors()
			if utils.HasDBError(dbErrors) {
				return utils.NewErrorFromDBErrors("failed to update transaction: ", dbErrors)
			}
		}

		dbAcceptedBlock.IsChainBlock = true
		dbResult = dbTx.Save(&dbAcceptedBlock)
		dbErrors = dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to update block: ", dbErrors)
		}
	}
	return nil
}

// handleBlockAddedMsg handles onBlockAdded messages
func handleBlockAddedMsg(client *jsonrpc.Client, blockAdded *jsonrpc.BlockAddedMsg) {
	hash := blockAdded.Header.BlockHash()
	block, rawBlock, err := fetchBlock(client, hash)
	if err != nil {
		log.Warnf("Could not fetch block %s: %s", hash, err)
		return
	}
	err = addBlock(client, block, *rawBlock)
	if err != nil {
		log.Warnf("Could not insert block %s: %s", hash, err)
		return
	}
	log.Infof("Added block %s", hash)
}

// canHandleChainChangedMsg checks whether we have all the necessary data
// to successfully handle a ChainChangedMsg.
func canHandleChainChangedMsg(chainChanged *jsonrpc.ChainChangedMsg) (bool, error) {
	dbTx, err := database.DB()
	if err != nil {
		return false, err
	}

	// Collect all unique referenced block hashes
	hashes := make(map[string]struct{})
	for _, removedHash := range chainChanged.RemovedChainBlockHashes {
		hashes[removedHash.String()] = struct{}{}
	}
	for _, addedBlock := range chainChanged.AddedChainBlocks {
		hashes[addedBlock.Hash.String()] = struct{}{}
		for _, acceptedBlock := range addedBlock.AcceptedBlocks {
			hashes[acceptedBlock.Hash.String()] = struct{}{}
		}
	}

	// Make sure that all the hashes exist in the database
	dbWhereBlockHashesIn := make([]*models.Block, len(hashes))
	i := 0
	for hash := range hashes {
		dbWhereBlockHashesIn[i] = &models.Block{BlockHash: hash}
		i++
	}
	var dbBlocksCount int
	dbResult := dbTx.
		Model(&models.Block{}).
		Where(dbWhereBlockHashesIn).
		Count(&dbBlocksCount)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return false, utils.NewErrorFromDBErrors("failed to find block count: ", dbErrors)
	}
	if len(hashes) != dbBlocksCount {
		return false, nil
	}

	return true, nil
}

// handleChainChangedMsg handles onChainChanged messages
func handleChainChangedMsg(chainChanged *jsonrpc.ChainChangedMsg) {
	// Convert the data in chainChanged to something we can feed into
	// updateSelectedParentChain
	removedHashes, addedBlocks := convertChainChangedMsg(chainChanged)

	err := updateSelectedParentChain(removedHashes, addedBlocks)
	if err != nil {
		log.Warnf("Could not update selected parent chain: %s", err)
		return
	}
	log.Infof("Chain changed: removed %d blocks and added %d blocks",
		len(removedHashes), len(addedBlocks))
}

func convertChainChangedMsg(chainChanged *jsonrpc.ChainChangedMsg) (
	removedHashes []string, addedBlocks []btcjson.ChainBlock) {

	removedHashes = make([]string, len(chainChanged.RemovedChainBlockHashes))
	for i, hash := range chainChanged.RemovedChainBlockHashes {
		removedHashes[i] = hash.String()
	}

	addedBlocks = make([]btcjson.ChainBlock, len(chainChanged.AddedChainBlocks))
	for i, addedBlock := range chainChanged.AddedChainBlocks {
		acceptedBlocks := make([]btcjson.AcceptedBlock, len(addedBlock.AcceptedBlocks))
		for j, acceptedBlock := range addedBlock.AcceptedBlocks {
			acceptedTxIDs := make([]string, len(acceptedBlock.AcceptedTxIDs))
			for k, acceptedTxID := range acceptedBlock.AcceptedTxIDs {
				acceptedTxIDs[k] = acceptedTxID.String()
			}
			acceptedBlocks[j] = btcjson.AcceptedBlock{
				Hash:          acceptedBlock.Hash.String(),
				AcceptedTxIDs: acceptedTxIDs,
			}
		}
		addedBlocks[i] = btcjson.ChainBlock{
			Hash:           addedBlock.Hash.String(),
			AcceptedBlocks: acceptedBlocks,
		}
	}

	return removedHashes, addedBlocks
}
@@ -1,79 +0,0 @@
package utils

import (
	"context"
	"fmt"
)

type contextKey string

const (
	contextKeyRequestID contextKey = "REQUEST_ID"
)

// APIServerContext is a context.Context wrapper that
// enables custom logs with request ID.
type APIServerContext struct {
	context.Context
}

// ToAPIServerContext takes a context.Context instance
// and converts it to *APIServerContext.
func ToAPIServerContext(ctx context.Context) *APIServerContext {
	if asCtx, ok := ctx.(*APIServerContext); ok {
		return asCtx
	}
	return &APIServerContext{Context: ctx}
}

// SetRequestID associates a request ID with the context.
func (ctx *APIServerContext) SetRequestID(requestID uint64) context.Context {
	// context.WithValue returns a derived context; it must be stored back
	// for the request ID to be retrievable later.
	ctx.Context = context.WithValue(ctx.Context, contextKeyRequestID, requestID)
	return ctx
}

func (ctx *APIServerContext) requestID() uint64 {
	id := ctx.Value(contextKeyRequestID)
	uint64ID, _ := id.(uint64)
	return uint64ID
}

func (ctx *APIServerContext) getLogString(format string, params ...interface{}) string {
	return fmt.Sprintf("RID %d: ", ctx.requestID()) + fmt.Sprintf(format, params...)
}

// Tracef writes a customized formatted context
// related log with log level 'Trace'.
func (ctx *APIServerContext) Tracef(format string, params ...interface{}) {
	log.Trace(ctx.getLogString(format, params...))
}

// Debugf writes a customized formatted context
// related log with log level 'Debug'.
func (ctx *APIServerContext) Debugf(format string, params ...interface{}) {
	log.Debug(ctx.getLogString(format, params...))
}

// Infof writes a customized formatted context
// related log with log level 'Info'.
func (ctx *APIServerContext) Infof(format string, params ...interface{}) {
	log.Info(ctx.getLogString(format, params...))
}

// Warnf writes a customized formatted context
// related log with log level 'Warn'.
func (ctx *APIServerContext) Warnf(format string, params ...interface{}) {
	log.Warn(ctx.getLogString(format, params...))
}

// Errorf writes a customized formatted context
// related log with log level 'Error'.
func (ctx *APIServerContext) Errorf(format string, params ...interface{}) {
	log.Error(ctx.getLogString(format, params...))
}

// Criticalf writes a customized formatted context
// related log with log level 'Critical'.
func (ctx *APIServerContext) Criticalf(format string, params ...interface{}) {
	log.Critical(ctx.getLogString(format, params...))
}
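The wrapper works because APIServerContext embeds context.Context: replacing the embedded context inside SetRequestID makes the ID visible to every later Value lookup. A stripped-down, runnable re-implementation of the idea; the names here are invented for the sketch:

```go
package main

import (
	"context"
	"fmt"
)

// requestContext stores a request ID in its embedded context and prefixes
// every log line with it, mirroring APIServerContext.
type requestContext struct {
	context.Context
}

type ctxKey string

const keyRequestID ctxKey = "REQUEST_ID"

func (c *requestContext) setRequestID(id uint64) {
	c.Context = context.WithValue(c.Context, keyRequestID, id)
}

func (c *requestContext) infof(format string, params ...interface{}) {
	id, _ := c.Value(keyRequestID).(uint64)
	fmt.Printf("RID %d: "+format+"\n", append([]interface{}{id}, params...)...)
}

func main() {
	ctx := &requestContext{Context: context.Background()}
	ctx.setRequestID(42)
	ctx.infof("Method: %s URI: %s", "GET", "/blocks") // RID 42: Method: GET URI: /blocks
}
```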
@@ -1,74 +0,0 @@
package utils

import (
	"fmt"
	"github.com/jinzhu/gorm"
	"net/http"
	"strings"
)

// HandlerError is an error returned from
// a rest route handler or a middleware.
type HandlerError struct {
	Code          int
	Message       string
	ClientMessage string
}

func (hErr *HandlerError) Error() string {
	return hErr.Message
}

// NewHandlerError returns a HandlerError with the given code and message.
func NewHandlerError(code int, message string) *HandlerError {
	return &HandlerError{
		Code:          code,
		Message:       message,
		ClientMessage: message,
	}
}

// NewHandlerErrorWithCustomClientMessage returns a HandlerError with
// the given code, message and client error message.
func NewHandlerErrorWithCustomClientMessage(code int, message, clientMessage string) *HandlerError {
	return &HandlerError{
		Code:          code,
		Message:       message,
		ClientMessage: clientMessage,
	}
}

// NewInternalServerHandlerError returns a HandlerError with
// the given message, and the http.StatusInternalServerError
// status text as client message.
func NewInternalServerHandlerError(message string) *HandlerError {
	return NewHandlerErrorWithCustomClientMessage(http.StatusInternalServerError, message, http.StatusText(http.StatusInternalServerError))
}

// NewErrorFromDBErrors takes a slice of database errors and a prefix, and
// returns an error with all of the database errors formatted to one string with
// the given prefix
func NewErrorFromDBErrors(prefix string, dbErrors []error) error {
	dbErrorsStrings := make([]string, len(dbErrors))
	for i, dbErr := range dbErrors {
		dbErrorsStrings[i] = fmt.Sprintf("\"%s\"", dbErr)
	}
	return fmt.Errorf("%s [%s]", prefix, strings.Join(dbErrorsStrings, ","))
}

// NewHandlerErrorFromDBErrors takes a slice of database errors and a prefix, and
// returns a HandlerError with error code http.StatusInternalServerError and
// all of the database errors formatted to one string with the given prefix
func NewHandlerErrorFromDBErrors(prefix string, dbErrors []error) *HandlerError {
	return NewInternalServerHandlerError(NewErrorFromDBErrors(prefix, dbErrors).Error())
}

// IsDBRecordNotFoundError returns true if the given dbErrors contains only a RecordNotFound error
func IsDBRecordNotFoundError(dbErrors []error) bool {
	return len(dbErrors) == 1 && gorm.IsRecordNotFoundError(dbErrors[0])
}

// HasDBError returns true if the given dbErrors contain any errors that aren't RecordNotFound
func HasDBError(dbErrors []error) bool {
	return !IsDBRecordNotFoundError(dbErrors) && len(dbErrors) > 0
}
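Together, IsDBRecordNotFoundError and HasDBError split every gorm v1 result into three outcomes: hard failure, record-not-found (usually "create it now"), and found. A self-contained sketch of that three-way branch, with local copies of the two helpers:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/jinzhu/gorm"
)

// Local copies of the helpers above, so the snippet stands alone.
func isDBRecordNotFoundError(dbErrors []error) bool {
	return len(dbErrors) == 1 && gorm.IsRecordNotFoundError(dbErrors[0])
}

func hasDBError(dbErrors []error) bool {
	return !isDBRecordNotFoundError(dbErrors) && len(dbErrors) > 0
}

func classify(dbErrors []error) string {
	switch {
	case hasDBError(dbErrors):
		return "hard failure"
	case isDBRecordNotFoundError(dbErrors):
		return "record not found"
	default:
		return "record found"
	}
}

func main() {
	fmt.Println(classify(nil))                                   // record found
	fmt.Println(classify([]error{gorm.ErrRecordNotFound}))       // record not found
	fmt.Println(classify([]error{errors.New("bad connection")})) // hard failure
}
```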
@@ -1,9 +0,0 @@
package utils

import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"

var (
	log   = logger.BackendLog.Logger("UTIL")
	spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)
248 app/app.go Normal file
@@ -0,0 +1,248 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kaspanet/kaspad/addressmanager"
|
||||
|
||||
"github.com/kaspanet/kaspad/netadapter/id"
|
||||
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/blockdag/indexers"
|
||||
"github.com/kaspanet/kaspad/config"
|
||||
"github.com/kaspanet/kaspad/connmanager"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/dnsseed"
|
||||
"github.com/kaspanet/kaspad/mempool"
|
||||
"github.com/kaspanet/kaspad/mining"
|
||||
"github.com/kaspanet/kaspad/netadapter"
|
||||
"github.com/kaspanet/kaspad/protocol"
|
||||
"github.com/kaspanet/kaspad/rpc"
|
||||
"github.com/kaspanet/kaspad/signal"
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// App is a wrapper for all the kaspad services
|
||||
type App struct {
|
||||
cfg *config.Config
|
||||
rpcServer *rpc.Server
|
||||
addressManager *addressmanager.AddressManager
|
||||
protocolManager *protocol.Manager
|
||||
connectionManager *connmanager.ConnectionManager
|
||||
netAdapter *netadapter.NetAdapter
|
||||
|
||||
started, shutdown int32
|
||||
}
|
||||
|
||||
// Start launches all the kaspad services.
|
||||
func (a *App) Start() {
|
||||
// Already started?
|
||||
if atomic.AddInt32(&a.started, 1) != 1 {
|
||||
return
|
||||
}
|
||||
|
||||
log.Trace("Starting kaspad")
|
||||
|
||||
err := a.protocolManager.Start()
|
||||
if err != nil {
|
||||
panics.Exit(log, fmt.Sprintf("Error starting the p2p protocol: %+v", err))
|
||||
}
|
||||
|
||||
a.maybeSeedFromDNS()
|
||||
|
||||
a.connectionManager.Start()
|
||||
|
||||
if !a.cfg.DisableRPC {
|
||||
a.rpcServer.Start()
|
||||
}
|
||||
}
|
||||
|
||||
// Stop gracefully shuts down all the kaspad services.
|
||||
func (a *App) Stop() error {
|
||||
// Make sure this only happens once.
|
||||
if atomic.AddInt32(&a.shutdown, 1) != 1 {
|
||||
log.Infof("Kaspad is already in the process of shutting down")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Warnf("Kaspad shutting down")
|
||||
|
||||
a.connectionManager.Stop()
|
||||
|
||||
err := a.protocolManager.Stop()
|
||||
if err != nil {
|
||||
log.Errorf("Error stopping the p2p protocol: %+v", err)
|
||||
}
|
||||
|
||||
// Shutdown the RPC server if it's not disabled.
|
||||
if !a.cfg.DisableRPC {
|
||||
err := a.rpcServer.Stop()
|
||||
if err != nil {
|
||||
log.Errorf("Error stopping rpcServer: %+v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// New returns a new App instance configured to listen on addr for the
|
||||
// kaspa network type specified by dagParams. Use start to begin accepting
|
||||
// connections from peers.
|
||||
func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{}) (*App, error) {
|
||||
indexManager, acceptanceIndex := setupIndexes(cfg)
|
||||
|
||||
sigCache := txscript.NewSigCache(cfg.SigCacheMaxSize)
|
||||
|
||||
// Create a new block DAG instance with the appropriate configuration.
|
||||
dag, err := setupDAG(cfg, databaseContext, interrupt, sigCache, indexManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txMempool := setupMempool(cfg, dag, sigCache)
|
||||
|
||||
netAdapter, err := netadapter.NewNetAdapter(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addressManager := addressmanager.New(cfg, databaseContext)
|
||||
|
||||
connectionManager, err := connmanager.New(cfg, netAdapter, addressManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
protocolManager, err := protocol.NewManager(cfg, dag, netAdapter, addressManager, txMempool, connectionManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rpcServer, err := setupRPC(
|
||||
cfg, dag, txMempool, sigCache, acceptanceIndex, connectionManager, addressManager, protocolManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &App{
|
||||
cfg: cfg,
|
||||
rpcServer: rpcServer,
|
||||
protocolManager: protocolManager,
|
||||
connectionManager: connectionManager,
|
||||
netAdapter: netAdapter,
|
||||
addressManager: addressManager,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *App) maybeSeedFromDNS() {
|
||||
if !a.cfg.DisableDNSSeed {
|
||||
dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, wire.SFNodeNetwork, false, nil,
|
||||
a.cfg.Lookup, func(addresses []*wire.NetAddress) {
|
||||
// Kaspad uses a lookup of the dns seeder here. Since seeder returns
|
||||
// IPs of nodes and not its own IP, we can not know real IP of
|
||||
// source. So we'll take first returned address as source.
|
||||
a.addressManager.AddAddresses(addresses, addresses[0], nil)
|
||||
})
|
||||
}
|
||||
}
|
||||
func setupDAG(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{},
|
||||
sigCache *txscript.SigCache, indexManager blockdag.IndexManager) (*blockdag.BlockDAG, error) {
|
||||
|
||||
dag, err := blockdag.New(&blockdag.Config{
|
||||
Interrupt: interrupt,
|
||||
DatabaseContext: databaseContext,
|
||||
DAGParams: cfg.NetParams(),
|
||||
TimeSource: blockdag.NewTimeSource(),
|
||||
SigCache: sigCache,
|
||||
IndexManager: indexManager,
|
||||
SubnetworkID: cfg.SubnetworkID,
|
||||
})
|
||||
return dag, err
|
||||
}
|
||||
|
||||
func setupIndexes(cfg *config.Config) (blockdag.IndexManager, *indexers.AcceptanceIndex) {
|
||||
// Create indexes if needed.
|
||||
var indexes []indexers.Indexer
|
||||
var acceptanceIndex *indexers.AcceptanceIndex
|
||||
if cfg.AcceptanceIndex {
|
||||
log.Info("acceptance index is enabled")
|
||||
indexes = append(indexes, acceptanceIndex)
|
||||
}
|
||||
|
||||
// Create an index manager if any of the optional indexes are enabled.
|
||||
if len(indexes) < 0 {
|
||||
return nil, nil
|
||||
}
|
||||
indexManager := indexers.NewManager(indexes)
|
||||
return indexManager, acceptanceIndex
|
||||
}
|
||||
|
||||
func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript.SigCache) *mempool.TxPool {
|
||||
mempoolConfig := mempool.Config{
|
||||
Policy: mempool.Policy{
|
||||
AcceptNonStd: cfg.RelayNonStd,
|
||||
MaxOrphanTxs: cfg.MaxOrphanTxs,
|
||||
MaxOrphanTxSize: config.DefaultMaxOrphanTxSize,
|
||||
MinRelayTxFee: cfg.MinRelayTxFee,
|
||||
MaxTxVersion: 1,
|
||||
},
|
||||
CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
|
||||
return dag.CalcSequenceLockNoLock(tx, utxoSet, true)
|
||||
},
|
||||
IsDeploymentActive: dag.IsDeploymentActive,
|
||||
SigCache: sigCache,
|
||||
DAG: dag,
|
||||
}
|
||||
|
||||
return mempool.New(&mempoolConfig)
|
||||
}
|
||||
|
||||
func setupRPC(cfg *config.Config,
|
||||
dag *blockdag.BlockDAG,
|
||||
txMempool *mempool.TxPool,
|
||||
sigCache *txscript.SigCache,
|
||||
acceptanceIndex *indexers.AcceptanceIndex,
|
||||
connectionManager *connmanager.ConnectionManager,
|
||||
addressManager *addressmanager.AddressManager,
|
||||
protocolManager *protocol.Manager) (*rpc.Server, error) {
|
||||
|
||||
if !cfg.DisableRPC {
|
||||
policy := mining.Policy{
|
||||
BlockMaxMass: cfg.BlockMaxMass,
|
||||
}
|
||||
blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, txMempool, dag, sigCache)
|
||||
|
||||
rpcServer, err := rpc.NewRPCServer(cfg, dag, txMempool, acceptanceIndex, blockTemplateGenerator,
|
||||
connectionManager, addressManager, protocolManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Signal process shutdown when the RPC server requests it.
|
||||
spawn("setupRPC-handleShutdownRequest", func() {
|
||||
<-rpcServer.RequestedProcessShutdown()
|
||||
signal.ShutdownRequestChannel <- struct{}{}
|
||||
})
|
||||
|
||||
return rpcServer, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// P2PNodeID returns the network ID associated with this App
|
||||
func (a *App) P2PNodeID() *id.ID {
|
||||
return a.netAdapter.ID()
|
||||
}
|
||||
|
||||
// AddressManager returns the AddressManager associated with this App
|
||||
func (a *App) AddressManager() *addressmanager.AddressManager {
|
||||
return a.addressManager
|
||||
}
|
||||
|
||||
// WaitForShutdown blocks until the main listener and peer handlers are stopped.
|
||||
func (a *App) WaitForShutdown() {
|
||||
// TODO(libp2p)
|
||||
// a.p2pServer.WaitForShutdown()
|
||||
}
|
||||
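For orientation, the constructor above wires the subsystems in strict dependency order (indexes → sigCache → DAG → mempool → net adapter → address manager → connection manager → protocol manager → RPC). The sketch below shows how a caller might drive that lifecycle; `loadConfig` and `openDatabaseContext` are hypothetical stand-ins, and only `New` and `Stop` come from the code above.

```go
// Hypothetical driver for the App above. loadConfig and openDatabaseContext
// are illustrative stand-ins, not kaspad APIs; New and Stop are from this file.
func run() error {
	cfg, err := loadConfig() // hypothetical: produce a *config.Config
	if err != nil {
		return err
	}

	databaseContext, err := openDatabaseContext(cfg) // hypothetical: open a *dbaccess.DatabaseContext
	if err != nil {
		return err
	}
	defer databaseContext.Close()

	// Closed by signal handling elsewhere to request shutdown.
	interrupt := make(chan struct{})

	a, err := New(cfg, databaseContext, interrupt)
	if err != nil {
		return err
	}
	defer func() {
		if err := a.Stop(); err != nil {
			log.Errorf("Error stopping kaspad: %+v", err)
		}
	}()

	<-interrupt // block until shutdown is requested
	return nil
}
```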
app/log.go (new file, 14 lines)
@@ -0,0 +1,14 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package app

import (
	"github.com/kaspanet/kaspad/logger"
	"github.com/kaspanet/kaspad/util/panics"
)

var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log)
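`spawn` is used throughout this changeset to start goroutines that log panics instead of crashing silently. A minimal sketch of what such a wrapper might look like, using only the standard library; the real implementation lives in `util/panics` and differs in detail.

```go
// Minimal sketch of a panic-logging goroutine wrapper in the spirit of
// panics.GoroutineWrapperFunc. Illustration only; not the actual implementation.
package panicsketch

import "log"

// goroutineWrapper returns a spawn(name, f) helper: f runs on a new goroutine,
// and any panic is recovered and logged together with the goroutine's name.
func goroutineWrapper(logf func(format string, args ...interface{})) func(name string, f func()) {
	return func(name string, f func()) {
		go func() {
			defer func() {
				if r := recover(); r != nil {
					logf("panic in goroutine %s: %v", name, r)
				}
			}()
			f()
		}()
	}
}

// Usage mirroring the file above:
//	var spawn = goroutineWrapper(log.Printf)
//	spawn("setupRPC-handleShutdownRequest", func() { /* ... */ })
var _ = log.Printf
```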
@@ -1,33 +1,18 @@
blockchain
==========

[](https://travis-ci.org/btcsuite/btcd)
[](http://copyfree.org)
[](http://godoc.org/github.com/daglabs/btcd/blockchain)
[](https://choosealicense.com/licenses/isc/)
[](http://godoc.org/github.com/kaspanet/kaspad/blockchain)

Package blockchain implements bitcoin block handling and chain selection rules.
The test coverage is currently only around 60%, but will be increasing over
time. See `test_coverage.txt` for the gocov coverage report. Alternatively, if
you are running a POSIX OS, you can run the `cov_report.sh` script for a
real-time report. Package blockchain is licensed under the liberal ISC license.
Package blockdag implements Kaspa block handling, organization of the blockDAG,
block sorting and UTXO-set maintenance.
The test coverage is currently only around 75%, but will be increasing over
time.

There is an associated blog post about the release of this package
[here](https://blog.conformal.com/btcchain-the-bitcoin-chain-package-from-bctd/).
## Kaspad BlockDAG Processing Overview

This package has intentionally been designed so it can be used as a standalone
package for any projects needing to handle processing of blocks into the bitcoin
block chain.

## Installation and Updating

```bash
$ go get -u github.com/daglabs/btcd/blockchain
```

## Bitcoin Chain Processing Overview

Before a block is allowed into the block chain, it must go through an intensive
series of validation rules. The following list serves as a general outline of
Before a block is allowed into the block DAG, it must go through an intensive
series of validation rules. The following list serves as a general outline of
those rules to provide some intuition into what is going on under the hood, but
is by no means exhaustive:

@@ -35,69 +20,22 @@ is by no means exhaustive:
- Perform a series of sanity checks on the block and its transactions such as
  verifying proof of work, timestamps, number and character of transactions,
  transaction amounts, script complexity, and merkle root calculations
- Compare the block against predetermined checkpoints for expected timestamps
  and difficulty based on elapsed time since the checkpoint
- Save the most recent orphan blocks for a limited time in case their parent
  blocks become available
- Stop processing if the block is an orphan as the rest of the processing
  depends on the block's position within the block chain
  blocks become available.
- Save blocks from the future for delayed processing
- Stop processing if the block is an orphan or delayed as the rest of the
  processing depends on the block's position within the block chain
- Make sure the block does not violate finality rules
- Perform a series of more thorough checks that depend on the block's position
  within the block chain such as verifying block difficulties adhere to
  within the blockDAG such as verifying block difficulties adhere to
  difficulty retarget rules, timestamps are after the median of the last
  several blocks, all transactions are finalized, checkpoint blocks match, and
  block versions are in line with the previous blocks
- Determine how the block fits into the chain and perform different actions
  accordingly in order to ensure any side chains which have higher difficulty
  than the main chain become the new main chain
- When a block is being connected to the main chain (either through
  reorganization of a side chain to the main chain or just extending the
  main chain), perform further checks on the block's transactions such as
  verifying transaction duplicates, script complexity for the combination of
  connected scripts, coinbase maturity, double spends, and connected
  transaction values
- Determine how the block fits into the DAG and perform different actions
  accordingly
- Run the transaction scripts to verify the spender is allowed to spend the
  coins
- Run GhostDAG to fit the block in a canonical sorting
- Build the block's UTXO Set, as well as update the global UTXO Set accordingly
- Insert the block into the block database

## Examples

* [ProcessBlock Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-BlockChain-ProcessBlock)
  Demonstrates how to create a new chain instance and use ProcessBlock to
  attempt to add a block to the chain. This example intentionally
  attempts to insert a duplicate genesis block to illustrate how an invalid
  block is handled.

* [CompactToBig Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-CompactToBig)
  Demonstrates how to convert the compact "bits" in a block header which
  represent the target difficulty to a big integer and display it using the
  typical hex notation.

* [BigToCompact Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-BigToCompact)
  Demonstrates how to convert a target difficulty into the
  compact "bits" in a block header which represent that target difficulty.

## GPG Verification Key

All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:

- Download the public key from the Conformal website at
  https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt

- Import the public key into your GPG keyring:
  ```bash
  gpg --import GIT-GPG-KEY-conformal.txt
  ```

- Verify the release tag with the following command where `TAG_NAME` is a
  placeholder for the specific tag:
  ```bash
  git tag -v TAG_NAME
  ```

## License

Package blockchain is licensed under the [copyfree](http://copyfree.org) ISC
License.
@@ -6,16 +6,28 @@ package blockdag

import (
	"fmt"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"

	"github.com/kaspanet/kaspad/dbaccess"
	"github.com/kaspanet/kaspad/util"
	"github.com/pkg/errors"
)

func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error {
	blockHeader := &block.MsgBlock().Header
	newNode := newBlockNode(blockHeader, newSet(), dag.dagParams.K)
	newNode, _ := dag.newBlockNode(blockHeader, newBlockSet())
	newNode.status = statusInvalidAncestor
	dag.index.AddNode(newNode)
	return dag.index.flushToDB()

	dbTx, err := dag.databaseContext.NewTx()
	if err != nil {
		return err
	}
	defer dbTx.RollbackUnlessClosed()
	err = dag.index.flushToDB(dbTx)
	if err != nil {
		return err
	}
	return dbTx.Commit()
}
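The `NewTx` / `RollbackUnlessClosed` / `Commit` sequence introduced above recurs throughout this changeset. Its general shape, extracted into a sketch (the `withTx` helper and `doWrites` callback are illustrative, not part of the diff; the dbaccess calls are the ones the diff itself uses):

```go
// General shape of the transaction pattern used above. withTx and doWrites
// are illustrative; NewTx, RollbackUnlessClosed and Commit are the dbaccess
// calls that appear in this changeset.
func withTx(databaseContext *dbaccess.DatabaseContext, doWrites func(dbTx *dbaccess.TxContext) error) error {
	dbTx, err := databaseContext.NewTx()
	if err != nil {
		return err
	}
	// If Commit is never reached (early return, error, panic), the deferred
	// rollback ensures the transaction is not left open.
	defer dbTx.RollbackUnlessClosed()

	if err := doWrites(dbTx); err != nil {
		return err
	}
	return dbTx.Commit()
}
```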
// maybeAcceptBlock potentially accepts a block into the block DAG. It
@@ -23,14 +35,15 @@ func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error
// the block DAG before adding it. The block is expected to have already
// gone through ProcessBlock before calling this function with it.
//
// The flags are also passed to checkBlockContext and connectToDAG. See
// their documentation for how the flags modify their behavior.
//
// This function MUST be called with the dagLock held (for writes).
func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error {
	parents, err := lookupParentNodes(block, dag)
	if err != nil {
		if rErr, ok := err.(RuleError); ok && rErr.ErrorCode == ErrInvalidAncestorBlock {
		var ruleErr RuleError
		if ok := errors.As(err, &ruleErr); ok && ruleErr.ErrorCode == ErrInvalidAncestorBlock {
			err := dag.addNodeToIndexWithInvalidAncestor(block)
			if err != nil {
				return err
@@ -47,26 +60,39 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
	}

	// Create a new block node for the block and add it to the node index.
	newNode := newBlockNode(&block.MsgBlock().Header, parents, dag.dagParams.K)
	newNode, selectedParentAnticone := dag.newBlockNode(&block.MsgBlock().Header, parents)
	newNode.status = statusDataStored
	dag.index.AddNode(newNode)

	// Insert the block into the database if it's not already there. Even
	// though it is possible the block will ultimately fail to connect, it
	// has already passed all proof-of-work and validity tests which means
	// it would be prohibitively expensive for an attacker to fill up the
	// disk with a bunch of blocks that fail to connect. This is necessary
	// since it allows block download to be decoupled from the much more
	// expensive connection logic. It also has some other nice properties
	// such as making blocks that never become part of the DAG or
	// blocks that fail to connect available for further analysis.
	err = dag.db.Update(func(dbTx database.Tx) error {
		err := dbStoreBlock(dbTx, block)
	dbTx, err := dag.databaseContext.NewTx()
	if err != nil {
		return err
	}
	defer dbTx.RollbackUnlessClosed()
	blockExists, err := dbaccess.HasBlock(dbTx, block.Hash())
	if err != nil {
		return err
	}
	if !blockExists {
		err := storeBlock(dbTx, block)
		if err != nil {
			return err
		}
		return dag.index.flushToDBWithTx(dbTx)
	})
	}
	err = dag.index.flushToDB(dbTx)
	if err != nil {
		return err
	}
	err = dbTx.Commit()
	if err != nil {
		return err
	}
@@ -80,44 +106,44 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
	}
	}

	block.SetChainHeight(newNode.chainHeight)

	// Connect the passed block to the DAG. This also handles validation of the
	// transaction scripts.
	chainUpdates, err := dag.addBlock(newNode, parents, block, flags)
	chainUpdates, err := dag.addBlock(newNode, block, selectedParentAnticone, flags)
	if err != nil {
		return err
	}

	// Notify the caller that the new block was accepted into the block
	// DAG. The caller would typically want to react by relaying the
	// inventory to other peers.
	dag.dagLock.Unlock()
	dag.sendNotification(NTBlockAdded, &BlockAddedNotificationData{
		Block:         block,
		WasUnorphaned: flags&BFWasUnorphaned != 0,
	})
	dag.sendNotification(NTChainChanged, &ChainChangedNotificationData{
		RemovedChainBlockHashes: chainUpdates.removedChainBlockHashes,
		AddedChainBlockHashes:   chainUpdates.addedChainBlockHashes,
	})
	if len(chainUpdates.addedChainBlockHashes) > 0 {
		dag.sendNotification(NTChainChanged, &ChainChangedNotificationData{
			RemovedChainBlockHashes: chainUpdates.removedChainBlockHashes,
			AddedChainBlockHashes:   chainUpdates.addedChainBlockHashes,
		})
	}
	dag.dagLock.Lock()

	return nil
}

func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error) {
func lookupParentNodes(block *util.Block, dag *BlockDAG) (blockSet, error) {
	header := block.MsgBlock().Header
	parentHashes := header.ParentHashes

	nodes := newSet()
	nodes := newBlockSet()
	for _, parentHash := range parentHashes {
		node := blockDAG.index.LookupNode(parentHash)
		if node == nil {
			str := fmt.Sprintf("parent block %s is unknown", parentHashes)
		node, ok := dag.index.LookupNode(parentHash)
		if !ok {
			str := fmt.Sprintf("parent block %s is unknown", parentHash)
			return nil, ruleError(ErrParentBlockUnknown, str)
		} else if blockDAG.index.NodeStatus(node).KnownInvalid() {
			str := fmt.Sprintf("parent block %s is known to be invalid", parentHashes)
		} else if dag.index.NodeStatus(node).KnownInvalid() {
			str := fmt.Sprintf("parent block %s is known to be invalid", parentHash)
			return nil, ruleError(ErrInvalidAncestorBlock, str)
		}
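A recurring change in this diff is replacing `err.(RuleError)` type assertions with `errors.As`, which also matches errors that have been wrapped with context. A self-contained illustration of the difference:

```go
// Why the diff swaps err.(RuleError) for errors.As: a plain type assertion
// fails once the error is wrapped, while errors.As unwraps and still matches.
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

type RuleError struct{ ErrorCode int }

func (e RuleError) Error() string { return fmt.Sprintf("rule error %d", e.ErrorCode) }

func main() {
	err := errors.Wrap(RuleError{ErrorCode: 42}, "while accepting block")

	_, ok := err.(RuleError) // false: err is the wrapper, not a RuleError
	fmt.Println("type assertion:", ok)

	var ruleErr RuleError
	ok = errors.As(err, &ruleErr) // true: errors.As walks the wrap chain
	fmt.Println("errors.As:", ok, ruleErr.ErrorCode)
}
```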
@@ -3,19 +3,15 @@ package blockdag
import (
	"errors"
	"path/filepath"
	"strings"
	"testing"

	"bou.ke/monkey"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
	"github.com/kaspanet/kaspad/dagconfig"
)

func TestMaybeAcceptBlockErrors(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
		DAGParams: &dagconfig.SimNetParams,
	dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", true, Config{
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
		t.Fatalf("TestMaybeAcceptBlockErrors: Failed to setup DAG instance: %v", err)
@@ -38,8 +34,8 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
			"Expected: %s, got: <nil>", ErrParentBlockUnknown)
	}
	ruleErr, ok := err.(RuleError)
	if !ok {
	var ruleErr RuleError
	if ok := errors.As(err, &ruleErr); !ok {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
			"Expected RuleError but got %s", err)
	} else if ruleErr.ErrorCode != ErrParentBlockUnknown {
@@ -57,17 +53,20 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {

	// Add a valid block and mark it as invalid
	block1 := blocks[1]
	isOrphan, delay, err := dag.ProcessBlock(block1, BFNone)
	isOrphan, isDelayed, err := dag.ProcessBlock(block1, BFNone)
	if err != nil {
		t.Fatalf("TestMaybeAcceptBlockErrors: Valid block unexpectedly returned an error: %s", err)
	}
	if delay != 0 {
	if isDelayed {
		t.Fatalf("TestMaybeAcceptBlockErrors: block 1 is too far in the future")
	}
	if isOrphan {
		t.Fatalf("TestMaybeAcceptBlockErrors: incorrectly returned block 1 is an orphan")
	}
	blockNode1 := dag.index.LookupNode(block1.Hash())
	blockNode1, ok := dag.index.LookupNode(block1.Hash())
	if !ok {
		t.Fatalf("block %s does not exist in the DAG", block1.Hash())
	}
	dag.index.SetStatusFlags(blockNode1, statusValidateFailed)

	block2 := blocks[2]
@@ -76,8 +75,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
			"Expected: %s, got: <nil>", ErrInvalidAncestorBlock)
	}
	ruleErr, ok = err.(RuleError)
	if !ok {
	if ok := errors.As(err, &ruleErr); !ok {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
			"Expected RuleError but got %s", err)
	} else if ruleErr.ErrorCode != ErrInvalidAncestorBlock {
@@ -96,8 +94,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
			"Expected: %s, got: <nil>", ErrUnexpectedDifficulty)
	}
	ruleErr, ok = err.(RuleError)
	if !ok {
	if ok := errors.As(err, &ruleErr); !ok {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
			"Expected RuleError but got %s", err)
	} else if ruleErr.ErrorCode != ErrUnexpectedDifficulty {
@@ -107,37 +104,4 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {

	// Set block2's bits back to valid for next tests
	block2.MsgBlock().Header.Bits = originalBits

	// Test rejecting the node due to database error
	databaseErrorMessage := "database error"
	guard := monkey.Patch(dbStoreBlock, func(dbTx database.Tx, block *util.Block) error {
		return errors.New(databaseErrorMessage)
	})
	defer guard.Unpatch()
	err = dag.maybeAcceptBlock(block2, BFNone)
	if err == nil {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+
			"Expected: %s, got: <nil>", databaseErrorMessage)
	}
	if !strings.Contains(err.Error(), databaseErrorMessage) {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+
			"Unexpected error. Want: %s, got: %s", databaseErrorMessage, err)
	}
	guard.Unpatch()

	// Test rejecting the node due to index error
	indexErrorMessage := "index error"
	guard = monkey.Patch((*blockIndex).flushToDB, func(_ *blockIndex) error {
		return errors.New(indexErrorMessage)
	})
	defer guard.Unpatch()
	err = dag.maybeAcceptBlock(block2, BFNone)
	if err == nil {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
			"Expected %s, got: <nil>", indexErrorMessage)
	}
	if !strings.Contains(err.Error(), indexErrorMessage) {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
			"Unexpected error. Want: %s, got: %s", indexErrorMessage, err)
	}
}
@@ -2,8 +2,6 @@ package blockdag

import (
	"container/heap"

	"github.com/daglabs/btcd/util/daghash"
)

// baseHeap is an implementation for heap.Interface that sorts blocks by their height
@@ -28,22 +26,14 @@ func (h *baseHeap) Pop() interface{} {
type upHeap struct{ baseHeap }

func (h upHeap) Less(i, j int) bool {
	if h.baseHeap[i].blueScore == h.baseHeap[j].blueScore {
		return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) < 0
	}

	return h.baseHeap[i].blueScore < h.baseHeap[j].blueScore
	return h.baseHeap[i].less(h.baseHeap[j])
}

// downHeap extends baseHeap to include Less operation that traverses from top to bottom
type downHeap struct{ baseHeap }

func (h downHeap) Less(i, j int) bool {
	if h.baseHeap[i].blueScore == h.baseHeap[j].blueScore {
		return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) > 0
	}

	return h.baseHeap[i].blueScore > h.baseHeap[j].blueScore
	return !h.baseHeap[i].less(h.baseHeap[j])
}

// blockHeap represents a mutable heap of Blocks, sorted by their height
@@ -77,7 +67,7 @@ func (bh blockHeap) Push(block *blockNode) {

// pushSet pushes a blockset to the heap.
func (bh blockHeap) pushSet(bs blockSet) {
	for _, block := range bs {
	for block := range bs {
		heap.Push(bh.impl, block)
	}
}
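The refactor above collapses two near-identical comparisons into one `less` method: the min-heap uses it directly and the max-heap negates it. A compact, runnable analogue with simplified stand-in types:

```go
// Compact, self-contained analogue of the upHeap/downHeap refactor above:
// one less() comparison (blue score, then hash as tie-breaker) shared by a
// min-heap and, negated, by a max-heap. Types here are simplified stand-ins.
package main

import (
	"container/heap"
	"fmt"
)

type node struct {
	blueScore uint64
	hash      string // stand-in for *daghash.Hash
}

func (n node) less(other node) bool {
	if n.blueScore == other.blueScore {
		return n.hash < other.hash // deterministic tie-breaker
	}
	return n.blueScore < other.blueScore
}

type baseHeap []node

func (h baseHeap) Len() int            { return len(h) }
func (h baseHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *baseHeap) Push(x interface{}) { *h = append(*h, x.(node)) }
func (h *baseHeap) Pop() interface{} {
	old := *h
	n := old[len(old)-1]
	*h = old[:len(old)-1]
	return n
}

type upHeap struct{ baseHeap }

func (h upHeap) Less(i, j int) bool { return h.baseHeap[i].less(h.baseHeap[j]) }

type downHeap struct{ baseHeap }

func (h downHeap) Less(i, j int) bool { return !h.baseHeap[i].less(h.baseHeap[j]) }

func main() {
	up := &upHeap{}
	for _, n := range []node{{2, "b"}, {1, "a"}, {2, "a"}} {
		heap.Push(up, n)
	}
	for up.Len() > 0 {
		fmt.Println(heap.Pop(up)) // {1 a}, then {2 a}, then {2 b}
	}
}
```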
@@ -3,19 +3,28 @@ package blockdag
import (
	"testing"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/util/daghash"
)

// TestBlockHeap tests pushing, popping, and determining the length of the heap.
func TestBlockHeap(t *testing.T) {
	block0Header := dagconfig.MainNetParams.GenesisBlock.Header
	block0 := newBlockNode(&block0Header, newSet(), dagconfig.MainNetParams.K)
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestBlockHeap", true, Config{
		DAGParams: &dagconfig.SimnetParams,
	})
	if err != nil {
		t.Fatalf("TestBlockHeap: Failed to setup DAG instance: %s", err)
	}
	defer teardownFunc()

	block0Header := dagconfig.SimnetParams.GenesisBlock.Header
	block0, _ := dag.newBlockNode(&block0Header, newBlockSet())

	block100000Header := Block100000.Header
	block100000 := newBlockNode(&block100000Header, setFromSlice(block0), dagconfig.MainNetParams.K)
	block100000, _ := dag.newBlockNode(&block100000Header, blockSetFromSlice(block0))

	block0smallHash := newBlockNode(&block0Header, newSet(), dagconfig.MainNetParams.K)
	block0smallHash, _ := dag.newBlockNode(&block0Header, newBlockSet())
	block0smallHash.hash = &daghash.Hash{}

	tests := []struct {
@@ -1,136 +0,0 @@
package blockdag

import (
	"fmt"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util/daghash"
)

var (
	// idByHashIndexBucketName is the name of the db bucket used to house
	// the block hash -> block id index.
	idByHashIndexBucketName = []byte("idbyhashidx")

	// hashByIDIndexBucketName is the name of the db bucket used to house
	// the block id -> block hash index.
	hashByIDIndexBucketName = []byte("hashbyididx")

	currentBlockIDKey = []byte("currentblockid")
)

// -----------------------------------------------------------------------------
// This is a mapping between block hashes and unique IDs. The ID
// is simply a sequentially incremented uint64 that is used instead of block hash
// for the indexers. This is useful because it is only 8 bytes versus 32 bytes
// hashes and thus saves a ton of space when a block is referenced in an index.
// It consists of three buckets: the first bucket maps the hash of each
// block to the unique ID and the second maps that ID back to the block hash.
// The third bucket contains the last received block ID, and is used
// when starting the node to check that the enabled indexes are up to date
// with the latest received block, and if not, initiate recovery process.
//
// The serialized format for keys and values in the block hash to ID bucket is:
//   <hash> = <ID>
//
//   Field   Type           Size
//   hash    daghash.Hash   32 bytes
//   ID      uint64         8 bytes
//   -----
//   Total: 40 bytes
//
// The serialized format for keys and values in the ID to block hash bucket is:
//   <ID> = <hash>
//
//   Field   Type           Size
//   ID      uint64         8 bytes
//   hash    daghash.Hash   32 bytes
//   -----
//   Total: 40 bytes
//
// -----------------------------------------------------------------------------

const blockIDSize = 8 // 8 bytes for block ID

// DBFetchBlockIDByHash uses an existing database transaction to retrieve the
// block id for the provided hash from the index.
func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error) {
	hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
	serializedID := hashIndex.Get(hash[:])
	if serializedID == nil {
		return 0, fmt.Errorf("no entry in the block ID index for block with hash %s", hash)
	}

	return DeserializeBlockID(serializedID), nil
}

// DBFetchBlockHashBySerializedID uses an existing database transaction to
// retrieve the hash for the provided serialized block id from the index.
func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
	idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
	hashBytes := idIndex.Get(serializedID)
	if hashBytes == nil {
		return nil, fmt.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
	}

	var hash daghash.Hash
	copy(hash[:], hashBytes)
	return &hash, nil
}

// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
// the index entries for the hash to id and id to hash mappings for the provided
// values.
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, serializedID []byte) error {
	// Add the block hash to ID mapping to the index.
	meta := dbTx.Metadata()
	hashIndex := meta.Bucket(idByHashIndexBucketName)
	if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
		return err
	}

	// Add the block ID to hash mapping to the index.
	idIndex := meta.Bucket(hashByIDIndexBucketName)
	return idIndex.Put(serializedID[:], hash[:])
}

// DBFetchCurrentBlockID returns the last known block ID.
func DBFetchCurrentBlockID(dbTx database.Tx) uint64 {
	serializedID := dbTx.Metadata().Get(currentBlockIDKey)
	if serializedID == nil {
		return 0
	}
	return DeserializeBlockID(serializedID)
}

// DeserializeBlockID returns a deserialized block id
func DeserializeBlockID(serializedID []byte) uint64 {
	return byteOrder.Uint64(serializedID)
}

// SerializeBlockID returns a serialized block id
func SerializeBlockID(blockID uint64) []byte {
	serializedBlockID := make([]byte, blockIDSize)
	byteOrder.PutUint64(serializedBlockID, blockID)
	return serializedBlockID
}

// DBFetchBlockHashByID uses an existing database transaction to retrieve the
// hash for the provided block id from the index.
func DBFetchBlockHashByID(dbTx database.Tx, id uint64) (*daghash.Hash, error) {
	return DBFetchBlockHashBySerializedID(dbTx, SerializeBlockID(id))
}

func createBlockID(dbTx database.Tx, blockHash *daghash.Hash) (uint64, error) {
	currentBlockID := DBFetchCurrentBlockID(dbTx)
	newBlockID := currentBlockID + 1
	serializedNewBlockID := SerializeBlockID(newBlockID)
	err := dbTx.Metadata().Put(currentBlockIDKey, serializedNewBlockID)
	if err != nil {
		return 0, err
	}
	err = dbPutBlockIDIndexEntry(dbTx, blockHash, serializedNewBlockID)
	if err != nil {
		return 0, err
	}
	return newBlockID, nil
}
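Although the file above is deleted, its 8-byte block-ID encoding is easy to see in isolation. A round-trip sketch, assuming the original's `byteOrder` was little-endian (an assumption, not confirmed by this diff):

```go
// Round trip of the deleted file's 8-byte block-ID encoding. The original's
// byteOrder is assumed to be little-endian here, for illustration only.
package main

import (
	"encoding/binary"
	"fmt"
)

const blockIDSize = 8 // 8 bytes for a uint64 block ID

func serializeBlockID(blockID uint64) []byte {
	serialized := make([]byte, blockIDSize)
	binary.LittleEndian.PutUint64(serialized, blockID)
	return serialized
}

func deserializeBlockID(serialized []byte) uint64 {
	return binary.LittleEndian.Uint64(serialized)
}

func main() {
	id := uint64(123456)
	fmt.Println(deserializeBlockID(serializeBlockID(id)) == id) // true
}
```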
@@ -5,23 +5,19 @@
package blockdag

import (
	"github.com/kaspanet/kaspad/dbaccess"
	"sync"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/util/daghash"
)

// blockIndex provides facilities for keeping track of an in-memory index of the
// block chain. Although the name block chain suggests a single chain of
// blocks, it is actually a tree-shaped structure where any node can have
// multiple children. However, there can only be one active branch which does
// indeed form a chain from the tip all the way back to the genesis block.
// block DAG.
type blockIndex struct {
	// The following fields are set when the instance is created and can't
	// be changed afterwards, so there is no need to protect them with a
	// separate mutex.
	db database.DB
	dagParams *dagconfig.Params

	sync.RWMutex
@@ -29,12 +25,11 @@ type blockIndex struct {
	dirty map[*blockNode]struct{}
}

// newBlockIndex returns a new empty instance of a block index. The index will
// be dynamically populated as block nodes are loaded from the database and
// manually added.
func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
func newBlockIndex(dagParams *dagconfig.Params) *blockIndex {
	return &blockIndex{
		db: db,
		dagParams: dagParams,
		index: make(map[daghash.Hash]*blockNode),
		dirty: make(map[*blockNode]struct{}),
@@ -46,20 +41,20 @@ func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
// This function is safe for concurrent access.
func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool {
	bi.RLock()
	defer bi.RUnlock()
	_, hasBlock := bi.index[*hash]
	bi.RUnlock()
	return hasBlock
}

// LookupNode returns the block node identified by the provided hash. It will
// return nil if there is no entry for the hash.
//
// This function is safe for concurrent access.
func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode {
func (bi *blockIndex) LookupNode(hash *daghash.Hash) (*blockNode, bool) {
	bi.RLock()
	node := bi.index[*hash]
	bi.RUnlock()
	return node
	defer bi.RUnlock()
	node, ok := bi.index[*hash]
	return node, ok
}

// AddNode adds the provided node to the block index and marks it as dirty.
@@ -68,9 +63,9 @@ func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode {
// This function is safe for concurrent access.
func (bi *blockIndex) AddNode(node *blockNode) {
	bi.Lock()
	defer bi.Unlock()
	bi.addNode(node)
	bi.dirty[node] = struct{}{}
	bi.Unlock()
}

// addNode adds the provided node to the block index, but does not mark it as
@@ -86,8 +81,8 @@ func (bi *blockIndex) addNode(node *blockNode) {
// This function is safe for concurrent access.
func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
	bi.RLock()
	defer bi.RUnlock()
	status := node.status
	bi.RUnlock()
	return status
}

@@ -98,9 +93,9 @@ func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
// This function is safe for concurrent access.
func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
	bi.Lock()
	defer bi.Unlock()
	node.status |= flags
	bi.dirty[node] = struct{}{}
	bi.Unlock()
}

// UnsetStatusFlags flips the provided status flags on the block node to off,
@@ -109,22 +104,13 @@ func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
// This function is safe for concurrent access.
func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
	bi.Lock()
	defer bi.Unlock()
	node.status &^= flags
	bi.dirty[node] = struct{}{}
	bi.Unlock()
}

// flushToDB writes all dirty block nodes to the database. If all writes
// succeed, this clears the dirty set.
func (bi *blockIndex) flushToDB() error {
	return bi.db.Update(func(dbTx database.Tx) error {
		return bi.flushToDBWithTx(dbTx)
	})
}

// flushToDBWithTx writes all dirty block nodes to the database. If all
// writes succeed, this clears the dirty set.
func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
// flushToDB writes all dirty block nodes to the database.
func (bi *blockIndex) flushToDB(dbContext *dbaccess.TxContext) error {
	bi.Lock()
	defer bi.Unlock()
	if len(bi.dirty) == 0 {
@@ -132,13 +118,19 @@ func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
	}

	for node := range bi.dirty {
		err := dbStoreBlockNode(dbTx, node)
		serializedBlockNode, err := serializeBlockNode(node)
		if err != nil {
			return err
		}
		key := blockIndexKey(node.hash, node.blueScore)
		err = dbaccess.StoreIndexBlock(dbContext, key, serializedBlockNode)
		if err != nil {
			return err
		}
	}

	// If write was successful, clear the dirty set.
	bi.dirty = make(map[*blockNode]struct{})
	return nil
}

func (bi *blockIndex) clearDirtyEntries() {
	bi.dirty = make(map[*blockNode]struct{})
}
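`flushToDB` persists only nodes that were marked dirty and clears the set once every write succeeds, so a failed flush can simply be retried. The write-behind pattern, distilled with simplified stand-in types:

```go
// Distilled version of the dirty-set flush above, with simplified stand-in
// types: mutate -> mark dirty -> flush persists only the dirty entries and
// clears the set once every write succeeded.
package main

import "fmt"

type store struct {
	dirty map[string]string // key -> pending value
}

func (s *store) set(key, value string) {
	s.dirty[key] = value // mark dirty on every mutation
}

func (s *store) flush(write func(key, value string) error) error {
	if len(s.dirty) == 0 {
		return nil // nothing to do
	}
	for key, value := range s.dirty {
		if err := write(key, value); err != nil {
			return err // keep the dirty set so a retry re-writes everything
		}
	}
	// All writes succeeded: clear the dirty set.
	s.dirty = make(map[string]string)
	return nil
}

func main() {
	s := &store{dirty: make(map[string]string)}
	s.set("a", "1")
	err := s.flush(func(k, v string) error { fmt.Println("write", k, v); return nil })
	fmt.Println(err, len(s.dirty)) // <nil> 0
}
```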
@@ -1,58 +1,26 @@
package blockdag

import (
	"errors"
	"strings"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/util/mstime"
	"testing"
	"time"

	"bou.ke/monkey"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/database"
)

func TestAncestorErrors(t *testing.T) {
	node := newTestNode(newSet(), int32(0x10000000), 0, time.Unix(0, 0), dagconfig.MainNetParams.K)
	node.chainHeight = 2
	// Create a new database and DAG instance to run tests against.
	params := dagconfig.SimnetParams
	dag, teardownFunc, err := DAGSetup("TestAncestorErrors", true, Config{
		DAGParams: &params,
	})
	if err != nil {
		t.Fatalf("TestAncestorErrors: Failed to setup DAG instance: %s", err)
	}
	defer teardownFunc()

	node := newTestNode(dag, newBlockSet(), int32(0x10000000), 0, mstime.Now())
	node.blueScore = 2
	ancestor := node.SelectedAncestor(3)
	if ancestor != nil {
		t.Errorf("TestAncestorErrors: Ancestor() unexpectedly returned a node. Expected: <nil>")
	}
}

func TestFlushToDBErrors(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestFlushToDBErrors", Config{
		DAGParams: &dagconfig.SimNetParams,
	})
	if err != nil {
		t.Fatalf("TestFlushToDBErrors: Failed to setup DAG instance: %s", err)
	}
	defer teardownFunc()

	// Call flushToDB without anything to flush. This should succeed
	err = dag.index.flushToDB()
	if err != nil {
		t.Errorf("TestFlushToDBErrors: flushToDB without anything to flush: "+
			"Unexpected flushToDB error: %s", err)
	}

	// Mark the genesis block as dirty
	dag.index.SetStatusFlags(dag.genesis, statusValid)

	// Test flushToDB failure due to database error
	databaseErrorMessage := "database error"
	guard := monkey.Patch(dbStoreBlockNode, func(_ database.Tx, _ *blockNode) error {
		return errors.New(databaseErrorMessage)
	})
	defer guard.Unpatch()
	err = dag.index.flushToDB()
	if err == nil {
		t.Errorf("TestFlushToDBErrors: flushToDB failure due to database error: "+
			"Expected: %s, got: <nil>", databaseErrorMessage)
	}
	if !strings.Contains(err.Error(), databaseErrorMessage) {
		t.Errorf("TestFlushToDBErrors: flushToDB failure due to database error: "+
			"Unexpected flushToDB error. Expected: %s, got: %s", databaseErrorMessage, err)
	}
}
@@ -1,11 +1,11 @@
package blockdag

import (
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/pkg/errors"
)

// BlockLocator is used to help locate a specific block. The algorithm for
// building the block locator is to add block hashes in reverse order on the
// block's selected parent chain until the desired stop block is reached.
// In order to keep the list of locator hashes to a reasonable number of entries,
@@ -21,100 +21,66 @@ import (
// [17 16 14 11 7 2 genesis]
type BlockLocator []*daghash.Hash

// BlockLocatorFromHashes returns a block locator from start and stop hash.
// BlockLocatorFromHashes returns a block locator from high and low hash.
// See BlockLocator for details on the algorithm used to create a block locator.
//
// In addition to the general algorithm referenced above, this function will
// return the block locator for the selected tip if the passed hash is not currently
// known.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockLocatorFromHashes(startHash, stopHash *daghash.Hash) BlockLocator {
func (dag *BlockDAG) BlockLocatorFromHashes(highHash, lowHash *daghash.Hash) (BlockLocator, error) {
	dag.dagLock.RLock()
	defer dag.dagLock.RUnlock()
	startNode := dag.index.LookupNode(startHash)
	var stopNode *blockNode
	if !stopHash.IsEqual(&daghash.ZeroHash) {
		stopNode = dag.index.LookupNode(stopHash)

	highNode, ok := dag.index.LookupNode(highHash)
	if !ok {
		return nil, errors.Errorf("block %s is unknown", highHash)
	}
	return dag.blockLocator(startNode, stopNode)

	lowNode, ok := dag.index.LookupNode(lowHash)
	if !ok {
		return nil, errors.Errorf("block %s is unknown", lowHash)
	}

	return dag.blockLocator(highNode, lowNode)
}

// LatestBlockLocator returns a block locator for the current tips of the DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) LatestBlockLocator() BlockLocator {
	dag.dagLock.RLock()
	defer dag.dagLock.RUnlock()
	return dag.blockLocator(nil, nil)
}

// blockLocator returns a block locator for the passed start and stop nodes.
// The default value for the start node is the selected tip, and the default
// values of the stop node is the genesis block.
//
// blockLocator returns a block locator for the passed high and low nodes.
// See the BlockLocator type comments for more details.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) blockLocator(startNode, stopNode *blockNode) BlockLocator {
	// Use the selected tip if requested.
	if startNode == nil {
		startNode = dag.virtual.selectedParent
	}

	if stopNode == nil {
		stopNode = dag.genesis
	}

	// We use the selected parent of the start node, so the
	// block locator won't contain the start node.
	startNode = startNode.selectedParent

	// If the start node or the stop node are not in the
	// virtual's selected parent chain, we replace them with their
	// closest selected parent that is part of the virtual's
	// selected parent chain.
	for !dag.IsInSelectedParentChain(stopNode.hash) {
		stopNode = stopNode.selectedParent
	}

	for !dag.IsInSelectedParentChain(startNode.hash) {
		startNode = startNode.selectedParent
	}

	// Calculate the max number of entries that will ultimately be in the
	// block locator. See the description of the algorithm for how these
	// numbers are derived.

	// startNode.hash + stopNode.hash.
	// Then floor(log2(startNode.chainHeight-stopNode.chainHeight)) entries for the skip portion.
	maxEntries := 2 + util.FastLog2Floor(startNode.chainHeight-stopNode.chainHeight)
	locator := make(BlockLocator, 0, maxEntries)
func (dag *BlockDAG) blockLocator(highNode, lowNode *blockNode) (BlockLocator, error) {
	// We use the selected parent of the high node, so the
	// block locator won't contain the high node.
	highNode = highNode.selectedParent

	node := highNode
	step := uint64(1)
	for node := startNode; node != nil; {
	locator := make(BlockLocator, 0)
	for node != nil {
		locator = append(locator, node.hash)

		// Nothing more to add once the stop node has been added.
		if node.chainHeight == stopNode.chainHeight {
		// Nothing more to add once the low node has been added.
		if node.blueScore <= lowNode.blueScore {
			if node != lowNode {
				return nil, errors.Errorf("highNode and lowNode are " +
					"not in the same selected parent chain.")
			}
			break
		}

		// Calculate chainHeight of previous node to include ensuring the
		// final node is stopNode.
		nextChainHeight := node.chainHeight - step
		if nextChainHeight < stopNode.chainHeight {
			nextChainHeight = stopNode.chainHeight
		// Calculate blueScore of previous node to include ensuring the
		// final node is lowNode.
		nextBlueScore := node.blueScore - step
		if nextBlueScore < lowNode.blueScore {
			nextBlueScore = lowNode.blueScore
		}

		// walk backwards through the nodes to the correct ancestor.
		node = node.SelectedAncestor(nextChainHeight)
		node = node.SelectedAncestor(nextBlueScore)

		// Double the distance between included hashes.
		step *= 2
	}

	return locator
	return locator, nil
}

// FindNextLocatorBoundaries returns the lowest unknown block locator, hash
@@ -123,21 +89,21 @@ func (dag *BlockDAG) blockLocator(startNode, stopNode *blockNode) BlockLocator {
// sync peer.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (startHash, stopHash *daghash.Hash) {
func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (highHash, lowHash *daghash.Hash) {
	// Find the most recent locator block hash in the DAG. In the case none of
	// the hashes in the locator are in the DAG, fall back to the genesis block.
	stopNode := dag.genesis
	lowNode := dag.genesis
	nextBlockLocatorIndex := int64(len(locator) - 1)
	for i, hash := range locator {
		node := dag.index.LookupNode(hash)
		if node != nil {
			stopNode = node
		node, ok := dag.index.LookupNode(hash)
		if ok {
			lowNode = node
			nextBlockLocatorIndex = int64(i) - 1
			break
		}
	}
	if nextBlockLocatorIndex < 0 {
		return nil, stopNode.hash
		return nil, lowNode.hash
	}
	return locator[nextBlockLocatorIndex], stopNode.hash
	return locator[nextBlockLocatorIndex], lowNode.hash
}
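The rewritten `blockLocator` walks down from the high node with a step that starts at 1 and doubles each iteration, always finishing at the low node. The sketch below computes just the sequence of target blue scores; the real code then calls `SelectedAncestor`, which lands on the nearest chain block at or below each target, so actual locator entries can differ from these raw targets.

```go
// Worked example of the locator's exponential back-off over blue scores, as
// in the rewritten blockLocator above: step starts at 1 and doubles, and the
// low score is always included last. Raw targets only; node lookup omitted.
package main

import "fmt"

func locatorBlueScores(high, low uint64) []uint64 {
	scores := []uint64{}
	score := high
	step := uint64(1)
	for {
		scores = append(scores, score)
		if score <= low {
			break
		}
		next := score - step
		if next < low || next > score { // clamp, guarding uint64 underflow
			next = low
		}
		score = next
		step *= 2
	}
	return scores
}

func main() {
	// Targets from blue score 17 down to 0 (genesis): [17 16 14 10 2 0]
	fmt.Println(locatorBlueScores(17, 0))
}
```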
@@ -6,10 +6,14 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
"math"
|
||||
|
||||
"github.com/daglabs/btcd/util/daghash"
|
||||
"github.com/daglabs/btcd/wire"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// blockStatus is a bit field representing the validation state of the block.
|
||||
@@ -28,11 +32,6 @@ const (
|
||||
// statusInvalidAncestor indicates that one of the block's ancestors has
|
||||
// has failed validation, thus the block is also invalid.
|
||||
statusInvalidAncestor
|
||||
|
||||
// statusNone indicates that the block has no validation state flags set.
|
||||
//
|
||||
// NOTE: This must be defined last in order to avoid influencing iota.
|
||||
statusNone blockStatus = 0
|
||||
)
|
||||
|
||||
// KnownValid returns whether the block is known to be valid. This will return
|
||||
@@ -54,8 +53,8 @@ func (status blockStatus) KnownInvalid() bool {
|
||||
type blockNode struct {
|
||||
// NOTE: Additions, deletions, or modifications to the order of the
|
||||
// definitions in this struct should not be changed without considering
|
||||
// how it affects alignment on 64-bit platforms. The current order is
|
||||
// specifically crafted to result in minimal padding. There will be
|
||||
// how it affects alignment on 64-bit platforms. The current order is
|
||||
// specifically crafted to result in minimal padding. There will be
|
||||
// hundreds of thousands of these in memory, so a few extra bytes of
|
||||
// padding adds up.
|
||||
|
||||
@@ -75,19 +74,16 @@ type blockNode struct {
|
||||
// blueScore is the count of all the blue blocks in this block's past
|
||||
blueScore uint64
|
||||
|
||||
// bluesAnticoneSizes is a map holding the set of blues affected by this block and their
|
||||
// modified blue anticone size.
|
||||
bluesAnticoneSizes map[*blockNode]dagconfig.KType
|
||||
|
||||
// hash is the double sha 256 of the block.
|
||||
hash *daghash.Hash
|
||||
|
||||
// height is the position in the block DAG.
|
||||
height uint64
|
||||
|
||||
// chainHeight is the number of hops you need to go down the selected parent chain in order to get to the genesis block.
|
||||
chainHeight uint64
|
||||
|
||||
// Some fields from block headers to aid in best chain selection and
|
||||
// reconstructing headers from memory. These must be treated as
|
||||
// immutable and are intentionally ordered to avoid padding on 64-bit
|
||||
// platforms.
|
||||
// Some fields from block headers to aid in reconstructing headers
|
||||
// from memory. These must be treated as immutable and are intentionally
|
||||
// ordered to avoid padding on 64-bit platforms.
|
||||
version int32
|
||||
bits uint32
|
||||
nonce uint64
|
||||
@@ -106,14 +102,17 @@ type blockNode struct {
|
||||
isFinalized bool
|
||||
}
|
||||
|
||||
// initBlockNode initializes a block node from the given header and parent nodes.
|
||||
// This function is NOT safe for concurrent access. It must only be called when
|
||||
// initially creating a node.
|
||||
func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) {
|
||||
*node = blockNode{
|
||||
parents: parents,
|
||||
children: make(blockSet),
|
||||
timestamp: time.Now().Unix(),
|
||||
// newBlockNode returns a new block node for the given block header and parents, and the
|
||||
// anticone of its selected parent (parent with highest blue score).
|
||||
// selectedParentAnticone is used to update reachability data we store for future reachability queries.
|
||||
// This function is NOT safe for concurrent access.
|
||||
func (dag *BlockDAG) newBlockNode(blockHeader *wire.BlockHeader, parents blockSet) (node *blockNode, selectedParentAnticone []*blockNode) {
|
||||
node = &blockNode{
|
||||
parents: parents,
|
||||
children: make(blockSet),
|
||||
blueScore: math.MaxUint64, // Initialized to the max value to avoid collisions with the genesis block
|
||||
timestamp: dag.Now().UnixMilliseconds(),
|
||||
bluesAnticoneSizes: make(map[*blockNode]dagconfig.KType),
|
||||
}
|
||||
|
||||
// blockHeader is nil only for the virtual block
|
||||
@@ -122,7 +121,7 @@ func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents block
|
||||
node.version = blockHeader.Version
|
||||
node.bits = blockHeader.Bits
|
||||
node.nonce = blockHeader.Nonce
|
||||
node.timestamp = blockHeader.Timestamp.Unix()
|
||||
node.timestamp = blockHeader.Timestamp.UnixMilliseconds()
|
||||
node.hashMerkleRoot = blockHeader.HashMerkleRoot
|
||||
node.acceptedIDMerkleRoot = blockHeader.AcceptedIDMerkleRoot
|
||||
node.utxoCommitment = blockHeader.UTXOCommitment
|
||||
@@ -130,42 +129,34 @@ func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents block
|
||||
node.hash = &daghash.ZeroHash
|
||||
}
|
||||
|
||||
if len(parents) > 0 {
|
||||
node.blues, node.selectedParent, node.blueScore = phantom(node, phantomK)
|
||||
node.height = calculateNodeHeight(node)
|
||||
node.chainHeight = calculateChainHeight(node)
|
||||
if len(parents) == 0 {
|
||||
// The genesis block is defined to have a blueScore of 0
|
||||
node.blueScore = 0
|
||||
return node, nil
|
||||
}
|
||||
}
|
||||
|
||||
func calculateNodeHeight(node *blockNode) uint64 {
|
||||
if node.isGenesis() {
|
||||
return 0
|
||||
selectedParentAnticone, err := dag.ghostdag(node)
|
||||
if err != nil {
|
||||
panic(errors.Wrap(err, "unexpected error in GHOSTDAG"))
|
||||
}
|
||||
return node.parents.maxHeight() + 1
|
||||
}
|
||||
|
||||
func calculateChainHeight(node *blockNode) uint64 {
|
||||
if node.isGenesis() {
|
||||
return 0
|
||||
}
|
||||
return node.selectedParent.chainHeight + 1
|
||||
}
|
||||
|
||||
// newBlockNode returns a new block node for the given block header and parent
|
||||
//nodes. This function is NOT safe for concurrent access.
|
||||
func newBlockNode(blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) *blockNode {
|
||||
var node blockNode
|
||||
initBlockNode(&node, blockHeader, parents, phantomK)
|
||||
return &node
|
||||
return node, selectedParentAnticone
|
||||
}
|
||||
|
||||
// updateParentsChildren updates the node's parents to point to new node
|
||||
func (node *blockNode) updateParentsChildren() {
|
||||
for _, parent := range node.parents {
|
||||
for parent := range node.parents {
|
||||
parent.children.add(node)
|
||||
}
|
||||
}
|
||||
|
||||
func (node *blockNode) less(other *blockNode) bool {
|
||||
if node.blueScore == other.blueScore {
|
||||
return daghash.Less(node.hash, other.hash)
|
||||
}
|
||||
|
||||
return node.blueScore < other.blueScore
|
||||
}
|
||||
|
||||
// Header constructs a block header from the node and returns it.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
@@ -177,50 +168,50 @@ func (node *blockNode) Header() *wire.BlockHeader {
|
||||
HashMerkleRoot: node.hashMerkleRoot,
|
||||
AcceptedIDMerkleRoot: node.acceptedIDMerkleRoot,
|
||||
UTXOCommitment: node.utxoCommitment,
|
||||
Timestamp: time.Unix(node.timestamp, 0),
|
||||
Timestamp: node.time(),
|
||||
Bits: node.bits,
|
||||
Nonce: node.nonce,
|
||||
}
|
||||
}
|
||||
|
||||
// SelectedAncestor returns the ancestor block node at the provided chain-height by following
|
||||
// SelectedAncestor returns the ancestor block node at the provided blue score by following
|
||||
// the selected-parents chain backwards from this node. The returned block will be nil when a
|
||||
// height is requested that is after the height of the passed node.
|
||||
// blue score is requested that is higher than the blue score of the passed node.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (node *blockNode) SelectedAncestor(chainHeight uint64) *blockNode {
|
||||
if chainHeight < 0 || chainHeight > node.chainHeight {
|
||||
func (node *blockNode) SelectedAncestor(blueScore uint64) *blockNode {
|
||||
if blueScore > node.blueScore {
|
||||
return nil
|
||||
}
|
||||
|
||||
n := node
|
||||
for ; n != nil && n.chainHeight != chainHeight; n = n.selectedParent {
|
||||
// Intentionally left blank
|
||||
for n != nil && n.blueScore > blueScore {
|
||||
n = n.selectedParent
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
// RelativeAncestor returns the ancestor block node a relative 'distance' of
|
||||
// chain-blocks before this node. This is equivalent to calling Ancestor with
|
||||
// the node's chain-height minus provided distance.
|
||||
// blue blocks before this node. This is equivalent to calling Ancestor with
|
||||
// the node's blue score minus provided distance.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (node *blockNode) RelativeAncestor(distance uint64) *blockNode {
|
||||
return node.SelectedAncestor(node.chainHeight - distance)
|
||||
return node.SelectedAncestor(node.blueScore - distance)
|
||||
}
|
||||
|
||||
// PastMedianTime returns the median time of the previous few blocks
// prior to, and including, the block node.
//
// This function is safe for concurrent access.
func (node *blockNode) PastMedianTime(dag *BlockDAG) time.Time {
func (node *blockNode) PastMedianTime(dag *BlockDAG) mstime.Time {
    window := blueBlockWindow(node, 2*dag.TimestampDeviationTolerance-1)
    medianTimestamp, err := window.medianTimestamp()
    if err != nil {
        panic(fmt.Sprintf("blueBlockWindow: %s", err))
    }
    return time.Unix(medianTimestamp, 0)
    return mstime.UnixMilliseconds(medianTimestamp)
}

func (node *blockNode) ParentHashes() []*daghash.Hash {
@@ -232,11 +223,15 @@ func (node *blockNode) isGenesis() bool {
    return len(node.parents) == 0
}

func (node *blockNode) finalityScore() uint64 {
    return node.blueScore / FinalityInterval
func (node *blockNode) finalityScore(dag *BlockDAG) uint64 {
    return node.blueScore / uint64(dag.FinalityInterval())
}

// String returns a string that contains the block hash.
func (node blockNode) String() string {
    return node.hash.String()
}

func (node *blockNode) time() mstime.Time {
    return mstime.UnixMilliseconds(node.timestamp)
}
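node.timestamp is now interpreted as Unix milliseconds rather than seconds, so both header construction and past-median-time go through mstime.UnixMilliseconds instead of time.Unix. A standalone sketch of the unit distinction, assuming mstime wraps the standard time package in the obvious way:

package main

import (
    "fmt"
    "time"
)

// unixMilliseconds illustrates what mstime.UnixMilliseconds is assumed to do:
// split a millisecond timestamp into whole seconds plus nanoseconds.
func unixMilliseconds(ms int64) time.Time {
    return time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond))
}

func main() {
    ms := int64(1577836800123)
    fmt.Println(time.Unix(ms, 0).UTC())     // wrong unit: a far-future date
    fmt.Println(unixMilliseconds(ms).UTC()) // 2020-01-01 00:00:00.123 +0000 UTC
}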
@@ -1,86 +1,41 @@
package blockdag

import (
    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/kaspanet/kaspad/util/daghash"
    "testing"
)

func TestChainHeight(t *testing.T) {
    phantomK := uint32(2)
    buildNode := buildNodeGenerator(phantomK, true)
// This test is to ensure that bluesAnticoneSizes is serialized to the size of KType.
// We verify that by serializing and deserializing the block while making sure that we stay within the expected range.
func TestBlueAnticoneSizesSize(t *testing.T) {
    dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", true, Config{
        DAGParams: &dagconfig.SimnetParams,
    })
    if err != nil {
        t.Fatalf("TestBlueAnticoneSizesSize: Failed to setup DAG instance: %s", err)
    }
    defer teardownFunc()

    node0 := buildNode(setFromSlice())
    node1 := buildNode(setFromSlice(node0))
    node2 := buildNode(setFromSlice(node0))
    node3 := buildNode(setFromSlice(node0))
    node4 := buildNode(setFromSlice(node1, node2, node3))
    node5 := buildNode(setFromSlice(node1, node2, node3))
    node6 := buildNode(setFromSlice(node1, node2, node3))
    node7 := buildNode(setFromSlice(node0))
    node8 := buildNode(setFromSlice(node7))
    node9 := buildNode(setFromSlice(node8))
    node10 := buildNode(setFromSlice(node9, node6))
    k := dagconfig.KType(0)
    k--

    // Because nodes 7 & 8 were mined secretly, node10's selected
    // parent will be node6, although node9 is higher. So in this
    // case, node10.height and node10.chainHeight will be different

    tests := []struct {
        node                *blockNode
        expectedChainHeight uint64
    }{
        {
            node:                node0,
            expectedChainHeight: 0,
        },
        {
            node:                node1,
            expectedChainHeight: 1,
        },
        {
            node:                node2,
            expectedChainHeight: 1,
        },
        {
            node:                node3,
            expectedChainHeight: 1,
        },
        {
            node:                node4,
            expectedChainHeight: 2,
        },
        {
            node:                node5,
            expectedChainHeight: 2,
        },
        {
            node:                node6,
            expectedChainHeight: 2,
        },
        {
            node:                node7,
            expectedChainHeight: 1,
        },
        {
            node:                node8,
            expectedChainHeight: 2,
        },
        {
            node:                node9,
            expectedChainHeight: 3,
        },
        {
            node:                node10,
            expectedChainHeight: 3,
        },
    if k < dagconfig.KType(0) {
        t.Fatalf("KType must be unsigned")
    }

    for _, test := range tests {
        if test.node.chainHeight != test.expectedChainHeight {
            t.Errorf("block %v expected chain height %v but got %v", test.node, test.expectedChainHeight, test.node.chainHeight)
        }
        if calculateChainHeight(test.node) != test.expectedChainHeight {
            t.Errorf("block %v expected calculated chain height %v but got %v", test.node, test.expectedChainHeight, test.node.chainHeight)
        }
    blockHeader := dagconfig.SimnetParams.GenesisBlock.Header
    node, _ := dag.newBlockNode(&blockHeader, newBlockSet())
    fakeBlue := &blockNode{hash: &daghash.Hash{1}}
    dag.index.AddNode(fakeBlue)
    // Setting maxKType to maximum value of KType.
    // As we verify above that KType is unsigned we can be sure that maxKType is indeed the maximum value of KType.
    maxKType := ^dagconfig.KType(0)
    node.bluesAnticoneSizes[fakeBlue] = maxKType
    serializedNode, _ := serializeBlockNode(node)
    deserializedNode, _ := dag.deserializeBlockNode(serializedNode)
    if deserializedNode.bluesAnticoneSizes[fakeBlue] != maxKType {
        t.Fatalf("TestBlueAnticoneSizesSize: BlueAnticoneSize should not change when deserializing. Expected: %v but got %v",
            maxKType, deserializedNode.bluesAnticoneSizes[fakeBlue])
    }

}
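Two unsigned-integer idioms carry this test: decrementing zero wraps around to the type's maximum value (so `k < dagconfig.KType(0)` could only ever be true for a signed type), and `^T(0)`, the bitwise NOT of zero, yields the maximum value of an unsigned type directly. A standalone illustration, with uint8 standing in for the real dagconfig.KType, which may be a different width:

package main

import "fmt"

type KType uint8 // stand-in for dagconfig.KType

func main() {
    k := KType(0)
    k-- // unsigned wraparound: 0 - 1 == max value
    fmt.Println(k)         // 255
    fmt.Println(^KType(0)) // 255: all bits set is the unsigned maximum
}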
@@ -3,83 +3,72 @@ package blockdag
import (
    "strings"

    "github.com/daglabs/btcd/util/daghash"
    "github.com/kaspanet/kaspad/util/daghash"
)

// blockSet implements a basic unsorted set of blocks
type blockSet map[daghash.Hash]*blockNode
type blockSet map[*blockNode]struct{}

// newSet creates a new, empty BlockSet
func newSet() blockSet {
    return map[daghash.Hash]*blockNode{}
// newBlockSet creates a new, empty BlockSet
func newBlockSet() blockSet {
    return map[*blockNode]struct{}{}
}

// setFromSlice converts a slice of blocks into an unordered set represented as map
func setFromSlice(blocks ...*blockNode) blockSet {
    set := newSet()
    for _, block := range blocks {
        set.add(block)
// blockSetFromSlice converts a slice of blockNodes into an unordered set represented as map
func blockSetFromSlice(nodes ...*blockNode) blockSet {
    set := newBlockSet()
    for _, node := range nodes {
        set.add(node)
    }
    return set
}

// maxHeight returns the height of the highest block in the block set
func (bs blockSet) maxHeight() uint64 {
    var maxHeight uint64
    for _, node := range bs {
        if maxHeight < node.height {
            maxHeight = node.height
        }
    }
    return maxHeight
// add adds a blockNode to this BlockSet
func (bs blockSet) add(node *blockNode) {
    bs[node] = struct{}{}
}

// add adds a block to this BlockSet
func (bs blockSet) add(block *blockNode) {
    bs[*block.hash] = block
}

// remove removes a block from this BlockSet, if exists
// Does nothing if this set does not contain the block
func (bs blockSet) remove(block *blockNode) {
    delete(bs, *block.hash)
// remove removes a blockNode from this BlockSet, if exists
// Does nothing if this set does not contain the blockNode
func (bs blockSet) remove(node *blockNode) {
    delete(bs, node)
}

// clone clones the block set
func (bs blockSet) clone() blockSet {
    clone := newSet()
    for _, block := range bs {
        clone.add(block)
    clone := newBlockSet()
    for node := range bs {
        clone.add(node)
    }
    return clone
}

// subtract returns the difference between the BlockSet and another BlockSet
func (bs blockSet) subtract(other blockSet) blockSet {
    diff := newSet()
    for _, block := range bs {
        if !other.contains(block) {
            diff.add(block)
    diff := newBlockSet()
    for node := range bs {
        if !other.contains(node) {
            diff.add(node)
        }
    }
    return diff
}

// addSet adds all blocks in other set to this set
// addSet adds all blockNodes in other set to this set
func (bs blockSet) addSet(other blockSet) {
    for _, block := range other {
        bs.add(block)
    for node := range other {
        bs.add(node)
    }
}

// addSlice adds provided slice to this set
func (bs blockSet) addSlice(slice []*blockNode) {
    for _, block := range slice {
        bs.add(block)
    for _, node := range slice {
        bs.add(node)
    }
}

// union returns a BlockSet that contains all blocks included in this set,
// union returns a BlockSet that contains all blockNodes included in this set,
// the other set, or both
func (bs blockSet) union(other blockSet) blockSet {
    union := bs.clone()
@@ -89,39 +78,16 @@ func (bs blockSet) union(other blockSet) blockSet {
    return union
}

// contains returns true iff this set contains block
func (bs blockSet) contains(block *blockNode) bool {
    _, ok := bs[*block.hash]
// contains returns true iff this set contains node
func (bs blockSet) contains(node *blockNode) bool {
    _, ok := bs[node]
    return ok
}

// containsHash returns true iff this set contains a block hash
func (bs blockSet) containsHash(hash *daghash.Hash) bool {
    _, ok := bs[*hash]
    return ok
}

// hashesEqual returns true if the given hashes are equal to the hashes
// of the blocks in this set.
// NOTE: The given hash slice must not contain duplicates.
func (bs blockSet) hashesEqual(hashes []*daghash.Hash) bool {
    if len(hashes) != len(bs) {
        return false
    }

    for _, hash := range hashes {
        if _, wasFound := bs[*hash]; !wasFound {
            return false
        }
    }

    return true
}

// hashes returns the hashes of the blocks in this set.
// hashes returns the hashes of the blockNodes in this set.
func (bs blockSet) hashes() []*daghash.Hash {
    hashes := make([]*daghash.Hash, 0, len(bs))
    for _, node := range bs {
    for node := range bs {
        hashes = append(hashes, node.hash)
    }
    daghash.Sort(hashes)
@@ -130,27 +96,16 @@ func (bs blockSet) hashes() []*daghash.Hash {

func (bs blockSet) String() string {
    nodeStrs := make([]string, 0, len(bs))
    for _, node := range bs {
    for node := range bs {
        nodeStrs = append(nodeStrs, node.String())
    }
    return strings.Join(nodeStrs, ",")
}

// anyChildInSet returns true iff any child of block is contained within this set
func (bs blockSet) anyChildInSet(block *blockNode) bool {
    for _, child := range block.children {
        if bs.contains(child) {
            return true
        }
    }

    return false
}

func (bs blockSet) bluest() *blockNode {
    var bluestNode *blockNode
    var maxScore uint64
    for _, node := range bs {
    for node := range bs {
        if bluestNode == nil ||
            node.blueScore > maxScore ||
            (node.blueScore == maxScore && daghash.Less(node.hash, bluestNode.hash)) {
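Re-keying blockSet from map[daghash.Hash]*blockNode to map[*blockNode]struct{} switches membership from hash equality to pointer identity: iteration yields the nodes directly (`for node := range bs`), the struct{} values occupy no memory, and no hash has to be dereferenced and copied on every operation. The cost is that hash-only lookups (the containsHash and hashesEqual helpers that appear to be dropped in this diff) are no longer expressible on the set itself. A condensed standalone illustration of the pattern:

package main

import "fmt"

type node struct{ name string }

// Identity-keyed set: membership follows pointer equality, not contents.
type nodeSet map[*node]struct{}

func (s nodeSet) add(n *node) { s[n] = struct{}{} }

func (s nodeSet) contains(n *node) bool {
    _, ok := s[n]
    return ok
}

func main() {
    a := &node{name: "a"}
    s := nodeSet{}
    s.add(a)
    fmt.Println(s.contains(a))                 // true: same pointer
    fmt.Println(s.contains(&node{name: "a"})) // false: same contents, new identity
}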
@@ -4,11 +4,11 @@ import (
    "reflect"
    "testing"

    "github.com/daglabs/btcd/util/daghash"
    "github.com/kaspanet/kaspad/util/daghash"
)

func TestHashes(t *testing.T) {
    bs := setFromSlice(
    bs := blockSetFromSlice(
        &blockNode{
            hash: &daghash.Hash{3},
        },
@@ -49,33 +49,33 @@ func TestBlockSetSubtract(t *testing.T) {
    }{
        {
            name:           "both sets empty",
            setA:           setFromSlice(),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(),
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(),
        },
        {
            name:           "subtract an empty set",
            setA:           setFromSlice(node1),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(node1),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "subtract from empty set",
            setA:           setFromSlice(),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(),
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(),
        },
        {
            name:           "subtract unrelated set",
            setA:           setFromSlice(node1),
            setB:           setFromSlice(node2),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(node1),
            setB:           blockSetFromSlice(node2),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "typical case",
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node2, node3),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node2, node3),
            expectedResult: blockSetFromSlice(node1),
        },
    }

@@ -101,33 +101,33 @@ func TestBlockSetAddSet(t *testing.T) {
    }{
        {
            name:           "both sets empty",
            setA:           setFromSlice(),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(),
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(),
        },
        {
            name:           "add an empty set",
            setA:           setFromSlice(node1),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(node1),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "add to empty set",
            setA:           setFromSlice(),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "add already added member",
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(node1, node2),
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(node1, node2),
        },
        {
            name:           "typical case",
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node2, node3),
            expectedResult: setFromSlice(node1, node2, node3),
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node2, node3),
            expectedResult: blockSetFromSlice(node1, node2, node3),
        },
    }

@@ -153,33 +153,33 @@ func TestBlockSetAddSlice(t *testing.T) {
    }{
        {
            name:           "add empty slice to empty set",
            set:            setFromSlice(),
            set:            blockSetFromSlice(),
            slice:          []*blockNode{},
            expectedResult: setFromSlice(),
            expectedResult: blockSetFromSlice(),
        },
        {
            name:           "add an empty slice",
            set:            setFromSlice(node1),
            set:            blockSetFromSlice(node1),
            slice:          []*blockNode{},
            expectedResult: setFromSlice(node1),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "add to empty set",
            set:            setFromSlice(),
            set:            blockSetFromSlice(),
            slice:          []*blockNode{node1},
            expectedResult: setFromSlice(node1),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "add already added member",
            set:            setFromSlice(node1, node2),
            set:            blockSetFromSlice(node1, node2),
            slice:          []*blockNode{node1},
            expectedResult: setFromSlice(node1, node2),
            expectedResult: blockSetFromSlice(node1, node2),
        },
        {
            name:           "typical case",
            set:            setFromSlice(node1, node2),
            set:            blockSetFromSlice(node1, node2),
            slice:          []*blockNode{node2, node3},
            expectedResult: setFromSlice(node1, node2, node3),
            expectedResult: blockSetFromSlice(node1, node2, node3),
        },
    }

@@ -205,33 +205,33 @@ func TestBlockSetUnion(t *testing.T) {
    }{
        {
            name:           "both sets empty",
            setA:           setFromSlice(),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(),
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(),
        },
        {
            name:           "union against an empty set",
            setA:           setFromSlice(node1),
            setB:           setFromSlice(),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(node1),
            setB:           blockSetFromSlice(),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "union from an empty set",
            setA:           setFromSlice(),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(node1),
            setA:           blockSetFromSlice(),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(node1),
        },
        {
            name:           "union with subset",
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node1),
            expectedResult: setFromSlice(node1, node2),
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node1),
            expectedResult: blockSetFromSlice(node1, node2),
        },
        {
            name:           "typical case",
            setA:           setFromSlice(node1, node2),
            setB:           setFromSlice(node2, node3),
            expectedResult: setFromSlice(node1, node2, node3),
            setA:           blockSetFromSlice(node1, node2),
            setB:           blockSetFromSlice(node2, node3),
            expectedResult: blockSetFromSlice(node1, node2, node3),
        },
    }

@@ -243,54 +243,3 @@ func TestBlockSetUnion(t *testing.T) {
        }
    }
}

func TestBlockSetHashesEqual(t *testing.T) {
    node1 := &blockNode{hash: &daghash.Hash{10}}
    node2 := &blockNode{hash: &daghash.Hash{20}}

    tests := []struct {
        name           string
        set            blockSet
        hashes         []*daghash.Hash
        expectedResult bool
    }{
        {
            name:           "empty set, no hashes",
            set:            setFromSlice(),
            hashes:         []*daghash.Hash{},
            expectedResult: true,
        },
        {
            name:           "empty set, one hash",
            set:            setFromSlice(),
            hashes:         []*daghash.Hash{node1.hash},
            expectedResult: false,
        },
        {
            name:           "set and hashes of different length",
            set:            setFromSlice(node1, node2),
            hashes:         []*daghash.Hash{node1.hash},
            expectedResult: false,
        },
        {
            name:           "set equal to hashes",
            set:            setFromSlice(node1, node2),
            hashes:         []*daghash.Hash{node1.hash, node2.hash},
            expectedResult: true,
        },
        {
            name:           "set equal to hashes, different order",
            set:            setFromSlice(node1, node2),
            hashes:         []*daghash.Hash{node2.hash, node1.hash},
            expectedResult: true,
        },
    }

    for _, test := range tests {
        result := test.set.hashesEqual(test.hashes)
        if result != test.expectedResult {
            t.Errorf("blockSet.hashesEqual: unexpected result in test '%s'. "+
                "Expected: %t, got: %t", test.name, test.expectedResult, result)
        }
    }
}
@@ -1,8 +1,9 @@
package blockdag

import (
    "errors"
    "github.com/daglabs/btcd/util"
    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/util/bigintpool"
    "github.com/pkg/errors"
    "math"
    "math/big"
    "sort"
@@ -11,7 +12,7 @@ import (
type blockWindow []*blockNode

// blueBlockWindow returns a blockWindow of the given size that contains the
// blues in the past of startingNode, sorted by phantom order.
// blues in the past of startingNode, sorted by GHOSTDAG order.
// If the number of blues in the past of startingNode is less than windowSize,
// the window will be padded by genesis blocks to achieve a size of windowSize.
func blueBlockWindow(startingNode *blockNode, windowSize uint64) blockWindow {
@@ -53,13 +54,19 @@ func (window blockWindow) minMaxTimestamps() (min, max int64) {
    return
}

func (window blockWindow) averageTarget() *big.Int {
    averageTarget := big.NewInt(0)
func (window blockWindow) averageTarget(averageTarget *big.Int) {
    averageTarget.SetInt64(0)

    target := bigintpool.Acquire(0)
    defer bigintpool.Release(target)
    for _, node := range window {
        target := util.CompactToBig(node.bits)
        util.CompactToBigWithDestination(node.bits, target)
        averageTarget.Add(averageTarget, target)
    }
    return averageTarget.Div(averageTarget, big.NewInt(int64(len(window))))

    windowLen := bigintpool.Acquire(int64(len(window)))
    defer bigintpool.Release(windowLen)
    averageTarget.Div(averageTarget, windowLen)
}

func (window blockWindow) medianTimestamp() (int64, error) {
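averageTarget now writes into a caller-supplied big.Int and draws its temporaries from bigintpool, so difficulty recalculation stops allocating on every call. The bigintpool package itself is not shown in this diff; a minimal sketch of such a pool on top of sync.Pool, offered only as an assumption about its shape, could look like:

package bigintpool

import (
    "math/big"
    "sync"
)

var pool = sync.Pool{
    New: func() interface{} { return new(big.Int) },
}

// Acquire returns a big.Int from the pool, initialized to x.
func Acquire(x int64) *big.Int {
    result := pool.Get().(*big.Int)
    result.SetInt64(x)
    return result
}

// Release zeroes toRelease and returns it to the pool.
func Release(toRelease *big.Int) {
    toRelease.SetInt64(0)
    pool.Put(toRelease)
}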
@@ -1,18 +1,26 @@
package blockdag

import (
    "fmt"
    "github.com/daglabs/btcd/dagconfig"
    "github.com/daglabs/btcd/util/daghash"
    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/kaspanet/kaspad/util"
    "github.com/pkg/errors"
    "reflect"
    "testing"
    "time"
)

func TestBlueBlockWindow(t *testing.T) {
    params := dagconfig.SimNetParams
    params := dagconfig.SimnetParams
    params.K = 1
    dag := newTestDAG(&params)
    dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", true, Config{
        DAGParams: &params,
    })
    if err != nil {
        t.Fatalf("Failed to setup dag instance: %v", err)
    }
    defer teardownFunc()

    resetExtraNonceForTest()

    windowSize := uint64(10)
    genesisNode := dag.genesis
@@ -21,7 +29,6 @@ func TestBlueBlockWindow(t *testing.T) {
    idByBlockMap := make(map[*blockNode]string)
    blockByIDMap["A"] = genesisNode
    idByBlockMap[genesisNode] = "A"
    blockVersion := int32(0x10000000)

    blocksData := []*struct {
        parents []string
@@ -44,12 +51,12 @@ func TestBlueBlockWindow(t *testing.T) {
            expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents: []string{"C", "D"},
            parents: []string{"D", "C"},
            id:      "E",
            expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
        },
        {
            parents: []string{"C", "D"},
            parents: []string{"D", "C"},
            id:      "F",
            expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
        },
@@ -107,14 +114,29 @@ func TestBlueBlockWindow(t *testing.T) {
            parent := blockByIDMap[parentID]
            parents.add(parent)
        }
        node := newTestNode(parents, blockVersion, 0, blockTime, dag.dagParams.K)
        node.hash = &daghash.Hash{} // It helps to predict hash order
        for i, char := range blockData.id {
            node.hash[i] = byte(char)

        block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
        if err != nil {
            t.Fatalf("block %v got unexpected error from PrepareBlockForTest: %v", blockData.id, err)
        }

        dag.index.AddNode(node)
        node.updateParentsChildren()
        utilBlock := util.NewBlock(block)
        isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
        if err != nil {
            t.Fatalf("dag.ProcessBlock got unexpected error for block %v: %v", blockData.id, err)
        }
        if isDelayed {
            t.Fatalf("block %s "+
                "is too far in the future", blockData.id)
        }
        if isOrphan {
            t.Fatalf("block %v was unexpectedly orphan", blockData.id)
        }

        node, ok := dag.index.LookupNode(utilBlock.Hash())
        if !ok {
            t.Fatalf("block %s does not exist in the DAG", utilBlock.Hash())
        }

        blockByIDMap[blockData.id] = node
        idByBlockMap[node] = blockData.id
@@ -132,7 +154,7 @@ func checkWindowIDs(window []*blockNode, expectedIDs []string, idByBlockMap map[
        ids[i] = idByBlockMap[node]
    }
    if !reflect.DeepEqual(ids, expectedIDs) {
        return fmt.Errorf("window expected to have blocks %s but got %s", expectedIDs, ids)
        return errors.Errorf("window expected to have blocks %s but got %s", expectedIDs, ids)
    }
    return nil
}
@@ -1,257 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
    "fmt"
    "github.com/daglabs/btcd/dagconfig"
    "github.com/daglabs/btcd/txscript"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/daghash"
)

// CheckpointConfirmations is the number of blocks before the end of the current
// best block chain that a good checkpoint candidate must be.
const CheckpointConfirmations = 2016

// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
    hash, _ := daghash.NewHashFromStr(hexStr)
    return hash
}

// newTxIDFromStr converts the passed big-endian hex string into a
// daghash.TxID. It only differs from the one available in daghash in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, IDs.
func newTxIDFromStr(hexStr string) *daghash.TxID {
    txID, _ := daghash.NewTxIDFromStr(hexStr)
    return txID
}

// Checkpoints returns a slice of checkpoints (regardless of whether they are
// already known). When there are no checkpoints for the chain, it will return
// nil.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) Checkpoints() []dagconfig.Checkpoint {
    return dag.checkpoints
}

// HasCheckpoints returns whether this BlockDAG has checkpoints defined.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) HasCheckpoints() bool {
    return len(dag.checkpoints) > 0
}

// LatestCheckpoint returns the most recent checkpoint (regardless of whether it
// is already known). When there are no defined checkpoints for the active chain
// instance, it will return nil.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) LatestCheckpoint() *dagconfig.Checkpoint {
    if !dag.HasCheckpoints() {
        return nil
    }
    return &dag.checkpoints[len(dag.checkpoints)-1]
}

// verifyCheckpoint returns whether the passed block chain height and hash combination
// match the checkpoint data. It also returns true if there is no checkpoint
// data for the passed block chain height.
func (dag *BlockDAG) verifyCheckpoint(chainHeight uint64, hash *daghash.Hash) bool {
    if !dag.HasCheckpoints() {
        return true
    }

    // Nothing to check if there is no checkpoint data for the block chainHeight.
    checkpoint, exists := dag.checkpointsByChainHeight[chainHeight]
    if !exists {
        return true
    }

    if !checkpoint.Hash.IsEqual(hash) {
        return false
    }

    log.Infof("Verified checkpoint at chainHeight %d/block %s", checkpoint.ChainHeight,
        checkpoint.Hash)
    return true
}

// findPreviousCheckpoint finds the most recent checkpoint that is already
// available in the downloaded portion of the block chain and returns the
// associated block node. It returns nil if a checkpoint can't be found (this
// should really only happen for blocks before the first checkpoint).
//
// This function MUST be called with the DAG lock held (for reads).
func (dag *BlockDAG) findPreviousCheckpoint() (*blockNode, error) {
    if !dag.HasCheckpoints() {
        return nil, nil
    }

    // Perform the initial search to find and cache the latest known
    // checkpoint if the best chain is not known yet or we haven't already
    // previously searched.
    checkpoints := dag.checkpoints
    numCheckpoints := len(checkpoints)
    if dag.checkpointNode == nil && dag.nextCheckpoint == nil {
        // Loop backwards through the available checkpoints to find one
        // that is already available.
        for i := numCheckpoints - 1; i >= 0; i-- {
            node := dag.index.LookupNode(checkpoints[i].Hash)
            if node == nil {
                continue
            }

            // Checkpoint found. Cache it for future lookups and
            // set the next expected checkpoint accordingly.
            dag.checkpointNode = node
            if i < numCheckpoints-1 {
                dag.nextCheckpoint = &checkpoints[i+1]
            }
            return dag.checkpointNode, nil
        }

        // No known latest checkpoint. This will only happen on blocks
        // before the first known checkpoint. So, set the next expected
        // checkpoint to the first checkpoint and return the fact there
        // is no latest known checkpoint block.
        dag.nextCheckpoint = &checkpoints[0]
        return nil, nil
    }

    // At this point we've already searched for the latest known checkpoint,
    // so when there is no next checkpoint, the current checkpoint lockin
    // will always be the latest known checkpoint.
    if dag.nextCheckpoint == nil {
        return dag.checkpointNode, nil
    }

    // When there is a next checkpoint and the chain height of the current
    // selected tip of the DAG does not exceed it, the current checkpoint
    // lockin is still the latest known checkpoint.
    if dag.selectedTip().chainHeight < dag.nextCheckpoint.ChainHeight {
        return dag.checkpointNode, nil
    }

    // We've reached or exceeded the next checkpoint height. Note that
    // once a checkpoint lockin has been reached, forks are prevented from
    // any blocks before the checkpoint, so we don't have to worry about the
    // checkpoint going away out from under us due to a chain reorganize.

    // Cache the latest known checkpoint for future lookups. Note that if
    // this lookup fails something is very wrong since the chain has already
    // passed the checkpoint which was verified as accurate before inserting
    // it.
    checkpointNode := dag.index.LookupNode(dag.nextCheckpoint.Hash)
    if checkpointNode == nil {
        return nil, AssertError(fmt.Sprintf("findPreviousCheckpoint "+
            "failed lookup of known good block node %s",
            dag.nextCheckpoint.Hash))
    }
    dag.checkpointNode = checkpointNode

    // Set the next expected checkpoint.
    checkpointIndex := -1
    for i := numCheckpoints - 1; i >= 0; i-- {
        if checkpoints[i].Hash.IsEqual(dag.nextCheckpoint.Hash) {
            checkpointIndex = i
            break
        }
    }
    dag.nextCheckpoint = nil
    if checkpointIndex != -1 && checkpointIndex < numCheckpoints-1 {
        dag.nextCheckpoint = &checkpoints[checkpointIndex+1]
    }

    return dag.checkpointNode, nil
}

// isNonstandardTransaction determines whether a transaction contains any
// scripts which are not one of the standard types.
func isNonstandardTransaction(tx *util.Tx) bool {
    // Check all of the output public key scripts for non-standard scripts.
    for _, txOut := range tx.MsgTx().TxOut {
        scriptClass := txscript.GetScriptClass(txOut.ScriptPubKey)
        if scriptClass == txscript.NonStandardTy {
            return true
        }
    }
    return false
}

// IsCheckpointCandidate returns whether or not the passed block is a good
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
//  - The block must be in the main chain
//  - The block must be at least 'CheckpointConfirmations' blocks prior to the
//    current end of the main chain
//  - The timestamps for the blocks before and after the checkpoint must have
//    timestamps which are also before and after the checkpoint, respectively
//    (due to the median time allowance this is not always the case)
//  - The block must not contain any strange transaction such as those with
//    nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
    dag.dagLock.RLock()
    defer dag.dagLock.RUnlock()

    // A checkpoint must be in the DAG.
    node := dag.index.LookupNode(block.Hash())
    if node == nil {
        return false, nil
    }

    // Ensure the chain height of the passed block and the entry for the block
    // in the DAG match. This should always be the case unless the
    // caller provided an invalid block.
    if node.chainHeight != block.ChainHeight() {
        return false, fmt.Errorf("passed block chain height of %d does not "+
            "match its height in the DAG: %d", block.ChainHeight(),
            node.chainHeight)
    }

    // A checkpoint must be at least CheckpointConfirmations blocks
    // before the end of the main chain.
    dagChainHeight := dag.selectedTip().chainHeight
    if node.chainHeight > (dagChainHeight - CheckpointConfirmations) {
        return false, nil
    }

    // A checkpoint must have at least one block after it.
    //
    // This should always succeed since the check above already made sure it
    // is CheckpointConfirmations back, but be safe in case the constant
    // changes.
    if len(node.children) == 0 {
        return false, nil
    }

    // A checkpoint must have at least one block before it.
    if &node.selectedParent == nil {
        return false, nil
    }

    // A checkpoint must have transactions that only contain standard
    // scripts.
    for _, tx := range block.Transactions() {
        if isNonstandardTransaction(tx) {
            return false, nil
        }
    }

    // All of the checks passed, so the block is a candidate.
    return true, nil
}
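Two details in this removed file are worth flagging for anyone porting it. First, `&node.selectedParent == nil` is always false in Go (the address of a struct field is never nil), so the "at least one block before it" check never fired; the intended test was presumably `node.selectedParent == nil`. Second, `dagChainHeight - CheckpointConfirmations` is uint64 arithmetic and wraps around when the DAG is shorter than CheckpointConfirmations blocks. An underflow-safe form of the depth check, as a sketch:

// deepEnough reports whether a candidate is at least `confirmations` blocks
// before the selected tip, without risking uint64 underflow.
func deepEnough(nodeChainHeight, tipChainHeight, confirmations uint64) bool {
    if tipChainHeight < confirmations {
        return false // the DAG is too short for any candidate yet
    }
    return nodeChainHeight <= tipChainHeight-confirmations
}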
@@ -4,17 +4,16 @@ import (
    "bufio"
    "bytes"
    "encoding/binary"
    "errors"
    "fmt"
    "github.com/daglabs/btcd/util/subnetworkid"
    "io"
    "math"

    "github.com/daglabs/btcd/database"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/daghash"
    "github.com/daglabs/btcd/util/txsort"
    "github.com/daglabs/btcd/wire"
    "github.com/kaspanet/kaspad/dbaccess"
    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/util/coinbasepayload"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/util/subnetworkid"
    "github.com/kaspanet/kaspad/util/txsort"
    "github.com/kaspanet/kaspad/wire"
    "github.com/pkg/errors"
)

// compactFeeData is a specialized data type to store a compact list of fees
@@ -74,55 +73,24 @@ func (cfr *compactFeeIterator) next() (uint64, error) {
}

// The following functions relate to storing and retrieving fee data from the database
var feeBucket = []byte("fees")

// getBluesFeeData returns the compactFeeData for all of the node's blues,
// used to calculate the fees this blockNode needs to pay
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
func (dag *BlockDAG) getBluesFeeData(node *blockNode) (map[daghash.Hash]compactFeeData, error) {
    bluesFeeData := make(map[daghash.Hash]compactFeeData)

    err := dag.db.View(func(dbTx database.Tx) error {
        for _, blueBlock := range node.blues {
            feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
            if err != nil {
                return fmt.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
            }

            bluesFeeData[*blueBlock.hash] = feeData
    for _, blueBlock := range node.blues {
        feeData, err := dbaccess.FetchFeeData(dag.databaseContext, blueBlock.hash)
        if err != nil {
            return nil, err
        }

        return nil
    })
    if err != nil {
        return nil, err
        bluesFeeData[*blueBlock.hash] = feeData
    }

    return bluesFeeData, nil
}

func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
    feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
    if err != nil {
        return fmt.Errorf("Error creating or retrieving fee bucket: %s", err)
    }

    return feeBucket.Put(blockHash.CloneBytes(), feeData)
}

func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
    feeBucket := dbTx.Metadata().Bucket(feeBucket)
    if feeBucket == nil {
        return nil, errors.New("Fee bucket does not exist")
    }

    feeData := feeBucket.Get(blockHash.CloneBytes())
    if feeData == nil {
        return nil, fmt.Errorf("No fee data found for block %s", blockHash)
    }

    return feeData, nil
}

// The following functions deal with building and validating the coinbase transaction

func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
@@ -130,7 +98,10 @@ func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Bl
    return nil
}
blockCoinbaseTx := block.CoinbaseTransaction().MsgTx()
scriptPubKey, extraData, err := DeserializeCoinbasePayload(blockCoinbaseTx)
_, scriptPubKey, extraData, err := coinbasepayload.DeserializeCoinbasePayload(blockCoinbaseTx)
if errors.Is(err, coinbasepayload.ErrIncorrectScriptPubKeyLen) {
    return ruleError(ErrBadCoinbaseTransaction, err.Error())
}
if err != nil {
    return err
}
@@ -148,7 +119,7 @@ func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Bl

// expectedCoinbaseTransaction returns the coinbase transaction for the current block
func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData, scriptPubKey []byte, extraData []byte) (*util.Tx, error) {
    bluesFeeData, err := node.getBluesFeeData(dag)
    bluesFeeData, err := dag.getBluesFeeData(node)
    if err != nil {
        return nil, err
    }
@@ -157,16 +128,15 @@ func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceD
    txOuts := []*wire.TxOut{}

    for _, blue := range node.blues {
        txIn, txOut, err := coinbaseInputAndOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
        txOut, err := coinbaseOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
        if err != nil {
            return nil, err
        }
        txIns = append(txIns, txIn)
        if txOut != nil {
            txOuts = append(txOuts, txOut)
        }
    }
    payload, err := SerializeCoinbasePayload(scriptPubKey, extraData)
    payload, err := coinbasepayload.SerializeCoinbasePayload(node.blueScore, scriptPubKey, extraData)
    if err != nil {
        return nil, err
    }
@@ -175,99 +145,49 @@
    return util.NewTx(sortedCoinbaseTx), nil
}

// SerializeCoinbasePayload builds the coinbase payload based on the provided scriptPubKey and extra data.
func SerializeCoinbasePayload(scriptPubKey []byte, extraData []byte) ([]byte, error) {
    w := &bytes.Buffer{}
    err := wire.WriteVarInt(w, uint64(len(scriptPubKey)))
    if err != nil {
        return nil, err
    }
    _, err = w.Write(scriptPubKey)
    if err != nil {
        return nil, err
    }
    _, err = w.Write(extraData)
    if err != nil {
        return nil, err
    }
    return w.Bytes(), nil
}
// coinbaseOutputForBlueBlock calculates the output that should go into the coinbase transaction of blueBlock
// If blueBlock gets no fee - returns nil for txOut
func coinbaseOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
    txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (*wire.TxOut, error) {

// DeserializeCoinbasePayload deserializes the coinbase payload into its components (scriptPubKey and extra data).
func DeserializeCoinbasePayload(tx *wire.MsgTx) (scriptPubKey []byte, extraData []byte, err error) {
    r := bytes.NewReader(tx.Payload)
    scriptPubKeyLen, err := wire.ReadVarInt(r)
    if err != nil {
        return nil, nil, err
    }
    scriptPubKey = make([]byte, scriptPubKeyLen)
    _, err = r.Read(scriptPubKey)
    if err != nil {
        return nil, nil, err
    }
    extraData = make([]byte, r.Len())
    if r.Len() != 0 {
        _, err = r.Read(extraData)
        if err != nil {
            return nil, nil, err
        }
    }
    return scriptPubKey, extraData, nil
}

// coinbaseInputAndOutputForBlueBlock calculates the input and output that should go into the coinbase transaction of blueBlock
// If blueBlock gets no fee - returns only txIn and nil for txOut
func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
    txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
    *wire.TxIn, *wire.TxOut, error) {

    blockTxsAcceptanceData, ok := txsAcceptanceData[*blueBlock.hash]
    blockTxsAcceptanceData, ok := txsAcceptanceData.FindAcceptanceData(blueBlock.hash)
    if !ok {
        return nil, nil, fmt.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
        return nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
    }
    blockFeeData, ok := feeData[*blueBlock.hash]
    if !ok {
        return nil, nil, fmt.Errorf("No feeData for block %s", blueBlock.hash)
        return nil, errors.Errorf("No feeData for block %s", blueBlock.hash)
    }

    if len(blockTxsAcceptanceData) != blockFeeData.Len() {
        return nil, nil, fmt.Errorf(
    if len(blockTxsAcceptanceData.TxAcceptanceData) != blockFeeData.Len() {
        return nil, errors.Errorf(
            "length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
            len(blockTxsAcceptanceData), blockFeeData.Len(), blueBlock.hash)
    }

    txIn := &wire.TxIn{
        SignatureScript: []byte{},
        PreviousOutpoint: wire.Outpoint{
            TxID:  daghash.TxID(*blueBlock.hash),
            Index: math.MaxUint32,
        },
        Sequence: wire.MaxTxInSequenceNum,
            len(blockTxsAcceptanceData.TxAcceptanceData), blockFeeData.Len(), blueBlock.hash)
    }

    totalFees := uint64(0)
    feeIterator := blockFeeData.iterator()

    for _, txAcceptanceData := range blockTxsAcceptanceData {
    for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
        fee, err := feeIterator.next()
        if err != nil {
            return nil, nil, fmt.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
            return nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
        }
        if txAcceptanceData.IsAccepted {
            totalFees += fee
        }
    }

    totalReward := CalcBlockSubsidy(blueBlock.height, dag.dagParams) + totalFees
    totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.Params) + totalFees

    if totalReward == 0 {
        return txIn, nil, nil
        return nil, nil
    }

    // the ScriptPubKey for the coinbase is parsed from the coinbase payload
    scriptPubKey, _, err := DeserializeCoinbasePayload(blockTxsAcceptanceData[0].Tx.MsgTx())
    _, scriptPubKey, _, err := coinbasepayload.DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx())
    if err != nil {
        return nil, nil, err
        return nil, err
    }

    txOut := &wire.TxOut{
@@ -275,5 +195,5 @@ func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
        ScriptPubKey: scriptPubKey,
    }

    return txIn, txOut, nil
    return txOut, nil
}
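The invariant the loop above depends on is that each blue block's compactFeeData holds exactly one fee per transaction in its acceptance data, consumed in order; the length check guards it and the iterator pairs them up. A standalone sketch of that pairing, with illustrative names rather than kaspad types:

package main

import (
    "errors"
    "fmt"
)

// totalAcceptedFees pairs per-transaction acceptance flags with a parallel
// fee list and sums only the fees of accepted transactions.
func totalAcceptedFees(accepted []bool, fees []uint64) (uint64, error) {
    if len(accepted) != len(fees) {
        return 0, errors.New("acceptance data and fee data lengths differ")
    }
    var total uint64
    for i, isAccepted := range accepted {
        if isAccepted {
            total += fees[i]
        }
    }
    return total, nil
}

func main() {
    total, err := totalAcceptedFees([]bool{true, false, true}, []uint64{500, 200, 300})
    fmt.Println(total, err) // 800 <nil>
}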
@@ -7,33 +7,22 @@ package blockdag
import (
    "compress/bzip2"
    "encoding/binary"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "reflect"
    "strings"
    "testing"
    "time"

    "github.com/daglabs/btcd/dagconfig"
    _ "github.com/daglabs/btcd/database/ffldb"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/daghash"
    "github.com/daglabs/btcd/wire"
    "github.com/kaspanet/kaspad/util/mstime"

    "github.com/pkg/errors"

    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/wire"
)

func loadBlocksWithLog(t *testing.T, filename string) ([]*util.Block, error) {
    blocks, err := LoadBlocks(filename)
    if err == nil {
        t.Logf("Loaded %d blocks from file %s", len(blocks), filename)
        for i, b := range blocks {
            t.Logf("Block #%d: %s", i, b.Hash())
        }
    }
    return blocks, err
}

// loadUTXOSet returns a utxo view loaded from a file.
func loadUTXOSet(filename string) (UTXOSet, error) {
    // The utxostore file format is:
@@ -84,15 +73,8 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
    return nil, err
}

// Serialized utxo entry.
serialized := make([]byte, numBytes)
_, err = io.ReadAtLeast(r, serialized, int(numBytes))
if err != nil {
    return nil, err
}

// Deserialize it and add it to the view.
entry, err := deserializeUTXOEntry(serialized)
// Deserialize the UTXO entry and add it to the UTXO set.
entry, err := deserializeUTXOEntry(r)
if err != nil {
    return nil, err
}
@@ -105,39 +87,38 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
// TestSetCoinbaseMaturity makes the ability to set the coinbase maturity
// available when running tests.
func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
    dag.dagParams.BlockCoinbaseMaturity = maturity
    dag.Params.BlockCoinbaseMaturity = maturity
}

// newTestDAG returns a DAG that is usable for synthetic tests. It is
// important to note that this chain has no database associated with it, so
// newTestDAG returns a DAG that is usable for synthetic tests. It is
// important to note that this DAG has no database associated with it, so
// it is not usable with all functions and the tests must take care when making
// use of it.
func newTestDAG(params *dagconfig.Params) *BlockDAG {
    // Create a genesis block node and block index populated with it
    // for use when creating the fake chain below.
    node := newBlockNode(&params.GenesisBlock.Header, newSet(), params.K)
    index := newBlockIndex(nil, params)
    index.AddNode(node)

    targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
    return &BlockDAG{
        dagParams:          params,
        timeSource:         NewMedianTime(),
        targetTimePerBlock: targetTimePerBlock,
    index := newBlockIndex(params)
    dag := &BlockDAG{
        Params:                         params,
        timeSource:                     NewTimeSource(),
        difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
        TimestampDeviationTolerance:    params.TimestampDeviationTolerance,
        powMaxBits:                     util.BigToCompact(params.PowMax),
        index:                          index,
        virtual:                        newVirtualBlock(setFromSlice(node), params.K),
        genesis:                        index.LookupNode(params.GenesisHash),
        warningCaches:                  newThresholdCaches(vbNumBits),
        deploymentCaches:               newThresholdCaches(dagconfig.DefinedDeployments),
    }

    // Create a genesis block node and block index populated with it
    // on the above fake DAG.
    dag.genesis, _ = dag.newBlockNode(&params.GenesisBlock.Header, newBlockSet())
    index.AddNode(dag.genesis)

    dag.virtual = newVirtualBlock(dag, blockSetFromSlice(dag.genesis))
    return dag
}

// newTestNode creates a block node connected to the passed parent with the
// provided fields populated and fake values for the other fields.
func newTestNode(parents blockSet, blockVersion int32, bits uint32, timestamp time.Time, phantomK uint32) *blockNode {
func newTestNode(dag *BlockDAG, parents blockSet, blockVersion int32, bits uint32, timestamp mstime.Time) *blockNode {
    // Make up a header and create a block node from it.
    header := &wire.BlockHeader{
        Version: blockVersion,
@@ -148,64 +129,69 @@ func newTestNode(parents blockSet, blockVersion int32, bits uint32, timestamp ti
        AcceptedIDMerkleRoot: &daghash.ZeroHash,
        UTXOCommitment:       &daghash.ZeroHash,
    }
    return newBlockNode(header, parents, phantomK)
    node, _ := dag.newBlockNode(header, parents)
    return node
}

func addNodeAsChildToParents(node *blockNode) {
    for _, parent := range node.parents {
    for parent := range node.parents {
        parent.children.add(node)
    }
}

func buildNodeGenerator(phantomK uint32, withChildren bool) func(parents blockSet) *blockNode {
    // For the purposes of these tests, we'll create blockNodes whose hashes are a
    // series of numbers from 1 to 255.
    hashCounter := byte(1)
    buildNode := func(parents blockSet) *blockNode {
        block := newBlockNode(nil, parents, phantomK)
        block.hash = &daghash.Hash{hashCounter}
        hashCounter++

        return block
    }
    if withChildren {
        return func(parents blockSet) *blockNode {
            node := buildNode(parents)
            addNodeAsChildToParents(node)
            return node
        }
    }
    return buildNode
}

// checkRuleError ensures the type of the two passed errors are of the
// same type (either both nil or both of type RuleError) and their error codes
// match when not nil.
func checkRuleError(gotErr, wantErr error) error {
    // Ensure the error code is of the expected type and the error
    // code matches the value specified in the test instance.
    if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) {
        return fmt.Errorf("wrong error - got %T (%[1]v), want %T",
            gotErr, wantErr)
    }
    if gotErr == nil {
    if wantErr == nil && gotErr == nil {
        return nil
    }

    // Ensure the want error type is a script error.
    werr, ok := wantErr.(RuleError)
    if !ok {
        return fmt.Errorf("unexpected test error type %T", wantErr)
    var gotRuleErr RuleError
    if ok := errors.As(gotErr, &gotRuleErr); !ok {
        return errors.Errorf("gotErr expected to be RuleError, but got %+v instead", gotErr)
    }

    // Ensure the error codes match. It's safe to use a raw type assert
    var wantRuleErr RuleError
    if ok := errors.As(wantErr, &wantRuleErr); !ok {
        return errors.Errorf("wantErr expected to be RuleError, but got %+v instead", wantErr)
    }

    // Ensure the error codes match. It's safe to use a raw type assert
    // here since the code above already proved they are the same type and
    // the want error is a script error.
    gotErrorCode := gotErr.(RuleError).ErrorCode
    if gotErrorCode != werr.ErrorCode {
        return fmt.Errorf("mismatched error code - got %v (%v), want %v",
            gotErrorCode, gotErr, werr.ErrorCode)
    if gotRuleErr.ErrorCode != wantRuleErr.ErrorCode {
        return errors.Errorf("mismatched error code - got %v (%v), want %v",
            gotRuleErr.ErrorCode, gotErr, wantRuleErr.ErrorCode)
    }

    return nil
}

func prepareAndProcessBlockByParentMsgBlocks(t *testing.T, dag *BlockDAG, parents ...*wire.MsgBlock) *wire.MsgBlock {
    parentHashes := make([]*daghash.Hash, len(parents))
    for i, parent := range parents {
        parentHashes[i] = parent.BlockHash()
    }
    return PrepareAndProcessBlockForTest(t, dag, parentHashes, nil)
}

func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNode {
    node, ok := dag.index.LookupNode(block.BlockHash())
    if !ok {
        t.Fatalf("couldn't find block node with hash %s", block.BlockHash())
    }
    return node
}

type fakeTimeSource struct {
    time mstime.Time
}

func (fts *fakeTimeSource) Now() mstime.Time {
    return fts.time
}

func newFakeTimeSource(fakeTime mstime.Time) TimeSource {
    return &fakeTimeSource{time: fakeTime}
}
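fakeTimeSource pins the TimeSource interface to a fixed instant, which makes timestamp-sensitive validation deterministic in tests. A usage sketch, assuming the timeSource field is assignable from inside the package as the fields in this diff suggest:

// Sketch: freezing the DAG's clock in a test.
func withFrozenClock(dag *BlockDAG, fakeTime mstime.Time) {
    dag.timeSource = newFakeTimeSource(fakeTime)
    // From here on, dag.timeSource.Now() always returns fakeTime.
}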
@@ -1,586 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"github.com/daglabs/btcd/btcec"
	"github.com/daglabs/btcd/txscript"
)

// -----------------------------------------------------------------------------
// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
// of binary octets to represent an arbitrarily large integer. The scheme
// employs a most significant byte (MSB) base-128 encoding where the high bit in
// each byte indicates whether or not the byte is the final one. In addition,
// to ensure there are no redundant encodings, an offset is subtracted every
// time a group of 7 bits is shifted out. Therefore each integer can be
// represented in exactly one way, and each representation stands for exactly
// one integer.
//
// Another nice property of this encoding is that it provides a compact
// representation of values that are typically used to indicate sizes. For
// example, the values 0 - 127 are represented with a single byte, 128 - 16511
// with two bytes, and 16512 - 2113663 with three bytes.
//
// While the encoding allows arbitrarily large integers, it is artificially
// limited in this code to an unsigned 64-bit integer for efficiency purposes.
//
// Example encodings:
//           0 -> [0x00]
//         127 -> [0x7f]                 * Max 1-byte value
//         128 -> [0x80 0x00]
//         129 -> [0x80 0x01]
//         255 -> [0x80 0x7f]
//         256 -> [0x81 0x00]
//       16511 -> [0xff 0x7f]            * Max 2-byte value
//       16512 -> [0x80 0x80 0x00]
//       32895 -> [0x80 0xff 0x7f]
//     2113663 -> [0xff 0xff 0x7f]       * Max 3-byte value
//   270549119 -> [0xff 0xff 0xff 0x7f]  * Max 4-byte value
//      2^64-1 -> [0x80 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0x7f]
//
// References:
//   https://en.wikipedia.org/wiki/Variable-length_quantity
//   http://www.codecodex.com/wiki/Variable-Length_Integers
// -----------------------------------------------------------------------------

// serializeSizeVLQ returns the number of bytes it would take to serialize the
// passed number as a variable-length quantity according to the format described
// above.
func serializeSizeVLQ(n uint64) int {
	size := 1
	for ; n > 0x7f; n = (n >> 7) - 1 {
		size++
	}

	return size
}

// putVLQ serializes the provided number to a variable-length quantity according
// to the format described above and returns the number of bytes of the encoded
// value. The result is placed directly into the passed byte slice which must
// be at least large enough to handle the number of bytes returned by the
// serializeSizeVLQ function or it will panic.
func putVLQ(target []byte, n uint64) int {
	offset := 0
	for ; ; offset++ {
		// The high bit is set when another byte follows.
		highBitMask := byte(0x80)
		if offset == 0 {
			highBitMask = 0x00
		}

		target[offset] = byte(n&0x7f) | highBitMask
		if n <= 0x7f {
			break
		}
		n = (n >> 7) - 1
	}

	// Reverse the bytes so it is MSB-encoded.
	for i, j := 0, offset; i < j; i, j = i+1, j-1 {
		target[i], target[j] = target[j], target[i]
	}

	return offset + 1
}

// deserializeVLQ deserializes the provided variable-length quantity according
// to the format described above. It also returns the number of bytes
// deserialized.
func deserializeVLQ(serialized []byte) (uint64, int) {
	var n uint64
	var size int
	for _, val := range serialized {
		size++
		n = (n << 7) | uint64(val&0x7f)
		if val&0x80 != 0x80 {
			break
		}
		n++
	}

	return n, size
}
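To make the offset rule concrete, here is a short editor-added sketch (not part of the original file) that round-trips the value 128 through the helpers defined above; the expected bytes come straight from the encoding table:

	buf := make([]byte, serializeSizeVLQ(128)) // serializeSizeVLQ(128) == 2
	putVLQ(buf, 128)                           // buf == [0x80 0x00], not [0x81 0x00]:
	                                           // the offset removes the redundant encoding
	val, bytesRead := deserializeVLQ(buf)      // val == 128, bytesRead == 2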
// -----------------------------------------------------------------------------
// In order to reduce the size of stored scripts, a domain specific compression
// algorithm is used which recognizes standard scripts and stores them using
// fewer bytes than the original script. The compression algorithm used here was
// obtained from Bitcoin Core, so all credits for the algorithm go to it.
//
// The general serialized format is:
//
//   <script size or type><script data>
//
//   Field                 Type     Size
//   script size or type   VLQ      variable
//   script data           []byte   variable
//
// The specific serialized format for each recognized standard script is:
//
// - Pay-to-pubkey-hash: (21 bytes) - <0><20-byte pubkey hash>
// - Pay-to-script-hash: (21 bytes) - <1><20-byte script hash>
// - Pay-to-pubkey**:    (33 bytes) - <2, 3, 4, or 5><32-byte pubkey X value>
//   2, 3 = compressed pubkey with bit 0 specifying the y coordinate to use
//   4, 5 = uncompressed pubkey with bit 0 specifying the y coordinate to use
//   ** Only valid public keys starting with 0x02, 0x03, and 0x04 are supported.
//
// Any scripts which are not recognized as one of the aforementioned standard
// scripts are encoded using the general serialized format and encode the script
// size as the sum of the actual size of the script and the number of special
// cases.
// -----------------------------------------------------------------------------

// The following constants specify the special constants used to identify a
// special script type in the domain-specific compressed script encoding.
//
// NOTE: This section specifically does not use iota since these values are
// serialized and must be stable for long-term storage.
const (
	// cstPayToPubKeyHash identifies a compressed pay-to-pubkey-hash script.
	cstPayToPubKeyHash = 0

	// cstPayToScriptHash identifies a compressed pay-to-script-hash script.
	cstPayToScriptHash = 1

	// cstPayToPubKeyComp2 identifies a compressed pay-to-pubkey script to
	// a compressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyComp2 = 2

	// cstPayToPubKeyComp3 identifies a compressed pay-to-pubkey script to
	// a compressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyComp3 = 3

	// cstPayToPubKeyUncomp4 identifies a compressed pay-to-pubkey script to
	// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyUncomp4 = 4

	// cstPayToPubKeyUncomp5 identifies a compressed pay-to-pubkey script to
	// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
	// to reconstruct the full uncompressed pubkey.
	cstPayToPubKeyUncomp5 = 5

	// numSpecialScripts is the number of special scripts recognized by the
	// domain-specific script compression algorithm.
	numSpecialScripts = 6
)

// isPubKeyHash returns whether or not the passed public key script is a
// standard pay-to-pubkey-hash script along with the pubkey hash it is paying to
// if it is.
func isPubKeyHash(script []byte) (bool, []byte) {
	if len(script) == 25 && script[0] == txscript.OpDup &&
		script[1] == txscript.OpHash160 &&
		script[2] == txscript.OpData20 &&
		script[23] == txscript.OpEqualVerify &&
		script[24] == txscript.OpCheckSig {

		return true, script[3:23]
	}

	return false, nil
}

// isScriptHash returns whether or not the passed public key script is a
// standard pay-to-script-hash script along with the script hash it is paying to
// if it is.
func isScriptHash(script []byte) (bool, []byte) {
	if len(script) == 23 && script[0] == txscript.OpHash160 &&
		script[1] == txscript.OpData20 &&
		script[22] == txscript.OpEqual {

		return true, script[2:22]
	}

	return false, nil
}

// isPubKey returns whether or not the passed public key script is a standard
// pay-to-pubkey script that pays to a valid compressed or uncompressed public
// key along with the serialized pubkey it is paying to if it is.
//
// NOTE: This function ensures the public key is actually valid since the
// compression algorithm requires valid pubkeys. It does not support hybrid
// pubkeys. This means that even if the script has the correct form for a
// pay-to-pubkey script, this function will only return true when it is paying
// to a valid compressed or uncompressed pubkey.
func isPubKey(script []byte) (bool, []byte) {
	// Pay-to-compressed-pubkey script.
	if len(script) == 35 && script[0] == txscript.OpData33 &&
		script[34] == txscript.OpCheckSig && (script[1] == 0x02 ||
		script[1] == 0x03) {

		// Ensure the public key is valid.
		serializedPubKey := script[1:34]
		_, err := btcec.ParsePubKey(serializedPubKey, btcec.S256())
		if err == nil {
			return true, serializedPubKey
		}
	}

	// Pay-to-uncompressed-pubkey script.
	if len(script) == 67 && script[0] == txscript.OpData65 &&
		script[66] == txscript.OpCheckSig && script[1] == 0x04 {

		// Ensure the public key is valid.
		serializedPubKey := script[1:66]
		_, err := btcec.ParsePubKey(serializedPubKey, btcec.S256())
		if err == nil {
			return true, serializedPubKey
		}
	}

	return false, nil
}
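To make the 21-byte pay-to-pubkey-hash case concrete, this editor-added sketch (not part of the original file) compresses one of the standard scripts used by the test vectors later in this change; it relies on compressedScriptSize and putCompressedScript, defined just below:

	// A standard P2PKH script: OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG.
	script := hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac")
	compressed := make([]byte, compressedScriptSize(script)) // 21 bytes
	putCompressedScript(compressed, script)
	// compressed == 00 1018853670f9f3b0582c5b9ee8ce93764ac32b93
	// (the cstPayToPubKeyHash marker followed by the 20-byte pubkey hash)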
// compressedScriptSize returns the number of bytes the passed script would take
// when encoded with the domain specific compression algorithm described above.
func compressedScriptSize(scriptPubKey []byte) int {
	// Pay-to-pubkey-hash script.
	if valid, _ := isPubKeyHash(scriptPubKey); valid {
		return 21
	}

	// Pay-to-script-hash script.
	if valid, _ := isScriptHash(scriptPubKey); valid {
		return 21
	}

	// Pay-to-pubkey (compressed or uncompressed) script.
	if valid, _ := isPubKey(scriptPubKey); valid {
		return 33
	}

	// When none of the above special cases apply, encode the script as is
	// preceded by the sum of its size and the number of special cases
	// encoded as a variable length quantity.
	return serializeSizeVLQ(uint64(len(scriptPubKey)+numSpecialScripts)) +
		len(scriptPubKey)
}

// decodeCompressedScriptSize treats the passed serialized bytes as a compressed
// script, possibly followed by other data, and returns the number of bytes it
// occupies taking into account the special encoding of the script size by the
// domain specific compression algorithm described above.
func decodeCompressedScriptSize(serialized []byte) int {
	scriptSize, bytesRead := deserializeVLQ(serialized)
	if bytesRead == 0 {
		return 0
	}

	switch scriptSize {
	case cstPayToPubKeyHash:
		return 21

	case cstPayToScriptHash:
		return 21

	case cstPayToPubKeyComp2, cstPayToPubKeyComp3, cstPayToPubKeyUncomp4,
		cstPayToPubKeyUncomp5:
		return 33
	}

	scriptSize -= numSpecialScripts
	scriptSize += uint64(bytesRead)
	return int(scriptSize)
}

// putCompressedScript compresses the passed script according to the domain
// specific compression algorithm described above directly into the passed
// target byte slice. The target byte slice must be at least large enough to
// handle the number of bytes returned by the compressedScriptSize function or
// it will panic.
func putCompressedScript(target, scriptPubKey []byte) int {
	// Pay-to-pubkey-hash script.
	if valid, hash := isPubKeyHash(scriptPubKey); valid {
		target[0] = cstPayToPubKeyHash
		copy(target[1:21], hash)
		return 21
	}

	// Pay-to-script-hash script.
	if valid, hash := isScriptHash(scriptPubKey); valid {
		target[0] = cstPayToScriptHash
		copy(target[1:21], hash)
		return 21
	}

	// Pay-to-pubkey (compressed or uncompressed) script.
	if valid, serializedPubKey := isPubKey(scriptPubKey); valid {
		pubKeyFormat := serializedPubKey[0]
		switch pubKeyFormat {
		case 0x02, 0x03:
			target[0] = pubKeyFormat
			copy(target[1:33], serializedPubKey[1:33])
			return 33
		case 0x04:
			// Encode the oddness of the serialized pubkey into the
			// compressed script type.
			target[0] = pubKeyFormat | (serializedPubKey[64] & 0x01)
			copy(target[1:33], serializedPubKey[1:33])
			return 33
		}
	}

	// When none of the above special cases apply, encode the unmodified
	// script preceded by the sum of its size and the number of special
	// cases encoded as a variable length quantity.
	encodedSize := uint64(len(scriptPubKey) + numSpecialScripts)
	vlqSizeLen := putVLQ(target, encodedSize)
	copy(target[vlqSizeLen:], scriptPubKey)
	return vlqSizeLen + len(scriptPubKey)
}

// decompressScript returns the original script obtained by decompressing the
// passed compressed script according to the domain specific compression
// algorithm described above.
//
// NOTE: The script parameter must already have been proven to be long enough
// to contain the number of bytes returned by decodeCompressedScriptSize or it
// will panic. This is acceptable since it is only an internal function.
func decompressScript(compressedScriptPubKey []byte) []byte {
	// In practice this function will not be called with a zero-length or
	// nil script since the nil script encoding includes the length, however
	// the code below assumes the length exists, so just return nil now if
	// the function ever ends up being called with a nil script in the
	// future.
	if len(compressedScriptPubKey) == 0 {
		return nil
	}

	// Decode the script size and examine it for the special cases.
	encodedScriptSize, bytesRead := deserializeVLQ(compressedScriptPubKey)
	switch encodedScriptSize {
	// Pay-to-pubkey-hash script. The resulting script is:
	// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
	case cstPayToPubKeyHash:
		scriptPubKey := make([]byte, 25)
		scriptPubKey[0] = txscript.OpDup
		scriptPubKey[1] = txscript.OpHash160
		scriptPubKey[2] = txscript.OpData20
		copy(scriptPubKey[3:], compressedScriptPubKey[bytesRead:bytesRead+20])
		scriptPubKey[23] = txscript.OpEqualVerify
		scriptPubKey[24] = txscript.OpCheckSig
		return scriptPubKey

	// Pay-to-script-hash script. The resulting script is:
	// <OP_HASH160><20 byte script hash><OP_EQUAL>
	case cstPayToScriptHash:
		scriptPubKey := make([]byte, 23)
		scriptPubKey[0] = txscript.OpHash160
		scriptPubKey[1] = txscript.OpData20
		copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+20])
		scriptPubKey[22] = txscript.OpEqual
		return scriptPubKey

	// Pay-to-compressed-pubkey script. The resulting script is:
	// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
	case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
		scriptPubKey := make([]byte, 35)
		scriptPubKey[0] = txscript.OpData33
		scriptPubKey[1] = byte(encodedScriptSize)
		copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+32])
		scriptPubKey[34] = txscript.OpCheckSig
		return scriptPubKey

	// Pay-to-uncompressed-pubkey script. The resulting script is:
	// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
	case cstPayToPubKeyUncomp4, cstPayToPubKeyUncomp5:
		// Change the leading byte to the appropriate compressed pubkey
		// identifier (0x02 or 0x03) so it can be decoded as a
		// compressed pubkey. This really should never fail since the
		// encoding ensures it is valid before compressing to this type.
		compressedKey := make([]byte, 33)
		compressedKey[0] = byte(encodedScriptSize - 2)
		copy(compressedKey[1:], compressedScriptPubKey[1:])
		key, err := btcec.ParsePubKey(compressedKey, btcec.S256())
		if err != nil {
			return nil
		}

		scriptPubKey := make([]byte, 67)
		scriptPubKey[0] = txscript.OpData65
		copy(scriptPubKey[1:], key.SerializeUncompressed())
		scriptPubKey[66] = txscript.OpCheckSig
		return scriptPubKey
	}

	// When none of the special cases apply, the script was encoded using
	// the general format, so reduce the script size by the number of
	// special cases and return the unmodified script.
	scriptSize := int(encodedScriptSize - numSpecialScripts)
	scriptPubKey := make([]byte, scriptSize)
	copy(scriptPubKey, compressedScriptPubKey[bytesRead:bytesRead+scriptSize])
	return scriptPubKey
}
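The general (non-special) size encoding can be checked by hand. As an editor-added worked example, using the 200-byte data-push test vector from the tests later in this change (assumes the standard library's bytes package):

	// A 202-byte script (OP_PUSHDATA1 200 plus 200 zero bytes) is not a special
	// case, so its encoded size is 202 + numSpecialScripts = 208, whose VLQ
	// encoding is [0x80 0x50], two bytes.
	script := append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...)
	size := compressedScriptSize(script) // 2 (VLQ for 208) + 202 == 204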
// -----------------------------------------------------------------------------
// In order to reduce the size of stored amounts, a domain specific compression
// algorithm is used which relies on there typically being a lot of zeroes at
// the end of the amounts. The compression algorithm used here was obtained from
// Bitcoin Core, so all credits for the algorithm go to it.
//
// While this is simply exchanging one uint64 for another, the resulting value
// for typical amounts has a much smaller magnitude which results in fewer bytes
// when encoded as variable length quantity. For example, consider the amount
// of 0.1 BTC which is 10000000 satoshi. Encoding 10000000 as a VLQ would take
// 4 bytes while encoding the compressed value of 8 as a VLQ only takes 1 byte.
//
// Essentially the compression is achieved by splitting the value into an
// exponent in the range [0-9] and a digit in the range [1-9], when possible,
// and encoding them in a way that can be decoded. More specifically, the
// encoding is as follows:
// - 0 is 0
// - Find the exponent, e, as the largest power of 10 that evenly divides the
//   value up to a maximum of 9
// - When e < 9, the final digit can't be 0 so store it as d and remove it by
//   dividing the value by 10 (call the result n). The encoded value is thus:
//   1 + 10*(9*n + d-1) + e
// - When e==9, the only thing known is the amount is not 0. The encoded value
//   is thus:
//   1 + 10*(n-1) + e == 10 + 10*(n-1)
//
// Example encodings:
// (The numbers in parenthesis are the number of bytes when serialized as a VLQ)
//            0 (1) -> 0         (1)  *  0.00000000 BTC
//         1000 (2) -> 4         (1)  *  0.00001000 BTC
//        10000 (2) -> 5         (1)  *  0.00010000 BTC
//     12345678 (4) -> 111111101 (4)  *  0.12345678 BTC
//     50000000 (4) -> 48        (1)  *  0.50000000 BTC
//    100000000 (4) -> 9         (1)  *  1.00000000 BTC
//    500000000 (5) -> 49        (1)  *  5.00000000 BTC
//   1000000000 (5) -> 10        (1)  * 10.00000000 BTC
// -----------------------------------------------------------------------------

// compressTxOutAmount compresses the passed amount according to the domain
// specific compression algorithm described above.
func compressTxOutAmount(amount uint64) uint64 {
	// No need to do any work if it's zero.
	if amount == 0 {
		return 0
	}

	// Find the largest power of 10 (max of 9) that evenly divides the
	// value.
	exponent := uint64(0)
	for amount%10 == 0 && exponent < 9 {
		amount /= 10
		exponent++
	}

	// The compressed result for exponents less than 9 is:
	// 1 + 10*(9*n + d-1) + e
	if exponent < 9 {
		lastDigit := amount % 10
		amount /= 10
		return 1 + 10*(9*amount+lastDigit-1) + exponent
	}

	// The compressed result for an exponent of 9 is:
	// 1 + 10*(n-1) + e == 10 + 10*(n-1)
	return 10 + 10*(amount-1)
}

// decompressTxOutAmount returns the original amount the passed compressed
// amount represents according to the domain specific compression algorithm
// described above.
func decompressTxOutAmount(amount uint64) uint64 {
	// No need to do any work if it's zero.
	if amount == 0 {
		return 0
	}

	// The decompressed amount is either of the following two equations:
	// x = 1 + 10*(9*n + d - 1) + e
	// x = 1 + 10*(n - 1) + 9
	amount--

	// The decompressed amount is now one of the following two equations:
	// x = 10*(9*n + d - 1) + e
	// x = 10*(n - 1) + 9
	exponent := amount % 10
	amount /= 10

	// The decompressed amount is now one of the following two equations:
	// x = 9*n + d - 1  | where e < 9
	// x = n - 1        | where e = 9
	n := uint64(0)
	if exponent < 9 {
		lastDigit := amount%9 + 1
		amount /= 9
		n = amount*10 + lastDigit
	} else {
		n = amount + 1
	}

	// Apply the exponent.
	for ; exponent > 0; exponent-- {
		n *= 10
	}

	return n
}
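As an editor-added arithmetic check (the values match the table above and the amount-compression tests later in this change): 546 satoshi has no trailing zeros, so e = 0, d = 6 and n = 54, giving 1 + 10*(9*54 + 6-1) + 0 = 4911; decompression reverses this exactly.

	compressed := compressTxOutAmount(546)  // 1 + 10*(9*54 + 5) + 0 == 4911
	original := decompressTxOutAmount(4911) // 546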
// -----------------------------------------------------------------------------
// Compressed transaction outputs consist of an amount and a public key script
// both compressed using the domain specific compression algorithms previously
// described.
//
// The serialized format is:
//
//   <compressed amount><compressed script>
//
//   Field               Type     Size
//   compressed amount   VLQ      variable
//   compressed script   []byte   variable
// -----------------------------------------------------------------------------

// compressedTxOutSize returns the number of bytes the passed transaction output
// fields would take when encoded with the format described above.
func compressedTxOutSize(amount uint64, scriptPubKey []byte) int {
	return serializeSizeVLQ(compressTxOutAmount(amount)) +
		compressedScriptSize(scriptPubKey)
}

// putCompressedTxOut compresses the passed amount and script according to their
// domain specific compression algorithms and encodes them directly into the
// passed target byte slice with the format described above. The target byte
// slice must be at least large enough to handle the number of bytes returned by
// the compressedTxOutSize function or it will panic.
func putCompressedTxOut(target []byte, amount uint64, scriptPubKey []byte) int {
	offset := putVLQ(target, compressTxOutAmount(amount))
	offset += putCompressedScript(target[offset:], scriptPubKey)
	return offset
}

// decodeCompressedTxOut decodes the passed compressed txout, possibly followed
// by other data, into its uncompressed amount and script and returns them along
// with the number of bytes they occupied prior to decompression.
func decodeCompressedTxOut(serialized []byte) (uint64, []byte, int, error) {
	// Deserialize the compressed amount and ensure there are bytes
	// remaining for the compressed script.
	compressedAmount, bytesRead := deserializeVLQ(serialized)
	if bytesRead >= len(serialized) {
		return 0, nil, bytesRead, errDeserialize("unexpected end of " +
			"data after compressed amount")
	}

	// Decode the compressed script size and ensure there are enough bytes
	// left in the slice for it.
	scriptSize := decodeCompressedScriptSize(serialized[bytesRead:])
	if len(serialized[bytesRead:]) < scriptSize {
		return 0, nil, bytesRead, errDeserialize("unexpected end of " +
			"data after script size")
	}

	// Decompress and return the amount and script.
	amount := decompressTxOutAmount(compressedAmount)
	script := decompressScript(serialized[bytesRead : bytesRead+scriptSize])
	return amount, script, bytesRead + scriptSize, nil
}
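Putting the two algorithms together, here is an editor-added round-trip sketch built from the "pay-to-pubkey-hash dust" test vector below: the amount 546 compresses to 4911, whose VLQ encoding is the two bytes a5 2f, followed by the 21-byte compressed P2PKH script.

	scriptPubKey := hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac")
	target := make([]byte, compressedTxOutSize(546, scriptPubKey)) // 23 bytes
	putCompressedTxOut(target, 546, scriptPubKey)
	// target == a52f 00 1018853670f9f3b0582c5b9ee8ce93764ac32b93
	amount, script, bytesRead, err := decodeCompressedTxOut(target)
	// amount == 546, script equals scriptPubKey, bytesRead == len(target), err == nil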
@@ -1,436 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"bytes"
	"encoding/hex"
	"testing"
)

// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
	b, err := hex.DecodeString(s)
	if err != nil {
		panic("invalid hex in source file: " + s)
	}
	return b
}

// TestVLQ ensures the variable length quantity serialization, deserialization,
// and size calculation works as expected.
func TestVLQ(t *testing.T) {
	t.Parallel()

	tests := []struct {
		val        uint64
		serialized []byte
	}{
		{0, hexToBytes("00")},
		{1, hexToBytes("01")},
		{127, hexToBytes("7f")},
		{128, hexToBytes("8000")},
		{129, hexToBytes("8001")},
		{255, hexToBytes("807f")},
		{256, hexToBytes("8100")},
		{16383, hexToBytes("fe7f")},
		{16384, hexToBytes("ff00")},
		{16511, hexToBytes("ff7f")}, // Max 2-byte value
		{16512, hexToBytes("808000")},
		{16513, hexToBytes("808001")},
		{16639, hexToBytes("80807f")},
		{32895, hexToBytes("80ff7f")},
		{2113663, hexToBytes("ffff7f")}, // Max 3-byte value
		{2113664, hexToBytes("80808000")},
		{270549119, hexToBytes("ffffff7f")}, // Max 4-byte value
		{270549120, hexToBytes("8080808000")},
		{2147483647, hexToBytes("86fefefe7f")},
		{2147483648, hexToBytes("86fefeff00")},
		{4294967295, hexToBytes("8efefefe7f")}, // Max uint32, 5 bytes
		// Max uint64, 10 bytes
		{18446744073709551615, hexToBytes("80fefefefefefefefe7f")},
	}

	for _, test := range tests {
		// Ensure the function to calculate the serialized size without
		// actually serializing the value is calculated properly.
		gotSize := serializeSizeVLQ(test.val)
		if gotSize != len(test.serialized) {
			t.Errorf("serializeSizeVLQ: did not get expected size "+
				"for %d - got %d, want %d", test.val, gotSize,
				len(test.serialized))
			continue
		}

		// Ensure the value serializes to the expected bytes.
		gotBytes := make([]byte, gotSize)
		gotBytesWritten := putVLQ(gotBytes, test.val)
		if !bytes.Equal(gotBytes, test.serialized) {
			t.Errorf("putVLQ: did not get expected bytes "+
				"for %d - got %x, want %x", test.val, gotBytes,
				test.serialized)
			continue
		}
		if gotBytesWritten != len(test.serialized) {
			t.Errorf("putVLQ: did not get expected number "+
				"of bytes written for %d - got %d, want %d",
				test.val, gotBytesWritten, len(test.serialized))
			continue
		}

		// Ensure the serialized bytes deserialize to the expected
		// value.
		gotVal, gotBytesRead := deserializeVLQ(test.serialized)
		if gotVal != test.val {
			t.Errorf("deserializeVLQ: did not get expected value "+
				"for %x - got %d, want %d", test.serialized,
				gotVal, test.val)
			continue
		}
		if gotBytesRead != len(test.serialized) {
			t.Errorf("deserializeVLQ: did not get expected number "+
				"of bytes read for %x - got %d, want %d",
				test.serialized, gotBytesRead,
				len(test.serialized))
			continue
		}
	}
}
// TestScriptCompression ensures the domain-specific script compression and
// decompression works as expected.
func TestScriptCompression(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name         string
		uncompressed []byte
		compressed   []byte
	}{
		{
			name:         "nil",
			uncompressed: nil,
			compressed:   hexToBytes("06"),
		},
		{
			name:         "pay-to-pubkey-hash 1",
			uncompressed: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
			compressed:   hexToBytes("001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
		},
		{
			name:         "pay-to-pubkey-hash 2",
			uncompressed: hexToBytes("76a914e34cce70c86373273efcc54ce7d2a491bb4a0e8488ac"),
			compressed:   hexToBytes("00e34cce70c86373273efcc54ce7d2a491bb4a0e84"),
		},
		{
			name:         "pay-to-script-hash 1",
			uncompressed: hexToBytes("a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87"),
			compressed:   hexToBytes("01da1745e9b549bd0bfa1a569971c77eba30cd5a4b"),
		},
		{
			name:         "pay-to-script-hash 2",
			uncompressed: hexToBytes("a914f815b036d9bbbce5e9f2a00abd1bf3dc91e9551087"),
			compressed:   hexToBytes("01f815b036d9bbbce5e9f2a00abd1bf3dc91e95510"),
		},
		{
			name:         "pay-to-pubkey compressed 0x02",
			uncompressed: hexToBytes("2102192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4ac"),
			compressed:   hexToBytes("02192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
		},
		{
			name:         "pay-to-pubkey compressed 0x03",
			uncompressed: hexToBytes("2103b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65ac"),
			compressed:   hexToBytes("03b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"),
		},
		{
			name:         "pay-to-pubkey uncompressed 0x04 even",
			uncompressed: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
			compressed:   hexToBytes("04192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
		},
		{
			name:         "pay-to-pubkey uncompressed 0x04 odd",
			uncompressed: hexToBytes("410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac"),
			compressed:   hexToBytes("0511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c"),
		},
		{
			name:         "pay-to-pubkey invalid pubkey",
			uncompressed: hexToBytes("3302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
			compressed:   hexToBytes("293302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
		},
		{
			name:         "requires 2 size bytes - data push 200 bytes",
			uncompressed: append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...),
			// [0x80, 0x50] = 208 as a variable length quantity
			// [0x4c, 0xc8] = OP_PUSHDATA1 200
			compressed: append(hexToBytes("80504cc8"), bytes.Repeat([]byte{0x00}, 200)...),
		},
	}

	for _, test := range tests {
		// Ensure the function to calculate the serialized size without
		// actually serializing the value is calculated properly.
		gotSize := compressedScriptSize(test.uncompressed)
		if gotSize != len(test.compressed) {
			t.Errorf("compressedScriptSize (%s): did not get "+
				"expected size - got %d, want %d", test.name,
				gotSize, len(test.compressed))
			continue
		}

		// Ensure the script compresses to the expected bytes.
		gotCompressed := make([]byte, gotSize)
		gotBytesWritten := putCompressedScript(gotCompressed,
			test.uncompressed)
		if !bytes.Equal(gotCompressed, test.compressed) {
			t.Errorf("putCompressedScript (%s): did not get "+
				"expected bytes - got %x, want %x", test.name,
				gotCompressed, test.compressed)
			continue
		}
		if gotBytesWritten != len(test.compressed) {
			t.Errorf("putCompressedScript (%s): did not get "+
				"expected number of bytes written - got %d, "+
				"want %d", test.name, gotBytesWritten,
				len(test.compressed))
			continue
		}

		// Ensure the compressed script size is properly decoded from
		// the compressed script.
		gotDecodedSize := decodeCompressedScriptSize(test.compressed)
		if gotDecodedSize != len(test.compressed) {
			t.Errorf("decodeCompressedScriptSize (%s): did not get "+
				"expected size - got %d, want %d", test.name,
				gotDecodedSize, len(test.compressed))
			continue
		}

		// Ensure the script decompresses to the expected bytes.
		gotDecompressed := decompressScript(test.compressed)
		if !bytes.Equal(gotDecompressed, test.uncompressed) {
			t.Errorf("decompressScript (%s): did not get expected "+
				"bytes - got %x, want %x", test.name,
				gotDecompressed, test.uncompressed)
			continue
		}
	}
}
// TestScriptCompressionErrors ensures calling various functions related to
// script compression with incorrect data returns the expected results.
func TestScriptCompressionErrors(t *testing.T) {
	t.Parallel()

	// A nil script must result in a decoded size of 0.
	if gotSize := decodeCompressedScriptSize(nil); gotSize != 0 {
		t.Fatalf("decodeCompressedScriptSize with nil script did not "+
			"return 0 - got %d", gotSize)
	}

	// A nil script must result in a nil decompressed script.
	if gotScript := decompressScript(nil); gotScript != nil {
		t.Fatalf("decompressScript with nil script did not return nil "+
			"decompressed script - got %x", gotScript)
	}

	// A compressed script for a pay-to-pubkey (uncompressed) that results
	// in an invalid pubkey must result in a nil decompressed script.
	compressedScript := hexToBytes("04012d74d0cb94344c9569c2e77901573d8d" +
		"7903c3ebec3a957724895dca52c6b4")
	if gotScript := decompressScript(compressedScript); gotScript != nil {
		t.Fatalf("decompressScript with compressed pay-to-"+
			"uncompressed-pubkey that is invalid did not return "+
			"nil decompressed script - got %x", gotScript)
	}
}
// TestAmountCompression ensures the domain-specific transaction output amount
// compression and decompression works as expected.
func TestAmountCompression(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name         string
		uncompressed uint64
		compressed   uint64
	}{
		{
			name:         "0 BTC",
			uncompressed: 0,
			compressed:   0,
		},
		{
			name:         "546 Satoshi (current network dust value)",
			uncompressed: 546,
			compressed:   4911,
		},
		{
			name:         "0.00001 BTC (typical transaction fee)",
			uncompressed: 1000,
			compressed:   4,
		},
		{
			name:         "0.0001 BTC (typical transaction fee)",
			uncompressed: 10000,
			compressed:   5,
		},
		{
			name:         "0.12345678 BTC",
			uncompressed: 12345678,
			compressed:   111111101,
		},
		{
			name:         "0.5 BTC",
			uncompressed: 50000000,
			compressed:   48,
		},
		{
			name:         "1 BTC",
			uncompressed: 100000000,
			compressed:   9,
		},
		{
			name:         "5 BTC",
			uncompressed: 500000000,
			compressed:   49,
		},
		{
			name:         "21000000 BTC (max minted coins)",
			uncompressed: 2100000000000000,
			compressed:   21000000,
		},
	}

	for _, test := range tests {
		// Ensure the amount compresses to the expected value.
		gotCompressed := compressTxOutAmount(test.uncompressed)
		if gotCompressed != test.compressed {
			t.Errorf("compressTxOutAmount (%s): did not get "+
				"expected value - got %d, want %d", test.name,
				gotCompressed, test.compressed)
			continue
		}

		// Ensure the value decompresses to the expected value.
		gotDecompressed := decompressTxOutAmount(test.compressed)
		if gotDecompressed != test.uncompressed {
			t.Errorf("decompressTxOutAmount (%s): did not get "+
				"expected value - got %d, want %d", test.name,
				gotDecompressed, test.uncompressed)
			continue
		}
	}
}
// TestCompressedTxOut ensures the transaction output serialization and
// deserialization works as expected.
func TestCompressedTxOut(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name         string
		amount       uint64
		scriptPubKey []byte
		compressed   []byte
	}{
		{
			name:         "pay-to-pubkey-hash dust",
			amount:       546,
			scriptPubKey: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
			compressed:   hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
		},
		{
			name:         "pay-to-pubkey uncompressed 1 BTC",
			amount:       100000000,
			scriptPubKey: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
			compressed:   hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
		},
	}

	for _, test := range tests {
		// Ensure the function to calculate the serialized size without
		// actually serializing the txout is calculated properly.
		gotSize := compressedTxOutSize(test.amount, test.scriptPubKey)
		if gotSize != len(test.compressed) {
			t.Errorf("compressedTxOutSize (%s): did not get "+
				"expected size - got %d, want %d", test.name,
				gotSize, len(test.compressed))
			continue
		}

		// Ensure the txout compresses to the expected value.
		gotCompressed := make([]byte, gotSize)
		gotBytesWritten := putCompressedTxOut(gotCompressed,
			test.amount, test.scriptPubKey)
		if !bytes.Equal(gotCompressed, test.compressed) {
			t.Errorf("putCompressedTxOut (%s): did not get expected "+
				"bytes - got %x, want %x", test.name,
				gotCompressed, test.compressed)
			continue
		}
		if gotBytesWritten != len(test.compressed) {
			t.Errorf("putCompressedTxOut (%s): did not get expected "+
				"number of bytes written - got %d, want %d",
				test.name, gotBytesWritten,
				len(test.compressed))
			continue
		}

		// Ensure the serialized bytes are decoded back to the expected
		// uncompressed values.
		gotAmount, gotScript, gotBytesRead, err := decodeCompressedTxOut(
			test.compressed)
		if err != nil {
			t.Errorf("decodeCompressedTxOut (%s): unexpected "+
				"error: %v", test.name, err)
			continue
		}
		if gotAmount != test.amount {
			t.Errorf("decodeCompressedTxOut (%s): did not get "+
				"expected amount - got %d, want %d",
				test.name, gotAmount, test.amount)
			continue
		}
		if !bytes.Equal(gotScript, test.scriptPubKey) {
			t.Errorf("decodeCompressedTxOut (%s): did not get "+
				"expected script - got %x, want %x",
				test.name, gotScript, test.scriptPubKey)
			continue
		}
		if gotBytesRead != len(test.compressed) {
			t.Errorf("decodeCompressedTxOut (%s): did not get "+
				"expected number of bytes read - got %d, want %d",
				test.name, gotBytesRead, len(test.compressed))
			continue
		}
	}
}

// TestTxOutCompressionErrors ensures calling various functions related to
// txout compression with incorrect data returns the expected results.
func TestTxOutCompressionErrors(t *testing.T) {
	t.Parallel()

	// A compressed txout with a missing compressed script must error.
	compressedTxOut := hexToBytes("00")
	_, _, _, err := decodeCompressedTxOut(compressedTxOut)
	if !isDeserializeErr(err) {
		t.Fatalf("decodeCompressedTxOut with missing compressed script "+
			"did not return expected error type - got %T, want "+
			"errDeserialize", err)
	}

	// A compressed txout with a short compressed script must error.
	compressedTxOut = hexToBytes("0010")
	_, _, _, err = decodeCompressedTxOut(compressedTxOut)
	if !isDeserializeErr(err) {
		t.Fatalf("decodeCompressedTxOut with short compressed script "+
			"did not return expected error type - got %T, want "+
			"errDeserialize", err)
	}
}
blockdag/dag.go (1704 lines changed; diff suppressed because it is too large)
blockdag/dag_test.go (1186 lines changed; diff suppressed because it is too large)
blockdag/dagio.go (1022 lines changed; diff suppressed because it is too large)
@@ -6,39 +6,51 @@ package blockdag

 import (
 	"bytes"
-	"errors"
+	"encoding/hex"
+	"github.com/pkg/errors"
 	"reflect"
 	"testing"

-	"github.com/daglabs/btcd/database"
-	"github.com/daglabs/btcd/util/daghash"
+	"github.com/kaspanet/kaspad/util/daghash"
 )

-// TestErrNotInDAG ensures the functions related to errNotInDAG work
+// TestErrNotInDAG ensures the functions related to ErrNotInDAG work
 // as expected.
 func TestErrNotInDAG(t *testing.T) {
 	errStr := "no block at height 1 exists"
-	err := error(errNotInDAG(errStr))
+	err := error(ErrNotInDAG(errStr))

 	// Ensure the stringized output for the error is as expected.
 	if err.Error() != errStr {
-		t.Fatalf("errNotInDAG returned unexpected error string - "+
+		t.Fatalf("ErrNotInDAG returned unexpected error string - "+
 			"got %q, want %q", err.Error(), errStr)
 	}

 	// Ensure error is detected as the correct type.
-	if !isNotInDAGErr(err) {
-		t.Fatalf("isNotInDAGErr did not detect as expected type")
+	if !IsNotInDAGErr(err) {
+		t.Fatalf("IsNotInDAGErr did not detect as expected type")
 	}
 	err = errors.New("something else")
-	if isNotInDAGErr(err) {
-		t.Fatalf("isNotInDAGErr detected incorrect type")
+	if IsNotInDAGErr(err) {
+		t.Fatalf("IsNotInDAGErr detected incorrect type")
 	}
 }

-// TestUtxoSerialization ensures serializing and deserializing unspent
+// hexToBytes converts the passed hex string into bytes and will panic if there
+// is an error. This is only provided for the hard-coded constants so errors in
+// the source code can be detected. It will only (and must only) be called with
+// hard-coded values.
+func hexToBytes(s string) []byte {
+	b, err := hex.DecodeString(s)
+	if err != nil {
+		panic("invalid hex in source file: " + s)
+	}
+	return b
+}
+
+// TestUTXOSerialization ensures serializing and deserializing unspent
 // transaction output entries works as expected.
-func TestUtxoSerialization(t *testing.T) {
+func TestUTXOSerialization(t *testing.T) {
 	t.Parallel()

 	tests := []struct {
@@ -46,8 +58,6 @@ func TestUtxoSerialization(t *testing.T) {
 		entry      *UTXOEntry
 		serialized []byte
 	}{
-		// From tx in main blockchain:
-		// b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
 		{
 			name: "blue score 1, coinbase",
 			entry: &UTXOEntry{
@@ -56,10 +66,8 @@ func TestUtxoSerialization(t *testing.T) {
 				blockBlueScore: 1,
 				packedFlags:    tfCoinbase,
 			},
-			serialized: hexToBytes("03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"),
+			serialized: hexToBytes("01000000000000000100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
 		},
-		// From tx in main blockchain:
-		// 8131ffb0a2c945ecaf9b9063e59558784f9c3a74741ce6ae2a18d0571dac15bb:1
 		{
 			name: "blue score 100001, not coinbase",
 			entry: &UTXOEntry{
@@ -68,13 +76,21 @@ func TestUtxoSerialization(t *testing.T) {
 				blockBlueScore: 100001,
 				packedFlags:    0,
 			},
-			serialized: hexToBytes("8b99420700ee8bd501094a7d5ca318da2506de35e1cb025ddc"),
+			serialized: hexToBytes("a1860100000000000040420f00000000001976a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
 		},
 	}

 	for i, test := range tests {
 		// Ensure the utxo entry serializes to the expected value.
-		gotBytes := serializeUTXOEntry(test.entry)
+		w := &bytes.Buffer{}
+		err := serializeUTXOEntry(w, test.entry)
+		if err != nil {
+			t.Errorf("serializeUTXOEntry #%d (%s) unexpected "+
+				"error: %v", i, test.name, err)
+			continue
+		}
+
+		gotBytes := w.Bytes()
 		if !bytes.Equal(gotBytes, test.serialized) {
 			t.Errorf("serializeUTXOEntry #%d (%s): mismatched "+
 				"bytes - got %x, want %x", i, test.name,
@@ -82,8 +98,8 @@ func TestUtxoSerialization(t *testing.T) {
 			continue
 		}

 		// Deserialize to a utxo entry.
-		utxoEntry, err := deserializeUTXOEntry(test.serialized)
+		utxoEntry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
 		if err != nil {
 			t.Errorf("deserializeUTXOEntry #%d (%s) unexpected "+
 				"error: %v", i, test.name, err)
@@ -128,28 +144,24 @@ func TestUtxoEntryDeserializeErrors(t *testing.T) {
 	tests := []struct {
 		name       string
 		serialized []byte
-		errType    error
 	}{
 		{
 			name:       "no data after header code",
 			serialized: hexToBytes("02"),
-			errType:    errDeserialize(""),
 		},
 		{
 			name:       "incomplete compressed txout",
 			serialized: hexToBytes("0232"),
-			errType:    errDeserialize(""),
 		},
 	}

 	for _, test := range tests {
 		// Ensure the expected error type is returned and the returned
 		// entry is nil.
-		entry, err := deserializeUTXOEntry(test.serialized)
-		if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
-			t.Errorf("deserializeUTXOEntry (%s): expected error "+
-				"type does not match - got %T, want %T",
-				test.name, err, test.errType)
+		entry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
+		if err == nil {
+			t.Errorf("deserializeUTXOEntry (%s): didn't return an error",
+				test.name)
 			continue
 		}
 		if entry != nil {
@@ -176,7 +188,7 @@ func TestDAGStateSerialization(t *testing.T) {
 			TipHashes:         []*daghash.Hash{newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")},
 			LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
 		},
-		serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
+		serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
 		},
 		{
 			name: "block 1",
@@ -184,7 +196,7 @@ func TestDAGStateSerialization(t *testing.T) {
 			TipHashes:         []*daghash.Hash{newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")},
 			LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
 		},
-		serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
+		serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
 		},
 	}
@@ -221,46 +233,14 @@ func TestDAGStateSerialization(t *testing.T) {
 	}
 }

-// TestDAGStateDeserializeErrors performs negative tests against
-// deserializing the DAG state to ensure error paths work as expected.
-func TestDAGStateDeserializeErrors(t *testing.T) {
-	t.Parallel()
-
-	tests := []struct {
-		name       string
-		serialized []byte
-		errType    error
-	}{
-		{
-			name:       "nothing serialized",
-			serialized: hexToBytes(""),
-			errType:    database.Error{ErrorCode: database.ErrCorruption},
-		},
-		{
-			name:       "corrupted data",
-			serialized: []byte("[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,7"),
-			errType:    database.Error{ErrorCode: database.ErrCorruption},
-		},
-	}
-
-	for _, test := range tests {
-		// Ensure the expected error type and code is returned.
-		_, err := deserializeDAGState(test.serialized)
-		if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
-			t.Errorf("deserializeDAGState (%s): expected "+
-				"error type does not match - got %T, want %T",
-				test.name, err, test.errType)
-			continue
-		}
-		if derr, ok := err.(database.Error); ok {
-			tderr := test.errType.(database.Error)
-			if derr.ErrorCode != tderr.ErrorCode {
-				t.Errorf("deserializeDAGState (%s): "+
-					"wrong error code got: %v, want: %v",
-					test.name, derr.ErrorCode,
-					tderr.ErrorCode)
-				continue
-			}
-		}
+// newHashFromStr converts the passed big-endian hex string into a
+// daghash.Hash. It only differs from the one available in daghash in that
+// it panics in case of an error since it will only (and must only) be
+// called with hard-coded, and therefore known good, hashes.
+func newHashFromStr(hexStr string) *daghash.Hash {
+	hash, err := daghash.NewHashFromStr(hexStr)
+	if err != nil {
+		panic(err)
+	}
+	return hash
+}
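For reference, here is an editor-added sketch of the writer/reader-based serialization API this change introduces, exactly as the updated test exercises it; "entry" stands for an assumed *UTXOEntry value:

	w := &bytes.Buffer{}
	if err := serializeUTXOEntry(w, entry); err != nil {
		// handle the serialization error
	}
	roundTripped, err := deserializeUTXOEntry(bytes.NewReader(w.Bytes()))
	// roundTripped should equal entry; err is non-nil on truncated input.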
blockdag/delayedblockheap.go (new file, 73 lines)
@@ -0,0 +1,73 @@
package blockdag

import (
	"container/heap"
)

type baseDelayedBlocksHeap []*delayedBlock

func (h baseDelayedBlocksHeap) Len() int {
	return len(h)
}
func (h baseDelayedBlocksHeap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}

func (h *baseDelayedBlocksHeap) Push(x interface{}) {
	*h = append(*h, x.(*delayedBlock))
}

func (h *baseDelayedBlocksHeap) Pop() interface{} {
	oldHeap := *h
	oldLength := len(oldHeap)
	popped := oldHeap[oldLength-1]
	*h = oldHeap[0 : oldLength-1]
	return popped
}

func (h baseDelayedBlocksHeap) peek() interface{} {
	if h.Len() > 0 {
		return h[h.Len()-1]
	}
	return nil
}

func (h baseDelayedBlocksHeap) Less(i, j int) bool {
	return h[j].processTime.After(h[i].processTime)
}

type delayedBlocksHeap struct {
	baseDelayedBlocksHeap *baseDelayedBlocksHeap
	impl                  heap.Interface
}

// newDelayedBlocksHeap initializes and returns a new delayedBlocksHeap
func newDelayedBlocksHeap() delayedBlocksHeap {
	baseHeap := &baseDelayedBlocksHeap{}
	h := delayedBlocksHeap{impl: baseHeap, baseDelayedBlocksHeap: baseHeap}
	heap.Init(h.impl)
	return h
}

// pop removes the block with the earliest process time from this heap and
// returns it
func (dbh delayedBlocksHeap) pop() *delayedBlock {
	return heap.Pop(dbh.impl).(*delayedBlock)
}

// Push pushes the block onto the heap
func (dbh delayedBlocksHeap) Push(block *delayedBlock) {
	heap.Push(dbh.impl, block)
}

// Len returns the length of this heap
func (dbh delayedBlocksHeap) Len() int {
	return dbh.impl.Len()
}

// peek returns the topmost element in the queue without popping it
func (dbh delayedBlocksHeap) peek() *delayedBlock {
	if dbh.baseDelayedBlocksHeap.peek() == nil {
		return nil
	}
	return dbh.baseDelayedBlocksHeap.peek().(*delayedBlock)
}
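An editor-added usage sketch of this new heap follows; the delayedBlock literals are hypothetical (the real struct is defined elsewhere in this package with a processTime field), and soonerTime/laterTime stand for assumed mstime.Time values:

	dbHeap := newDelayedBlocksHeap()
	dbHeap.Push(&delayedBlock{processTime: laterTime})
	dbHeap.Push(&delayedBlock{processTime: soonerTime})
	next := dbHeap.pop() // the block whose processTime is earliest (soonerTime)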
@@ -5,15 +5,15 @@
 package blockdag

 import (
-	"math/big"
-	"time"
+	"github.com/kaspanet/kaspad/util/bigintpool"
+	"github.com/kaspanet/kaspad/util/mstime"

-	"github.com/daglabs/btcd/util"
+	"github.com/kaspanet/kaspad/util"
 )

 // requiredDifficulty calculates the required difficulty for a
 // block given its bluest parent.
-func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime time.Time) uint32 {
+func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime mstime.Time) uint32 {
 	// Genesis block.
 	if bluestParent == nil || bluestParent.blueScore < dag.difficultyAdjustmentWindowSize+1 {
 		return dag.powMaxBits
@@ -30,12 +30,21 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ti
 	// averageWindowTarget * ((windowMaxTimeStamp - windowMinTimestamp) / (targetTimePerBlock * windowSize))
 	// The result uses integer division which means it will be slightly
 	// rounded down.
-	newTarget := targetsWindow.averageTarget()
+	newTarget := bigintpool.Acquire(0)
+	defer bigintpool.Release(newTarget)
+	windowTimeStampDifference := bigintpool.Acquire(windowMaxTimeStamp - windowMinTimestamp)
+	defer bigintpool.Release(windowTimeStampDifference)
+	targetTimePerBlock := bigintpool.Acquire(dag.Params.TargetTimePerBlock.Milliseconds())
+	defer bigintpool.Release(targetTimePerBlock)
+	difficultyAdjustmentWindowSize := bigintpool.Acquire(int64(dag.difficultyAdjustmentWindowSize))
+	defer bigintpool.Release(difficultyAdjustmentWindowSize)
+
+	targetsWindow.averageTarget(newTarget)
 	newTarget.
-		Mul(newTarget, big.NewInt(windowMaxTimeStamp-windowMinTimestamp)).
-		Div(newTarget, big.NewInt(dag.targetTimePerBlock)).
-		Div(newTarget, big.NewInt(int64(dag.difficultyAdjustmentWindowSize)))
-	if newTarget.Cmp(dag.dagParams.PowMax) > 0 {
+		Mul(newTarget, windowTimeStampDifference).
+		Div(newTarget, targetTimePerBlock).
+		Div(newTarget, difficultyAdjustmentWindowSize)
+	if newTarget.Cmp(dag.Params.PowMax) > 0 {
 		return dag.powMaxBits
 	}
 	newTargetBits := util.BigToCompact(newTarget)
@@ -46,7 +55,7 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ti
 // be built on top of the current tips.
 //
 // This function is safe for concurrent access.
-func (dag *BlockDAG) NextRequiredDifficulty(timestamp time.Time) uint32 {
+func (dag *BlockDAG) NextRequiredDifficulty(timestamp mstime.Time) uint32 {
 	difficulty := dag.requiredDifficulty(dag.virtual.parents.bluest(), timestamp)
 	return difficulty
 }
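The retarget arithmetic above is plain integer math. As an editor-added sketch using math/big rather than the pooled big integers the production code acquires, with all parameter names chosen for illustration only:

	func retargetSketch(averageWindowTarget *big.Int, windowMaxTimeStamp, windowMinTimestamp,
		targetTimePerBlockMs int64, windowSize uint64) *big.Int {

		newTarget := new(big.Int).Set(averageWindowTarget)
		newTarget.Mul(newTarget, big.NewInt(windowMaxTimeStamp-windowMinTimestamp))
		newTarget.Div(newTarget, big.NewInt(targetTimePerBlockMs))
		newTarget.Div(newTarget, big.NewInt(int64(windowSize)))
		// If the window took exactly windowSize*targetTimePerBlockMs milliseconds,
		// newTarget equals the average target and the difficulty is unchanged;
		// a faster window shrinks the target (raising difficulty) and vice versa.
		return newTarget
	}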
@@ -5,14 +5,13 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/daglabs/btcd/dagconfig"
|
||||
"github.com/daglabs/btcd/util/daghash"
|
||||
"github.com/daglabs/btcd/wire"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"math/big"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/daglabs/btcd/util"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
// TestBigToCompact ensures BigToCompact converts big integers to the expected
|
||||
@@ -81,65 +80,84 @@ func TestCalcWork(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDifficulty(t *testing.T) {
|
||||
params := dagconfig.SimNetParams
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag := newTestDAG(¶ms)
|
||||
nonce := uint64(0)
|
||||
zeroTime := time.Unix(0, 0)
|
||||
addNode := func(parents blockSet, blockTime time.Time) *blockNode {
|
||||
params.DifficultyAdjustmentWindowSize = 264
|
||||
dag, teardownFunc, err := DAGSetup("TestDifficulty", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
zeroTime := mstime.Time{}
|
||||
addNode := func(parents blockSet, blockTime mstime.Time) *blockNode {
|
||||
bluestParent := parents.bluest()
|
||||
if blockTime == zeroTime {
|
||||
blockTime = time.Unix(bluestParent.timestamp+1, 0)
|
||||
if blockTime.IsZero() {
|
||||
blockTime = bluestParent.time()
|
||||
blockTime = blockTime.Add(params.TargetTimePerBlock)
|
||||
}
|
||||
header := &wire.BlockHeader{
|
||||
ParentHashes: parents.hashes(),
|
||||
Bits: dag.requiredDifficulty(bluestParent, blockTime),
|
||||
Nonce: nonce,
|
||||
Timestamp: blockTime,
|
||||
HashMerkleRoot: &daghash.ZeroHash,
|
||||
AcceptedIDMerkleRoot: &daghash.ZeroHash,
|
||||
UTXOCommitment: &daghash.ZeroHash,
|
||||
block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in PrepareBlockForTest: %s", err)
|
||||
}
|
||||
block.Header.Timestamp = blockTime
|
||||
block.Header.Bits = dag.requiredDifficulty(bluestParent, blockTime)
|
||||
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(block), BFNoPoWCheck)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in ProcessBlock: %s", err)
|
||||
}
|
||||
if isDelayed {
|
||||
t.Fatalf("block is too far in the future")
|
||||
}
|
||||
if isOrphan {
|
||||
t.Fatalf("block was unexpectedly orphan")
|
||||
}
|
||||
node, ok := dag.index.LookupNode(block.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("block %s does not exist in the DAG", block.BlockHash())
|
||||
}
|
||||
node := newBlockNode(header, parents, dag.dagParams.K)
|
||||
node.updateParentsChildren()
|
||||
nonce++
|
||||
return node
|
||||
}
|
||||
tip := dag.genesis
|
||||
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
|
||||
tip = addNode(setFromSlice(tip), zeroTime)
|
||||
tip = addNode(blockSetFromSlice(tip), zeroTime)
|
||||
if tip.bits != dag.genesis.bits {
t.Fatalf("As long as the bluest parent's blue score is less than the difficulty adjustment window size, the difficulty should be the same as genesis'")
t.Fatalf("As long as the bluest parent's blue score is less than the difficulty adjustment " +
"window size, the difficulty should be the same as genesis'")
}
}
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize+1000; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize+100; i++ {
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != dag.genesis.bits {
t.Fatalf("As long as the block rate remains the same, the difficulty shouldn't change")
}
}
nodeInThePast := addNode(setFromSlice(tip), tip.PastMedianTime(dag))
nodeInThePast := addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
if nodeInThePast.bits != tip.bits {
t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block's bluest parent")
}
tip = nodeInThePast

tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != nodeInThePast.bits {
t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block's bluest parent")
}
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if compareBits(tip.bits, nodeInThePast.bits) >= 0 {
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the block rate, so the difficulty should increase as well")
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the " +
"block rate, so the difficulty should increase as well")
}
expectedBits := uint32(0x207ff395)
expectedBits := uint32(0x207f83df)
if tip.bits != expectedBits {
t.Errorf("tip.bits was expected to be %x but got %x", expectedBits, tip.bits)
}

// Increase block rate to increase difficulty
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
tip = addNode(setFromSlice(tip), tip.PastMedianTime(dag))
tip = addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
if compareBits(tip.bits, tip.parents.bluest().bits) > 0 {
t.Fatalf("Because we're increasing the block rate, the difficulty can't decrease")
}
@@ -149,7 +167,7 @@ func TestDifficulty(t *testing.T) {
lastBits := tip.bits
sameBitsCount := uint64(0)
for sameBitsCount < dag.difficultyAdjustmentWindowSize+1 {
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits == lastBits {
sameBitsCount++
} else {
@@ -157,37 +175,41 @@ func TestDifficulty(t *testing.T) {
sameBitsCount = 0
}
}
slowNode := addNode(setFromSlice(tip), time.Unix(tip.timestamp+2, 0))
slowBlockTime := tip.time()
slowBlockTime = slowBlockTime.Add(params.TargetTimePerBlock + time.Second)
slowNode := addNode(blockSetFromSlice(tip), slowBlockTime)
if slowNode.bits != tip.bits {
t.Fatalf("The difficulty should only change when slowNode is in the past of a block's bluest parent")
}

tip = slowNode

tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != slowNode.bits {
t.Fatalf("The difficulty should only change when slowNode is in the past of a block's bluest parent")
}
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if compareBits(tip.bits, slowNode.bits) <= 0 {
t.Fatalf("tip.bits should be bigger than slowNode.bits because slowNode decreased the block rate, so the difficulty should decrease as well")
t.Fatalf("tip.bits should be bigger than slowNode.bits because slowNode decreased the block" +
" rate, so the difficulty should decrease as well")
}

splitNode := addNode(setFromSlice(tip), zeroTime)
splitNode := addNode(blockSetFromSlice(tip), zeroTime)
tip = splitNode
for i := 0; i < 100; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
}
blueTip := tip

redChainTip := splitNode
for i := 0; i < 10; i++ {
redChainTip = addNode(setFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
redChainTip = addNode(blockSetFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
}
tipWithRedPast := addNode(setFromSlice(redChainTip, blueTip), zeroTime)
tipWithoutRedPast := addNode(setFromSlice(blueTip), zeroTime)
tipWithRedPast := addNode(blockSetFromSlice(redChainTip, blueTip), zeroTime)
tipWithoutRedPast := addNode(blockSetFromSlice(blueTip), zeroTime)
if tipWithoutRedPast.bits != tipWithRedPast.bits {
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks shouldn't affect the difficulty")
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks" +
" shouldn't affect the difficulty")
}
}

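The expectedBits change above is easier to read once the compact encoding is unpacked: compareBits in this test compares the big-integer targets that the bits encode, and a smaller target means higher difficulty. A self-contained sketch (compactToBig here is a hypothetical local re-implementation of the standard compact-bits decoding, not the package's own helper):

	package main

	import (
		"fmt"
		"math/big"
	)

	// compactToBig decodes a compact-bits value into its big-integer
	// target (sign bit ignored), shown only for illustration.
	func compactToBig(compact uint32) *big.Int {
		mantissa := int64(compact & 0x007fffff)
		exponent := uint(compact >> 24)
		if exponent <= 3 {
			return big.NewInt(mantissa >> (8 * (3 - exponent)))
		}
		n := big.NewInt(mantissa)
		return n.Lsh(n, 8*(exponent-3))
	}

	func main() {
		newTarget := compactToBig(0x207f83df) // the updated expectedBits above
		oldTarget := compactToBig(0x207ff395) // the previous expectedBits
		// A smaller target is harder to hit, i.e. higher difficulty, so
		// this prints -1: the new bits encode a slightly higher difficulty.
		fmt.Println(newTarget.Cmp(oldTarget))
	}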
@@ -1,35 +1,26 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package blockdag implements bitcoin block handling and chain selection rules.
Package blockdag implements kaspa block handling and DAG selection rules.

The bitcoin block handling and chain selection rules are an integral, and quite
likely the most important, part of bitcoin. Unfortunately, at the time of
this writing, these rules are also largely undocumented and had to be
ascertained from the bitcoind source code. At its core, bitcoin is a
distributed consensus of which blocks are valid and which ones will comprise the
main block chain (public ledger) that ultimately determines accepted
transactions, so it is extremely important that fully validating nodes agree on
all rules.
The kaspa block handling and DAG selection rules are an integral, and quite
likely the most important, part of kaspa. At its core, kaspa is a distributed
consensus of which blocks are valid and which ones will comprise the DAG
(public ledger) that ultimately determines accepted transactions, so it is
extremely important that fully validating nodes agree on all rules.

At a high level, this package provides support for inserting new blocks into
the block chain according to the aforementioned rules. It includes
functionality such as rejecting duplicate blocks, ensuring blocks and
transactions follow all rules, orphan handling, and best chain selection along
with reorganization.
the block DAG according to the aforementioned rules. It includes functionality
such as rejecting duplicate blocks, ensuring blocks and transactions follow all
rules, orphan handling, and DAG order along with reorganization.

Since this package does not deal with other bitcoin specifics such as network
communication or wallets, it provides a notification system which gives the
caller a high level of flexibility in how they want to react to certain events
such as orphan blocks which need their parents requested and newly connected
main chain blocks which might result in wallet updates.
Since this package does not deal with other kaspa specifics such as network
communication, it provides a notification system which gives the caller a high
level of flexibility in how they want to react to certain events such as orphan
blocks which need their parents requested and newly connected DAG blocks.

Bitcoin Chain Processing Overview
Kaspa DAG Processing Overview

Before a block is allowed into the block chain, it must go through an intensive
series of validation rules. The following list serves as a general outline of
Before a block is allowed into the block DAG, it must go through an intensive
series of validation rules. The following list serves as a general outline of
those rules to provide some intuition into what is going on under the hood, but
is by no means exhaustive:

@@ -37,26 +28,19 @@ is by no means exhaustive:
- Perform a series of sanity checks on the block and its transactions such as
verifying proof of work, timestamps, number and character of transactions,
transaction amounts, script complexity, and merkle root calculations
- Compare the block against predetermined checkpoints for expected timestamps
and difficulty based on elapsed time since the checkpoint
- Save the most recent orphan blocks for a limited time in case their parent
blocks become available
- Stop processing if the block is an orphan as the rest of the processing
depends on the block's position within the block chain
depends on the block's position within the block DAG
- Perform a series of more thorough checks that depend on the block's position
within the block chain such as verifying block difficulties adhere to
within the block DAG such as verifying block difficulties adhere to
difficulty retarget rules, timestamps are after the median of the last
several blocks, all transactions are finalized, checkpoint blocks match, and
several blocks, all transactions are finalized, and
block versions are in line with the previous blocks
- Determine how the block fits into the chain and perform different actions
accordingly in order to ensure any side chains which have higher difficulty
than the main chain become the new main chain
- When a block is being connected to the main chain (either through
reorganization of a side chain to the main chain or just extending the
main chain), perform further checks on the block's transactions such as
verifying transaction duplicates, script complexity for the combination of
connected scripts, coinbase maturity, double spends, and connected
transaction values
- When a block is being connected to the DAG, perform further checks on the
block's transactions such as verifying transaction duplicates, script
complexity for the combination of connected scripts, coinbase maturity,
double spends, and connected transaction values
- Run the transaction scripts to verify the spender is allowed to spend the
coins
- Insert the block into the block database
@@ -64,18 +48,10 @@ is by no means exhaustive:
Errors

Errors returned by this package are either the raw errors provided by underlying
calls or of type blockchain.RuleError. This allows the caller to differentiate
calls or of type blockdag.RuleError. This allows the caller to differentiate
between unexpected errors, such as database errors, versus errors due to rule
violations through type assertions. In addition, callers can programmatically
violations through type assertions. In addition, callers can programmatically
determine the specific rule violation by examining the ErrorCode field of the
type asserted blockchain.RuleError.

Bitcoin Improvement Proposals

This package includes spec changes outlined by the following BIPs:

BIP0016 (https://en.bitcoin.it/wiki/BIP_0016)
BIP0030 (https://en.bitcoin.it/wiki/BIP_0030)
BIP0034 (https://en.bitcoin.it/wiki/BIP_0034)
type asserted blockdag.RuleError.
*/
package blockdag

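The Errors paragraph in the documentation above ends with the caller examining the ErrorCode of a blockdag.RuleError. A hedged sketch of that pattern from a caller's point of view; errors.As is used rather than a bare type assertion because, as the errors.go diff below shows, rule errors are now returned wrapped (the helper name is illustrative, not part of the package):

	package sketch

	import (
		"fmt"

		"github.com/kaspanet/kaspad/blockdag"
		"github.com/pkg/errors"
	)

	// describeProcessBlockError separates rule violations from unexpected
	// failures; it is a hypothetical helper, not part of the package.
	func describeProcessBlockError(err error) string {
		var ruleErr blockdag.RuleError
		if errors.As(err, &ruleErr) {
			// A consensus rule was violated; ErrorCode says which one.
			return fmt.Sprintf("rule violation: %s", ruleErr.ErrorCode)
		}
		// Anything else (for example, a database error) is unexpected.
		return fmt.Sprintf("unexpected error: %s", err)
	}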
@@ -6,28 +6,10 @@ package blockdag

import (
"fmt"

"github.com/pkg/errors"
)

// DeploymentError identifies an error that indicates a deployment ID was
// specified that does not exist.
type DeploymentError uint32

// Error returns the deployment error as a human-readable string and satisfies
// the error interface.
func (e DeploymentError) Error() string {
return fmt.Sprintf("deployment ID %d does not exist", uint32(e))
}

// AssertError identifies an error that indicates an internal code consistency
// issue and should be treated as a critical and unrecoverable error.
type AssertError string

// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e AssertError) Error() string {
return "assertion failed: " + string(e)
}

// ErrorCode identifies a kind of error.
type ErrorCode int

@@ -46,14 +28,8 @@ const (
// to a newer version.
ErrBlockVersionTooOld

// ErrInvalidTime indicates the time in the passed block has a precision
// that is more than one second. The chain consensus rules require
// timestamps to have a maximum precision of one second.
ErrInvalidTime

// ErrTimeTooOld indicates the time is either before the median time of
// the last several blocks per the chain consensus rules or prior to the
// most recent checkpoint.
// the last several blocks per the DAG consensus rules.
ErrTimeTooOld

// ErrTimeTooNew indicates the time is too far in the future as compared
@@ -67,7 +43,7 @@ const (
ErrWrongParentsOrder

// ErrDifficultyTooLow indicates the difficulty for the block is lower
// than the difficulty required by the most recent checkpoint.
// than the difficulty required.
ErrDifficultyTooLow

// ErrUnexpectedDifficulty indicates specified bits do not align with
@@ -88,20 +64,19 @@ const (
// the expected value.
ErrBadUTXOCommitment

// ErrBadCheckpoint indicates a block that is expected to be at a
// checkpoint height does not match the expected one.
ErrBadCheckpoint
// ErrInvalidSubnetwork indicates the subnetwork is not allowed.
ErrInvalidSubnetwork

// ErrFinalityPointTimeTooOld indicates a block has a timestamp before the
// last finality point.
ErrFinalityPointTimeTooOld

// ErrNoTransactions indicates the block does not have at least one
// transaction. A valid block must have at least the coinbase
// transaction. A valid block must have at least the coinbase
// transaction.
ErrNoTransactions

// ErrNoTxInputs indicates a transaction does not have any inputs. A
// ErrNoTxInputs indicates a transaction does not have any inputs. A
// valid transaction must have at least one input.
ErrNoTxInputs

@@ -126,12 +101,17 @@ const (
// either does not exist or has already been spent.
ErrMissingTxOut

// ErrDoubleSpendInSameBlock indicates a transaction
// that spends an output that was already spent by another
// transaction in the same block.
ErrDoubleSpendInSameBlock

// ErrUnfinalizedTx indicates a transaction has not been finalized.
// A valid block may only contain finalized transactions.
ErrUnfinalizedTx

// ErrDuplicateTx indicates a block contains an identical transaction
// (or at least two transactions which hash to the same value). A
// (or at least two transactions which hash to the same value). A
// valid block may only contain unique transactions.
ErrDuplicateTx

@@ -172,12 +152,12 @@ const (
ErrBadCoinbaseTransaction

// ErrScriptMalformed indicates a transaction script is malformed in
// some way. For example, it might be longer than the maximum allowed
// some way. For example, it might be longer than the maximum allowed
// length or fail to parse.
ErrScriptMalformed

// ErrScriptValidation indicates the result of executing transaction
// script failed. The error covers any failure when executing scripts
// script failed. The error covers any failure when executing scripts
// such as signature verification failures and execution past the end of
// the stack.
ErrScriptValidation
@@ -222,6 +202,14 @@ const (
// ErrInvalidParentsRelation indicates that one of the parents of a block
// is also an ancestor of another parent
ErrInvalidParentsRelation

// ErrDelayedBlockIsNotAllowed indicates that a block with a delayed timestamp was
// submitted with BFDisallowDelay flag raised.
ErrDelayedBlockIsNotAllowed

// ErrOrphanBlockIsNotAllowed indicates that an orphan block was submitted with
// BFDisallowOrphans flag raised.
ErrOrphanBlockIsNotAllowed
)

// Map of ErrorCode values back to their constant names for pretty printing.
@@ -229,7 +217,6 @@ var errorCodeStrings = map[ErrorCode]string{
ErrDuplicateBlock: "ErrDuplicateBlock",
ErrBlockMassTooHigh: "ErrBlockMassTooHigh",
ErrBlockVersionTooOld: "ErrBlockVersionTooOld",
ErrInvalidTime: "ErrInvalidTime",
ErrTimeTooOld: "ErrTimeTooOld",
ErrTimeTooNew: "ErrTimeTooNew",
ErrNoParents: "ErrNoParents",
@@ -238,7 +225,6 @@ var errorCodeStrings = map[ErrorCode]string{
ErrUnexpectedDifficulty: "ErrUnexpectedDifficulty",
ErrHighHash: "ErrHighHash",
ErrBadMerkleRoot: "ErrBadMerkleRoot",
ErrBadCheckpoint: "ErrBadCheckpoint",
ErrFinalityPointTimeTooOld: "ErrFinalityPointTimeTooOld",
ErrNoTransactions: "ErrNoTransactions",
ErrNoTxInputs: "ErrNoTxInputs",
@@ -247,6 +233,7 @@ var errorCodeStrings = map[ErrorCode]string{
ErrDuplicateTxInputs: "ErrDuplicateTxInputs",
ErrBadTxInput: "ErrBadTxInput",
ErrMissingTxOut: "ErrMissingTxOut",
ErrDoubleSpendInSameBlock: "ErrDoubleSpendInSameBlock",
ErrUnfinalizedTx: "ErrUnfinalizedTx",
ErrDuplicateTx: "ErrDuplicateTx",
ErrOverwriteTx: "ErrOverwriteTx",
@@ -270,6 +257,8 @@ var errorCodeStrings = map[ErrorCode]string{
ErrInvalidPayload: "ErrInvalidPayload",
ErrInvalidPayloadHash: "ErrInvalidPayloadHash",
ErrInvalidParentsRelation: "ErrInvalidParentsRelation",
ErrDelayedBlockIsNotAllowed: "ErrDelayedBlockIsNotAllowed",
ErrOrphanBlockIsNotAllowed: "ErrOrphanBlockIsNotAllowed",
}

// String returns the ErrorCode as a human-readable name.
@@ -280,9 +269,9 @@ func (e ErrorCode) String() string {
return fmt.Sprintf("Unknown ErrorCode (%d)", int(e))
}

// RuleError identifies a rule violation. It is used to indicate that
// RuleError identifies a rule violation. It is used to indicate that
// processing of a block or transaction failed due to one of the many validation
// rules. The caller can use type assertions to determine if a failure was
// rules. The caller can use type assertions to determine if a failure was
// specifically due to a rule violation and access the ErrorCode field to
// ascertain the specific reason for the rule violation.
type RuleError struct {
@@ -295,7 +284,6 @@ func (e RuleError) Error() string {
return e.Description
}

// ruleError creates a RuleError given a set of arguments.
func ruleError(c ErrorCode, desc string) RuleError {
return RuleError{ErrorCode: c, Description: desc}
func ruleError(c ErrorCode, desc string) error {
return errors.WithStack(RuleError{ErrorCode: c, Description: desc})
}

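One consequence of the errors.WithStack wrapper introduced above: a bare type assertion on the returned error no longer matches RuleError, which is why the tests in this comparison migrate to errors.As. A minimal sketch, assuming code inside the blockdag package where the unexported ruleError is visible (demonstrateWrapping is hypothetical):

	func demonstrateWrapping() {
		err := ruleError(ErrDuplicateBlock, "example")

		// The stack-trace wrapper hides the concrete type from a
		// direct assertion...
		_, ok := err.(RuleError) // ok == false

		// ...but errors.As walks the wrap chain and still finds it.
		var ruleErr RuleError
		ok = errors.As(err, &ruleErr) // ok == true; ErrorCode == ErrDuplicateBlock
		_ = ok
	}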
@@ -5,7 +5,6 @@
package blockdag

import (
"fmt"
"testing"
)

@@ -18,7 +17,6 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrDuplicateBlock, "ErrDuplicateBlock"},
{ErrBlockMassTooHigh, "ErrBlockMassTooHigh"},
{ErrBlockVersionTooOld, "ErrBlockVersionTooOld"},
{ErrInvalidTime, "ErrInvalidTime"},
{ErrTimeTooOld, "ErrTimeTooOld"},
{ErrTimeTooNew, "ErrTimeTooNew"},
{ErrNoParents, "ErrNoParents"},
@@ -27,7 +25,6 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrUnexpectedDifficulty, "ErrUnexpectedDifficulty"},
{ErrHighHash, "ErrHighHash"},
{ErrBadMerkleRoot, "ErrBadMerkleRoot"},
{ErrBadCheckpoint, "ErrBadCheckpoint"},
{ErrFinalityPointTimeTooOld, "ErrFinalityPointTimeTooOld"},
{ErrNoTransactions, "ErrNoTransactions"},
{ErrNoTxInputs, "ErrNoTxInputs"},
@@ -35,7 +32,6 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrBadTxOutValue, "ErrBadTxOutValue"},
{ErrDuplicateTxInputs, "ErrDuplicateTxInputs"},
{ErrBadTxInput, "ErrBadTxInput"},
{ErrBadCheckpoint, "ErrBadCheckpoint"},
{ErrMissingTxOut, "ErrMissingTxOut"},
{ErrUnfinalizedTx, "ErrUnfinalizedTx"},
{ErrDuplicateTx, "ErrDuplicateTx"},
@@ -60,6 +56,8 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrInvalidPayload, "ErrInvalidPayload"},
{ErrInvalidPayloadHash, "ErrInvalidPayloadHash"},
{ErrInvalidParentsRelation, "ErrInvalidParentsRelation"},
{ErrDelayedBlockIsNotAllowed, "ErrDelayedBlockIsNotAllowed"},
{ErrOrphanBlockIsNotAllowed, "ErrOrphanBlockIsNotAllowed"},
{0xffff, "Unknown ErrorCode (65535)"},
}

@@ -100,46 +98,3 @@ func TestRuleError(t *testing.T) {
}
}
}

// TestDeploymentError tests the stringized output for the DeploymentError type.
func TestDeploymentError(t *testing.T) {
t.Parallel()

tests := []struct {
in DeploymentError
want string
}{
{
DeploymentError(0),
"deployment ID 0 does not exist",
},
{
DeploymentError(10),
"deployment ID 10 does not exist",
},
{
DeploymentError(123),
"deployment ID 123 does not exist",
},
}

t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.Error()
if result != test.want {
t.Errorf("Error #%d\n got: %s want: %s", i, result,
test.want)
continue
}
}
}

func TestAssertError(t *testing.T) {
message := "abc 123"
err := AssertError(message)
expectedMessage := fmt.Sprintf("assertion failed: %s", message)
if expectedMessage != err.Error() {
t.Errorf("Unexpected AssertError message. "+
"Got: %s, want: %s", err.Error(), expectedMessage)
}
}

@@ -3,30 +3,33 @@ package blockdag_test
import (
"fmt"
"math"
"strings"
"testing"

"github.com/daglabs/btcd/util/subnetworkid"
"github.com/pkg/errors"

"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/testtools"
"github.com/kaspanet/kaspad/util/subnetworkid"

"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/mining"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/testtools"

"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
)

// TestFinality checks that the finality mechanism works as expected.
// This is how the flow goes:
// 1) We build a chain of blockdag.FinalityInterval blocks and call its tip altChainTip.
// 2) We build another chain (let's call it mainChain) of 2 * blockdag.FinalityInterval
// 1) We build a chain of params.FinalityInterval blocks and call its tip altChainTip.
// 2) We build another chain (let's call it mainChain) of 2 * params.FinalityInterval
// blocks, which points to genesis, and then we check that the block in that
// chain with height of blockdag.FinalityInterval is marked as finality point (This is
// chain with height of params.FinalityInterval is marked as finality point (This is
// very predictable, because the blue score of each new block in a chain is its
// parent's plus one).
// 3) We make a new child to block with height (2 * blockdag.FinalityInterval - 1)
// 3) We make a new child to block with height (2 * params.FinalityInterval - 1)
// in mainChain, and we check that connecting it to the DAG
// doesn't affect the last finality point.
// 4) We make a block that points to genesis, and check that it
@@ -36,9 +39,10 @@ import (
// gets rejected because it doesn't have the last finality point in
// its selected parent chain.
func TestFinality(t *testing.T) {
params := dagconfig.SimNetParams
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
params.FinalityDuration = 100 * params.TargetTimePerBlock
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -46,22 +50,22 @@ func TestFinality(t *testing.T) {
}
defer teardownFunc()
buildNodeToDag := func(parentHashes []*daghash.Hash) (*util.Block, error) {
msgBlock, err := mining.PrepareBlockForTest(dag, &params, parentHashes, nil, false)
msgBlock, err := mining.PrepareBlockForTest(dag, parentHashes, nil, false)
if err != nil {
return nil, err
}
block := util.NewBlock(msgBlock)

isOrphan, delay, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
if err != nil {
return nil, err
}
if delay != 0 {
return nil, fmt.Errorf("ProcessBlock: block " +
if isDelayed {
return nil, errors.Errorf("ProcessBlock: block " +
"is too far in the future")
}
if isOrphan {
return nil, fmt.Errorf("ProcessBlock: unexpected returned orphan block")
return nil, errors.Errorf("ProcessBlock: unexpected returned orphan block")
}

return block, nil
@@ -70,8 +74,8 @@ func TestFinality(t *testing.T) {
genesis := util.NewBlock(params.GenesisBlock)
currentNode := genesis

// First we build a chain of blockdag.FinalityInterval blocks for future use
for i := 0; i < blockdag.FinalityInterval; i++ {
// First we build a chain of params.FinalityInterval blocks for future use
for i := uint64(0); i < dag.FinalityInterval(); i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -80,10 +84,10 @@ func TestFinality(t *testing.T) {

altChainTip := currentNode

// Now we build a new chain of 2 * blockdag.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * blockdag.FinalityInterval to be the last finality point
// Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * params.FinalityInterval to be the last finality point
currentNode = genesis
for i := 0; i < blockdag.FinalityInterval; i++ {
for i := uint64(0); i < dag.FinalityInterval(); i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -92,7 +96,7 @@ func TestFinality(t *testing.T) {

expectedFinalityPoint := currentNode

for i := 0; i < blockdag.FinalityInterval; i++ {
for i := uint64(0); i < dag.FinalityInterval(); i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -135,10 +139,10 @@ func TestFinality(t *testing.T) {
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
rErr, ok := err.(blockdag.RuleError)
if ok {
if rErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
var ruleErr blockdag.RuleError
if errors.As(err, &ruleErr) {
if ruleErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
}
} else {
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", err)
@@ -150,13 +154,12 @@ func TestFinality(t *testing.T) {
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
rErr, ok = err.(blockdag.RuleError)
if ok {
if rErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
if errors.As(err, &ruleErr) {
if ruleErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
}
} else {
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", rErr)
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", ruleErr)
}
}

@@ -165,17 +168,37 @@ func TestFinality(t *testing.T) {
// a getblocks message it should always be able to send
// all the necessary invs.
func TestFinalityInterval(t *testing.T) {
if blockdag.FinalityInterval > wire.MaxInvPerMsg {
t.Errorf("blockdag.FinalityInterval should be lower or equal to wire.MaxInvPerMsg")
netParams := []*dagconfig.Params{
&dagconfig.MainnetParams,
&dagconfig.TestnetParams,
&dagconfig.DevnetParams,
&dagconfig.RegressionNetParams,
&dagconfig.SimnetParams,
}
for _, params := range netParams {
func() {
dag, teardownFunc, err := blockdag.DAGSetup("TestFinalityInterval", true, blockdag.Config{
DAGParams: params,
})
if err != nil {
t.Fatalf("Failed to setup dag instance for %s: %v", params.Name, err)
}
defer teardownFunc()

if dag.FinalityInterval() > wire.MaxInvPerMsg {
t.Errorf("FinalityInterval in %s should be lower or equal to wire.MaxInvPerMsg", params.Name)
}
}()
}
}

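A note on the func() { ... }() wrapper in TestFinalityInterval above: Go runs defer at function exit, not loop-iteration exit, so the anonymous function is what tears down each DAG instance before the next network's instance is set up. The idiom in isolation (acquire and Close are placeholder names, not real APIs):

	for _, params := range netParams {
		func() {
			resource := acquire(params) // hypothetical per-iteration setup
			// Close runs when this anonymous function returns, i.e. at
			// the end of each iteration, instead of piling up until the
			// whole test returns.
			defer resource.Close()
			// ... assertions that use resource ...
		}()
	}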
// TestSubnetworkRegistry tests the full subnetwork registry flow
func TestSubnetworkRegistry(t *testing.T) {
params := dagconfig.SimNetParams
params := dagconfig.SimnetParams
params.K = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
params.EnableNonNativeSubnetworks = true
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -188,7 +211,7 @@ func TestSubnetworkRegistry(t *testing.T) {
if err != nil {
t.Fatalf("could not register network: %s", err)
}
limit, err := dag.SubnetworkStore.GasLimit(subnetworkID)
limit, err := dag.GasLimit(subnetworkID)
if err != nil {
t.Fatalf("could not retrieve gas limit: %s", err)
}
@@ -198,10 +221,10 @@ func TestSubnetworkRegistry(t *testing.T) {
}

func TestChainedTransactions(t *testing.T) {
params := dagconfig.SimNetParams
params := dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -209,15 +232,15 @@ func TestChainedTransactions(t *testing.T) {
}
defer teardownFunc()

block1, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{params.GenesisHash}, nil, false)
block1, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{params.GenesisHash}, nil, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(block1), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(block1), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: block1 " +
"is too far in the future")
}
@@ -257,23 +280,34 @@ func TestChainedTransactions(t *testing.T) {
}
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})

block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true)
block2, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx}, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}

// Manually add a chained transaction to block2
block2.Transactions = append(block2.Transactions, chainedTx)
block2UtilTxs := make([]*util.Tx, len(block2.Transactions))
for i, tx := range block2.Transactions {
block2UtilTxs[i] = util.NewTx(tx)
}
block2.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(block2UtilTxs).Root()

// Checks that dag.ProcessBlock fails because we don't allow a transaction to spend another transaction from the same block
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
if err == nil {
t.Errorf("ProcessBlock expected an error")
} else if rErr, ok := err.(blockdag.RuleError); ok {
if rErr.ErrorCode != blockdag.ErrMissingTxOut {
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, rErr.ErrorCode)
}
} else {
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
var ruleErr blockdag.RuleError
if ok := errors.As(err, &ruleErr); ok {
if ruleErr.ErrorCode != blockdag.ErrMissingTxOut {
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, ruleErr.ErrorCode)
}
} else {
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
}
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: block2 " +
"is too far in the future")
}
@@ -292,17 +326,17 @@ func TestChainedTransactions(t *testing.T) {
}
nonChainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{nonChainedTxIn}, []*wire.TxOut{nonChainedTxOut})

block3, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false)
block3, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}

// Checks that dag.ProcessBlock doesn't fail because all of its transactions are dependent on transactions from previous blocks
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(block3), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(block3), blockdag.BFNoPoWCheck)
if err != nil {
t.Errorf("ProcessBlock: %v", err)
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: block3 " +
"is too far in the future")
}
@@ -311,12 +345,85 @@ func TestChainedTransactions(t *testing.T) {
}
}

// TestOrderInDiffFromAcceptanceData makes sure that the order of transactions in
// dag.diffFromAcceptanceData is such that if txA is spent by txB then txA is processed
// before txB.
func TestOrderInDiffFromAcceptanceData(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = math.MaxUint8
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
dag.TestSetCoinbaseMaturity(0)

createBlock := func(previousBlock *util.Block) *util.Block {
// Prepare a transaction that spends the previous block's coinbase transaction
var txs []*wire.MsgTx
if !previousBlock.IsGenesis() {
previousCoinbaseTx := previousBlock.MsgBlock().Transactions[0]
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{TxID: *previousCoinbaseTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: blockdag.OpTrueScript,
Value: uint64(1),
}
txs = append(txs, wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}))
}

// Create the block
msgBlock, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{previousBlock.Hash()}, txs, false)
if err != nil {
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to prepare block: %s", err)
}

// Add the block to the DAG
newBlock := util.NewBlock(msgBlock)
isOrphan, isDelayed, err := dag.ProcessBlock(newBlock, blockdag.BFNoPoWCheck)
if err != nil {
t.Errorf("TestOrderInDiffFromAcceptanceData: %s", err)
}
if isDelayed {
t.Fatalf("TestOrderInDiffFromAcceptanceData: block is too far in the future")
}
if isOrphan {
t.Fatalf("TestOrderInDiffFromAcceptanceData: block got unexpectedly orphaned")
}
return newBlock
}

// Create two block chains starting from the genesis block. Every time a block is added
// one of the chains is selected as the selected parent chain while all the blocks in
// the other chain (and their transactions) get accepted by the new virtual. If the
// transactions in the non-selected parent chain get processed in the wrong order then
// diffFromAcceptanceData panics.
blockAmountPerChain := 100
chainATip := util.NewBlock(params.GenesisBlock)
chainBTip := chainATip
for i := 0; i < blockAmountPerChain; i++ {
chainATip = createBlock(chainATip)
chainBTip = createBlock(chainBTip)
}
}

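TestOrderInDiffFromAcceptanceData above guards a topological-order property: a transaction must be applied to the UTXO diff before any transaction that spends it. A hedged sketch of that invariant as a standalone check (checkSpendOrder is hypothetical, not the package's code, though it uses the same wire and daghash types seen in the diff):

	// checkSpendOrder verifies that, within one ordered transaction slice,
	// no transaction spends an in-slice transaction that appears after it.
	func checkSpendOrder(txs []*wire.MsgTx) error {
		inSlice := make(map[daghash.TxID]bool, len(txs))
		for _, tx := range txs {
			inSlice[*tx.TxID()] = true
		}
		applied := make(map[daghash.TxID]bool, len(txs))
		for _, tx := range txs {
			for _, txIn := range tx.TxIn {
				id := txIn.PreviousOutpoint.TxID
				if inSlice[id] && !applied[id] {
					return errors.Errorf("tx %s spends tx %s before it is applied", tx.TxID(), id)
				}
			}
			applied[*tx.TxID()] = true
		}
		return nil
	}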
// TestGasLimit tests the gas limit rules
func TestGasLimit(t *testing.T) {
params := dagconfig.SimNetParams
params := dagconfig.SimnetParams
params.K = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
params.EnableNonNativeSubnetworks = true
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -333,15 +440,15 @@ func TestGasLimit(t *testing.T) {

cbTxs := []*wire.MsgTx{}
for i := 0; i < 4; i++ {
fundsBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), nil, false)
fundsBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), nil, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(fundsBlock), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(fundsBlock), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: the funds block " +
"is too far in the future")
}
@@ -385,21 +492,21 @@ func TestGasLimit(t *testing.T) {
tx2 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx2In}, []*wire.TxOut{tx2Out}, subnetworkID, 10000, []byte{})

// Here we check that we can't process a block that has transactions that exceed the gas limit
overLimitBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true)
overLimitBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(overLimitBlock), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(overLimitBlock), blockdag.BFNoPoWCheck)
if err == nil {
t.Fatalf("ProcessBlock expected to have an error in block that exceeds gas limit")
}
rErr, ok := err.(blockdag.RuleError)
if !ok {
var ruleErr blockdag.RuleError
if !errors.As(err, &ruleErr) {
t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
} else if ruleErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, ruleErr.ErrorCode)
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: overLimitBlock " +
"is too far in the future")
}
@@ -420,23 +527,26 @@ func TestGasLimit(t *testing.T) {
subnetworkID, math.MaxUint64, []byte{})

// Here we check that we can't process a block that its transactions' gas overflows uint64
overflowGasBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true)
overflowGasBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(overflowGasBlock), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(overflowGasBlock), blockdag.BFNoPoWCheck)
if err == nil {
t.Fatalf("ProcessBlock expected to have an error")
}
rErr, ok = err.(blockdag.RuleError)
if !ok {
if !errors.As(err, &ruleErr) {
t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
} else if ruleErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, ruleErr.ErrorCode)
}
if isOrphan {
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
}
if isDelayed {
t.Fatalf("ProcessBlock: overflowGasBlock " +
"is too far in the future")
}

nonExistentSubnetwork := &subnetworkid.SubnetworkID{123}
nonExistentSubnetworkTxIn := &wire.TxIn{
@@ -451,29 +561,36 @@ func TestGasLimit(t *testing.T) {
nonExistentSubnetworkTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{nonExistentSubnetworkTxIn},
[]*wire.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{})

nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}

// Here we check that we can't process a block with a transaction from a non-existent subnetwork
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
expectedErrStr := fmt.Sprintf("Error getting gas limit for subnetworkID '%s': subnetwork '%s' not found",
nonExistentSubnetwork, nonExistentSubnetwork)
if err.Error() != expectedErrStr {
if !strings.Contains(err.Error(), expectedErrStr) {
t.Fatalf("ProcessBlock expected error \"%v\" but got \"%v\"", expectedErrStr, err)
}
if isDelayed {
t.Fatalf("ProcessBlock: nonExistentSubnetworkBlock " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: nonExistentSubnetworkBlock got unexpectedly orphan")
}

// Here we check that we can process a block with a transaction that doesn't exceed the gas limit
validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true)
validBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*wire.MsgTx{tx1}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(validBlock), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(validBlock), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: overLimitBlock " +
"is too far in the future")
}

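Both failing gas cases above (over the declared limit, and a sum that wraps uint64) map to ErrInvalidGas; the overflow case exists because naively summing per-transaction gas can wrap around. A sketch of the overflow-safe accumulation such a rule implies (addGas is illustrative, not the package's actual helper):

	// addGas adds one transaction's gas to a running total, rejecting
	// sums that would wrap around the uint64 range.
	func addGas(total, gas uint64) (uint64, error) {
		if total+gas < total { // wrap-around means the sum overflowed
			return 0, errors.New("total transaction gas overflows uint64")
		}
		return total + gas, nil
	}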
@@ -1,318 +0,0 @@
// Copyright (c) 2016 The Decred developers
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag_test

import (
"bytes"
"fmt"
"os"
"path/filepath"
"testing"

"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/blockdag/fullblocktests"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
_ "github.com/daglabs/btcd/database/ffldb"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)

const (
// testDbType is the database backend type to use for the tests.
testDbType = "ffldb"

// testDbRoot is the root directory used to create all test databases.
testDbRoot = "testdbs"

// blockDataNet is the expected network in the test block data.
blockDataNet = wire.MainNet
)

// fileExists returns whether or not the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}

// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
supportedDrivers := database.SupportedDrivers()
for _, driver := range supportedDrivers {
if dbType == driver {
return true
}
}

return false
}

// DAGSetup is used to create a new db and chain instance with the genesis
// block already inserted. In addition to the new chain instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func(), error) {
if !isSupportedDbType(testDbType) {
return nil, nil, fmt.Errorf("unsupported db type %v", testDbType)
}

// Handle memory database specially since it doesn't need the disk
// specific handling.
var db database.DB
var teardown func()
if testDbType == "memdb" {
ndb, err := database.Create(testDbType)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
}
db = ndb

// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
db.Close()
}
} else {
// Create the root directory for test databases.
if !fileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := fmt.Errorf("unable to create test db "+
"root: %v", err)
return nil, nil, err
}
}

// Create a new database to store the accepted blocks into.
dbPath := filepath.Join(testDbRoot, dbName)
_ = os.RemoveAll(dbPath)
ndb, err := database.Create(testDbType, dbPath, blockDataNet)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
}
db = ndb

// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
db.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}
}

// Copy the chain params to ensure any modifications the tests do to
// the DAG parameters do not affect the global instance.
paramsCopy := *params

// Create the main chain instance.
chain, err := blockdag.New(&blockdag.Config{
DB: db,
DAGParams: &paramsCopy,
Checkpoints: nil,
TimeSource: blockdag.NewMedianTime(),
SigCache: txscript.NewSigCache(1000),
})
if err != nil {
teardown()
err := fmt.Errorf("failed to create chain instance: %v", err)
return nil, nil, err
}
return chain, teardown, nil
}

// TestFullBlocks ensures all tests generated by the fullblocktests package
|
||||
// have the expected result when processed via ProcessBlock.
|
||||
func TestFullBlocks(t *testing.T) {
|
||||
// TODO: (Stas) This test was disabled for until we have implemented Phantom
|
||||
// Ticket: https://daglabs.atlassian.net/browse/DEV-60
|
||||
t.SkipNow()
|
||||
|
||||
tests, err := fullblocktests.Generate(false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to generate tests: %v", err)
|
||||
}
|
||||
|
||||
// Create a new database and chain instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("fullblocktest",
|
||||
&dagconfig.RegressionNetParams)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to setup chain instance: %v", err)
|
||||
return
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
// testAcceptedBlock attempts to process the block in the provided test
|
||||
// instance and ensures that it was accepted according to the flags
|
||||
// specified in the test.
|
||||
testAcceptedBlock := func(item fullblocktests.AcceptedBlock) {
|
||||
blockHeight := item.Height
|
||||
block := util.NewBlock(item.Block)
|
||||
block.SetChainHeight(blockHeight)
|
||||
t.Logf("Testing block %s (hash %s, height %d)",
|
||||
item.Name, block.Hash(), blockHeight)
|
        isOrphan, delay, err := dag.ProcessBlock(block,
            blockdag.BFNone)
        if err != nil {
            t.Fatalf("block %q (hash %s, height %d) should "+
                "have been accepted: %v", item.Name,
                block.Hash(), blockHeight, err)
        }

        if delay != item.Delay {
            t.Fatalf("block %q (hash %s, height %d) unexpected "+
                "delay -- got %v, want %v", item.Name,
                block.Hash(), blockHeight, delay,
                item.Delay)
        }

        if isOrphan != item.IsOrphan {
            t.Fatalf("block %q (hash %s, height %d) unexpected "+
                "orphan flag -- got %v, want %v", item.Name,
                block.Hash(), blockHeight, isOrphan,
                item.IsOrphan)
        }
    }

    // testRejectedBlock attempts to process the block in the provided test
    // instance and ensures that it was rejected with the reject code
    // specified in the test.
    testRejectedBlock := func(item fullblocktests.RejectedBlock) {
        blockHeight := item.Height
        block := util.NewBlock(item.Block)
        block.SetChainHeight(blockHeight)
        t.Logf("Testing block %s (hash %s, height %d)",
            item.Name, block.Hash(), blockHeight)

        _, _, err := dag.ProcessBlock(block, blockdag.BFNone)
        if err == nil {
            t.Fatalf("block %q (hash %s, height %d) should not "+
                "have been accepted", item.Name, block.Hash(),
                blockHeight)
        }

        // Ensure the error code is of the expected type and the reject
        // code matches the value specified in the test instance.
        rerr, ok := err.(blockdag.RuleError)
        if !ok {
            t.Fatalf("block %q (hash %s, height %d) returned "+
                "unexpected error type -- got %T, want "+
                "blockdag.RuleError", item.Name, block.Hash(),
                blockHeight, err)
        }
        if rerr.ErrorCode != item.RejectCode {
            t.Fatalf("block %q (hash %s, height %d) does not have "+
                "expected reject code -- got %v, want %v",
                item.Name, block.Hash(), blockHeight,
                rerr.ErrorCode, item.RejectCode)
        }
    }

    // testRejectedNonCanonicalBlock attempts to decode the block in the
    // provided test instance and ensures that it failed to decode with a
    // message error.
    testRejectedNonCanonicalBlock := func(item fullblocktests.RejectedNonCanonicalBlock) {
        headerLen := len(item.RawBlock)
        if headerLen > 80 {
            headerLen = 80
        }
        blockHash := daghash.DoubleHashH(item.RawBlock[0:headerLen])
        blockHeight := item.Height
        t.Logf("Testing block %s (hash %s, height %d)", item.Name,
            blockHash, blockHeight)

        // Ensure there is an error due to deserializing the block.
        var msgBlock wire.MsgBlock
        err := msgBlock.BtcDecode(bytes.NewReader(item.RawBlock), 0)
        if _, ok := err.(*wire.MessageError); !ok {
            t.Fatalf("block %q (hash %s, height %d) should have "+
                "failed to decode", item.Name, blockHash,
                blockHeight)
        }
    }

    // testOrphanOrRejectedBlock attempts to process the block in the
    // provided test instance and ensures that it was either accepted as an
    // orphan or rejected with a rule violation.
    testOrphanOrRejectedBlock := func(item fullblocktests.OrphanOrRejectedBlock) {
        blockHeight := item.Height
        block := util.NewBlock(item.Block)
        block.SetChainHeight(blockHeight)
        t.Logf("Testing block %s (hash %s, height %d)",
            item.Name, block.Hash(), blockHeight)

        isOrphan, delay, err := dag.ProcessBlock(block, blockdag.BFNone)
        if err != nil {
            // Ensure the error code is of the expected type.
            if _, ok := err.(blockdag.RuleError); !ok {
                t.Fatalf("block %q (hash %s, height %d) "+
                    "returned unexpected error type -- "+
                    "got %T, want blockdag.RuleError",
                    item.Name, block.Hash(), blockHeight,
                    err)
            }
        }

        if delay != 0 {
            t.Fatalf("block %q (hash %s, height %d) "+
                "is too far in the future",
                item.Name, block.Hash(), blockHeight)
        }

        if !isOrphan {
            t.Fatalf("block %q (hash %s, height %d) was accepted, "+
                "but is not considered an orphan", item.Name,
                block.Hash(), blockHeight)
        }
    }

    // testExpectedTip ensures the current tip of the block DAG is the
    // block specified in the provided test instance.
    testExpectedTip := func(item fullblocktests.ExpectedTip) {
        blockHeight := item.Height
        block := util.NewBlock(item.Block)
        block.SetChainHeight(blockHeight)
        t.Logf("Testing tip for block %s (hash %s, height %d)",
            item.Name, block.Hash(), blockHeight)

        // Ensure hash and height match.
        if dag.SelectedTipHash() != item.Block.BlockHash() ||
            dag.ChainHeight() != blockHeight { //TODO: (Ori) the use of dag.ChainHeight() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation

            t.Fatalf("block %q (hash %s, height %d) should be "+
                "the current tip -- got (hash %s, height %d)",
                item.Name, block.Hash(), blockHeight, dag.SelectedTipHash(),
                dag.ChainHeight()) //TODO: (Ori) the use of dag.ChainHeight() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
        }
    }

    for testNum, test := range tests {
        for itemNum, item := range test {
            switch item := item.(type) {
            case fullblocktests.AcceptedBlock:
                testAcceptedBlock(item)
            case fullblocktests.RejectedBlock:
                testRejectedBlock(item)
            case fullblocktests.RejectedNonCanonicalBlock:
                testRejectedNonCanonicalBlock(item)
            case fullblocktests.OrphanOrRejectedBlock:
                testOrphanOrRejectedBlock(item)
            case fullblocktests.ExpectedTip:
                testExpectedTip(item)
            default:
                t.Fatalf("test #%d, item #%d is not one of "+
                    "the supported test instance types -- "+
                    "got type: %T", testNum, itemNum, item)
            }
        }
    }
}
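The harness above hinges on ProcessBlock's three return values. Here is a minimal sketch of driving that call directly, assuming a dag and a *util.Block obtained from the same kind of test setup used in this file; it only restates what the harness checks and is not part of the diff:

```go
package main

import (
    "fmt"

    "github.com/kaspanet/kaspad/blockdag"
    "github.com/kaspanet/kaspad/util"
)

// processOne mirrors the checks in the harness above. The dag and block
// arguments are assumed to come from the same kind of test setup used there.
func processOne(dag *blockdag.BlockDAG, block *util.Block) {
    isOrphan, delay, err := dag.ProcessBlock(block, blockdag.BFNone)
    if err != nil {
        // A blockdag.RuleError signals a consensus-rule rejection; any
        // other error type is an unexpected failure.
        if rerr, ok := err.(blockdag.RuleError); ok {
            fmt.Println("rejected:", rerr.ErrorCode)
        }
        return
    }
    if delay != 0 {
        fmt.Println("delayed: timestamp is too far in the future")
        return
    }
    if isOrphan {
        fmt.Println("orphan: waiting for missing parents")
        return
    }
    fmt.Println("accepted")
}

func main() {}
```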
@@ -1,29 +0,0 @@
fullblocktests
==============

[](https://travis-ci.org/btcsuite/btcd)
[](http://copyfree.org)
[](http://godoc.org/github.com/daglabs/btcd/blockchain/fullblocktests)

Package fullblocktests provides a set of full block tests to be used for testing
the consensus validation rules. The tests are intended to be flexible enough to
allow both unit-style tests directly against the blockchain code as well as
integration-style tests over the peer-to-peer network. To achieve that goal,
each test contains additional information about the expected result; however,
that information can be ignored when doing comparison tests between two
independent versions over the peer-to-peer network.

This package has intentionally been designed so it can be used as a standalone
package for any projects needing to test their implementation against a full set
of blocks that exercise the consensus validation rules.

## Installation and Updating

```bash
$ go get -u github.com/daglabs/btcd/blockchain/fullblocktests
```

## License

Package fullblocktests is licensed under the [copyfree](http://copyfree.org) ISC
License.
@@ -1,20 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package fullblocktests provides a set of block consensus validation tests.

All of the generated test instances involve full blocks that are to be used for
testing the consensus validation rules. The tests are intended to be flexible
enough to allow both unit-style tests directly against the blockchain code as
well as integration-style tests over the peer-to-peer network. To achieve that
goal, each test contains additional information about the expected result;
however, that information can be ignored when doing comparison tests between
two independent versions over the peer-to-peer network.

This package has intentionally been designed so it can be used as a standalone
package for any projects needing to test their implementation against a full set
of blocks that exercise the consensus validation rules.
*/
package fullblocktests
File diff suppressed because it is too large
@@ -1,143 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package fullblocktests

import (
    "encoding/hex"
    "math"
    "math/big"
    "time"

    "github.com/daglabs/btcd/util/hdkeychain"
    "github.com/daglabs/btcd/util/subnetworkid"

    "github.com/daglabs/btcd/dagconfig"
    "github.com/daglabs/btcd/util/daghash"
    "github.com/daglabs/btcd/wire"
)

// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
    hash, err := daghash.NewHashFromStr(hexStr)
    if err != nil {
        panic(err)
    }
    return hash
}

// newTxIDFromStr converts the passed big-endian hex string into a
// daghash.TxID. It only differs from the one available in daghash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newTxIDFromStr(hexStr string) *daghash.TxID {
    txID, err := daghash.NewTxIDFromStr(hexStr)
    if err != nil {
        panic(err)
    }
    return txID
}

// fromHex converts the passed hex string into a byte slice and will panic if
// there is an error. This is only provided for the hard-coded constants so
// errors in the source code can be detected. It will only (and must only) be
// called for initialization purposes.
func fromHex(s string) []byte {
    r, err := hex.DecodeString(s)
    if err != nil {
        panic("invalid hex in source file: " + s)
    }
    return r
}

var (
    // bigOne is 1 represented as a big.Int. It is defined here to avoid
    // the overhead of creating it multiple times.
    bigOne = big.NewInt(1)

    // regressionPowLimit is the highest proof of work value a Bitcoin block
    // can have for the regression test network. It is the value 2^255 - 1.
    regressionPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne)

    // regTestGenesisBlock defines the genesis block of the block chain which serves
    // as the public transaction ledger for the regression test network.
    regTestGenesisBlock = wire.MsgBlock{
        Header: wire.BlockHeader{
            Version:        1,
            ParentHashes:   []*daghash.Hash{},
            HashMerkleRoot: newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"),
            Timestamp:      time.Unix(0x5b28c636, 0), // 2018-06-19 09:00:38 +0000 UTC
            Bits:           0x207fffff,               // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
            Nonce:          1,
        },
        Transactions: []*wire.MsgTx{{
            Version: 1,
            TxIn: []*wire.TxIn{{
                PreviousOutpoint: wire.Outpoint{
                    TxID:  daghash.TxID{},
                    Index: 0xffffffff,
                },
                SignatureScript: fromHex("04ffff001d010445" +
                    "5468652054696d65732030332f4a616e2f" +
                    "32303039204368616e63656c6c6f72206f" +
                    "6e206272696e6b206f66207365636f6e64" +
                    "206261696c6f757420666f72206261686b73"),
                Sequence: math.MaxUint64,
            }},
            TxOut: []*wire.TxOut{{
                Value: 0,
                ScriptPubKey: fromHex("4104678afdb0fe5548271967f1" +
                    "a67130b7105cd6a828e03909a67962e0ea1f" +
                    "61deb649f6bc3f4cef38c4f35504e51ec138" +
                    "c4f35504e51ec112de5c384df7ba0b8d578a" +
                    "4c702b6bf11d5fac"),
            }},
            LockTime:     0,
            SubnetworkID: *subnetworkid.SubnetworkIDNative,
        }},
    }
)

// regressionNetParams defines the network parameters for the regression test
// network.
//
// NOTE: The test generator intentionally does not use the existing definitions
// in the dagconfig package since the intent is to be able to generate known
// good tests which exercise that code. Using the dagconfig parameters would
// allow them to change out from under the tests, potentially invalidating them.
var regressionNetParams = &dagconfig.Params{
    Name:        "regtest",
    Net:         wire.RegTest,
    DefaultPort: "18444",

    // DAG parameters
    GenesisBlock:                   &regTestGenesisBlock,
    GenesisHash:                    newHashFromStr("5bec7567af40504e0994db3b573c186fffcc4edefe096ff2e58d00523bd7e8a6"),
    PowMax:                         regressionPowLimit,
    BlockCoinbaseMaturity:          100,
    SubsidyReductionInterval:       150,
    TargetTimePerBlock:             time.Second * 10, // 10 seconds
    DifficultyAdjustmentWindowSize: 2640,
    TimestampDeviationTolerance:    132,
    GenerateSupported:              true,

    // Checkpoints ordered from oldest to newest.
    Checkpoints: nil,

    // Mempool parameters
    RelayNonStdTxs: true,

    // Address encoding magics
    PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)

    // BIP32 hierarchical deterministic extended key magics
    HDKeyIDPair: hdkeychain.HDKeyPairRegressionNet,

    // BIP44 coin type used in the hierarchical deterministic path for
    // address generation.
    HDCoinType: 1,
}
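The Bits field in the genesis header above is a compact-encoded target. The sketch below shows the standard expansion and why 0x207fffff corresponds to the 7fffff00...00 target noted in the comment; the codebase's own conversion helper is the authoritative version, and sign-bit handling is omitted here:

```go
package main

import (
    "fmt"
    "math/big"
)

// compactToBig expands a Bitcoin-style compact difficulty encoding into the
// full big.Int target: mantissa is the low 23 bits, exponent the high byte,
// and the target is mantissa shifted left by 8*(exponent-3) bits.
func compactToBig(compact uint32) *big.Int {
    mantissa := int64(compact & 0x007fffff)
    exponent := uint(compact >> 24)
    if exponent <= 3 {
        return big.NewInt(mantissa >> (8 * (3 - exponent)))
    }
    target := big.NewInt(mantissa)
    return target.Lsh(target, 8*(exponent-3))
}

func main() {
    // 0x207fffff -> 0x7fffff << 232: the 7fffff00...00 target quoted in
    // the Bits comment of the genesis header above, just below the
    // 2^255 - 1 regressionPowLimit.
    fmt.Printf("%064x\n", compactToBig(0x207fffff))
}
```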
176 blockdag/ghostdag.go Normal file
@@ -0,0 +1,176 @@
package blockdag

import (
    "sort"

    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/pkg/errors"
)

// ghostdag runs the GHOSTDAG protocol and updates newNode.blues,
// newNode.selectedParent and newNode.bluesAnticoneSizes accordingly.
// The function updates newNode.blues by iterating over the blocks in
// the anticone of newNode.selectedParent (which is the parent with the
// highest blue score) and adding any block to newNode.blues whose
// inclusion would not violate these conditions:
//
// 1) |anticone-of-candidate-block ∩ blue-set-of-newNode| ≤ K
//
// 2) For every blue block in blue-set-of-newNode:
//    |(anticone-of-blue-block ∩ blue-set-of-newNode) ∪ {candidate-block}| ≤ K.
//    We validate this condition by maintaining a map bluesAnticoneSizes for
//    each block, which holds all the blue anticone sizes that were affected
//    by the newly added blue blocks.
//    So to find |anticone-of-blue-block ∩ blue-set-of-newNode| we simply
//    iterate over the selected parent chain of newNode until we find an
//    existing entry in bluesAnticoneSizes.
//
// For further details see the article https://eprint.iacr.org/2018/104.pdf
func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blockNode, err error) {
    newNode.selectedParent = newNode.parents.bluest()
    newNode.bluesAnticoneSizes[newNode.selectedParent] = 0
    newNode.blues = []*blockNode{newNode.selectedParent}
    selectedParentAnticone, err = dag.selectedParentAnticone(newNode)
    if err != nil {
        return nil, err
    }

    sort.Slice(selectedParentAnticone, func(i, j int) bool {
        return selectedParentAnticone[i].less(selectedParentAnticone[j])
    })

    for _, blueCandidate := range selectedParentAnticone {
        candidateBluesAnticoneSizes := make(map[*blockNode]dagconfig.KType)
        var candidateAnticoneSize dagconfig.KType
        possiblyBlue := true

        // Iterate over all blocks in the blue set of newNode that are not in the past
        // of blueCandidate, and check for each one of them if blueCandidate potentially
        // enlarges their blue anticone to be over K, or that they enlarge the blue anticone
        // of blueCandidate to be over K.
        for chainBlock := newNode; possiblyBlue; chainBlock = chainBlock.selectedParent {
            // If blueCandidate is in the future of chainBlock, it means
            // that all remaining blues are in the past of chainBlock and thus
            // in the past of blueCandidate. In this case we know for sure that
            // the anticone of blueCandidate will not exceed K, and we can mark
            // it as blue.
            //
            // newNode is always in the future of blueCandidate, so there's
            // no point in checking it.
            if chainBlock != newNode {
                if isAncestorOfBlueCandidate, err := dag.isInPast(chainBlock, blueCandidate); err != nil {
                    return nil, err
                } else if isAncestorOfBlueCandidate {
                    break
                }
            }

            for _, block := range chainBlock.blues {
                // Skip blocks that exist in the past of blueCandidate.
                if isAncestorOfBlueCandidate, err := dag.isInPast(block, blueCandidate); err != nil {
                    return nil, err
                } else if isAncestorOfBlueCandidate {
                    continue
                }

                candidateBluesAnticoneSizes[block], err = dag.blueAnticoneSize(block, newNode)
                if err != nil {
                    return nil, err
                }
                candidateAnticoneSize++

                if candidateAnticoneSize > dag.Params.K {
                    // k-cluster violation: The candidate's blue anticone exceeded k
                    possiblyBlue = false
                    break
                }

                if candidateBluesAnticoneSizes[block] == dag.Params.K {
                    // k-cluster violation: A block in candidate's blue anticone already
                    // has k blue blocks in its own anticone
                    possiblyBlue = false
                    break
                }

                // This is a sanity check that validates that a blue
                // block's blue anticone is not already larger than K.
                if candidateBluesAnticoneSizes[block] > dag.Params.K {
                    return nil, errors.New("found blue anticone size larger than k")
                }
            }
        }

        if possiblyBlue {
            // No k-cluster violation found, we can now set the candidate block as blue
            newNode.blues = append(newNode.blues, blueCandidate)
            newNode.bluesAnticoneSizes[blueCandidate] = candidateAnticoneSize
            for blue, blueAnticoneSize := range candidateBluesAnticoneSizes {
                newNode.bluesAnticoneSizes[blue] = blueAnticoneSize + 1
            }

            // The maximum length of node.blues can be K+1 because
            // it contains the selected parent.
            if dagconfig.KType(len(newNode.blues)) == dag.Params.K+1 {
                break
            }
        }
    }

    newNode.blueScore = newNode.selectedParent.blueScore + uint64(len(newNode.blues))
    return selectedParentAnticone, nil
}

// selectedParentAnticone returns the blocks in the anticone of the selected parent of the given node.
// The function works as follows:
// We start by adding all parents of the node (other than the selected parent) to a process queue.
// Then, for each node in the queue, we check whether it is in the past of the selected parent.
// If not, we add the node to the resulting anticone-set and queue it for further processing.
func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, error) {
    anticoneSet := newBlockSet()
    var anticoneSlice []*blockNode
    selectedParentPast := newBlockSet()
    var queue []*blockNode
    // Queue all parents (other than the selected parent itself) for processing.
    for parent := range node.parents {
        if parent == node.selectedParent {
            continue
        }
        anticoneSet.add(parent)
        anticoneSlice = append(anticoneSlice, parent)
        queue = append(queue, parent)
    }
    for len(queue) > 0 {
        var current *blockNode
        current, queue = queue[0], queue[1:]
        // For each parent of the current node we check whether it is in the past
        // of the selected parent. If not, we add it to the resulting anticone-set
        // and queue it for further processing.
        for parent := range current.parents {
            if anticoneSet.contains(parent) || selectedParentPast.contains(parent) {
                continue
            }
            isAncestorOfSelectedParent, err := dag.isInPast(parent, node.selectedParent)
            if err != nil {
                return nil, err
            }
            if isAncestorOfSelectedParent {
                selectedParentPast.add(parent)
                continue
            }
            anticoneSet.add(parent)
            anticoneSlice = append(anticoneSlice, parent)
            queue = append(queue, parent)
        }
    }
    return anticoneSlice, nil
}

// blueAnticoneSize returns the blue anticone size of 'block' from the worldview of 'context'.
// It expects 'block' to be in the blue set of 'context'.
func (dag *BlockDAG) blueAnticoneSize(block, context *blockNode) (dagconfig.KType, error) {
    for current := context; current != nil; current = current.selectedParent {
        if blueAnticoneSize, ok := current.bluesAnticoneSizes[block]; ok {
            return blueAnticoneSize, nil
        }
    }
    return 0, errors.Errorf("block %s is not in blue set of %s", block.hash, context.hash)
}
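selectedParentAnticone above is a plain BFS over parent edges, pruned by membership in the selected parent's past. Below is a self-contained sketch of the same traversal, with string IDs standing in for *blockNode and a precomputed past set standing in for dag.isInPast; both simplifications are illustrative only (the real code queries the reachability tree lazily and caches hits in selectedParentPast):

```go
package main

import "fmt"

// anticoneOfSelectedParent reproduces the traversal described in
// selectedParentAnticone. parents maps each block to its parent blocks;
// pastOfSelectedParent is a precomputed stand-in for dag.isInPast.
func anticoneOfSelectedParent(parents map[string][]string, node, selectedParent string,
    pastOfSelectedParent map[string]bool) []string {

    inAnticone := map[string]bool{}
    var anticone, queue []string
    // Queue all parents other than the selected parent.
    for _, p := range parents[node] {
        if p == selectedParent {
            continue
        }
        inAnticone[p] = true
        anticone = append(anticone, p)
        queue = append(queue, p)
    }
    for len(queue) > 0 {
        current := queue[0]
        queue = queue[1:]
        // Parents that are in the selected parent's past are pruned;
        // everything else joins the anticone and is traversed further.
        for _, p := range parents[current] {
            if inAnticone[p] || pastOfSelectedParent[p] {
                continue
            }
            inAnticone[p] = true
            anticone = append(anticone, p)
            queue = append(queue, p)
        }
    }
    return anticone
}

func main() {
    // G is genesis; A and B are parallel children of G; N merges A and B
    // with A as its selected parent, so B is the selected parent's anticone.
    parents := map[string][]string{"N": {"A", "B"}, "A": {"G"}, "B": {"G"}}
    past := map[string]bool{"G": true} // G is in A's past
    fmt.Println(anticoneOfSelectedParent(parents, "N", "A", past)) // [B]
}
```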
386 blockdag/ghostdag_test.go Normal file
@@ -0,0 +1,386 @@
package blockdag

import (
    "fmt"
    "reflect"
    "sort"
    "strings"
    "testing"

    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/kaspanet/kaspad/dbaccess"
    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/util/daghash"
)

type testBlockData struct {
    parents                []string
    id                     string // id is a virtual entity used only in tests, so we can define relations between blocks without knowing their hashes
    expectedScore          uint64
    expectedSelectedParent string
    expectedBlues          []string
}

// TestGHOSTDAG iterates over several DAG simulations, and checks
// that the blue score, blue set and selected parent of each
// block are calculated as expected.
func TestGHOSTDAG(t *testing.T) {
    dagParams := dagconfig.SimnetParams

    tests := []struct {
        k            dagconfig.KType
        expectedReds []string
        dagData      []*testBlockData
    }{
        {
            k:            3,
            expectedReds: []string{"F", "G", "H", "I", "N", "P"},
            dagData: []*testBlockData{
                {
                    parents:                []string{"A"},
                    id:                     "B",
                    expectedScore:          1,
                    expectedSelectedParent: "A",
                    expectedBlues:          []string{"A"},
                },
                {
                    parents:                []string{"B"},
                    id:                     "C",
                    expectedScore:          2,
                    expectedSelectedParent: "B",
                    expectedBlues:          []string{"B"},
                },
                {
                    parents:                []string{"A"},
                    id:                     "D",
                    expectedScore:          1,
                    expectedSelectedParent: "A",
                    expectedBlues:          []string{"A"},
                },
                {
                    parents:                []string{"C", "D"},
                    id:                     "E",
                    expectedScore:          4,
                    expectedSelectedParent: "C",
                    expectedBlues:          []string{"C", "D"},
                },
                {
                    parents:                []string{"A"},
                    id:                     "F",
                    expectedScore:          1,
                    expectedSelectedParent: "A",
                    expectedBlues:          []string{"A"},
                },
                {
                    parents:                []string{"F"},
                    id:                     "G",
                    expectedScore:          2,
                    expectedSelectedParent: "F",
                    expectedBlues:          []string{"F"},
                },
                {
                    parents:                []string{"A"},
                    id:                     "H",
                    expectedScore:          1,
                    expectedSelectedParent: "A",
                    expectedBlues:          []string{"A"},
                },
                {
                    parents:                []string{"A"},
                    id:                     "I",
                    expectedScore:          1,
                    expectedSelectedParent: "A",
                    expectedBlues:          []string{"A"},
                },
                {
                    parents:                []string{"E", "G"},
                    id:                     "J",
                    expectedScore:          5,
                    expectedSelectedParent: "E",
                    expectedBlues:          []string{"E"},
                },
                {
                    parents:                []string{"J"},
                    id:                     "K",
                    expectedScore:          6,
                    expectedSelectedParent: "J",
                    expectedBlues:          []string{"J"},
                },
                {
                    parents:                []string{"I", "K"},
                    id:                     "L",
                    expectedScore:          7,
                    expectedSelectedParent: "K",
                    expectedBlues:          []string{"K"},
                },
                {
                    parents:                []string{"L"},
                    id:                     "M",
                    expectedScore:          8,
                    expectedSelectedParent: "L",
                    expectedBlues:          []string{"L"},
                },
                {
                    parents:                []string{"M"},
                    id:                     "N",
                    expectedScore:          9,
                    expectedSelectedParent: "M",
                    expectedBlues:          []string{"M"},
                },
                {
                    parents:                []string{"M"},
                    id:                     "O",
                    expectedScore:          9,
                    expectedSelectedParent: "M",
                    expectedBlues:          []string{"M"},
                },
                {
                    parents:                []string{"M"},
                    id:                     "P",
                    expectedScore:          9,
                    expectedSelectedParent: "M",
                    expectedBlues:          []string{"M"},
                },
                {
                    parents:                []string{"M"},
                    id:                     "Q",
                    expectedScore:          9,
                    expectedSelectedParent: "M",
                    expectedBlues:          []string{"M"},
                },
                {
                    parents:                []string{"M"},
                    id:                     "R",
                    expectedScore:          9,
                    expectedSelectedParent: "M",
                    expectedBlues:          []string{"M"},
                },
                {
                    parents:                []string{"R"},
                    id:                     "S",
                    expectedScore:          10,
                    expectedSelectedParent: "R",
                    expectedBlues:          []string{"R"},
                },
                {
                    parents:                []string{"N", "O", "P", "Q", "S"},
                    id:                     "T",
                    expectedScore:          13,
                    expectedSelectedParent: "S",
                    expectedBlues:          []string{"S", "O", "Q"},
                },
            },
        },
    }

    for i, test := range tests {
        func() {
            resetExtraNonceForTest()
            dagParams.K = test.k
            dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), true, Config{
                DAGParams: &dagParams,
            })
            if err != nil {
                t.Fatalf("Failed to setup dag instance: %v", err)
            }
            defer teardownFunc()

            genesisNode := dag.genesis
            blockByIDMap := make(map[string]*blockNode)
            idByBlockMap := make(map[*blockNode]string)
            blockByIDMap["A"] = genesisNode
            idByBlockMap[genesisNode] = "A"

            for _, blockData := range test.dagData {
                parents := blockSet{}
                for _, parentID := range blockData.parents {
                    parent := blockByIDMap[parentID]
                    parents.add(parent)
                }

                block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
                if err != nil {
                    t.Fatalf("TestGHOSTDAG: block %v got unexpected error from PrepareBlockForTest: %v", blockData.id, err)
                }

                utilBlock := util.NewBlock(block)
                isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
                if err != nil {
                    t.Fatalf("TestGHOSTDAG: dag.ProcessBlock got unexpected error for block %v: %v", blockData.id, err)
                }
                if isDelayed {
                    t.Fatalf("TestGHOSTDAG: block %s "+
                        "is too far in the future", blockData.id)
                }
                if isOrphan {
                    t.Fatalf("TestGHOSTDAG: block %v was unexpectedly orphan", blockData.id)
                }

                node, ok := dag.index.LookupNode(utilBlock.Hash())
                if !ok {
                    t.Fatalf("block %s does not exist in the DAG", utilBlock.Hash())
                }

                blockByIDMap[blockData.id] = node
                idByBlockMap[node] = blockData.id

                bluesIDs := make([]string, 0, len(node.blues))
                for _, blue := range node.blues {
                    bluesIDs = append(bluesIDs, idByBlockMap[blue])
                }
                selectedParentID := idByBlockMap[node.selectedParent]
                fullDataStr := fmt.Sprintf("blues: %v, selectedParent: %v, score: %v",
                    bluesIDs, selectedParentID, node.blueScore)
                if blockData.expectedScore != node.blueScore {
                    t.Errorf("Test %d: Block %v expected to have score %v but got %v (fulldata: %v)",
                        i, blockData.id, blockData.expectedScore, node.blueScore, fullDataStr)
                }
                if blockData.expectedSelectedParent != selectedParentID {
                    t.Errorf("Test %d: Block %v expected to have selected parent %v but got %v (fulldata: %v)",
                        i, blockData.id, blockData.expectedSelectedParent, selectedParentID, fullDataStr)
                }
                if !reflect.DeepEqual(blockData.expectedBlues, bluesIDs) {
                    t.Errorf("Test %d: Block %v expected to have blues %v but got %v (fulldata: %v)",
                        i, blockData.id, blockData.expectedBlues, bluesIDs, fullDataStr)
                }
            }

            reds := make(map[string]bool)

            for id := range blockByIDMap {
                reds[id] = true
            }

            for tip := &dag.virtual.blockNode; tip.selectedParent != nil; tip = tip.selectedParent {
                tipID := idByBlockMap[tip]
                delete(reds, tipID)
                for _, blue := range tip.blues {
                    blueID := idByBlockMap[blue]
                    delete(reds, blueID)
                }
            }
            if !checkReds(test.expectedReds, reds) {
                redsIDs := make([]string, 0, len(reds))
                for id := range reds {
                    redsIDs = append(redsIDs, id)
                }
                sort.Strings(redsIDs)
                sort.Strings(test.expectedReds)
                t.Errorf("Test %d: Expected reds %v but got %v", i, test.expectedReds, redsIDs)
            }
        }()
    }
}

func checkReds(expectedReds []string, reds map[string]bool) bool {
    if len(expectedReds) != len(reds) {
        return false
    }
    for _, redID := range expectedReds {
        if !reds[redID] {
            return false
        }
    }
    return true
}

func TestBlueAnticoneSizeErrors(t *testing.T) {
    // Create a new database and DAG instance to run tests against.
    dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", true, Config{
        DAGParams: &dagconfig.SimnetParams,
    })
    if err != nil {
        t.Fatalf("TestBlueAnticoneSizeErrors: Failed to setup DAG instance: %s", err)
    }
    defer teardownFunc()

    // Prepare a block chain of size K beginning with the genesis block
    currentBlockA := dag.Params.GenesisBlock
    for i := dagconfig.KType(0); i < dag.Params.K; i++ {
        newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockA)
        currentBlockA = newBlock
    }

    // Prepare another block chain of size K beginning with the genesis block
    currentBlockB := dag.Params.GenesisBlock
    for i := dagconfig.KType(0); i < dag.Params.K; i++ {
        newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockB)
        currentBlockB = newBlock
    }

    // Get references to the tips of the two chains
    blockNodeA, ok := dag.index.LookupNode(currentBlockA.BlockHash())
    if !ok {
        t.Fatalf("block %s does not exist in the DAG", currentBlockA.BlockHash())
    }

    blockNodeB, ok := dag.index.LookupNode(currentBlockB.BlockHash())
    if !ok {
        t.Fatalf("block %s does not exist in the DAG", currentBlockB.BlockHash())
    }

    // Try getting the blueAnticoneSize between them. Since neither tip
    // is in the blue set of the other, this should fail.
    _, err = dag.blueAnticoneSize(blockNodeA, blockNodeB)
    if err == nil {
        t.Fatalf("TestBlueAnticoneSizeErrors: blueAnticoneSize unexpectedly succeeded")
    }
    expectedErrSubstring := "is not in blue set of"
    if !strings.Contains(err.Error(), expectedErrSubstring) {
        t.Fatalf("TestBlueAnticoneSizeErrors: blueAnticoneSize returned wrong error. "+
            "Want: %s, got: %s", expectedErrSubstring, err)
    }
}

func TestGHOSTDAGErrors(t *testing.T) {
    // Create a new database and DAG instance to run tests against.
    dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", true, Config{
        DAGParams: &dagconfig.SimnetParams,
    })
    if err != nil {
        t.Fatalf("TestGHOSTDAGErrors: Failed to setup DAG instance: %s", err)
    }
    defer teardownFunc()

    // Add two child blocks to the genesis
    block1 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
    block2 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)

    // Add a child block to the previous two blocks
    block3 := prepareAndProcessBlockByParentMsgBlocks(t, dag, block1, block2)

    // Clear the reachability store
    dag.reachabilityTree.store.loaded = map[daghash.Hash]*reachabilityData{}

    dbTx, err := dag.databaseContext.NewTx()
    if err != nil {
        t.Fatalf("NewTx: %s", err)
    }
    defer dbTx.RollbackUnlessClosed()

    err = dbaccess.ClearReachabilityData(dbTx)
    if err != nil {
        t.Fatalf("ClearReachabilityData: %s", err)
    }

    err = dbTx.Commit()
    if err != nil {
        t.Fatalf("Commit: %s", err)
    }

    // Try to rerun GHOSTDAG on the last block. GHOSTDAG uses
    // reachability data, so we expect it to fail.
    blockNode3, ok := dag.index.LookupNode(block3.BlockHash())
    if !ok {
        t.Fatalf("block %s does not exist in the DAG", block3.BlockHash())
    }
    _, err = dag.ghostdag(blockNode3)
    if err == nil {
        t.Fatalf("TestGHOSTDAGErrors: ghostdag unexpectedly succeeded")
    }
    expectedErrSubstring := "couldn't find reachability data"
    if !strings.Contains(err.Error(), expectedErrSubstring) {
        t.Fatalf("TestGHOSTDAGErrors: ghostdag returned wrong error. "+
            "Want: %s, got: %s", expectedErrSubstring, err)
    }
}
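The expectedScore values in the test table above follow directly from the rule at the end of ghostdag: a block's blue score is its selected parent's blue score plus the size of its blue set. A quick arithmetic check against the k=3 case:

```go
package main

import "fmt"

// Recomputes expectedScore for blocks "E" and "T" of the k=3 case, using the
// rule from ghostdag.go:
// blueScore = selectedParent.blueScore + uint64(len(blues)).
func main() {
    blueScores := map[string]uint64{"C": 2, "S": 10} // from the table above
    bluesOfE := []string{"C", "D"}                   // E's selected parent: C
    bluesOfT := []string{"S", "O", "Q"}              // T's selected parent: S
    fmt.Println(blueScores["C"] + uint64(len(bluesOfE))) // 4, E's expectedScore
    fmt.Println(blueScores["S"] + uint64(len(bluesOfT))) // 13, T's expectedScore
}
```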
@@ -1,9 +1,8 @@
indexers
========

[](https://travis-ci.org/btcsuite/btcd)
[](http://copyfree.org)
[](http://godoc.org/github.com/daglabs/btcd/blockchain/indexers)
[](https://choosealicense.com/licenses/isc/)
[](http://godoc.org/github.com/kaspanet/kaspad/blockdag/indexers)

Package indexers implements optional block chain indexes.

@@ -12,21 +11,14 @@ via an RPC interface.

## Supported Indexers

- Transaction-by-hash (txbyhashidx) Index
- Transaction-by-hash (txindex) Index
  - Creates a mapping from the hash of each transaction to the block that
    contains it along with its offset and length within the serialized block
- Transaction-by-address (txbyaddridx) Index
- Transaction-by-address (addrindex) Index
  - Creates a mapping from every address to all transactions which either credit
    or debit the address
  - Requires the transaction-by-hash index
- AcceptanceData-by-block Index
  - Creates a mapping from the hash of each block to the list of transactions
    this block accepts from its .Blues

## Installation

```bash
$ go get -u github.com/daglabs/btcd/blockchain/indexers
```

## License

Package indexers is licensed under the [copyfree](http://copyfree.org) ISC
License.
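The transaction-by-hash bullet above describes a mapping from a transaction hash to a block plus an offset and length. A sketch of that record shape, with illustrative field names that are not the package's actual serialization format:

```go
package main

// txIndexEntry mirrors the mapping described in the README bullet above.
// The field names and layout are illustrative only; the indexers package
// defines its own storage format.
type txIndexEntry struct {
    BlockHash [32]byte // hash of the block that contains the transaction
    Offset    uint32   // byte offset of the transaction within the serialized block
    Length    uint32   // serialized length of the transaction
}

func main() {
    _ = txIndexEntry{}
}
```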
Some files were not shown because too many files have changed in this diff