Mirror of https://github.com/kaspanet/kaspad.git (synced 2026-02-21 19:22:53 +00:00)

Compare commits: v0.0.3-dev...v0.6.3-dev (605 commits)
.gitignore (vendored): 3 changes

@@ -2,7 +2,7 @@
 *~
 
 # Databases
-btcd.db
+kaspad.db
 *-shm
 *-wal
 
@@ -38,6 +38,7 @@ _testmain.go
 .vscode
 debug
 debug.test
+__debug_bin
 
 # CI
 version.txt
CHANGES: 955 lines deleted

@@ -1,955 +0,0 @@
============================================================================
User visible changes for btcd
  A full-node bitcoin implementation written in Go
============================================================================

Changes in 0.12.0 (Fri Nov 20 2015)
- Protocol and network related changes:
  - Add a new checkpoint at block height 382320 (#555)
  - Implement BIP0065 which includes support for version 4 blocks, a new
    consensus opcode (OP_CHECKLOCKTIMEVERIFY) that enforces transaction
    lock times, and a double-threshold switchover mechanism (#535, #459,
    #455)
  - Implement BIP0111 which provides a new bloom filter service flag and
    hence provides support for protocol version 70011 (#499)
  - Add a new parameter --nopeerbloomfilters to allow disabling bloom
    filter support (#499)
  - Reject non-canonically encoded variable length integers (#507); see
    the sketch after this list
  - Add mainnet peer discovery DNS seed (seed.bitcoin.jonasschnelli.ch)
    (#496)
  - Correct reconnect handling for persistent peers (#463, #464)
  - Ignore requests for block headers if not fully synced (#444)
  - Add CLI support for specifying the zone id on IPv6 addresses (#538)
  - Fix a couple of issues where the initial block sync could stall (#518,
    #229, #486)
  - Fix an issue which prevented the --onion option from working as
    intended (#446)
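For context, a minimal Go sketch of the canonical variable-length-integer check: a Bitcoin-style varint must use the shortest prefix capable of holding its value, and anything longer is rejected. The function name and error text are illustrative, not btcd's actual wire API.

package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

// readCanonicalVarInt decodes a Bitcoin-style variable-length integer,
// rejecting non-minimal encodings such as 0xfd 0x05 0x00 for the value 5.
func readCanonicalVarInt(r io.Reader) (uint64, error) {
	var b [8]byte
	if _, err := io.ReadFull(r, b[:1]); err != nil {
		return 0, err
	}
	switch b[0] {
	case 0xfd: // 2-byte payload: only canonical for values >= 0xfd
		if _, err := io.ReadFull(r, b[:2]); err != nil {
			return 0, err
		}
		v := uint64(binary.LittleEndian.Uint16(b[:2]))
		if v < 0xfd {
			return 0, errors.New("non-canonical varint")
		}
		return v, nil
	case 0xfe: // 4-byte payload: must exceed the 2-byte range
		if _, err := io.ReadFull(r, b[:4]); err != nil {
			return 0, err
		}
		v := uint64(binary.LittleEndian.Uint32(b[:4]))
		if v <= 0xffff {
			return 0, errors.New("non-canonical varint")
		}
		return v, nil
	case 0xff: // 8-byte payload: must exceed the 4-byte range
		if _, err := io.ReadFull(r, b[:8]); err != nil {
			return 0, err
		}
		v := binary.LittleEndian.Uint64(b[:8])
		if v <= 0xffffffff {
			return 0, errors.New("non-canonical varint")
		}
		return v, nil
	default: // values 0x00-0xfc encode themselves in one byte
		return uint64(b[0]), nil
	}
}

func main() {
	// 0xfd 0x05 0x00 spends three bytes on a value whose canonical form
	// is the single byte 0x05, so the decoder rejects it.
	_, err := readCanonicalVarInt(bytes.NewReader([]byte{0xfd, 0x05, 0x00}))
	fmt.Println(err)
}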
- Transaction relay (memory pool) changes:
  - Require transactions to only include signatures encoded with the
    canonical 'low-s' encoding (#512)
  - Add a new parameter --minrelaytxfee to allow the minimum transaction
    fee in BTC/kB to be overridden (#520)
  - Retain memory pool transactions when they redeem another one that is
    removed when a block is accepted (#539)
  - Do not send reject messages for a transaction if it is valid but
    causes an orphan transaction which depends on it to be determined
    as invalid (#546)
  - Refrain from attempting to add orphans to the memory pool multiple
    times when the transaction they redeem is added (#551)
  - Modify minimum transaction fee calculations to scale based on bytes
    instead of full kilobyte boundaries (#521, #537); see the sketch
    after this list
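A minimal sketch of the byte-proportional minimum-fee calculation described in the last bullet above; the rate and helper name are illustrative rather than btcd's exact policy code.

package main

import "fmt"

// minRelayFee returns the minimum relay fee in satoshi for a transaction
// of the given serialized size, scaling per byte instead of rounding the
// size up to the next full kilobyte.
func minRelayFee(sizeBytes, feePerKB int64) int64 {
	fee := sizeBytes * feePerKB / 1000
	if fee == 0 && feePerKB > 0 {
		fee = feePerKB // floor for tiny transactions
	}
	return fee
}

func main() {
	// A 250-byte transaction pays a quarter of the per-kB rate rather
	// than the full-kB amount that boundary rounding would charge.
	fmt.Println(minRelayFee(250, 1000)) // 250
}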
- Implement signature cache:
  - Provides a limited memory cache of validated signatures which is a
    huge optimization when verifying blocks for transactions that are
    already in the memory pool (#506)
  - Add a new parameter '--sigcachemaxsize' which allows the size of the
    new cache to be manually changed if desired (#506)
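A minimal sketch of such a cache, assuming each entry is keyed by the 32-byte signature hash it covers; the types and eviction policy are simplified relative to btcd's sigcache package.

package main

import (
	"fmt"
	"sync"
)

// sigCacheEntry records a signature and public key that already verified
// against a given signature hash.
type sigCacheEntry struct {
	sig, pubKey string
}

// SigCache remembers successful verifications so a transaction checked on
// mempool entry need not be re-verified when its block arrives.
type SigCache struct {
	mu         sync.RWMutex
	validSigs  map[[32]byte]sigCacheEntry
	maxEntries int
}

func NewSigCache(maxEntries int) *SigCache {
	return &SigCache{
		validSigs:  make(map[[32]byte]sigCacheEntry),
		maxEntries: maxEntries,
	}
}

// Exists reports whether this exact (hash, sig, pubkey) triple was
// previously verified.
func (c *SigCache) Exists(hash [32]byte, sig, pubKey string) bool {
	c.mu.RLock()
	defer c.mu.RUnlock()
	e, ok := c.validSigs[hash]
	return ok && e.sig == sig && e.pubKey == pubKey
}

// Add records a verified triple, evicting an arbitrary entry once the
// cache is full (map iteration order approximates random eviction).
func (c *SigCache) Add(hash [32]byte, sig, pubKey string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if len(c.validSigs) >= c.maxEntries {
		for k := range c.validSigs {
			delete(c.validSigs, k)
			break
		}
	}
	c.validSigs[hash] = sigCacheEntry{sig: sig, pubKey: pubKey}
}

func main() {
	c := NewSigCache(100)
	var h [32]byte
	c.Add(h, "sig", "pub")
	fmt.Println(c.Exists(h, "sig", "pub")) // true
}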
- Mining support changes:
  - Notify getblocktemplate long polling clients when a block is pushed
    via submitblock (#488)
  - Speed up getblocktemplate by making use of the new signature cache
    (#506)
- RPC changes:
  - Implement getmempoolinfo command (#453)
  - Implement getblockheader command (#461)
  - Modify createrawtransaction command to accept a new optional parameter
    'locktime' (#529)
  - Modify listunspent result to include the 'spendable' field (#440)
  - Modify getinfo command to include 'errors' field (#511)
  - Add timestamps to blockconnected and blockdisconnected notifications
    (#450)
  - Several modifications to searchrawtransactions command:
    - Accept a new optional parameter 'vinextra' which causes the results
      to include information about the outputs referenced by a transaction's
      inputs (#485, #487)
    - Skip entries in the mempool too (#495)
    - Accept a new optional parameter 'reverse' to return the results in
      reverse order (most recent to oldest) (#497)
    - Accept a new optional parameter 'filteraddrs' which causes the
      results to only include inputs and outputs which involve the
      provided addresses (#516)
  - Change the notification order to notify clients about mined
    transactions (recvtx, redeemingtx) before the blockconnected
    notification (#449)
  - Update verifymessage RPC to use the standard algorithm so it is
    compatible with other implementations (#515)
  - Improve ping statistics by pinging on an interval (#517)
- Websocket changes:
  - Implement session command which returns a per-session unique id (#500,
    #503)
- btcctl utility changes:
  - Add getmempoolinfo command (#453)
  - Add getblockheader command (#461)
  - Add getwalletinfo command (#471)
- Notable developer-related package changes:
  - Introduce a new peer package which acts as a common base for creating
    and concurrently managing bitcoin network peers (#445)
  - Various cleanup of the new peer package (#528, #531, #524, #534,
    #549)
  - Block heights now consistently use int32 everywhere (#481)
  - The BlockHeader type in the wire package now provides the BtcDecode
    and BtcEncode methods (#467)
  - Update wire package to recognize BIP0064 (getutxo) service bit (#489)
  - Export LockTimeThreshold constant from txscript package (#454)
  - Export MaxDataCarrierSize constant from txscript package (#466)
  - Provide new IsUnspendable function from the txscript package (#478)
  - Export variable length string functions from the wire package (#514)
  - Export DNS Seeds for each network from the chaincfg package (#544)
  - Preliminary work towards separating the memory pool into a separate
    package (#525, #548)
- Misc changes:
  - Various documentation updates (#442, #462, #465, #460, #470, #473,
    #505, #530, #545)
  - Add installation instructions for gentoo (#542)
  - Ensure an error is shown if OS limits can't be set at startup (#498)
  - Tighten the standardness checks for multisig scripts (#526)
  - Test coverage improvement (#468, #494, #527, #543, #550)
  - Several optimizations (#457, #474, #475, #476, #508, #509)
  - Minor code cleanup and refactoring (#472, #479, #482, #519, #540)
- Contributors (alphabetical order):
  - Ben Echols
  - Bruno Clermont
  - danda
  - Daniel Krawisz
  - Dario Nieuwenhuis
  - Dave Collins
  - David Hill
  - Javed Khan
  - Jonathan Gillham
  - Joseph Becher
  - Josh Rickmar
  - Justus Ranvier
  - Mawuli Adzoe
  - Olaoluwa Osuntokun
  - Rune T. Aune

Changes in 0.11.1 (Wed May 27 2015)
- Protocol and network related changes:
  - Use correct sub-command in reject message for rejected transactions
    (#436, #437)
  - Add a new parameter --torisolation which forces new circuits for each
    connection when using tor (#430)
- Transaction relay (memory pool) changes:
  - Reduce the default maximum number of allowed orphan transactions
    to 1000 (#419)
  - Add a new parameter --maxorphantx which allows the maximum number of
    orphan transactions stored in the mempool to be specified (#419)
- RPC changes:
  - Modify listtransactions result to include the 'involveswatchonly' and
    'vout' fields (#427)
  - Update getrawtransaction result to omit the 'confirmations' field
    when it is 0 (#420, #422)
  - Update signrawtransaction result to include errors (#423)
- btcctl utility changes:
  - Add gettxoutproof command (#428)
  - Add verifytxoutproof command (#428)
- Notable developer-related package changes:
  - The btcec package now provides the ability to perform ECDH
    encryption and decryption (#375); see the sketch after this list
  - The block and header validation in the blockchain package has been
    split to help pave the way toward concurrent downloads (#386)
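To illustrate the ECDH flow mentioned in the list above: each party combines its private key with the other's public key and both derive the same shared secret, which can then key a symmetric cipher. This sketch uses the standard library's crypto/ecdh (Go 1.20+) with X25519 standing in for secp256k1, which the stdlib does not provide.

package main

import (
	"bytes"
	"crypto/ecdh"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := ecdh.X25519()
	alice, _ := curve.GenerateKey(rand.Reader)
	bob, _ := curve.GenerateKey(rand.Reader)

	// Both sides compute the same secret without ever transmitting it.
	sharedA, _ := alice.ECDH(bob.PublicKey())
	sharedB, _ := bob.ECDH(alice.PublicKey())
	fmt.Println(bytes.Equal(sharedA, sharedB)) // true
}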
- Misc changes:
  - Minor peer optimization (#433)
- Contributors (alphabetical order):
  - Dave Collins
  - David Hill
  - Federico Bond
  - Ishbir Singh
  - Josh Rickmar

Changes in 0.11.0 (Wed May 06 2015)
- Protocol and network related changes:
  - **IMPORTANT: Update is required due to the following point**
  - Correct a few corner cases in script handling which could result in
    forking from the network on non-standard transactions (#425)
  - Add a new checkpoint at block height 352940 (#418)
  - Optimized script execution (#395, #400, #404, #409)
  - Fix a case that could lead to stalled syncs (#138, #296)
- Network address manager changes:
  - Implement eclipse attack countermeasures as proposed in
    http://cs-people.bu.edu/heilman/eclipse (#370, #373)
- Optional address indexing changes:
  - Fix an issue where a reorg could cause an orderly shutdown when the
    address index is active (#340, #357)
- Transaction relay (memory pool) changes:
  - Increase maximum allowed space for nulldata transactions to 80 bytes
    (#331)
  - Implement support for the following rules specified by BIP0062:
    - The S value in an ECDSA signature must be at most half the curve
      order (rule 5) (#349); see the sketch after this list
    - Script execution must result in a single non-zero value on the stack
      (rule 6) (#347)
    - NOTE: All 7 rules of BIP0062 are now implemented
  - Use network adjusted time in finalized transaction checks to improve
    consistency across nodes (#332)
  - Process orphan transactions on acceptance of new transactions (#345)
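A minimal sketch of the rule-5 check referenced above, assuming the secp256k1 group order: an S value is canonical only when it is at most half the order, since any higher S has an equivalent low form N-S.

package main

import (
	"fmt"
	"math/big"
)

// curveOrder is the secp256k1 group order N.
var curveOrder, _ = new(big.Int).SetString(
	"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

// halfOrder is N/2; any S above it has the equivalent low form N-S.
var halfOrder = new(big.Int).Rsh(curveOrder, 1)

// isLowS reports whether a signature's S component uses the canonical
// 'low-s' form required for standardness.
func isLowS(s *big.Int) bool {
	return s.Cmp(halfOrder) <= 0
}

func main() {
	highS := new(big.Int).Sub(curveOrder, big.NewInt(1)) // N-1 is high
	fmt.Println(isLowS(big.NewInt(1)), isLowS(highS))    // true false
}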
- RPC changes:
  - Add support for a limited RPC user which is not allowed admin level
    operations on the server (#363)
  - Implement node command for more unified control over connected peers
    (#79, #341)
  - Implement generate command for regtest/simnet to support
    deterministically mining a specified number of blocks (#362, #407)
  - Update searchrawtransactions to return the matching transactions in
    order (#354)
  - Correct an issue with searchrawtransactions where it could return
    duplicates (#346, #354)
  - Increase precision of 'difficulty' field in getblock result to 8
    (#414, #415)
  - Omit 'nextblockhash' field from getblock result when it is empty
    (#416, #417)
  - Add 'id' and 'timeoffset' fields to getpeerinfo result (#335)
- Websocket changes:
  - Implement new commands stopnotifyspent, stopnotifyreceived,
    stopnotifyblocks, and stopnotifynewtransactions to allow clients to
    cancel notification registrations (#122, #342)
- btcctl utility changes:
  - A single dash can now be used as an argument to cause that argument to
    be read from stdin (#348)
  - Add generate command
- Notable developer-related package changes:
  - The new version 2 btcjson package has now replaced the deprecated
    version 1 package (#368)
  - The btcec package now performs all signing using RFC6979 deterministic
    signatures (#358, #360); see the sketch after this list
  - The txscript package has been significantly cleaned up and had a few
    API changes (#387, #388, #389, #390, #391, #392, #393, #395, #396,
    #400, #403, #404, #405, #406, #408, #409, #410, #412)
  - A new PkScriptLocs function has been added to the wire package MsgTx
    type which provides callers that deal with scripts optimization
    opportunities (#343)
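A simplified sketch of the RFC 6979 idea referenced in the list above: the nonce k is derived from the private key and message hash through an HMAC-based generator, so signing needs no entropy source and the same input always yields the same signature. The full spec's bits2octets reduction and out-of-range retry loop are omitted here.

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

// rfc6979Nonce derives the first deterministic nonce candidate from a
// 32-byte private key and message hash (RFC 6979 section 3.2, steps b-g).
func rfc6979Nonce(privKey, hash []byte) []byte {
	mac := func(key []byte, parts ...[]byte) []byte {
		h := hmac.New(sha256.New, key)
		for _, p := range parts {
			h.Write(p)
		}
		return h.Sum(nil)
	}

	v := make([]byte, 32) // V = 0x01 0x01 ... 0x01
	for i := range v {
		v[i] = 0x01
	}
	k := make([]byte, 32) // K = 0x00 0x00 ... 0x00

	k = mac(k, v, []byte{0x00}, privKey, hash)
	v = mac(k, v)
	k = mac(k, v, []byte{0x01}, privKey, hash)
	v = mac(k, v)
	return mac(k, v) // first candidate for the nonce k
}

func main() {
	priv := make([]byte, 32)
	priv[31] = 0x01
	h := sha256.Sum256([]byte("message"))
	// Both prints are identical: the nonce is fully deterministic.
	fmt.Printf("%x\n", rfc6979Nonce(priv, h[:]))
	fmt.Printf("%x\n", rfc6979Nonce(priv, h[:]))
}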
- Misc changes:
  - Minor wire hashing optimizations (#366, #367)
  - Other minor internal optimizations
- Contributors (alphabetical order):
  - Alex Akselrod
  - Arne Brutschy
  - Chris Jepson
  - Daniel Krawisz
  - Dave Collins
  - David Hill
  - Jimmy Song
  - Jonas Nick
  - Josh Rickmar
  - Olaoluwa Osuntokun
  - Oleg Andreev

Changes in 0.10.0 (Sun Mar 01 2015)
- Protocol and network related changes:
  - Add a new checkpoint at block height 343185
  - Implement BIP0066 which includes support for version 3 blocks, a new
    consensus rule which prevents non-DER encoded signatures, and a
    double-threshold switchover mechanism
  - Rather than announcing all known addresses on getaddr requests which
    can possibly result in multiple messages, randomize the results and
    limit them to the max allowed by a single message (1000 addresses)
  - Add more reserved IP spaces to the address manager
- Transaction relay (memory pool) changes:
  - Make transactions which contain reserved opcodes nonstandard
  - No longer accept or relay free and low-fee transactions that have
    insufficient priority to be mined in the next block
  - Implement support for the following rules specified by BIP0062:
    - ECDSA signature must use strict DER encoding (rule 1)
    - The signature script must only contain push operations (rule 2)
    - All push operations must use the smallest possible encoding (rule 3)
    - All stack values interpreted as a number must be encoded using the
      shortest possible form (rule 4)
    - NOTE: Rule 1 was already enforced, however the entire script now
      evaluates to false rather than only the signature verification as
      required by BIP0062
  - Allow transactions with nulldata transaction outputs to be treated as
    standard
- Mining support changes:
  - Modify the getblocktemplate RPC to generate and return block templates
    for version 3 blocks which are compatible with BIP0066
  - Allow getblocktemplate to serve blocks when the current time is
    less than the minimum allowed time for a generated block template
    (https://github.com/btcsuite/btcd/issues/209)
- Crypto changes:
  - Optimize scalar multiplication by the base point by using a
    pre-computed table which results in approximately a 35% speedup
    (https://github.com/btcsuite/btcec/issues/2); see the sketch after
    this list
  - Optimize general scalar multiplication by using the secp256k1
    endomorphism which results in approximately a 17-20% speedup
    (https://github.com/btcsuite/btcec/issues/1)
  - Optimize general scalar multiplication by using non-adjacent form
    which results in approximately an additional 8% speedup
    (https://github.com/btcsuite/btcec/issues/3)
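To illustrate the precomputed-table idea behind the base-point speedup above: small multiples of G are computed once, and each 4-bit window of the scalar then costs four doublings plus one table addition instead of per-bit conditional adds. The standard library's P-256 and its legacy Add/Double API stand in for secp256k1 here; (0, 0) is the package's convention for the point at infinity.

package main

import (
	"crypto/elliptic"
	"fmt"
	"math/big"
)

func main() {
	curve := elliptic.P256() // stand-in curve; btcec targets secp256k1

	// Precompute table[i] = i*G once, at startup.
	var tx, ty [16]*big.Int
	for i := 0; i < 16; i++ {
		tx[i], ty[i] = curve.ScalarBaseMult([]byte{byte(i)})
	}

	// scalarBaseMult consumes the scalar one nibble at a time: four
	// doublings shift the accumulator, then one table lookup adds the
	// windowed contribution.
	scalarBaseMult := func(scalar []byte) (*big.Int, *big.Int) {
		x, y := new(big.Int), new(big.Int) // point at infinity
		for _, b := range scalar {
			for _, nib := range []byte{b >> 4, b & 0x0f} {
				for i := 0; i < 4; i++ {
					x, y = curve.Double(x, y)
				}
				x, y = curve.Add(x, y, tx[nib], ty[nib])
			}
		}
		return x, y
	}

	k := []byte{0x01, 0x23}
	x1, y1 := scalarBaseMult(k)
	x2, y2 := curve.ScalarBaseMult(k)
	fmt.Println(x1.Cmp(x2) == 0 && y1.Cmp(y2) == 0) // true
}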
- Implement optional address indexing:
  - Add a new parameter --addrindex which will enable the creation of an
    address index which can be queried to determine all transactions which
    involve a given address
    (https://github.com/btcsuite/btcd/issues/190)
  - Add a new logging subsystem for address index related operations
  - Support new searchrawtransactions RPC
    (https://github.com/btcsuite/btcd/issues/185)
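A minimal in-memory sketch of the address-index concept just described: each transaction is recorded against every address it involves, so a later query by address returns all related transaction IDs. btcd's real index is persisted in the database; the names here are illustrative.

package main

import (
	"fmt"
	"sync"
)

// AddrIndex maps an address to the ordered list of transaction IDs that
// pay to or spend from it.
type AddrIndex struct {
	mu     sync.RWMutex
	byAddr map[string][]string
}

func NewAddrIndex() *AddrIndex {
	return &AddrIndex{byAddr: make(map[string][]string)}
}

// IndexTx records txid under every address the transaction involves.
func (idx *AddrIndex) IndexTx(txid string, addrs []string) {
	idx.mu.Lock()
	defer idx.mu.Unlock()
	for _, a := range addrs {
		idx.byAddr[a] = append(idx.byAddr[a], txid)
	}
}

// TxIDsForAddr returns a copy of every indexed txid involving addr; this
// is the kind of lookup that backs searchrawtransactions.
func (idx *AddrIndex) TxIDsForAddr(addr string) []string {
	idx.mu.RLock()
	defer idx.mu.RUnlock()
	return append([]string(nil), idx.byAddr[addr]...)
}

func main() {
	idx := NewAddrIndex()
	idx.IndexTx("tx1", []string{"addrA", "addrB"})
	fmt.Println(idx.TxIDsForAddr("addrA")) // [tx1]
}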
- RPC changes:
  - Require TLS version 1.2 as the minimum version for all TLS connections
  - Provide support for disabling TLS when only listening on localhost
    (https://github.com/btcsuite/btcd/pull/192)
  - Modify help output for all commands to provide much more consistent
    and detailed information
  - Correct a case in getrawtransaction which would refuse to serve certain
    transactions with invalid scripts
    (https://github.com/btcsuite/btcd/issues/210)
  - Correct error handling in the getrawtransaction RPC which could lead
    to a crash in rare cases
    (https://github.com/btcsuite/btcd/issues/196)
  - Update getinfo RPC to include the appropriate 'timeoffset' calculated
    from the median network time
  - Modify listreceivedbyaddress result type to include txids field so it
    is compatible
  - Add 'iswatchonly' field to validateaddress result
  - Add 'startingpriority' and 'currentpriority' fields to getrawmempool
    (https://github.com/btcsuite/btcd/issues/178)
  - Don't omit the 'confirmations' field from getrawtransaction when it is
    zero
- Websocket changes:
  - Modify the behavior of the rescan command to automatically register
    for notifications about transactions paying to rescanned addresses
    or spending outputs from the final rescan utxo set when the rescan
    is through the best block in the chain
- btcctl utility changes:
  - Make the list of commands available via the -l option rather than
    dumping the entire list on usage errors
  - Alphabetize and categorize the list of commands by chain and wallet
  - Make the help option only show the help options instead of also
    dumping all of the commands
  - Make the usage syntax much more consistent and correct a few cases of
    misnamed fields
    (https://github.com/btcsuite/btcd/issues/305)
  - Improve usage errors to show the specific parameter number, reason,
    and error code
  - Only show the usage for a specific command when a valid command is
    provided with invalid parameters
  - Add support for a SOCKS5 proxy
  - Modify output for integer fields (such as timestamps) to display
    normally instead of in scientific notation
  - Add invalidateblock command
  - Add reconsiderblock command
  - Add createnewaccount command
  - Add renameaccount command
  - Add searchrawtransactions command
  - Add importaddress command
  - Add importpubkey command
- showblock utility changes:
  - Remove utility in favor of the RPC getblock method
- Notable developer-related package changes:
  - Many of the core packages have been relocated into the btcd repository
    (https://github.com/btcsuite/btcd/issues/214)
  - A new version of the btcjson package that has been completely
    redesigned from the ground up based upon how the project has
    evolved and lessons learned while using it since it was first written
    is now available in the btcjson/v2/btcjson directory
    - This will ultimately replace the current version so anyone making
      use of this package will need to update their code accordingly
  - The btcec package now provides better facilities for working directly
    with its public and private keys without having to mix elements from
    the ecdsa package
  - Update the script builder to ensure all rules specified by BIP0062 are
    adhered to when creating scripts
  - The blockchain package now provides a MedianTimeSource interface and
    concrete implementation for providing time samples from remote peers
    and using that data to calculate an offset against the local time;
    see the sketch after this list
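A sketch of the median-time idea behind MedianTimeSource (see the list above): per-peer clock offsets are collected and their median becomes the adjustment, so a few peers with wildly wrong clocks cannot skew the network-adjusted time.

package main

import (
	"fmt"
	"sort"
	"time"
)

// medianOffset returns the median of the observed peer clock offsets;
// adding it to the local clock yields the network-adjusted time.
func medianOffset(offsets []time.Duration) time.Duration {
	if len(offsets) == 0 {
		return 0
	}
	s := append([]time.Duration(nil), offsets...)
	sort.Slice(s, func(i, j int) bool { return s[i] < s[j] })
	return s[len(s)/2]
}

func main() {
	offsets := []time.Duration{-2 * time.Second, time.Second, 90 * time.Second}
	fmt.Println(medianOffset(offsets)) // 1s: the 90s outlier does not win
}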
- Misc changes:
  - Fix a slow memory leak due to tickers not being stopped
    (https://github.com/btcsuite/btcd/issues/189)
  - Fix an issue where a mix of orphans and SPV clients could trigger a
    condition where peers would no longer be served
    (https://github.com/btcsuite/btcd/issues/231)
  - The RPC username and password can now contain symbols which previously
    conflicted with special symbols used in URLs
  - Improve handling of obtaining random nonces to prevent cases where it
    could error when not enough entropy was available
  - Improve handling of home directory creation errors such as in the case
    of unmounted symlinks (https://github.com/btcsuite/btcd/issues/193)
  - Improve the error reporting for rejected transactions to include the
    inputs which are missing and/or being double spent
  - Update sample config file with new options and correct a comment
    regarding the fact the RPC server only listens on localhost by default
    (https://github.com/btcsuite/btcd/issues/218)
  - Update the continuous integration builds to run several tools which
    help keep code quality high
  - Significant amount of internal code cleanup and improvements
  - Other minor internal optimizations
- Code Contributors (alphabetical order):
  - Beldur
  - Ben Holden-Crowther
  - Dave Collins
  - David Evans
  - David Hill
  - Guilherme Salgado
  - Javed Khan
  - Jimmy Song
  - John C. Vernaleo
  - Jonathan Gillham
  - Josh Rickmar
  - Michael Ford
  - Michail Kargakis
  - kac
  - Olaoluwa Osuntokun

Changes in 0.9.0 (Sat Sep 20 2014)
- Protocol and network related changes:
  - Add a new checkpoint at block height 319400
  - Add support for BIP0037 bloom filters
    (https://github.com/conformal/btcd/issues/132); see the sketch after
    this list
  - Implement BIP0061 reject handling and hence support for protocol
    version 70002 (https://github.com/conformal/btcd/issues/133)
  - Add testnet DNS seeds for peer discovery (testnet-seed.alexykot.me
    and testnet-seed.bitcoin.schildbach.de)
  - Add mainnet DNS seed for peer discovery (seeds.bitcoin.open-nodes.org)
  - Make multisig transactions with non-null dummy data nonstandard
    (https://github.com/conformal/btcd/issues/131)
  - Make transactions with an excessive number of signature operations
    nonstandard
  - Perform initial DNS lookups concurrently which allows connections
    to be established more quickly
  - Improve the address manager to significantly reduce memory usage and
    add tests
  - Remove orphan transactions when they appear in a mined block
    (https://github.com/conformal/btcd/issues/166)
  - Apply incremental back off on connection retries for persistent peers
    that give invalid replies to mirror the logic used for failed
    connections (https://github.com/conformal/btcd/issues/103)
  - Correct rate-limiting of free and low-fee transactions
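A minimal bloom-filter sketch for the BIP0037 bullet above: k seeded hash functions set and test bit positions, giving definite negatives and probabilistic positives. BIP0037 specifies murmur3 with per-function seeds; FNV-1a stands in here to keep the example self-contained.

package main

import (
	"fmt"
	"hash/fnv"
)

// Bloom is a fixed-size bloom filter with k seeded hash functions.
type Bloom struct {
	bits []byte
	k    uint32
}

func NewBloom(nBits, k uint32) *Bloom {
	return &Bloom{bits: make([]byte, (nBits+7)/8), k: k}
}

func (b *Bloom) pos(seed uint32, data []byte) uint32 {
	h := fnv.New32a()
	h.Write([]byte{byte(seed), byte(seed >> 8), byte(seed >> 16), byte(seed >> 24)})
	h.Write(data)
	return h.Sum32() % uint32(len(b.bits)*8)
}

// Add sets the k bit positions derived from data.
func (b *Bloom) Add(data []byte) {
	for i := uint32(0); i < b.k; i++ {
		n := b.pos(i, data)
		b.bits[n/8] |= 1 << (n % 8)
	}
}

// MayContain is definitely false or probably true; SPV peers accept the
// occasional false positive in exchange for not revealing exact addresses.
func (b *Bloom) MayContain(data []byte) bool {
	for i := uint32(0); i < b.k; i++ {
		n := b.pos(i, data)
		if b.bits[n/8]&(1<<(n%8)) == 0 {
			return false
		}
	}
	return true
}

func main() {
	f := NewBloom(1024, 5)
	f.Add([]byte("txid-1"))
	fmt.Println(f.MayContain([]byte("txid-1")), f.MayContain([]byte("txid-2")))
}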
- Mining support changes:
  - Implement getblocktemplate RPC with the following support:
    (https://github.com/conformal/btcd/issues/124)
    - BIP0022 Non-Optional Sections
    - BIP0022 Long Polling
    - BIP0023 Basic Pool Extensions
    - BIP0023 Mutation coinbase/append
    - BIP0023 Mutations time, time/increment, and time/decrement
    - BIP0023 Mutation transactions/add
    - BIP0023 Mutations prevblock, coinbase, and generation
    - BIP0023 Block Proposals
  - Implement built-in concurrent CPU miner
    (https://github.com/conformal/btcd/issues/137)
    NOTE: CPU mining on mainnet is pointless. This has been provided
    for testing purposes such as for the new simulation test network
    - Add --generate flag to enable CPU mining
    - Deprecate the --getworkkey flag in favor of --miningaddr which
      specifies which addresses generated blocks will choose from to pay
      the subsidy to
- RPC changes:
  - Implement gettxout command
    (https://github.com/conformal/btcd/issues/141)
  - Implement validateaddress command
  - Implement verifymessage command
  - Mark getunconfirmedbalance RPC as wallet-only
  - Mark getwalletinfo RPC as wallet-only
  - Update getgenerate, setgenerate, gethashespersec, and getmininginfo
    to return the appropriate information about new CPU mining status
  - Modify getpeerinfo pingtime and pingwait field types to float64 so
    they are compatible
  - Improve disconnect handling for normal HTTP clients
  - Make error code returns for invalid hex more consistent
- Websocket changes:
  - Switch to a new more efficient websocket package
    (https://github.com/conformal/btcd/issues/134)
  - Add rescanfinished notification
  - Modify the rescanprogress notification to include block hash as well
    as height (https://github.com/conformal/btcd/issues/151)
- btcctl utility changes:
  - Accept --simnet flag which automatically selects the appropriate port
    and TLS certificates needed to communicate with btcd and btcwallet on
    the simulation test network
  - Fix createrawtransaction command to send amounts denominated in BTC
  - Add estimatefee command
  - Add estimatepriority command
  - Add getmininginfo command
  - Add getnetworkinfo command
  - Add gettxout command
  - Add lockunspent command
  - Add signrawtransaction command
- addblock utility changes:
  - Accept --simnet flag which automatically selects the appropriate port
    and TLS certificates needed to communicate with btcd and btcwallet on
    the simulation test network
- Notable developer-related package changes:
  - Provide a new bloom package in btcutil which allows creating and
    working with BIP0037 bloom filters
  - Provide a new hdkeychain package in btcutil which allows working with
    BIP0032 hierarchical deterministic key chains
  - Introduce a new btcnet package which houses network parameters
  - Provide new simnet network (--simnet) which is useful for private
    simulation testing
  - Enforce low S values in serialized signatures as detailed in BIP0062
  - Return errors from all methods on the btcdb.Db interface
    (https://github.com/conformal/btcdb/issues/5)
  - Allow behavior flags to alter btcchain.ProcessBlock
    (https://github.com/conformal/btcchain/issues/5)
  - Provide a new SerializeSize API for blocks
    (https://github.com/conformal/btcwire/issues/19)
  - Several of the core packages now work with Google App Engine
- Misc changes:
  - Correct an issue where the database could corrupt under certain
    circumstances which would require a new chain download
  - Slightly optimize deserialization
  - Use the correct IP block for he.net
  - Fix an issue where it was possible the block manager could hang on
    shutdown
  - Update sample config file so the comments are on a separate line
    rather than the end of a line so they are not interpreted as settings
    (https://github.com/conformal/btcd/issues/135)
  - Correct an issue where getdata requests were not being properly
    throttled which could lead to larger than necessary memory usage
  - Always show help when given the help flag even when the config file
    contains invalid entries
  - General code cleanup and minor optimizations
Changes in 0.8.0-beta (Sun May 25 2014)
- Btcd is now Beta (https://github.com/conformal/btcd/issues/130)
- Add a new checkpoint at block height 300255
- Protocol and network related changes:
  - Lower the minimum transaction relay fee to 1000 satoshi to match
    recent reference client changes
    (https://github.com/conformal/btcd/issues/100)
  - Raise the maximum signature script size to support standard 15-of-15
    multi-signature pay-to-script-hash transactions with compressed pubkeys
    to remain compatible with the reference client
    (https://github.com/conformal/btcd/issues/128)
  - Reduce max bytes allowed for a standard nulldata transaction to 40 for
    compatibility with the reference client
  - Introduce a new btcnet package which houses all of the network params
    for each network (mainnet, testnet3, regtest) to ultimately enable
    easier addition and tweaking of networks without needing to change
    several packages
  - Fix several script discrepancies found by reference client test data
  - Add new DNS seed for peer discovery (seed.bitnodes.io)
  - Reduce the max known inventory cache from 20000 items to 1000 items
  - Fix an issue where unknown inventory types could lead to a hung peer
  - Implement inventory rebroadcast handler for sendrawtransaction
    (https://github.com/conformal/btcd/issues/99)
  - Update user agent to fully support BIP0014
    (https://github.com/conformal/btcwire/issues/10)
- Implement initial mining support:
  - Add a new logging subsystem for mining related operations
  - Implement infrastructure for creating block templates
  - Provide options to control block template creation settings
  - Support the getwork RPC
  - Allow address identifiers to apply to more than one network since both
    testnet3 and the regression test network unfortunately use the same
    identifier
- RPC changes:
  - Set the content type for HTTP POST RPC connections to application/json
    (https://github.com/conformal/btcd/issues/121)
  - Modify the RPC server startup so it only requires at least one valid
    listen interface
  - Correct an error path where it was possible certain errors would not
    be returned
  - Implement getwork command
    (https://github.com/conformal/btcd/issues/125)
  - Update sendrawtransaction command to reject orphans
  - Update sendrawtransaction command to include the reason a transaction
    was rejected
  - Update getinfo command to populate connection count field
  - Update getinfo command to include relay fee field
    (https://github.com/conformal/btcd/issues/107)
  - Allow transactions submitted with sendrawtransaction to bypass the
    rate limiter
  - Allow the getcurrentnet and getbestblock extensions to be accessed via
    HTTP POST in addition to Websockets
    (https://github.com/conformal/btcd/issues/127)
- Websocket changes:
  - Rework notifications to ensure they are delivered in the order they
    occur
  - Rename notifynewtxs command to notifyreceived (funds received)
  - Rename notifyallnewtxs command to notifynewtransactions
  - Rename alltx notification to txaccepted
  - Rename allverbosetx notification to txacceptedverbose
    (https://github.com/conformal/btcd/issues/98)
  - Add rescan progress notification
  - Add recvtx notification
  - Add redeemingtx notification
  - Modify notifyspent command to accept an array of outpoints
    (https://github.com/conformal/btcd/issues/123)
  - Significantly optimize the rescan command to yield up to a 60x speed
    increase
- btcctl utility changes:
  - Add createencryptedwallet command
  - Add getblockchaininfo command
  - Add importwallet command
  - Add addmultisigaddress command
  - Add setgenerate command
  - Accept --testnet and --wallet flags which automatically select
    the appropriate port and TLS certificates needed to communicate
    with btcd and btcwallet (https://github.com/conformal/btcd/issues/112)
  - Allow path expansion from config file entries
    (https://github.com/conformal/btcd/issues/113)
  - Minor refactor to simplify handling of options
- addblock utility changes:
  - Improve logging by making it consistent with the logging provided by
    btcd (https://github.com/conformal/btcd/issues/90)
- Improve several package APIs for developers:
  - Add new amount type for consistently handling monetary values
  - Add new coin selector API
  - Add new WIF (Wallet Import Format) API
  - Add new crypto types for private keys and signatures
  - Add new API to sign transactions including script merging and hash
    types
  - Expose function to extract all pushed data from a script
    (https://github.com/conformal/btcscript/issues/8)
- Misc changes:
  - Optimize address manager shuffling to do 67% less work on average
  - Resolve a couple of benign data races found by the race detector
    (https://github.com/conformal/btcd/issues/101)
  - Add IP address to all peer related errors to clarify which peer is the
    cause (https://github.com/conformal/btcd/issues/102)
  - Fix a UPnP case issue that prevented the --upnp option from working
    with some UPnP servers
  - Update documentation in the sample config file regarding debug levels
  - Adjust some logging levels to improve debug messages
  - Improve the throughput of query messages to the block manager
  - Several minor optimizations to reduce GC churn and enhance speed
  - Other minor refactoring
  - General code cleanup
Changes in 0.7.0 (Thu Feb 20 2014)
- Fix an issue when parsing scripts which contain a multi-signature script
  which requires zero signatures such as testnet block
  000000001881dccfeda317393c261f76d09e399e15e27d280e5368420f442632
  (https://github.com/conformal/btcscript/issues/7)
- Add check to ensure all transactions accepted to mempool only contain
  canonical data pushes (https://github.com/conformal/btcscript/issues/6)
- Fix an issue causing excessive memory consumption
- Significantly rework and improve the websocket notification system:
  - Each client is now independent so slow clients no longer limit the
    speed of other connected clients
  - Potentially long-running operations such as rescans are now run in
    their own handler and rate-limited to one operation at a time without
    preventing simultaneous requests from the same client for the faster
    requests or notifications
  - A couple of scenarios which could cause shutdown to hang have been
    resolved
  - Update notifynewtx notifications to support all address types instead
    of only pay-to-pubkey-hash
  - Provide a --rpcmaxwebsockets option to allow limiting the number of
    concurrent websocket clients
  - Add a new websocket command notifyallnewtxs to request notifications
    (https://github.com/conformal/btcd/issues/86) (thanks @flammit)
- Improve btcctl utility in the following ways:
  - Add getnetworkhashps command
  - Add gettransaction command (wallet-specific)
  - Add signmessage command (wallet-specific)
  - Update getwork command to accept
- Continue cleanup and work on implementing the RPC API:
  - Implement getnettotals command
    (https://github.com/conformal/btcd/issues/84)
  - Implement networkhashps command
    (https://github.com/conformal/btcd/issues/87)
  - Update getpeerinfo to always include syncnode field even when false
  - Remove help addenda for getpeerinfo now that it supports all fields
- Close standard RPC connections on auth failure
- Provide a --rpcmaxclients option to allow limiting the number of
  concurrent RPC clients (https://github.com/conformal/btcd/issues/68)
- Include IP address in RPC auth failure log messages
- Resolve a rather harmless data race found by the race detector
  (https://github.com/conformal/btcd/issues/94)
- Increase block priority size and max standard transaction size to 50k
  and 100k, respectively (https://github.com/conformal/btcd/issues/71)
- Add rate limiting of free transactions to the memory pool to prevent
  penny flooding (https://github.com/conformal/btcd/issues/40); see the
  sketch after this section
- Provide a --logdir option (https://github.com/conformal/btcd/issues/95)
- Change the default log file path to include the network
- Add a new ScriptBuilder interface to btcscript to support creation of
  custom scripts (https://github.com/conformal/btcscript/issues/5)
- General code cleanup
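A sketch of a decaying rate limiter of the kind used for penny-flood protection (see the reference in the section above): each free transaction's size feeds a counter that decays exponentially over time, and a transaction is rejected once admitting it would exceed the configured budget. The window length and constants are illustrative, not btcd's exact values.

package main

import (
	"fmt"
	"math"
	"time"
)

// FreeTxLimiter admits free transactions only while a decayed byte counter
// stays under the configured budget.
type FreeTxLimiter struct {
	limit      float64 // budget in bytes per decay window
	penalty    float64 // decayed bytes admitted so far
	lastUpdate time.Time
}

// Allow decays the counter for the time elapsed since the last call, then
// admits the transaction only if it fits the remaining budget.
func (l *FreeTxLimiter) Allow(sizeBytes int, now time.Time) bool {
	elapsed := now.Sub(l.lastUpdate).Minutes()
	l.penalty *= math.Exp(-elapsed / 10) // ~1/e decay per 10 minutes
	l.lastUpdate = now

	if l.penalty+float64(sizeBytes) > l.limit {
		return false // would exceed the free-relay budget
	}
	l.penalty += float64(sizeBytes)
	return true
}

func main() {
	l := &FreeTxLimiter{limit: 15 * 1000, lastUpdate: time.Now()}
	fmt.Println(l.Allow(250, time.Now()))   // true: well under budget
	fmt.Println(l.Allow(20000, time.Now())) // false: exceeds budget
}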
|
||||
Changes in 0.6.0 (Tue Feb 04 2014)
|
||||
- Fix an issue when parsing scripts which contain invalid signatures that
|
||||
caused a chain fork on block
|
||||
0000000000000001e4241fd0b3469a713f41c5682605451c05d3033288fb2244
|
||||
- Correct an issue which could lead to an error in removeBlockNode
|
||||
(https://github.com/conformal/btcchain/issues/4)
|
||||
- Improve addblock utility as follows:
|
||||
- Check imported blocks against all chain rules and checkpoints
|
||||
- Skip blocks which are already known so you can stop and restart the
|
||||
import or start the import after you have already downloaded a portion
|
||||
of the chain
|
||||
- Correct an issue where the utility did not shutdown cleanly after
|
||||
processing all blocks
|
||||
- Add error on attempt to import orphan blocks
|
||||
- Improve error handling and reporting
|
||||
- Display statistics after input file has been fully processed
|
||||
- Rework, optimize, and improve headers-first mode:
|
||||
- Resuming the chain sync from any point before the final checkpoint
|
||||
will now use headers-first mode
|
||||
(https://github.com/conformal/btcd/issues/69)
|
||||
- Verify all checkpoints as opposed to only the final one
|
||||
- Reduce and bound memory usage
|
||||
- Rollback to the last known good point when a header does not match a
|
||||
checkpoint
|
||||
- Log information about what is happening with headers
|
||||
- Improve btcctl utility in the following ways:
|
||||
- Add getaddednodeinfo command
|
||||
- Add getnettotals command
|
||||
- Add getblocktemplate command (wallet-specific)
|
||||
- Add getwork command (wallet-specific)
|
||||
- Add getnewaddress command (wallet-specific)
|
||||
- Add walletpassphrasechange command (wallet-specific)
|
||||
- Add walletlock command (wallet-specific)
|
||||
- Add sendfrom command (wallet-specific)
|
||||
- Add sendmany command (wallet-specific)
|
||||
- Add settxfee command (wallet-specific)
|
||||
- Add listsinceblock command (wallet-specific)
|
||||
- Add listaccounts command (wallet-specific)
|
||||
- Add keypoolrefill command (wallet-specific)
|
||||
- Add getreceivedbyaccount command (wallet-specific)
|
||||
- Add getrawchangeaddress command (wallet-specific)
|
||||
- Add gettxoutsetinfo command (wallet-specific)
|
||||
- Add listaddressgroupings command (wallet-specific)
|
||||
- Add listlockunspent command (wallet-specific)
|
||||
- Add listlock command (wallet-specific)
|
||||
- Add listreceivedbyaccount command (wallet-specific)
|
||||
- Add validateaddress command (wallet-specific)
|
||||
- Add verifymessage command (wallet-specific)
|
||||
- Add sendtoaddress command (wallet-specific)
|
||||
- Continue cleanup and work on implementing the RPC API:
|
||||
- Implement submitblock command
|
||||
(https://github.com/conformal/btcd/issues/61)
|
||||
- Implement help command
|
||||
- Implement ping command
|
||||
- Implement getaddednodeinfo command
|
||||
(https://github.com/conformal/btcd/issues/78)
|
||||
- Implement getinfo command
|
||||
- Update getpeerinfo to support bytesrecv and bytessent
|
||||
(https://github.com/conformal/btcd/issues/83)
|
||||
- Improve and correct several RPC server and websocket areas:
|
||||
- Change the connection endpoint for websockets from /wallet to /ws
|
||||
(https://github.com/conformal/btcd/issues/80)
|
||||
- Implement an alternative authentication for websockets so clients
|
||||
such as javascript from browsers that don't support setting HTTP
|
||||
headers can authenticate (https://github.com/conformal/btcd/issues/77)
|
||||
- Add an authentication deadline for RPC connections
|
||||
(https://github.com/conformal/btcd/issues/68)
|
||||
- Use standard authentication failure responses for RPC connections
|
||||
- Make automatically generated certificate more standard so it works
|
||||
from client such as node.js and Firefox
|
||||
- Correct some minor issues which could prevent the RPC server from
|
||||
shutting down in an orderly fashion
|
||||
- Make all websocket notifications require registration
|
||||
- Change the data sent over websockets to text since it is JSON-RPC
|
||||
- Allow connections that do not have an Origin header set
|
||||
- Expose and track the number of bytes read and written per peer
|
||||
(https://github.com/conformal/btcwire/issues/6)
|
||||
- Correct an issue with sendrawtransaction when invoked via websockets
|
||||
which prevented a minedtx notification from being added
|
||||
- Rescan operations issued from remote wallets are no stopped when
|
||||
the wallet disconnects mid-operation
|
||||
(https://github.com/conformal/btcd/issues/66)
|
||||
- Several optimizations related to fetching block information from the
|
||||
database
|
||||
- General code cleanup
|
||||
|
||||
Changes in 0.5.0 (Mon Jan 13 2014)
|
||||
- Optimize initial block download by introducing a new mode which
|
||||
downloads the block headers first (up to the final checkpoint)
|
||||
- Improve peer handling to remove the potential for slow peers to cause
|
||||
sluggishness amongst all peers
|
||||
(https://github.com/conformal/btcd/issues/63)
|
||||
- Fix an issue where the initial block sync could stall when the sync peer
|
||||
disconnects (https://github.com/conformal/btcd/issues/62)
|
||||
- Correct an issue where --externalip was doing a DNS lookup on the full
|
||||
host:port instead of just the host portion
|
||||
(https://github.com/conformal/btcd/issues/38)
|
||||
- Fix an issue which could lead to a panic on chain switches
|
||||
(https://github.com/conformal/btcd/issues/70)
|
||||
- Improve btcctl utility in the following ways:
|
||||
- Show getdifficulty output as floating point to 6 digits of precision
|
||||
- Show all JSON object replies formatted as standard JSON
|
||||
- Allow btcctl getblock to accept optional params
|
||||
- Add getaccount command (wallet-specific)
|
||||
- Add getaccountaddress command (wallet-specific)
|
||||
- Add sendrawtransaction command
|
||||
- Continue cleanup and work on implementing RPC API calls
|
||||
- Update getrawmempool to support new optional verbose flag
|
||||
- Update getrawtransaction to match the reference client
|
||||
- Update getblock to support new optional verbose flag
|
||||
- Update raw transactions to fully match the reference client including
|
||||
support for all transaction types and address types
|
||||
- Correct getrawmempool fee field to return BTC instead of Satoshi
|
||||
- Correct getpeerinfo service flag to return 8 digit string so it
|
||||
matches the reference client
|
||||
- Correct verifychain to return a boolean
|
||||
- Implement decoderawtransaction command
|
||||
- Implement createrawtransaction command
|
||||
- Implement decodescript command
|
||||
- Implement gethashespersec command
|
||||
- Allow RPC handler overrides when invoked via a websocket versus
|
||||
legacy connection
|
||||
- Add new DNS seed for peer discovery
|
||||
- Display user agent on new valid peer log message
|
||||
(https://github.com/conformal/btcd/issues/64)
|
||||
- Notify wallet when new transactions that pay to registered addresses
|
||||
show up in the mempool before being mined into a block
|
||||
- Support a tor-specific proxy in addition to a normal proxy
|
||||
(https://github.com/conformal/btcd/issues/47)
|
||||
- Remove deprecated sqlite3 imports from utilities
|
||||
- Remove leftover profile write from addblock utility
|
||||
- Quite a bit of code cleanup and refactoring to improve maintainability
|
||||
|
||||
Changes in 0.4.0 (Thu Dec 12 2013)
- Allow listen interfaces to be specified via --listen instead of only the
  port (https://github.com/conformal/btcd/issues/33)
- Allow listen interfaces for the RPC server to be specified via
  --rpclisten instead of only the port
  (https://github.com/conformal/btcd/issues/34)
- Only disable listening when --connect or --proxy are used when no
  --listen interfaces are specified
  (https://github.com/conformal/btcd/issues/10)
- Add several new standard transaction checks to transaction memory pool:
  - Support nulldata scripts as standard
  - Only allow a max of one nulldata output per transaction
  - Enforce a maximum of 3 public keys in multi-signature transactions
  - The number of signatures in multi-signature transactions must not
    exceed the number of public keys
  - The number of inputs to a signature script must match the expected
    number of inputs for the script type
  - The number of inputs pushed onto the stack by a redeeming signature
    script must match the number of inputs consumed by the referenced
    public key script
- When a block is connected, remove any transactions from the memory pool
  which are now double spends as a result of the newly connected
  transactions
- Don't relay transactions resurrected during a chain switch since
  other peers will also be switching chains and therefore already know
  about them
- Clean up a few cases where rejected transactions showed as an error
  rather than as a rejected transaction
- Ignore the default configuration file when --regtest (regression test
  mode) is specified
- Implement TLS support for RPC including automatic certificate generation
- Support HTTP authentication headers for websockets
- Update address manager to recognize and properly work with Tor
  addresses (https://github.com/conformal/btcd/issues/36) and
  (https://github.com/conformal/btcd/issues/37)
- Improve btcctl utility in the following ways:
  - Add the ability to specify a configuration file
  - Add a default entry for the RPC cert to point to the location
    it will likely be in the btcd home directory
  - Implement --version flag
  - Provide a --notls option to support non-TLS configurations
- Fix a couple of minor races found by the Go race detector
- Improve logging
  - Allow logging level to be specified on a per-subsystem basis
    (https://github.com/conformal/btcd/issues/48)
  - Allow logging levels to be dynamically changed via RPC
    (https://github.com/conformal/btcd/issues/15)
  - Implement a rolling log file with a max of 10MB per file and a
    rotation size of 3 which results in a max logging size of 30 MB
- Correct a minor issue with the rescanning websocket call
  (https://github.com/conformal/btcd/issues/54)
- Fix a race with pushing address messages that could lead to a panic
  (https://github.com/conformal/btcd/issues/58)
- Improve which external IP address is reported to peers based on which
  interface they are connected through
  (https://github.com/conformal/btcd/issues/35)
- Add --externalip option to allow an external IP address to be specified
  for cases such as Tor hidden services or advanced network configurations
  (https://github.com/conformal/btcd/issues/38)
- Add --upnp option to support automatic port mapping via UPnP
  (https://github.com/conformal/btcd/issues/51)
- Update Ctrl+C interrupt handler to properly sync the address manager and
  remove the UPnP port mapping (if needed)
- Continue cleanup and work on implementing RPC API calls
  - Add importprivkey (import private key) command to btcctl
  - Update getrawtransaction to provide addresses properly, support
    new verbose param, and match the reference implementation with the
    exception of MULTISIG (thanks @flammit)
  - Update getblock with new verbose flag (thanks @flammit)
  - Add listtransactions command to btcctl
  - Add getbalance command to btcctl
- Add basic support for btcd to run as a native Windows service
  (https://github.com/conformal/btcd/issues/42)
- Package addblock utility with Windows MSIs
- Add support for TravisCI (continuous build integration)
- Clean up some documentation and usage
- Several other minor bug fixes and general code cleanup

Changes in 0.3.3 (Wed Nov 13 2013)
- Significantly improve initial block chain download speed
  (https://github.com/conformal/btcd/issues/20)
- Add a new checkpoint at block height 267300
- Optimize most recently used inventory handling
  (https://github.com/conformal/btcd/issues/21)
- Optimize duplicate transaction input check
  (https://github.com/conformal/btcchain/issues/2)
- Optimize transaction hashing
  (https://github.com/conformal/btcd/issues/25)
- Rework and optimize wallet listener notifications
  (https://github.com/conformal/btcd/issues/22)
- Optimize serialization and deserialization
  (https://github.com/conformal/btcd/issues/27)
- Add support for minimum transaction fee to memory pool acceptance
  (https://github.com/conformal/btcd/issues/29)
- Improve leveldb database performance by removing explicit GC call
- Fix an issue where Ctrl+C was not always finishing an orderly database
  shutdown
- Fix an issue in the script handling for OP_CHECKSIG
- Impose max limits on all variable length protocol entries to prevent
  abuse from malicious peers
- Enforce DER signatures for transactions allowed into the memory pool
- Separate the debug profile http server from the RPC server
- Rework of the RPC code to improve performance and make the code cleaner
- The getrawtransaction RPC call now properly checks the memory pool
  before consulting the db (https://github.com/conformal/btcd/issues/26)
- Add support for the following RPC calls: getpeerinfo, getconnectioncount,
  addnode, verifychain
  (https://github.com/conformal/btcd/issues/13)
  (https://github.com/conformal/btcd/issues/17)
- Implement rescan websocket extension to allow wallet rescans
- Use correct paths for application data storage for all supported
  operating systems (https://github.com/conformal/btcd/issues/30)
- Add a default redirect to the http profiling page when accessing the
  http profile server
- Add a new --cpuprofile option which can be used to generate CPU
  profiling data on platforms that support it
- Several other minor performance optimizations
- Other minor bug fixes and general code cleanup

Changes in 0.3.2 (Tue Oct 22 2013)
- Fix an issue that could cause the download of the block chain to stall
  (https://github.com/conformal/btcd/issues/12)
- Remove deprecated sqlite as an available database backend
- Close sqlite compile issue as sqlite has now been removed
  (https://github.com/conformal/btcd/issues/11)
- Change default RPC ports to 8334 (mainnet) and 18334 (testnet)
- Continue cleanup and work on implementing RPC API calls
  - Add support for the following RPC calls: getrawmempool,
    getbestblockhash, decoderawtransaction, getdifficulty,
    getconnectioncount, getpeerinfo, and addnode
  - Improve the btcctl utility that is used to issue JSON-RPC commands
- Fix an issue preventing btcd from cleanly shutting down with the RPC
  stop command
- Add a number of database interface tests to ensure backends implement
  the expected interface
- Expose some additional information from btcscript to be used for
  identifying "standard" transactions
- Add support for plan9 - thanks @mischief
  (https://github.com/conformal/btcd/pull/19)
- Other minor bug fixes and general code cleanup

Changes in 0.3.1-alpha (Tue Oct 15 2013)
- Change default database to leveldb
  NOTE: This does mean you will have to redownload the block chain. Since we
  are still in alpha, we didn't feel writing a converter was worth the time, as
  it would take away from more important issues at this stage
- Add a warning if there are multiple block chain databases of different types
- Fix issue with unexpected EOF in leveldb -- https://github.com/conformal/btcd/issues/18
- Fix issue preventing block 21066 on testnet -- https://github.com/conformal/btcchain/issues/1
- Fix issue preventing block 96464 on testnet -- https://github.com/conformal/btcscript/issues/1
- Optimize transaction lookups
- Correct a few cases of list removal that could result in improper cleanup
  of no longer needed orphans
- Add functionality to increase ulimits on non-Windows platforms
- Add support for the mempool command, which allows remote peers to query the
  transaction memory pool via the bitcoin protocol
- Clean up logging a bit
- Add a flag to disable checkpoints for developers
- Add a lot of useful debug logging such as message summaries
- Other minor bug fixes and general code cleanup

Initial Release 0.3.0-alpha (Sat Oct 05 2013):
- Initial release
105 Gopkg.lock (generated)
@@ -1,105 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  name = "bou.ke/monkey"
  packages = ["."]
  revision = "bdf6dea004c6fd1cdf4b25da8ad45a606c09409a"
  version = "v1.0.1"

[[projects]]
  name = "github.com/aead/siphash"
  packages = ["."]
  revision = "83563a290f60225eb120d724600b9690c3fb536f"
  version = "v1.0.1"

[[projects]]
  branch = "master"
  name = "github.com/btcsuite/btclog"
  packages = ["."]
  revision = "84c8d2346e9fc8c7b947e243b9c24e6df9fd206a"

[[projects]]
  branch = "master"
  name = "github.com/btcsuite/go-socks"
  packages = ["socks"]
  revision = "4720035b7bfd2a9bb130b1c184f8bbe41b6f0d0f"

[[projects]]
  name = "github.com/btcsuite/goleveldb"
  packages = ["leveldb","leveldb/cache","leveldb/comparer","leveldb/errors","leveldb/filter","leveldb/iterator","leveldb/journal","leveldb/memdb","leveldb/opt","leveldb/storage","leveldb/table","leveldb/util"]
  revision = "3fd0373267b6461dbefe91cef614278064d05465"
  version = "v1.0.0"

[[projects]]
  name = "github.com/btcsuite/snappy-go"
  packages = ["."]
  revision = "b3db38edf0a9a11a115eb6b022d8c946024a9ac0"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  name = "github.com/btcsuite/websocket"
  packages = ["."]
  revision = "31079b6807923eb23992c421b114992b95131b55"

[[projects]]
  name = "github.com/btcsuite/winsvc"
  packages = ["eventlog","mgr","registry","svc","winapi"]
  revision = "f8fb11f83f7e860e3769a08e6811d1b399a43722"
  version = "v1.0.0"

[[projects]]
  name = "github.com/davecgh/go-spew"
  packages = ["spew"]
  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
  version = "v1.1.1"

[[projects]]
  name = "github.com/jessevdk/go-flags"
  packages = ["."]
  revision = "c6ca198ec95c841fdb89fc0de7496fed11ab854e"
  version = "v1.4.0"

[[projects]]
  name = "github.com/jrick/logrotate"
  packages = ["rotator"]
  revision = "a93b200c26cbae3bb09dd0dc2c7c7fe1468a034a"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  name = "github.com/kkdai/bstream"
  packages = ["."]
  revision = "b3251f7901ec4dd4ec66b3210e8f4bd5c0f1c5a3"

[[projects]]
  name = "github.com/miekg/dns"
  packages = ["."]
  revision = "cc8cd02140663157ce797c6650488d6c8563f31f"
  version = "v1.1.6"

[[projects]]
  branch = "master"
  name = "golang.org/x/crypto"
  packages = ["ed25519","ed25519/internal/edwards25519","ripemd160"]
  revision = "c2843e01d9a2bc60bb26ad24e09734fdc2d9ec58"

[[projects]]
  branch = "master"
  name = "golang.org/x/net"
  packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6"]
  revision = "d8887717615a059821345a5c23649351b52a1c0b"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = ["unix"]
  revision = "fead79001313d15903fb4605b4a1b781532cd93e"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "00392a00928f96fc94e2c8c65ce3a98cc6f5e2f93dda64d3c4502f2f38026e96"
  solver-name = "gps-cdcl"
  solver-version = 1
78 Gopkg.toml
@@ -1,78 +0,0 @@

# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"


[[constraint]]
  name = "bou.ke/monkey"
  version = "1.0.1"

[[constraint]]
  name = "github.com/aead/siphash"
  version = "1.0.1"

[[constraint]]
  branch = "master"
  name = "github.com/btcsuite/btclog"

[[constraint]]
  branch = "master"
  name = "github.com/btcsuite/go-socks"

[[constraint]]
  name = "github.com/btcsuite/goleveldb"
  version = "1.0.0"

[[constraint]]
  branch = "master"
  name = "github.com/btcsuite/websocket"

[[constraint]]
  name = "github.com/btcsuite/winsvc"
  version = "1.0.0"

[[constraint]]
  name = "github.com/davecgh/go-spew"
  version = "1.1.1"

[[constraint]]
  name = "github.com/jessevdk/go-flags"
  version = "1.4.0"

[[constraint]]
  name = "github.com/jrick/logrotate"
  version = "1.0.0"

[[constraint]]
  branch = "master"
  name = "github.com/kkdai/bstream"

[[constraint]]
  name = "github.com/miekg/dns"
  version = "1.1.6"

[[constraint]]
  branch = "master"
  name = "golang.org/x/crypto"

[prune]
  go-tests = true
  unused-packages = true
10 Jenkinsfile (vendored)
@@ -1,10 +0,0 @@
node {
    stage 'Checkout'
    checkout scm

    stage 'Version'
    sh './deploy.sh version'

    stage 'Build'
    sh "./deploy.sh build"
}
3 LICENSE
@@ -1,8 +1,9 @@
ISC License

Copyright (c) 2018-2019 DAGLabs
Copyright (c) 2018-2019 The kaspanet developers
Copyright (c) 2013-2018 The btcsuite developers
Copyright (c) 2015-2016 The Decred developers
Copyright (c) 2013-2014 Conformal Systems LLC.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above

125 README.md
@@ -1,49 +1,24 @@
btcd

Kaspad
====
Warning: This is pre-alpha software. There's no guarantee anything works.
====

[Build Status](https://travis-ci.org/btcsuite/btcd)
[ISC License](http://copyfree.org)
[GoDoc](http://godoc.org/github.com/daglabs/btcd)
[ISC License](https://choosealicense.com/licenses/isc/)
[GoDoc](http://godoc.org/github.com/kaspanet/kaspad)

btcd is an alternative full node bitcoin implementation written in Go (golang).
Kaspad is the reference full node Kaspa implementation written in Go (golang).

This project is currently under active development and is in a Beta state. It
is extremely stable and has been in production use since October 2013.

It properly downloads, validates, and serves the block chain using the exact
rules (including consensus bugs) for block acceptance as Bitcoin Core. We have
taken great care to avoid btcd causing a fork to the block chain. It includes a
full block validation testing framework which contains all of the 'official'
block acceptance tests (and some additional ones) that is run on every pull
request to help ensure it properly follows consensus. Also, it passes all of
the JSON test data in the Bitcoin Core code.

It also properly relays newly mined blocks, maintains a transaction pool, and
relays individual transactions that have not yet made it into a block. It
ensures all individual transactions admitted to the pool follow the rules
required by the block chain and also includes more strict checks which filter
transactions based on miner requirements ("standard" transactions).

One key difference between btcd and Bitcoin Core is that btcd does *NOT* include
wallet functionality and this was a very intentional design decision. See the
blog entry [here](https://blog.conformal.com/btcd-not-your-moms-bitcoin-daemon)
for more details. This means you can't actually make or receive payments
directly with btcd. That functionality is provided by the
[btcwallet](https://github.com/btcsuite/btcwallet) and
[Paymetheus](https://github.com/btcsuite/Paymetheus) (Windows-only) projects
which are both under active development.
This project is currently under active development and is in a pre-Alpha state.
Some things still don't work and APIs are far from finalized. The code is provided for reference only.

## Requirements

[Go](http://golang.org) 1.8 or newer.
Latest version of [Go](http://golang.org) (currently 1.13).

## Installation

#### Windows - MSI Available

https://github.com/daglabs/btcd/releases

#### Linux/BSD/MacOSX/POSIX - Build from Source
#### Build from Source

- Install Go according to the installation instructions here:
  http://golang.org/doc/install
@@ -55,92 +30,50 @@ $ go version
$ go env GOROOT GOPATH
```

NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
recommended that `GOPATH` is set to a directory in your home directory such as
`~/goprojects` to avoid write permission issues. It is also recommended to add
`~/dev/go` to avoid write permission issues. It is also recommended to add
`$GOPATH/bin` to your `PATH` at this point.

- Run the following commands to obtain btcd, all dependencies, and install it:
- Run the following commands to obtain and install kaspad including all dependencies:

```bash
$ # Install dep: https://golang.github.io/dep/docs/installation.html
$ git clone https://github.com/daglabs/btcd $GOPATH/src/github.com/daglabs/btcd
$ cd $GOPATH/src/github.com/daglabs/btcd
$ dep ensure
$ git clone https://github.com/kaspanet/kaspad $GOPATH/src/github.com/kaspanet/kaspad
$ cd $GOPATH/src/github.com/kaspanet/kaspad
$ ./test.sh
$ go install . ./cmd/...
```
`./test.sh` tests can be skipped, but some things might not run correctly on your system if tests fail.

- btcd (and utilities) will now be installed in ```$GOPATH/bin```. If you did
- Kaspad (and utilities) should now be installed in `$GOPATH/bin`. If you did
  not already add the bin directory to your system path during Go installation,
  we recommend you do so now.
  you are encouraged to do so now.

## Updating

#### Windows

Install a newer MSI

#### Linux/BSD/MacOSX/POSIX - Build from Source

- Run the following commands to update btcd, all dependencies, and install it:

```bash
$ cd $GOPATH/src/github.com/daglabs/btcd
$ git pull && dep ensure
$ go install . ./cmd/...
```

## Getting Started

btcd has several configuration options available to tweak how it runs, but all
of the basic operations described in the intro section work with zero
configuration.

#### Windows (Installed from MSI)

Launch btcd from your Start menu.
Kaspad has several configuration options available to tweak how it runs, but all
of the basic operations work with zero configuration.

#### Linux/BSD/POSIX/Source

```bash
$ ./btcd
$ ./kaspad
```

## IRC

- irc.freenode.net
- channel #btcd
- [webchat](https://webchat.freenode.net/?channels=btcd)
## Discord
Join our discord server using the following link: https://discord.gg/WmGhhzk

## Issue Tracker

The [integrated github issue tracker](https://github.com/daglabs/btcd/issues)
The [integrated github issue tracker](https://github.com/kaspanet/kaspad/issues)
is used for this project.

## Documentation

The documentation is a work-in-progress. It is located in the [docs](https://github.com/daglabs/btcd/tree/master/docs) folder.

## GPG Verification Key

All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:

- Download the Conformal public key:
  https://raw.githubusercontent.com/btcsuite/btcd/master/release/GIT-GPG-KEY-conformal.txt

- Import the public key into your GPG keyring:
  ```bash
  gpg --import GIT-GPG-KEY-conformal.txt
  ```

- Verify the release tag with the following command where `TAG_NAME` is a
  placeholder for the specific tag:
  ```bash
  git tag -v TAG_NAME
  ```
The documentation is a work-in-progress. It is located in the [docs](https://github.com/kaspanet/kaspad/tree/master/docs) folder.

## License

btcd is licensed under the [copyfree](http://copyfree.org) ISC License.
Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).

File diff suppressed because it is too large
@@ -1,17 +0,0 @@
#!/bin/sh

# This script uses gocov to generate a test coverage report.
# The gocov tool may be obtained with the following command:
#   go get github.com/axw/gocov/gocov
#
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.

# Check for gocov.
type gocov >/dev/null 2>&1
if [ $? -ne 0 ]; then
	echo >&2 "This script requires the gocov tool."
	echo >&2 "You may obtain it with the following command:"
	echo >&2 "go get github.com/axw/gocov/gocov"
	exit 1
fi
gocov test | gocov report
@@ -1,38 +0,0 @@
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package addrmgr implements a concurrency safe Bitcoin address manager.

Address Manager Overview

In order to maintain the peer-to-peer Bitcoin network, there needs to be a
source of addresses to connect to as nodes come and go. The Bitcoin protocol
provides the getaddr and addr messages to allow peers to communicate known
addresses with each other. However, there needs to be a mechanism to store
those results and select peers from them. It is also important to note that
remote peers can't be trusted to send valid peers, and may even attempt to
provide you with only peers they control with malicious intent.

With that in mind, this package provides a concurrency safe address manager
for caching and selecting peers in a non-deterministic manner. The general
idea is that the caller adds addresses to the address manager and notifies it
when addresses are connected, known good, and attempted. The caller also
requests addresses as it needs them.

The address manager internally segregates the addresses into groups and
non-deterministically selects groups in a cryptographically random manner.
This reduces the chances that multiple addresses from the same nets are
selected, which generally helps provide greater peer diversity, and perhaps
more importantly, drastically reduces the chances an attacker is able to
coerce your peer into only connecting to nodes they control.

The address manager also understands routability and Tor addresses and tries
hard to only return routable addresses. In addition, it uses the information
provided by the caller about connected, known good, and attempted addresses to
periodically purge peers which no longer appear to be good peers as well as
bias the selection toward known good peers. The general idea is to make a best
effort at only providing usable addresses.
*/
package addrmgr
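The overview above describes a concrete caller workflow: add addresses, record attempts and successes, and ask for candidates. A minimal sketch of that loop follows. The exported names (New, Start, AddAddresses, Attempt, Good, GetAddress, Stop) also appear in the coverage report later in this changeset, but the exact signatures are assumed from the upstream btcd addrmgr and may differ in this fork.

```go
package main

import (
	"net"

	"github.com/daglabs/btcd/addrmgr"
	"github.com/daglabs/btcd/wire"
)

func main() {
	// New takes a data directory (for the serialized peers file) and a
	// DNS lookup function; net.LookupIP has the expected signature.
	amgr := addrmgr.New("/tmp/addrmgrdata", net.LookupIP)
	amgr.Start()
	defer amgr.Stop()

	// Feed in addresses learned from a peer's addr message. The second
	// argument records which peer told us about them.
	source := wire.NewNetAddressIPPort(net.ParseIP("203.0.113.1"), 8333, wire.SFNodeNetwork)
	learned := wire.NewNetAddressIPPort(net.ParseIP("198.51.100.7"), 8333, wire.SFNodeNetwork)
	amgr.AddAddresses([]*wire.NetAddress{learned}, source)

	// Ask for a candidate to dial, then report the outcome so the manager
	// can bias future selection toward known good peers.
	if ka := amgr.GetAddress(); ka != nil {
		na := ka.NetAddress()
		amgr.Attempt(na) // we tried to connect
		amgr.Good(na)    // the connection and handshake succeeded
	}
}
```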
@@ -1,25 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr

import (
	"time"

	"github.com/daglabs/btcd/wire"
)

func TstKnownAddressIsBad(ka *KnownAddress) bool {
	return ka.isBad()
}

func TstKnownAddressChance(ka *KnownAddress) float64 {
	return ka.chance()
}

func TstNewKnownAddress(na *wire.NetAddress, attempts int,
	lastattempt, lastsuccess time.Time, tried bool, refs int) *KnownAddress {
	return &KnownAddress{na: na, attempts: attempts, lastattempt: lastattempt,
		lastsuccess: lastsuccess, tried: tried, refs: refs}
}
@@ -1,114 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr_test

import (
	"math"
	"testing"
	"time"

	"github.com/daglabs/btcd/addrmgr"
	"github.com/daglabs/btcd/wire"
)

func TestChance(t *testing.T) {
	now := time.Unix(time.Now().Unix(), 0)
	var tests = []struct {
		addr     *addrmgr.KnownAddress
		expected float64
	}{
		{
			// Test normal case
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
			1.0,
		}, {
			// Test case in which lastseen < 0
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)},
				0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
			1.0,
		}, {
			// Test case in which lastattempt < 0
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				0, time.Now().Add(30*time.Minute), time.Now(), false, 0),
			1.0 * .01,
		}, {
			// Test case in which lastattempt < ten minutes
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				0, time.Now().Add(-5*time.Minute), time.Now(), false, 0),
			1.0 * .01,
		}, {
			// Test case with several failed attempts.
			addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
				2, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
			1 / 1.5 / 1.5,
		},
	}

	err := .0001
	for i, test := range tests {
		chance := addrmgr.TstKnownAddressChance(test.addr)
		if math.Abs(test.expected-chance) >= err {
			t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
		}
	}
}

func TestIsBad(t *testing.T) {
	now := time.Unix(time.Now().Unix(), 0)
	future := now.Add(35 * time.Minute)
	monthOld := now.Add(-43 * time.Hour * 24)
	secondsOld := now.Add(-2 * time.Second)
	minutesOld := now.Add(-27 * time.Minute)
	hoursOld := now.Add(-5 * time.Hour)
	zeroTime := time.Time{}

	futureNa := &wire.NetAddress{Timestamp: future}
	minutesOldNa := &wire.NetAddress{Timestamp: minutesOld}
	monthOldNa := &wire.NetAddress{Timestamp: monthOld}
	currentNa := &wire.NetAddress{Timestamp: secondsOld}

	// Test addresses that have been tried in the last minute.
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
		t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
	}
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
		t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
	}
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
		t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
	}
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
		t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
	}
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
		t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
	}

	// Test address that claims to be from the future.
	if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
		t.Errorf("test case 6: addresses that claim to be from the future are bad.")
	}

	// Test address that has not been seen in over a month.
	if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
		t.Errorf("test case 7: addresses more than a month old are bad.")
	}

	// It has failed at least three times and never succeeded.
	if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
		t.Errorf("test case 8: addresses that have never succeeded are bad.")
	}

	// It has failed ten times in the last week.
	if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
		t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
	}

	// Test an address that should work.
	if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
		t.Errorf("test case 10: This should be a valid address.")
	}
}
@@ -1,19 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr

import (
	"github.com/btcsuite/btclog"
	"github.com/daglabs/btcd/logger"
)

// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger

func init() {
	log, _ = logger.Get(logger.SubsystemTags.ADXR)
}
@@ -1,62 +0,0 @@

github.com/conformal/btcd/addrmgr/network.go GroupKey 100.00% (23/23)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.reset 100.00% (6/6)
github.com/conformal/btcd/addrmgr/network.go IsRFC5737 100.00% (4/4)
github.com/conformal/btcd/addrmgr/network.go IsRFC1918 100.00% (4/4)
github.com/conformal/btcd/addrmgr/addrmanager.go New 100.00% (3/3)
github.com/conformal/btcd/addrmgr/addrmanager.go NetAddressKey 100.00% (2/2)
github.com/conformal/btcd/addrmgr/network.go IsRFC4862 100.00% (1/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.numAddresses 100.00% (1/1)
github.com/conformal/btcd/addrmgr/log.go init 100.00% (1/1)
github.com/conformal/btcd/addrmgr/log.go DisableLog 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go ipNet 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsIPv4 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsLocal 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsOnionCatTor 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC2544 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3849 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3927 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3964 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4193 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4380 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4843 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6052 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6145 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6598 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsValid 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRoutable 100.00% (1/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetBestLocalAddress 94.74% (18/19)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddLocalAddress 90.91% (10/11)
github.com/conformal/btcd/addrmgr/addrmanager.go getReachabilityFrom 51.52% (17/33)
github.com/conformal/btcd/addrmgr/addrmanager.go ipString 50.00% (2/4)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetAddress 9.30% (4/43)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.deserializePeers 0.00% (0/50)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Good 0.00% (0/44)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.savePeers 0.00% (0/39)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.updateAddress 0.00% (0/30)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.expireNew 0.00% (0/22)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddressCache 0.00% (0/16)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.HostToNetAddress 0.00% (0/15)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getNewBucket 0.00% (0/15)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddressByIP 0.00% (0/14)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getTriedBucket 0.00% (0/14)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.chance 0.00% (0/13)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.loadPeers 0.00% (0/11)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.isBad 0.00% (0/11)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Connected 0.00% (0/10)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.addressHandler 0.00% (0/9)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.pickTried 0.00% (0/8)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.DeserializeNetAddress 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Stop 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Attempt 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Start 0.00% (0/6)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddresses 0.00% (0/4)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NeedMoreAddresses 0.00% (0/3)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NumAddresses 0.00% (0/3)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddress 0.00% (0/3)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.LastAttempt 0.00% (0/1)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.NetAddress 0.00% (0/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.find 0.00% (0/1)
github.com/conformal/btcd/addrmgr/log.go UseLogger 0.00% (0/1)
github.com/conformal/btcd/addrmgr --------------------------------- 21.04% (113/537)

247 app/app.go (new file)
@@ -0,0 +1,247 @@
package app

import (
	"fmt"
	"sync/atomic"

	"github.com/kaspanet/kaspad/network/addressmanager"

	"github.com/kaspanet/kaspad/network/netadapter/id"

	"github.com/kaspanet/kaspad/domain/blockdag"
	"github.com/kaspanet/kaspad/domain/blockdag/indexers"
	"github.com/kaspanet/kaspad/domain/mempool"
	"github.com/kaspanet/kaspad/domain/mining"
	"github.com/kaspanet/kaspad/domain/txscript"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/dbaccess"
	"github.com/kaspanet/kaspad/infrastructure/signal"
	"github.com/kaspanet/kaspad/network/connmanager"
	"github.com/kaspanet/kaspad/network/dnsseed"
	"github.com/kaspanet/kaspad/network/domainmessage"
	"github.com/kaspanet/kaspad/network/netadapter"
	"github.com/kaspanet/kaspad/network/protocol"
	"github.com/kaspanet/kaspad/network/rpc"
	"github.com/kaspanet/kaspad/util"
	"github.com/kaspanet/kaspad/util/panics"
)

// App is a wrapper for all the kaspad services
type App struct {
	cfg               *config.Config
	rpcServer         *rpc.Server
	addressManager    *addressmanager.AddressManager
	protocolManager   *protocol.Manager
	connectionManager *connmanager.ConnectionManager
	netAdapter        *netadapter.NetAdapter

	started, shutdown int32
}

// Start launches all the kaspad services.
func (a *App) Start() {
	// Already started?
	if atomic.AddInt32(&a.started, 1) != 1 {
		return
	}

	log.Trace("Starting kaspad")

	err := a.protocolManager.Start()
	if err != nil {
		panics.Exit(log, fmt.Sprintf("Error starting the p2p protocol: %+v", err))
	}

	a.maybeSeedFromDNS()

	a.connectionManager.Start()

	if !a.cfg.DisableRPC {
		a.rpcServer.Start()
	}
}

// Stop gracefully shuts down all the kaspad services.
func (a *App) Stop() {
	// Make sure this only happens once.
	if atomic.AddInt32(&a.shutdown, 1) != 1 {
		log.Infof("Kaspad is already in the process of shutting down")
		return
	}

	log.Warnf("Kaspad shutting down")

	a.connectionManager.Stop()

	err := a.protocolManager.Stop()
	if err != nil {
		log.Errorf("Error stopping the p2p protocol: %+v", err)
	}

	// Shut down the RPC server if it's not disabled.
	if !a.cfg.DisableRPC {
		err := a.rpcServer.Stop()
		if err != nil {
			log.Errorf("Error stopping rpcServer: %+v", err)
		}
	}

	err = a.addressManager.Stop()
	if err != nil {
		log.Errorf("Error stopping address manager: %s", err)
	}
}

// New returns a new App instance configured with the given cfg for the
// kaspa network type specified by its network parameters. Use Start to
// begin accepting connections from peers.
func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{}) (*App, error) {
	indexManager, acceptanceIndex := setupIndexes(cfg)

	sigCache := txscript.NewSigCache(cfg.SigCacheMaxSize)

	// Create a new block DAG instance with the appropriate configuration.
	dag, err := setupDAG(cfg, databaseContext, interrupt, sigCache, indexManager)
	if err != nil {
		return nil, err
	}

	txMempool := setupMempool(cfg, dag, sigCache)

	netAdapter, err := netadapter.NewNetAdapter(cfg)
	if err != nil {
		return nil, err
	}
	addressManager := addressmanager.New(cfg, databaseContext)

	connectionManager, err := connmanager.New(cfg, netAdapter, addressManager)
	if err != nil {
		return nil, err
	}

	protocolManager, err := protocol.NewManager(cfg, dag, netAdapter, addressManager, txMempool, connectionManager)
	if err != nil {
		return nil, err
	}
	rpcServer, err := setupRPC(
		cfg, dag, txMempool, sigCache, acceptanceIndex, connectionManager, addressManager, protocolManager)
	if err != nil {
		return nil, err
	}

	return &App{
		cfg:               cfg,
		rpcServer:         rpcServer,
		protocolManager:   protocolManager,
		connectionManager: connectionManager,
		netAdapter:        netAdapter,
		addressManager:    addressManager,
	}, nil
}

func (a *App) maybeSeedFromDNS() {
	if !a.cfg.DisableDNSSeed {
		dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, domainmessage.SFNodeNetwork, false, nil,
			a.cfg.Lookup, func(addresses []*domainmessage.NetAddress) {
				// Kaspad uses a lookup of the dns seeder here. Since the seeder
				// returns IPs of nodes and not its own IP, we cannot know the real
				// IP of the source, so we take the first returned address as the
				// source.
				a.addressManager.AddAddresses(addresses, addresses[0], nil)
			})
	}
}

func setupDAG(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{},
	sigCache *txscript.SigCache, indexManager blockdag.IndexManager) (*blockdag.BlockDAG, error) {

	dag, err := blockdag.New(&blockdag.Config{
		Interrupt:       interrupt,
		DatabaseContext: databaseContext,
		DAGParams:       cfg.NetParams(),
		TimeSource:      blockdag.NewTimeSource(),
		SigCache:        sigCache,
		IndexManager:    indexManager,
		SubnetworkID:    cfg.SubnetworkID,
	})
	return dag, err
}

func setupIndexes(cfg *config.Config) (blockdag.IndexManager, *indexers.AcceptanceIndex) {
	// Create indexes if needed.
	var indexes []indexers.Indexer
	var acceptanceIndex *indexers.AcceptanceIndex
	if cfg.AcceptanceIndex {
		log.Info("acceptance index is enabled")
		acceptanceIndex = indexers.NewAcceptanceIndex()
		indexes = append(indexes, acceptanceIndex)
	}

	// Create an index manager if any of the optional indexes are enabled.
	if len(indexes) == 0 {
		return nil, nil
	}
	indexManager := indexers.NewManager(indexes)
	return indexManager, acceptanceIndex
}

func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript.SigCache) *mempool.TxPool {
	mempoolConfig := mempool.Config{
		Policy: mempool.Policy{
			AcceptNonStd:    cfg.RelayNonStd,
			MaxOrphanTxs:    cfg.MaxOrphanTxs,
			MaxOrphanTxSize: config.DefaultMaxOrphanTxSize,
			MinRelayTxFee:   cfg.MinRelayTxFee,
			MaxTxVersion:    1,
		},
		CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
			return dag.CalcSequenceLockNoLock(tx, utxoSet)
		},
		SigCache: sigCache,
		DAG:      dag,
	}

	return mempool.New(&mempoolConfig)
}

func setupRPC(cfg *config.Config,
	dag *blockdag.BlockDAG,
	txMempool *mempool.TxPool,
	sigCache *txscript.SigCache,
	acceptanceIndex *indexers.AcceptanceIndex,
	connectionManager *connmanager.ConnectionManager,
	addressManager *addressmanager.AddressManager,
	protocolManager *protocol.Manager) (*rpc.Server, error) {

	if !cfg.DisableRPC {
		policy := mining.Policy{
			BlockMaxMass: cfg.BlockMaxMass,
		}
		blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, txMempool, dag, sigCache)

		rpcServer, err := rpc.NewRPCServer(cfg, dag, txMempool, acceptanceIndex, blockTemplateGenerator,
			connectionManager, addressManager, protocolManager)
		if err != nil {
			return nil, err
		}

		// Signal process shutdown when the RPC server requests it.
		spawn("setupRPC-handleShutdownRequest", func() {
			<-rpcServer.RequestedProcessShutdown()
			signal.ShutdownRequestChannel <- struct{}{}
		})

		return rpcServer, nil
	}
	return nil, nil
}

// P2PNodeID returns the network ID associated with this App
func (a *App) P2PNodeID() *id.ID {
	return a.netAdapter.ID()
}

// AddressManager returns the AddressManager associated with this App
func (a *App) AddressManager() *addressmanager.AddressManager {
	return a.addressManager
}
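The New/Start/Stop flow above is driven by a small amount of entry-point code that is not part of this diff. A minimal sketch under stated assumptions: config.LoadConfig, signal.InterruptListener, and dbaccess.New are taken from the packages imported above, but their exact signatures are not shown in this changeset and may differ.

```go
package main

import (
	"fmt"
	"os"

	"github.com/kaspanet/kaspad/app"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/dbaccess"
	"github.com/kaspanet/kaspad/infrastructure/signal"
)

func main() {
	// Assumed config loader; the real kaspad entry point parses flags and
	// a config file before reaching this point.
	cfg, err := config.LoadConfig()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Assumed helper that closes the returned channel on SIGINT/SIGTERM
	// or when the RPC server requests a shutdown (see setupRPC above).
	interrupt := signal.InterruptListener()

	// Assumed database constructor matching the *dbaccess.DatabaseContext
	// parameter that app.New expects.
	databaseContext, err := dbaccess.New(cfg.DataDir)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer databaseContext.Close()

	a, err := app.New(cfg, databaseContext, interrupt)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	a.Start()
	defer a.Stop()

	// Block until an interrupt or a shutdown request arrives.
	<-interrupt
}
```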
14 app/log.go (new file)
@@ -0,0 +1,14 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package app

import (
	"github.com/kaspanet/kaspad/infrastructure/logger"
	"github.com/kaspanet/kaspad/util/panics"
)

var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log)
@@ -1,103 +0,0 @@
blockchain
==========

[Build Status](https://travis-ci.org/btcsuite/btcd)
[ISC License](http://copyfree.org)
[GoDoc](http://godoc.org/github.com/daglabs/btcd/blockchain)

Package blockchain implements bitcoin block handling and chain selection rules.
The test coverage is currently only around 60%, but will be increasing over
time. See `test_coverage.txt` for the gocov coverage report. Alternatively, if
you are running a POSIX OS, you can run the `cov_report.sh` script for a
real-time report. Package blockchain is licensed under the liberal ISC license.

There is an associated blog post about the release of this package
[here](https://blog.conformal.com/btcchain-the-bitcoin-chain-package-from-bctd/).

This package has intentionally been designed so it can be used as a standalone
package for any projects needing to handle processing of blocks into the bitcoin
block chain.

## Installation and Updating

```bash
$ go get -u github.com/daglabs/btcd/blockchain
```

## Bitcoin Chain Processing Overview

Before a block is allowed into the block chain, it must go through an intensive
series of validation rules. The following list serves as a general outline of
those rules to provide some intuition into what is going on under the hood, but
is by no means exhaustive; a short sketch of driving this flow through
ProcessBlock follows the list:

- Reject duplicate blocks
- Perform a series of sanity checks on the block and its transactions such as
  verifying proof of work, timestamps, number and character of transactions,
  transaction amounts, script complexity, and merkle root calculations
- Compare the block against predetermined checkpoints for expected timestamps
  and difficulty based on elapsed time since the checkpoint
- Save the most recent orphan blocks for a limited time in case their parent
  blocks become available
- Stop processing if the block is an orphan as the rest of the processing
  depends on the block's position within the block chain
- Perform a series of more thorough checks that depend on the block's position
  within the block chain such as verifying block difficulties adhere to
  difficulty retarget rules, timestamps are after the median of the last
  several blocks, all transactions are finalized, checkpoint blocks match, and
  block versions are in line with the previous blocks
- Determine how the block fits into the chain and perform different actions
  accordingly in order to ensure any side chains which have higher difficulty
  than the main chain become the new main chain
- When a block is being connected to the main chain (either through
  reorganization of a side chain to the main chain or just extending the
  main chain), perform further checks on the block's transactions such as
  verifying transaction duplicates, script complexity for the combination of
  connected scripts, coinbase maturity, double spends, and connected
  transaction values
- Run the transaction scripts to verify the spender is allowed to spend the
  coins
- Insert the block into the block database
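As referenced above, here is a minimal sketch of pushing a block through this pipeline. It mirrors the linked ProcessBlock example; the Config fields and the (isMainChain, isOrphan, err) return shape follow the upstream btcd API and are assumptions about this fork.

```go
package main

import (
	"fmt"

	"github.com/daglabs/btcd/blockchain"
	"github.com/daglabs/btcd/chaincfg"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
)

// processOne feeds a single block through the validation pipeline outlined
// in the overview list above.
func processOne(db database.DB, block *util.Block) error {
	chain, err := blockchain.New(&blockchain.Config{
		DB:          db,
		ChainParams: &chaincfg.MainNetParams,
		TimeSource:  blockchain.NewMedianTime(),
	})
	if err != nil {
		return err
	}

	// ProcessBlock runs the sanity, contextual, and connection checks and
	// reports whether the block extended the main chain or was stashed as
	// an orphan awaiting its parent.
	isMainChain, isOrphan, err := chain.ProcessBlock(block, blockchain.BFNone)
	if err != nil {
		return err
	}
	fmt.Printf("on main chain: %v, orphan: %v\n", isMainChain, isOrphan)
	return nil
}
```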
## Examples

* [ProcessBlock Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-BlockChain-ProcessBlock)
  Demonstrates how to create a new chain instance and use ProcessBlock to
  attempt to add a block to the chain. This example intentionally
  attempts to insert a duplicate genesis block to illustrate how an invalid
  block is handled.

* [CompactToBig Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-CompactToBig)
  Demonstrates how to convert the compact "bits" in a block header which
  represent the target difficulty to a big integer and display it using the
  typical hex notation.

* [BigToCompact Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-BigToCompact)
  Demonstrates how to convert a target difficulty into the
  compact "bits" in a block header which represent that target difficulty.

## GPG Verification Key

All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:

- Download the public key from the Conformal website at
  https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt

- Import the public key into your GPG keyring:
  ```bash
  gpg --import GIT-GPG-KEY-conformal.txt
  ```

- Verify the release tag with the following command where `TAG_NAME` is a
  placeholder for the specific tag:
  ```bash
  git tag -v TAG_NAME
  ```

## License

Package blockchain is licensed under the [copyfree](http://copyfree.org) ISC
License.
@@ -1,108 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"fmt"

	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
)

// maybeAcceptBlock potentially accepts a block into the block DAG. It
// performs several validation checks which depend on its position within
// the block DAG before adding it. The block is expected to have already
// gone through ProcessBlock before calling this function with it.
//
// The flags are also passed to checkBlockContext and connectToDAG. See
// their documentation for how the flags modify their behavior.
//
// This function MUST be called with the dagLock held (for writes).
func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error {
	// The height of this block is one more than the referenced previous
	// block.
	parents, err := lookupParentNodes(block, dag)
	if err != nil {
		return err
	}

	bluestParent := parents.bluest()
	blockHeight := int32(0)
	if !block.IsGenesis() {
		blockHeight = parents.maxHeight() + 1
	}
	block.SetHeight(blockHeight)

	// The block must pass all of the validation rules which depend on the
	// position of the block within the block DAG.
	err = dag.checkBlockContext(block, parents, bluestParent, flags)
	if err != nil {
		return err
	}

	// Insert the block into the database if it's not already there. Even
	// though it is possible the block will ultimately fail to connect, it
	// has already passed all proof-of-work and validity tests which means
	// it would be prohibitively expensive for an attacker to fill up the
	// disk with a bunch of blocks that fail to connect. This is necessary
	// since it allows block download to be decoupled from the much more
	// expensive connection logic. It also has some other nice properties
	// such as making blocks that never become part of the DAG or
	// blocks that fail to connect available for further analysis.
	err = dag.db.Update(func(dbTx database.Tx) error {
		return dbStoreBlock(dbTx, block)
	})
	if err != nil {
		return err
	}

	// Create a new block node for the block and add it to the node index.
	blockHeader := &block.MsgBlock().Header
	newNode := newBlockNode(blockHeader, parents, dag.dagParams.K)
	newNode.status = statusDataStored

	dag.index.AddNode(newNode)
	err = dag.index.flushToDB()
	if err != nil {
		return err
	}

	// Connect the passed block to the DAG. This also handles validation of the
	// transaction scripts.
	err = dag.addBlock(newNode, parents, block, flags)
	if err != nil {
		return err
	}

	// Notify the caller that the new block was accepted into the block
	// DAG. The caller would typically want to react by relaying the
	// inventory to other peers.
	dag.dagLock.Unlock()
	dag.sendNotification(NTBlockAdded, block)
	dag.dagLock.Lock()

	return nil
}

func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error) {
	header := block.MsgBlock().Header
	parentHashes := header.ParentHashes

	nodes := newSet()
	for _, parentHash := range parentHashes {
		node := blockDAG.index.LookupNode(parentHash)
		if node == nil {
			str := fmt.Sprintf("parent block %s is unknown", parentHashes)
			return nil, ruleError(ErrParentBlockUnknown, str)
		} else if blockDAG.index.NodeStatus(node).KnownInvalid() {
			str := fmt.Sprintf("parent block %s is known to be invalid", parentHashes)
			return nil, ruleError(ErrInvalidAncestorBlock, str)
		}

		nodes.add(node)
	}

	return nodes, nil
}
@@ -1,139 +0,0 @@
package blockdag

import (
	"errors"
	"strings"
	"testing"

	"bou.ke/monkey"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
)

func TestMaybeAcceptBlockErrors(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
		DAGParams: &dagconfig.SimNetParams,
	})
	if err != nil {
		t.Fatalf("TestMaybeAcceptBlockErrors: Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc()

	dag.TestSetBlockRewardMaturity(1)

	// Test rejecting the block if its parents are missing
	orphanBlockFile := "blk_3B.dat"
	loadedBlocks, err := loadBlocks(orphanBlockFile)
	if err != nil {
		t.Fatalf("TestMaybeAcceptBlockErrors: "+
			"Error loading file '%s': %s\n", orphanBlockFile, err)
	}
	block := loadedBlocks[0]

	err = dag.maybeAcceptBlock(block, BFNone)
	if err == nil {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
			"Expected: %s, got: <nil>", ErrParentBlockUnknown)
	}
	ruleErr, ok := err.(RuleError)
	if !ok {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
			"Expected RuleError but got %s", err)
	} else if ruleErr.ErrorCode != ErrParentBlockUnknown {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
			"Unexpected error code. Want: %s, got: %s", ErrParentBlockUnknown, ruleErr.ErrorCode)
	}

	// Test rejecting the block if its parents are invalid
	blocksFile := "blk_0_to_4.dat"
	blocks, err := loadBlocks(blocksFile)
	if err != nil {
		t.Fatalf("TestMaybeAcceptBlockErrors: "+
			"Error loading file '%s': %s\n", blocksFile, err)
	}

	// Add a valid block and mark it as invalid
	block1 := blocks[1]
	isOrphan, err := dag.ProcessBlock(block1, BFNone)
	if err != nil {
		t.Fatalf("TestMaybeAcceptBlockErrors: Valid block unexpectedly returned an error: %s", err)
	}
	if isOrphan {
		t.Fatalf("TestMaybeAcceptBlockErrors: incorrectly returned that block 1 is an orphan")
	}
	blockNode1 := dag.index.LookupNode(block1.Hash())
	dag.index.SetStatusFlags(blockNode1, statusValidateFailed)

	block2 := blocks[2]
	err = dag.maybeAcceptBlock(block2, BFNone)
	if err == nil {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
			"Expected: %s, got: <nil>", ErrInvalidAncestorBlock)
	}
	ruleErr, ok = err.(RuleError)
	if !ok {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
			"Expected RuleError but got %s", err)
	} else if ruleErr.ErrorCode != ErrInvalidAncestorBlock {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
			"Unexpected error. Want: %s, got: %s", ErrInvalidAncestorBlock, ruleErr.ErrorCode)
	}

	// Set block1's status back to valid for next tests
	dag.index.UnsetStatusFlags(blockNode1, statusValidateFailed)

	// Test rejecting the block due to bad context
	originalBits := block2.MsgBlock().Header.Bits
	block2.MsgBlock().Header.Bits = 0
	err = dag.maybeAcceptBlock(block2, BFNone)
	if err == nil {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
			"Expected: %s, got: <nil>", ErrUnexpectedDifficulty)
	}
	ruleErr, ok = err.(RuleError)
	if !ok {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
			"Expected RuleError but got %s", err)
	} else if ruleErr.ErrorCode != ErrUnexpectedDifficulty {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
			"Unexpected error. Want: %s, got: %s", ErrUnexpectedDifficulty, ruleErr.ErrorCode)
	}

	// Set block2's bits back to valid for next tests
	block2.MsgBlock().Header.Bits = originalBits

	// Test rejecting the node due to database error
	databaseErrorMessage := "database error"
	guard := monkey.Patch(dbStoreBlock, func(dbTx database.Tx, block *util.Block) error {
		return errors.New(databaseErrorMessage)
	})
	defer guard.Unpatch()
	err = dag.maybeAcceptBlock(block2, BFNone)
	if err == nil {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+
			"Expected: %s, got: <nil>", databaseErrorMessage)
	}
	if !strings.Contains(err.Error(), databaseErrorMessage) {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+
			"Unexpected error. Want: %s, got: %s", databaseErrorMessage, err)
	}
	guard.Unpatch()

	// Test rejecting the node due to index error
	indexErrorMessage := "index error"
	guard = monkey.Patch((*blockIndex).flushToDB, func(_ *blockIndex) error {
		return errors.New(indexErrorMessage)
	})
	defer guard.Unpatch()
	err = dag.maybeAcceptBlock(block2, BFNone)
	if err == nil {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
			"Expected %s, got: <nil>", indexErrorMessage)
	}
	if !strings.Contains(err.Error(), indexErrorMessage) {
		t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
			"Unexpected error. Want: %s, got: %s", indexErrorMessage, err)
	}
}
@@ -1,58 +0,0 @@
package blockdag

import (
	"errors"
	"strings"
	"testing"
	"time"

	"bou.ke/monkey"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/database"
)

func TestAncestorErrors(t *testing.T) {
	node := newTestNode(newSet(), int32(0x10000000), 0, time.Unix(0, 0), dagconfig.MainNetParams.K)
	node.height = 2
	ancestor := node.SelectedAncestor(3)
	if ancestor != nil {
		t.Errorf("TestAncestorErrors: SelectedAncestor() unexpectedly returned a node. Expected: <nil>")
	}
}

func TestFlushToDBErrors(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestFlushToDBErrors", Config{
		DAGParams: &dagconfig.SimNetParams,
	})
	if err != nil {
		t.Fatalf("TestFlushToDBErrors: Failed to setup DAG instance: %s", err)
	}
	defer teardownFunc()

	// Call flushToDB without anything to flush. This should succeed
	err = dag.index.flushToDB()
	if err != nil {
		t.Errorf("TestFlushToDBErrors: flushToDB without anything to flush: "+
			"Unexpected flushToDB error: %s", err)
	}

	// Mark the genesis block as dirty
	dag.index.SetStatusFlags(dag.genesis, statusValid)

	// Test flushToDB failure due to database error
	databaseErrorMessage := "database error"
	guard := monkey.Patch(dbStoreBlockNode, func(_ database.Tx, _ *blockNode) error {
		return errors.New(databaseErrorMessage)
	})
	defer guard.Unpatch()
	err = dag.index.flushToDB()
	if err == nil {
		t.Errorf("TestFlushToDBErrors: flushToDB failure due to database error: "+
			"Expected: %s, got: <nil>", databaseErrorMessage)
	}
	if !strings.Contains(err.Error(), databaseErrorMessage) {
		t.Errorf("TestFlushToDBErrors: flushToDB failure due to database error: "+
			"Unexpected flushToDB error. Expected: %s, got: %s", databaseErrorMessage, err)
	}
}
@@ -1,267 +0,0 @@
// Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"fmt"
	"math/big"
	"sort"
	"time"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/wire"
)

// blockStatus is a bit field representing the validation state of the block.
type blockStatus byte

const (
	// statusDataStored indicates that the block's payload is stored on disk.
	statusDataStored blockStatus = 1 << iota

	// statusValid indicates that the block has been fully validated.
	statusValid

	// statusValidateFailed indicates that the block has failed validation.
	statusValidateFailed

	// statusInvalidAncestor indicates that one of the block's ancestors has
	// failed validation, thus the block is also invalid.
	statusInvalidAncestor

	// statusNone indicates that the block has no validation state flags set.
	//
	// NOTE: This must be defined last in order to avoid influencing iota.
	statusNone blockStatus = 0
)

// KnownValid returns whether the block is known to be valid. This will return
// false for a valid block that has not been fully validated yet.
func (status blockStatus) KnownValid() bool {
	return status&statusValid != 0
}

// KnownInvalid returns whether the block is known to be invalid. This may be
// because the block itself failed validation or any of its ancestors is
// invalid. This will return false for invalid blocks that have not been proven
// invalid yet.
func (status blockStatus) KnownInvalid() bool {
	return status&(statusValidateFailed|statusInvalidAncestor) != 0
}
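
// exampleStatusFlags is an illustrative sketch added during editing, not part
// of the original source: it shows how the status bits compose. A block that
// is stored on disk and later fails validation carries both flags at once.
func exampleStatusFlags() {
	s := statusDataStored | statusValidateFailed
	fmt.Println(s.KnownInvalid()) // true: the statusValidateFailed bit is set
	fmt.Println(s.KnownValid())   // false: the statusValid bit is not set
}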

// blockNode represents a block within the block DAG. The DAG is stored into
// the block database.
type blockNode struct {
	// NOTE: The order of the definitions in this struct should not be
	// changed without considering how it affects alignment on 64-bit
	// platforms. The current order is specifically crafted to result in
	// minimal padding. There will be hundreds of thousands of these in
	// memory, so a few extra bytes of padding adds up.

	// parents is the parent blocks for this node.
	parents blockSet

	// selectedParent is the selected parent for this node.
	// The selected parent is the parent that, if chosen, will maximize the
	// blue score of this block.
	selectedParent *blockNode

	// children are all the blocks that refer to this block as a parent
	children blockSet

	// blues are all blue blocks in this block's worldview that are in its selected parent anticone
	blues []*blockNode

	// blueScore is the count of all the blue blocks in this block's past
	blueScore uint64

	// diff is the UTXO representation of the block
	// A block's UTXO is reconstituted by applying diffWith on every block in the chain of diffChildren
	// from the virtual block down to the block. See diffChild
	diff *UTXODiff

	// diffChild is the child that diff will be built from. See diff
	diffChild *blockNode

	// hash is the double sha 256 of the block.
	hash *daghash.Hash

	// workSum is the total amount of work in the DAG up to and including
	// this node.
	workSum *big.Int

	// height is the position in the block DAG.
	height int32

	// chainHeight is the number of hops you need to go down the selected
	// parent chain in order to get to the genesis block.
	chainHeight uint32

	// Some fields from block headers to aid in best chain selection and
	// reconstructing headers from memory. These must be treated as
	// immutable and are intentionally ordered to avoid padding on 64-bit
	// platforms.
	version        int32
	bits           uint32
	nonce          uint64
	timestamp      int64
	hashMerkleRoot *daghash.Hash
	idMerkleRoot   *daghash.Hash

	// status is a bit field representing the validation state of the block.
	// The status field, unlike the other fields, may be written to and so
	// should only be accessed using the concurrent-safe NodeStatus method on
	// blockIndex once the node has been added to the global index.
	status blockStatus
}

// initBlockNode initializes a block node from the given header and parent
// nodes, calculating the height from the parents and the workSum from the
// selected parent. This function is NOT safe for concurrent access. It must
// only be called when initially creating a node.
func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) {
	*node = blockNode{
		parents:   parents,
		children:  make(blockSet),
		workSum:   big.NewInt(0),
		timestamp: time.Now().Unix(),
	}

	// blockHeader is nil only for the virtual block
	if blockHeader != nil {
		node.hash = blockHeader.BlockHash()
		node.workSum = util.CalcWork(blockHeader.Bits)
		node.version = blockHeader.Version
		node.bits = blockHeader.Bits
		node.nonce = blockHeader.Nonce
		node.timestamp = blockHeader.Timestamp.Unix()
		node.hashMerkleRoot = blockHeader.HashMerkleRoot
		node.idMerkleRoot = blockHeader.IDMerkleRoot
	} else {
		node.hash = &daghash.ZeroHash
	}

	if len(parents) > 0 {
		node.blues, node.selectedParent, node.blueScore = phantom(node, phantomK)
		node.height = calculateNodeHeight(node)
		node.chainHeight = calculateChainHeight(node)
		node.workSum = node.workSum.Add(node.selectedParent.workSum, node.workSum)
	}
}

func calculateNodeHeight(node *blockNode) int32 {
	return node.parents.maxHeight() + 1
}

func calculateChainHeight(node *blockNode) uint32 {
	if node.isGenesis() {
		return 0
	}
	return node.selectedParent.chainHeight + 1
}
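
// exampleChainHeightInvariant is an illustrative sketch added during editing,
// not part of the original source: height counts hops along the highest
// parent while chainHeight counts hops along the selected parent, so
// chainHeight can never exceed height (TestChainHeight builds a DAG where
// the two actually diverge).
func exampleChainHeightInvariant(node *blockNode) bool {
	return node.chainHeight <= uint32(node.height)
}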

// newBlockNode returns a new block node for the given block header and parent
// nodes, calculating the height and workSum from the respective fields of the
// parents. This function is NOT safe for concurrent access.
func newBlockNode(blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) *blockNode {
	var node blockNode
	initBlockNode(&node, blockHeader, parents, phantomK)
	return &node
}

// updateParentsChildren updates the node's parents to point to the new node
func (node *blockNode) updateParentsChildren() {
	for _, parent := range node.parents {
		parent.children.add(node)
	}
}

// Header constructs a block header from the node and returns it.
//
// This function is safe for concurrent access.
func (node *blockNode) Header() *wire.BlockHeader {
	// No lock is needed because all accessed fields are immutable.
	return &wire.BlockHeader{
		Version:        node.version,
		ParentHashes:   node.ParentHashes(),
		HashMerkleRoot: node.hashMerkleRoot,
		IDMerkleRoot:   node.idMerkleRoot,
		Timestamp:      time.Unix(node.timestamp, 0),
		Bits:           node.bits,
		Nonce:          node.nonce,
	}
}

// SelectedAncestor returns the ancestor block node at the provided height by
// following the selected chain backwards from this node. The returned block
// will be nil when the requested height is greater than the height of this
// node or is less than zero.
//
// This function is safe for concurrent access.
func (node *blockNode) SelectedAncestor(height int32) *blockNode {
	if height < 0 || height > node.height {
		return nil
	}

	n := node
	for ; n != nil && n.height != height; n = n.selectedParent {
		// Intentionally left blank
	}

	return n
}

// RelativeAncestor returns the ancestor block node a relative 'distance' of
// blocks before this node. This is equivalent to calling SelectedAncestor with
// the node's height minus the provided distance.
//
// This function is safe for concurrent access.
func (node *blockNode) RelativeAncestor(distance int32) *blockNode {
	return node.SelectedAncestor(node.height - distance)
}
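
// exampleSelectedChainWalk is an illustrative sketch added during editing, not
// part of the original source: it walks the selected-parent chain from a node
// back to genesis, which is the same traversal SelectedAncestor performs
// internally (genesis is the only node whose selectedParent is nil).
func exampleSelectedChainWalk(node *blockNode) {
	for n := node; n != nil; n = n.selectedParent {
		fmt.Printf("%s (height %d, chainHeight %d)\n", n.hash, n.height, n.chainHeight)
	}
}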

// PastMedianTime returns the median time of the previous few blocks
// prior to, and including, the block node.
//
// This function is safe for concurrent access.
func (node *blockNode) PastMedianTime() time.Time {
	// Create a slice of the previous few block timestamps used to calculate
	// the median per the number defined by the constant medianTimeBlocks.
	// If there aren't enough blocks yet, pad the remainder with the genesis
	// block's timestamp.
	timestamps := make([]int64, medianTimeBlocks)
	iterNode := node
	for i := 0; i < medianTimeBlocks; i++ {
		timestamps[i] = iterNode.timestamp

		if !iterNode.isGenesis() {
			iterNode = iterNode.selectedParent
		}
	}

	sort.Sort(timeSorter(timestamps))

	// Note: This works because medianTimeBlocks is an odd number.
	// If it is ever changed to an even number, the average of the two middle
	// values must be taken instead. Since medianTimeBlocks is a constant, we
	// can skip the odd/even check here.
	medianTimestamp := timestamps[medianTimeBlocks/2]
	return time.Unix(medianTimestamp, 0)
}
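
// examplePastMedian is an illustrative sketch added during editing, not part
// of the original source: with an odd window size the median is simply the
// middle element after sorting, which is exactly the indexing PastMedianTime
// relies on.
func examplePastMedian() time.Time {
	timestamps := []int64{1500000300, 1500000100, 1500000200}
	sort.Sort(timeSorter(timestamps))
	return time.Unix(timestamps[len(timestamps)/2], 0) // median is 1500000200
}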

func (node *blockNode) ParentHashes() []*daghash.Hash {
	return node.parents.hashes()
}

// isGenesis returns whether the current block is the genesis block
func (node *blockNode) isGenesis() bool {
	return len(node.parents) == 0
}

func (node *blockNode) finalityScore() uint64 {
	return node.blueScore / FinalityInterval
}

// String returns a string that contains the block hash and height.
func (node blockNode) String() string {
	return fmt.Sprintf("%s (%d)", node.hash, node.height)
}
@@ -1,86 +0,0 @@
package blockdag

import (
	"testing"
)

func TestChainHeight(t *testing.T) {
	phantomK := uint32(2)
	buildNode := buildNodeGenerator(phantomK, true)

	node0 := buildNode(setFromSlice())
	node1 := buildNode(setFromSlice(node0))
	node2 := buildNode(setFromSlice(node0))
	node3 := buildNode(setFromSlice(node0))
	node4 := buildNode(setFromSlice(node1, node2, node3))
	node5 := buildNode(setFromSlice(node1, node2, node3))
	node6 := buildNode(setFromSlice(node1, node2, node3))
	node7 := buildNode(setFromSlice(node0))
	node8 := buildNode(setFromSlice(node7))
	node9 := buildNode(setFromSlice(node8))
	node10 := buildNode(setFromSlice(node9, node6))

	// Because nodes 7 & 8 were mined secretly, node10's selected
	// parent will be node6, although node9 is higher. So in this
	// case, node10.height and node10.chainHeight will be different.

	tests := []struct {
		node                *blockNode
		expectedChainHeight uint32
	}{
		{
			node:                node0,
			expectedChainHeight: 0,
		},
		{
			node:                node1,
			expectedChainHeight: 1,
		},
		{
			node:                node2,
			expectedChainHeight: 1,
		},
		{
			node:                node3,
			expectedChainHeight: 1,
		},
		{
			node:                node4,
			expectedChainHeight: 2,
		},
		{
			node:                node5,
			expectedChainHeight: 2,
		},
		{
			node:                node6,
			expectedChainHeight: 2,
		},
		{
			node:                node7,
			expectedChainHeight: 1,
		},
		{
			node:                node8,
			expectedChainHeight: 2,
		},
		{
			node:                node9,
			expectedChainHeight: 3,
		},
		{
			node:                node10,
			expectedChainHeight: 3,
		},
	}

	for _, test := range tests {
		if test.node.chainHeight != test.expectedChainHeight {
			t.Errorf("block %v expected chain height %v but got %v", test.node, test.expectedChainHeight, test.node.chainHeight)
		}
		if calculatedChainHeight := calculateChainHeight(test.node); calculatedChainHeight != test.expectedChainHeight {
			t.Errorf("block %v expected calculated chain height %v but got %v", test.node, test.expectedChainHeight, calculatedChainHeight)
		}
	}
}
@@ -1,175 +0,0 @@
package blockdag

import (
	"strings"

	"github.com/daglabs/btcd/dagconfig/daghash"
)

// blockSet implements a basic unsorted set of blocks
type blockSet map[daghash.Hash]*blockNode

// newSet creates a new, empty blockSet
func newSet() blockSet {
	return map[daghash.Hash]*blockNode{}
}

// setFromSlice converts a slice of blocks into an unordered set represented as a map
func setFromSlice(blocks ...*blockNode) blockSet {
	set := newSet()
	for _, block := range blocks {
		set.add(block)
	}
	return set
}

// maxHeight returns the height of the highest block in the block set
func (bs blockSet) maxHeight() int32 {
	var maxHeight int32
	for _, node := range bs {
		if maxHeight < node.height {
			maxHeight = node.height
		}
	}
	return maxHeight
}

func (bs blockSet) highest() *blockNode {
	var highest *blockNode
	for _, node := range bs {
		if highest == nil ||
			highest.height < node.height ||
			(highest.height == node.height && daghash.Less(node.hash, highest.hash)) {

			highest = node
		}
	}
	return highest
}

// add adds a block to this blockSet
func (bs blockSet) add(block *blockNode) {
	bs[*block.hash] = block
}

// remove removes a block from this blockSet, if it exists.
// Does nothing if this set does not contain the block.
func (bs blockSet) remove(block *blockNode) {
	delete(bs, *block.hash)
}

// clone clones this block set
func (bs blockSet) clone() blockSet {
	clone := newSet()
	for _, block := range bs {
		clone.add(block)
	}
	return clone
}

// subtract returns the difference between this blockSet and another blockSet
func (bs blockSet) subtract(other blockSet) blockSet {
	diff := newSet()
	for _, block := range bs {
		if !other.contains(block) {
			diff.add(block)
		}
	}
	return diff
}

// addSet adds all blocks in the other set to this set
func (bs blockSet) addSet(other blockSet) {
	for _, block := range other {
		bs.add(block)
	}
}

// addSlice adds the provided slice to this set
func (bs blockSet) addSlice(slice []*blockNode) {
	for _, block := range slice {
		bs.add(block)
	}
}

// union returns a blockSet that contains all blocks included in this set,
// the other set, or both
func (bs blockSet) union(other blockSet) blockSet {
	union := bs.clone()

	union.addSet(other)

	return union
}
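
// exampleSetAlgebra is an illustrative sketch added during editing, not part
// of the original source: subtract and union behave like ordinary set
// difference and set union, keyed by block hash, and neither modifies its
// receiver.
func exampleSetAlgebra(a, b *blockNode) (onlyA, both blockSet) {
	setA := setFromSlice(a, b)
	setB := setFromSlice(b)
	return setA.subtract(setB), setA.union(setB) // ({a}, {a, b})
}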

// contains returns true iff this set contains block
func (bs blockSet) contains(block *blockNode) bool {
	_, ok := bs[*block.hash]
	return ok
}

// containsHash returns true iff this set contains a block hash
func (bs blockSet) containsHash(hash *daghash.Hash) bool {
	_, ok := bs[*hash]
	return ok
}

// hashesEqual returns true if the given hashes are equal to the hashes
// of the blocks in this set.
// NOTE: The given hash slice must not contain duplicates.
func (bs blockSet) hashesEqual(hashes []*daghash.Hash) bool {
	if len(hashes) != len(bs) {
		return false
	}

	for _, hash := range hashes {
		if _, wasFound := bs[*hash]; !wasFound {
			return false
		}
	}

	return true
}

// hashes returns the hashes of the blocks in this set.
func (bs blockSet) hashes() []*daghash.Hash {
	hashes := make([]*daghash.Hash, 0, len(bs))
	for _, node := range bs {
		hashes = append(hashes, node.hash)
	}
	daghash.Sort(hashes)
	return hashes
}

func (bs blockSet) String() string {
	nodeStrs := make([]string, 0, len(bs))
	for _, node := range bs {
		nodeStrs = append(nodeStrs, node.String())
	}
	return strings.Join(nodeStrs, ",")
}

// anyChildInSet returns true iff any child of block is contained within this set
func (bs blockSet) anyChildInSet(block *blockNode) bool {
	for _, child := range block.children {
		if bs.contains(child) {
			return true
		}
	}

	return false
}

func (bs blockSet) bluest() *blockNode {
	var bluestNode *blockNode
	var maxScore uint64
	for _, node := range bs {
		if bluestNode == nil ||
			node.blueScore > maxScore ||
			(node.blueScore == maxScore && daghash.Less(node.hash, bluestNode.hash)) {
			bluestNode = node
			maxScore = node.blueScore
		}
	}
	return bluestNode
}
@@ -1,337 +0,0 @@
package blockdag

import (
	"reflect"
	"testing"

	"github.com/daglabs/btcd/dagconfig/daghash"
)

func TestHashes(t *testing.T) {
	bs := setFromSlice(
		&blockNode{
			hash: &daghash.Hash{3},
		},
		&blockNode{
			hash: &daghash.Hash{1},
		},
		&blockNode{
			hash: &daghash.Hash{0},
		},
		&blockNode{
			hash: &daghash.Hash{2},
		},
	)

	expected := []*daghash.Hash{
		{0},
		{1},
		{2},
		{3},
	}

	hashes := bs.hashes()
	if !daghash.AreEqual(hashes, expected) {
		t.Errorf("TestHashes: hashes order is %s but expected %s", hashes, expected)
	}
}

func TestBlockSetHighest(t *testing.T) {
	node1 := &blockNode{hash: &daghash.Hash{10}, height: 1}
	node2a := &blockNode{hash: &daghash.Hash{20}, height: 2}
	node2b := &blockNode{hash: &daghash.Hash{21}, height: 2}
	node3 := &blockNode{hash: &daghash.Hash{30}, height: 3}

	tests := []struct {
		name            string
		set             blockSet
		expectedHighest *blockNode
	}{
		{
			name:            "empty set",
			set:             setFromSlice(),
			expectedHighest: nil,
		},
		{
			name:            "set with one member",
			set:             setFromSlice(node1),
			expectedHighest: node1,
		},
		{
			name:            "same-height highest members in set",
			set:             setFromSlice(node2b, node1, node2a),
			expectedHighest: node2a,
		},
		{
			name:            "typical set",
			set:             setFromSlice(node2b, node3, node1, node2a),
			expectedHighest: node3,
		},
	}

	for _, test := range tests {
		highest := test.set.highest()
		if highest != test.expectedHighest {
			t.Errorf("blockSet.highest: unexpected value in test '%s'. "+
				"Expected: %v, got: %v", test.name, test.expectedHighest, highest)
		}
	}
}

func TestBlockSetSubtract(t *testing.T) {
	node1 := &blockNode{hash: &daghash.Hash{10}}
	node2 := &blockNode{hash: &daghash.Hash{20}}
	node3 := &blockNode{hash: &daghash.Hash{30}}

	tests := []struct {
		name           string
		setA           blockSet
		setB           blockSet
		expectedResult blockSet
	}{
		{
			name:           "both sets empty",
			setA:           setFromSlice(),
			setB:           setFromSlice(),
			expectedResult: setFromSlice(),
		},
		{
			name:           "subtract an empty set",
			setA:           setFromSlice(node1),
			setB:           setFromSlice(),
			expectedResult: setFromSlice(node1),
		},
		{
			name:           "subtract from empty set",
			setA:           setFromSlice(),
			setB:           setFromSlice(node1),
			expectedResult: setFromSlice(),
		},
		{
			name:           "subtract unrelated set",
			setA:           setFromSlice(node1),
			setB:           setFromSlice(node2),
			expectedResult: setFromSlice(node1),
		},
		{
			name:           "typical case",
			setA:           setFromSlice(node1, node2),
			setB:           setFromSlice(node2, node3),
			expectedResult: setFromSlice(node1),
		},
	}

	for _, test := range tests {
		result := test.setA.subtract(test.setB)
		if !reflect.DeepEqual(result, test.expectedResult) {
			t.Errorf("blockSet.subtract: unexpected result in test '%s'. "+
				"Expected: %v, got: %v", test.name, test.expectedResult, result)
		}
	}
}

func TestBlockSetAddSet(t *testing.T) {
	node1 := &blockNode{hash: &daghash.Hash{10}}
	node2 := &blockNode{hash: &daghash.Hash{20}}
	node3 := &blockNode{hash: &daghash.Hash{30}}

	tests := []struct {
		name           string
		setA           blockSet
		setB           blockSet
		expectedResult blockSet
	}{
		{
			name:           "both sets empty",
			setA:           setFromSlice(),
			setB:           setFromSlice(),
			expectedResult: setFromSlice(),
		},
		{
			name:           "add an empty set",
			setA:           setFromSlice(node1),
			setB:           setFromSlice(),
			expectedResult: setFromSlice(node1),
		},
		{
			name:           "add to empty set",
			setA:           setFromSlice(),
			setB:           setFromSlice(node1),
			expectedResult: setFromSlice(node1),
		},
		{
			name:           "add already added member",
			setA:           setFromSlice(node1, node2),
			setB:           setFromSlice(node1),
			expectedResult: setFromSlice(node1, node2),
		},
		{
			name:           "typical case",
			setA:           setFromSlice(node1, node2),
			setB:           setFromSlice(node2, node3),
			expectedResult: setFromSlice(node1, node2, node3),
		},
	}

	for _, test := range tests {
		test.setA.addSet(test.setB)
		if !reflect.DeepEqual(test.setA, test.expectedResult) {
			t.Errorf("blockSet.addSet: unexpected result in test '%s'. "+
				"Expected: %v, got: %v", test.name, test.expectedResult, test.setA)
		}
	}
}

func TestBlockSetAddSlice(t *testing.T) {
	node1 := &blockNode{hash: &daghash.Hash{10}}
	node2 := &blockNode{hash: &daghash.Hash{20}}
	node3 := &blockNode{hash: &daghash.Hash{30}}

	tests := []struct {
		name           string
		set            blockSet
		slice          []*blockNode
		expectedResult blockSet
	}{
		{
			name:           "add empty slice to empty set",
			set:            setFromSlice(),
			slice:          []*blockNode{},
			expectedResult: setFromSlice(),
		},
		{
			name:           "add an empty slice",
			set:            setFromSlice(node1),
			slice:          []*blockNode{},
			expectedResult: setFromSlice(node1),
		},
		{
			name:           "add to empty set",
			set:            setFromSlice(),
			slice:          []*blockNode{node1},
			expectedResult: setFromSlice(node1),
		},
		{
			name:           "add already added member",
			set:            setFromSlice(node1, node2),
			slice:          []*blockNode{node1},
			expectedResult: setFromSlice(node1, node2),
		},
		{
			name:           "typical case",
			set:            setFromSlice(node1, node2),
			slice:          []*blockNode{node2, node3},
			expectedResult: setFromSlice(node1, node2, node3),
		},
	}

	for _, test := range tests {
		test.set.addSlice(test.slice)
		if !reflect.DeepEqual(test.set, test.expectedResult) {
			t.Errorf("blockSet.addSlice: unexpected result in test '%s'. "+
				"Expected: %v, got: %v", test.name, test.expectedResult, test.set)
		}
	}
}

func TestBlockSetUnion(t *testing.T) {
	node1 := &blockNode{hash: &daghash.Hash{10}}
	node2 := &blockNode{hash: &daghash.Hash{20}}
	node3 := &blockNode{hash: &daghash.Hash{30}}

	tests := []struct {
		name           string
		setA           blockSet
		setB           blockSet
		expectedResult blockSet
	}{
		{
			name:           "both sets empty",
			setA:           setFromSlice(),
			setB:           setFromSlice(),
			expectedResult: setFromSlice(),
		},
		{
			name:           "union against an empty set",
			setA:           setFromSlice(node1),
			setB:           setFromSlice(),
			expectedResult: setFromSlice(node1),
		},
		{
			name:           "union from an empty set",
			setA:           setFromSlice(),
			setB:           setFromSlice(node1),
			expectedResult: setFromSlice(node1),
		},
		{
			name:           "union with subset",
			setA:           setFromSlice(node1, node2),
			setB:           setFromSlice(node1),
			expectedResult: setFromSlice(node1, node2),
		},
		{
			name:           "typical case",
			setA:           setFromSlice(node1, node2),
			setB:           setFromSlice(node2, node3),
			expectedResult: setFromSlice(node1, node2, node3),
		},
	}

	for _, test := range tests {
		result := test.setA.union(test.setB)
		if !reflect.DeepEqual(result, test.expectedResult) {
			t.Errorf("blockSet.union: unexpected result in test '%s'. "+
				"Expected: %v, got: %v", test.name, test.expectedResult, result)
		}
	}
}

func TestBlockSetHashesEqual(t *testing.T) {
	node1 := &blockNode{hash: &daghash.Hash{10}}
	node2 := &blockNode{hash: &daghash.Hash{20}}

	tests := []struct {
		name           string
		set            blockSet
		hashes         []*daghash.Hash
		expectedResult bool
	}{
		{
			name:           "empty set, no hashes",
			set:            setFromSlice(),
			hashes:         []*daghash.Hash{},
			expectedResult: true,
		},
		{
			name:           "empty set, one hash",
			set:            setFromSlice(),
			hashes:         []*daghash.Hash{node1.hash},
			expectedResult: false,
		},
		{
			name:           "set and hashes of different length",
			set:            setFromSlice(node1, node2),
			hashes:         []*daghash.Hash{node1.hash},
			expectedResult: false,
		},
		{
			name:           "set equal to hashes",
			set:            setFromSlice(node1, node2),
			hashes:         []*daghash.Hash{node1.hash, node2.hash},
			expectedResult: true,
		},
		{
			name:           "set equal to hashes, different order",
			set:            setFromSlice(node1, node2),
			hashes:         []*daghash.Hash{node2.hash, node1.hash},
			expectedResult: true,
		},
	}

	for _, test := range tests {
		result := test.set.hashesEqual(test.hashes)
		if result != test.expectedResult {
			t.Errorf("blockSet.hashesEqual: unexpected result in test '%s'. "+
				"Expected: %t, got: %t", test.name, test.expectedResult, result)
		}
	}
}
@@ -1,270 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"fmt"
	"time"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/util"
)

// CheckpointConfirmations is the number of blocks before the end of the current
// best block chain that a good checkpoint candidate must be.
const CheckpointConfirmations = 2016

// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
	hash, _ := daghash.NewHashFromStr(hexStr)
	return hash
}

// newTxIDFromStr converts the passed big-endian hex string into a
// daghash.TxID. It only differs from the one available in daghash in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, IDs.
func newTxIDFromStr(hexStr string) *daghash.TxID {
	txID, _ := daghash.NewTxIDFromStr(hexStr)
	return txID
}

// Checkpoints returns a slice of checkpoints (regardless of whether they are
// already known). When there are no checkpoints for the chain, it will return
// nil.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) Checkpoints() []dagconfig.Checkpoint {
	return dag.checkpoints
}

// HasCheckpoints returns whether this BlockDAG has checkpoints defined.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) HasCheckpoints() bool {
	return len(dag.checkpoints) > 0
}

// LatestCheckpoint returns the most recent checkpoint (regardless of whether it
// is already known). When there are no defined checkpoints for the active chain
// instance, it will return nil.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) LatestCheckpoint() *dagconfig.Checkpoint {
	if !dag.HasCheckpoints() {
		return nil
	}
	return &dag.checkpoints[len(dag.checkpoints)-1]
}

// verifyCheckpoint returns whether the passed block height and hash combination
// match the checkpoint data. It also returns true if there is no checkpoint
// data for the passed block height.
func (dag *BlockDAG) verifyCheckpoint(height int32, hash *daghash.Hash) bool {
	if !dag.HasCheckpoints() {
		return true
	}

	// Nothing to check if there is no checkpoint data for the block height.
	checkpoint, exists := dag.checkpointsByHeight[height]
	if !exists {
		return true
	}

	if !checkpoint.Hash.IsEqual(hash) {
		return false
	}

	log.Infof("Verified checkpoint at height %d/block %s", checkpoint.Height,
		checkpoint.Hash)
	return true
}

// findPreviousCheckpoint finds the most recent checkpoint that is already
// available in the downloaded portion of the block chain and returns the
// associated block node. It returns nil if a checkpoint can't be found (this
// should really only happen for blocks before the first checkpoint).
//
// This function MUST be called with the DAG lock held (for reads).
func (dag *BlockDAG) findPreviousCheckpoint() (*blockNode, error) {
	if !dag.HasCheckpoints() {
		return nil, nil
	}

	// Perform the initial search to find and cache the latest known
	// checkpoint if the best chain is not known yet or we haven't already
	// previously searched.
	checkpoints := dag.checkpoints
	numCheckpoints := len(checkpoints)
	if dag.checkpointNode == nil && dag.nextCheckpoint == nil {
		// Loop backwards through the available checkpoints to find one
		// that is already available.
		for i := numCheckpoints - 1; i >= 0; i-- {
			node := dag.index.LookupNode(checkpoints[i].Hash)
			if node == nil {
				continue
			}

			// Checkpoint found. Cache it for future lookups and
			// set the next expected checkpoint accordingly.
			dag.checkpointNode = node
			if i < numCheckpoints-1 {
				dag.nextCheckpoint = &checkpoints[i+1]
			}
			return dag.checkpointNode, nil
		}

		// No known latest checkpoint. This will only happen on blocks
		// before the first known checkpoint. So, set the next expected
		// checkpoint to the first checkpoint and return the fact there
		// is no latest known checkpoint block.
		dag.nextCheckpoint = &checkpoints[0]
		return nil, nil
	}

	// At this point we've already searched for the latest known checkpoint,
	// so when there is no next checkpoint, the current checkpoint lockin
	// will always be the latest known checkpoint.
	if dag.nextCheckpoint == nil {
		return dag.checkpointNode, nil
	}

	// When there is a next checkpoint and the height of the current best
	// chain does not exceed it, the current checkpoint lockin is still
	// the latest known checkpoint.
	if dag.selectedTip().height < dag.nextCheckpoint.Height {
		return dag.checkpointNode, nil
	}

	// We've reached or exceeded the next checkpoint height. Note that
	// once a checkpoint lockin has been reached, forks are prevented from
	// any blocks before the checkpoint, so we don't have to worry about the
	// checkpoint going away out from under us due to a chain reorganize.

	// Cache the latest known checkpoint for future lookups. Note that if
	// this lookup fails something is very wrong since the chain has already
	// passed the checkpoint which was verified as accurate before inserting
	// it.
	checkpointNode := dag.index.LookupNode(dag.nextCheckpoint.Hash)
	if checkpointNode == nil {
		return nil, AssertError(fmt.Sprintf("findPreviousCheckpoint "+
			"failed lookup of known good block node %s",
			dag.nextCheckpoint.Hash))
	}
	dag.checkpointNode = checkpointNode

	// Set the next expected checkpoint.
	checkpointIndex := -1
	for i := numCheckpoints - 1; i >= 0; i-- {
		if checkpoints[i].Hash.IsEqual(dag.nextCheckpoint.Hash) {
			checkpointIndex = i
			break
		}
	}
	dag.nextCheckpoint = nil
	if checkpointIndex != -1 && checkpointIndex < numCheckpoints-1 {
		dag.nextCheckpoint = &checkpoints[checkpointIndex+1]
	}

	return dag.checkpointNode, nil
}

// isNonstandardTransaction determines whether a transaction contains any
// scripts which are not one of the standard types.
func isNonstandardTransaction(tx *util.Tx) bool {
	// Check all of the output public key scripts for non-standard scripts.
	for _, txOut := range tx.MsgTx().TxOut {
		scriptClass := txscript.GetScriptClass(txOut.PkScript)
		if scriptClass == txscript.NonStandardTy {
			return true
		}
	}
	return false
}

// IsCheckpointCandidate returns whether or not the passed block is a good
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
//  - The block must be in the main chain
//  - The block must be at least 'CheckpointConfirmations' blocks prior to the
//    current end of the main chain
//  - The timestamps for the blocks before and after the checkpoint must have
//    timestamps which are also before and after the checkpoint, respectively
//    (due to the median time allowance this is not always the case)
//  - The block must not contain any strange transactions such as those with
//    nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
	dag.dagLock.RLock()
	defer dag.dagLock.RUnlock()

	// A checkpoint must be in the DAG.
	node := dag.index.LookupNode(block.Hash())
	if node == nil {
		return false, nil
	}

	// Ensure the height of the passed block and the entry for the block in
	// the main chain match. This should always be the case unless the
	// caller provided an invalid block.
	if node.height != block.Height() {
		return false, fmt.Errorf("passed block height of %d does not "+
			"match the main chain height of %d", block.Height(),
			node.height)
	}

	// A checkpoint must be at least CheckpointConfirmations blocks
	// before the end of the main chain.
	dagHeight := dag.selectedTip().height
	if node.height > (dagHeight - CheckpointConfirmations) {
		return false, nil
	}

	// A checkpoint must have at least one block after it.
	//
	// This should always succeed since the check above already made sure it
	// is CheckpointConfirmations back, but be safe in case the constant
	// changes.
	nextNode := node.diffChild
	if nextNode == nil {
		return false, nil
	}

	// A checkpoint must have at least one block before it.
	if node.selectedParent == nil {
		return false, nil
	}

	// A checkpoint must have timestamps for the block and the blocks on
	// either side of it in order (due to the median time allowance this is
	// not always the case).
	prevTime := time.Unix(node.selectedParent.timestamp, 0)
	curTime := block.MsgBlock().Header.Timestamp
	nextTime := time.Unix(nextNode.timestamp, 0)
	if prevTime.After(curTime) || nextTime.Before(curTime) {
		return false, nil
	}

	// A checkpoint must have transactions that only contain standard
	// scripts.
	for _, tx := range block.Transactions() {
		if isNonstandardTransaction(tx) {
			return false, nil
		}
	}

	// All of the checks passed, so the block is a candidate.
	return true, nil
}
@@ -1,258 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"compress/bzip2"
	"encoding/binary"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"time"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	_ "github.com/daglabs/btcd/database/ffldb"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/wire"
)

// loadBlocks reads files containing bitcoin block data (bzip2-compressed when
// the filename ends in .bz2, but otherwise in the format bitcoind writes) from
// disk and returns them as a slice of util.Block. This is largely borrowed
// from the test code in btcdb.
func loadBlocks(filename string) (blocks []*util.Block, err error) {
	filename = filepath.Join("testdata/", filename)

	var network = wire.MainNet
	var dr io.Reader
	var fi io.ReadCloser

	fi, err = os.Open(filename)
	if err != nil {
		return
	}

	if strings.HasSuffix(filename, ".bz2") {
		dr = bzip2.NewReader(fi)
	} else {
		dr = fi
	}
	defer fi.Close()

	var block *util.Block

	err = nil
	for height := 0; err == nil; height++ {
		var rintbuf uint32
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err == io.EOF {
			// hit end of file at expected offset: no warning
			height--
			err = nil
			break
		}
		if err != nil {
			break
		}
		if rintbuf != uint32(network) {
			break
		}
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		blocklen := rintbuf

		rbytes := make([]byte, blocklen)

		// read the block fully; a short read here would corrupt the
		// decode below
		_, err = io.ReadFull(dr, rbytes)
		if err != nil {
			return
		}

		block, err = util.NewBlockFromBytes(rbytes)
		if err != nil {
			return
		}
		block.SetHeight(int32(height))
		blocks = append(blocks, block)
	}

	return
}

// loadUTXOSet returns a utxo view loaded from a file.
func loadUTXOSet(filename string) (UTXOSet, error) {
	// The utxostore file format is:
	// <tx hash><output index><serialized utxo len><serialized utxo>
	//
	// The output index and serialized utxo len are little endian uint32s
	// and the serialized utxo uses the format described in dagio.go.

	filename = filepath.Join("testdata", filename)
	fi, err := os.Open(filename)
	if err != nil {
		return nil, err
	}

	// Choose read based on whether the file is compressed or not.
	var r io.Reader
	if strings.HasSuffix(filename, ".bz2") {
		r = bzip2.NewReader(fi)
	} else {
		r = fi
	}
	defer fi.Close()

	utxoSet := NewFullUTXOSet()
	for {
		// Tx ID of the utxo entry.
		var txID daghash.TxID
		_, err := io.ReadAtLeast(r, txID[:], len(txID[:]))
		if err != nil {
			// Expected EOF at the right offset.
			if err == io.EOF {
				break
			}
			return nil, err
		}

		// Output index of the utxo entry.
		var index uint32
		err = binary.Read(r, binary.LittleEndian, &index)
		if err != nil {
			return nil, err
		}

		// Num of serialized utxo entry bytes.
		var numBytes uint32
		err = binary.Read(r, binary.LittleEndian, &numBytes)
		if err != nil {
			return nil, err
		}

		// Serialized utxo entry.
		serialized := make([]byte, numBytes)
		_, err = io.ReadAtLeast(r, serialized, int(numBytes))
		if err != nil {
			return nil, err
		}

		// Deserialize it and add it to the view.
		entry, err := deserializeUTXOEntry(serialized)
		if err != nil {
			return nil, err
		}
		utxoSet.utxoCollection[wire.OutPoint{TxID: txID, Index: index}] = entry
	}

	return utxoSet, nil
}

// TestSetBlockRewardMaturity makes the ability to set the block reward maturity
// available when running tests.
func (dag *BlockDAG) TestSetBlockRewardMaturity(maturity uint16) {
	dag.dagParams.BlockRewardMaturity = maturity
}

// newTestDAG returns a DAG that is usable for synthetic tests. It is
// important to note that this DAG has no database associated with it, so
// it is not usable with all functions and the tests must take care when making
// use of it.
func newTestDAG(params *dagconfig.Params) *BlockDAG {
	// Create a genesis block node and a block index populated with it
	// for use when creating the fake DAG below.
	node := newBlockNode(&params.GenesisBlock.Header, newSet(), params.K)
	index := newBlockIndex(nil, params)
	index.AddNode(node)

	targetTimespan := int64(params.TargetTimespan / time.Second)
	targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
	adjustmentFactor := params.RetargetAdjustmentFactor
	return &BlockDAG{
		dagParams:           params,
		timeSource:          NewMedianTime(),
		minRetargetTimespan: targetTimespan / adjustmentFactor,
		maxRetargetTimespan: targetTimespan * adjustmentFactor,
		blocksPerRetarget:   int32(targetTimespan / targetTimePerBlock),
		index:               index,
		virtual:             newVirtualBlock(setFromSlice(node), params.K),
		genesis:             index.LookupNode(params.GenesisHash),
		warningCaches:       newThresholdCaches(vbNumBits),
		deploymentCaches:    newThresholdCaches(dagconfig.DefinedDeployments),
	}
}

// newTestNode creates a block node connected to the passed parents with the
// provided fields populated and fake values for the other fields.
func newTestNode(parents blockSet, blockVersion int32, bits uint32, timestamp time.Time, phantomK uint32) *blockNode {
	// Make up a header and create a block node from it.
	header := &wire.BlockHeader{
		Version:        blockVersion,
		ParentHashes:   parents.hashes(),
		Bits:           bits,
		Timestamp:      timestamp,
		HashMerkleRoot: &daghash.ZeroHash,
		IDMerkleRoot:   &daghash.ZeroHash,
	}
	return newBlockNode(header, parents, phantomK)
}

func addNodeAsChildToParents(node *blockNode) {
	for _, parent := range node.parents {
		parent.children.add(node)
	}
}

func buildNodeGenerator(phantomK uint32, withChildren bool) func(parents blockSet) *blockNode {
	// For the purposes of these tests, we'll create blockNodes whose hashes are a
	// series of numbers from 1 to 255.
	hashCounter := byte(1)
	buildNode := func(parents blockSet) *blockNode {
		block := newBlockNode(nil, parents, phantomK)
		block.hash = &daghash.Hash{hashCounter}
		hashCounter++

		return block
	}
	if withChildren {
		return func(parents blockSet) *blockNode {
			node := buildNode(parents)
			addNodeAsChildToParents(node)
			return node
		}
	}
	return buildNode
}

// checkRuleError ensures the types of the two passed errors are of the
// same type (either both nil or both of type RuleError) and their error codes
// match when not nil.
func checkRuleError(gotErr, wantErr error) error {
	// Ensure the error code is of the expected type and the error
	// code matches the value specified in the test instance.
	if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) {
		return fmt.Errorf("wrong error - got %T (%[1]v), want %T",
			gotErr, wantErr)
	}
	if gotErr == nil {
		return nil
	}

	// Ensure the want error type is a rule error.
	werr, ok := wantErr.(RuleError)
	if !ok {
		return fmt.Errorf("unexpected test error type %T", wantErr)
	}

	// Ensure the error codes match. It's safe to use a raw type assert
	// here since the code above already proved they are the same type and
	// the want error is a rule error.
	gotErrorCode := gotErr.(RuleError).ErrorCode
	if gotErrorCode != werr.ErrorCode {
		return fmt.Errorf("mismatched error code - got %v (%v), want %v",
			gotErrorCode, gotErr, werr.ErrorCode)
	}

	return nil
}
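
// exampleCheckRuleError is an illustrative sketch added during editing, not
// part of the original source: a typical assertion compares a returned error
// against the rule error a test expects, matching both type and error code.
func exampleCheckRuleError(gotErr error) error {
	wantErr := ruleError(ErrParentBlockUnknown, "")
	return checkRuleError(gotErr, wantErr) // nil when type and code match
}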
@@ -1,586 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"github.com/daglabs/btcd/btcec"
	"github.com/daglabs/btcd/txscript"
)

// -----------------------------------------------------------------------------
// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
// of binary octets to represent an arbitrarily large integer. The scheme
// employs a most significant byte (MSB) base-128 encoding where the high bit in
// each byte indicates whether or not the byte is the final one. In addition,
// to ensure there are no redundant encodings, an offset is subtracted every
// time a group of 7 bits is shifted out. Therefore each integer can be
// represented in exactly one way, and each representation stands for exactly
// one integer.
//
// Another nice property of this encoding is that it provides a compact
// representation of values that are typically used to indicate sizes. For
// example, the values 0 - 127 are represented with a single byte, 128 - 16511
// with two bytes, and 16512 - 2113663 with three bytes.
//
// While the encoding allows arbitrarily large integers, it is artificially
// limited in this code to an unsigned 64-bit integer for efficiency purposes.
//
// Example encodings:
//           0 -> [0x00]
//         127 -> [0x7f]                 * Max 1-byte value
//         128 -> [0x80 0x00]
//         129 -> [0x80 0x01]
//         255 -> [0x80 0x7f]
//         256 -> [0x81 0x00]
//       16511 -> [0xff 0x7f]            * Max 2-byte value
//       16512 -> [0x80 0x80 0x00]
//       32895 -> [0x80 0xff 0x7f]
//     2113663 -> [0xff 0xff 0x7f]       * Max 3-byte value
//   270549119 -> [0xff 0xff 0xff 0x7f]  * Max 4-byte value
//      2^64-1 -> [0x80 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0x7f]
//
// References:
//   https://en.wikipedia.org/wiki/Variable-length_quantity
//   http://www.codecodex.com/wiki/Variable-Length_Integers
// -----------------------------------------------------------------------------

// serializeSizeVLQ returns the number of bytes it would take to serialize the
// passed number as a variable-length quantity according to the format described
// above.
func serializeSizeVLQ(n uint64) int {
	size := 1
	for ; n > 0x7f; n = (n >> 7) - 1 {
		size++
	}

	return size
}

// putVLQ serializes the provided number to a variable-length quantity according
// to the format described above and returns the number of bytes of the encoded
// value. The result is placed directly into the passed byte slice which must
// be at least large enough to handle the number of bytes returned by the
// serializeSizeVLQ function or it will panic.
func putVLQ(target []byte, n uint64) int {
	offset := 0
	for ; ; offset++ {
		// The high bit is set when another byte follows.
		highBitMask := byte(0x80)
		if offset == 0 {
			highBitMask = 0x00
		}

		target[offset] = byte(n&0x7f) | highBitMask
		if n <= 0x7f {
			break
		}
		n = (n >> 7) - 1
	}

	// Reverse the bytes so it is MSB-encoded.
	for i, j := 0, offset; i < j; i, j = i+1, j-1 {
		target[i], target[j] = target[j], target[i]
	}

	return offset + 1
}

// deserializeVLQ deserializes the provided variable-length quantity according
// to the format described above. It also returns the number of bytes
// deserialized.
func deserializeVLQ(serialized []byte) (uint64, int) {
	var n uint64
	var size int
	for _, val := range serialized {
		size++
		n = (n << 7) | uint64(val&0x7f)
		if val&0x80 != 0x80 {
			break
		}
		n++
	}

	return n, size
}
|
||||
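
To make the round-trip behavior of the three VLQ helpers above concrete, here is a minimal sketch (a hypothetical addition, not part of the original file; it assumes it compiles alongside the functions above in package blockdag) that encodes the maximum 2-byte value from the table and decodes it back:

func exampleVLQRoundTrip() {
    const val = uint64(16511) // Max 2-byte value per the encoding table above.

    // Allocate exactly as many bytes as the encoding needs, then encode.
    buf := make([]byte, serializeSizeVLQ(val))
    putVLQ(buf, val) // buf now holds [0xff 0x7f].

    // Decoding must yield the original value and consume the whole buffer.
    decoded, bytesRead := deserializeVLQ(buf)
    if decoded != val || bytesRead != len(buf) {
        panic("VLQ round-trip mismatch")
    }
}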

// -----------------------------------------------------------------------------
// In order to reduce the size of stored scripts, a domain specific compression
// algorithm is used which recognizes standard scripts and stores them using
// fewer bytes than the original script. The compression algorithm used here was
// obtained from Bitcoin Core, so all credits for the algorithm go to it.
//
// The general serialized format is:
//
//   <script size or type><script data>
//
//   Field                 Type     Size
//   script size or type   VLQ      variable
//   script data           []byte   variable
//
// The specific serialized format for each recognized standard script is:
//
// - Pay-to-pubkey-hash: (21 bytes) - <0><20-byte pubkey hash>
// - Pay-to-script-hash: (21 bytes) - <1><20-byte script hash>
// - Pay-to-pubkey**:    (33 bytes) - <2, 3, 4, or 5><32-byte pubkey X value>
//   2, 3 = compressed pubkey with bit 0 specifying the y coordinate to use
//   4, 5 = uncompressed pubkey with bit 0 specifying the y coordinate to use
//   ** Only valid public keys starting with 0x02, 0x03, and 0x04 are supported.
//
// Any scripts which are not recognized as one of the aforementioned standard
// scripts are encoded using the general serialized format and encode the script
// size as the sum of the actual size of the script and the number of special
// cases.
// -----------------------------------------------------------------------------

// The following constants specify the special values used to identify a
// special script type in the domain-specific compressed script encoding.
//
// NOTE: This section specifically does not use iota since these values are
// serialized and must be stable for long-term storage.
const (
    // cstPayToPubKeyHash identifies a compressed pay-to-pubkey-hash script.
    cstPayToPubKeyHash = 0

    // cstPayToScriptHash identifies a compressed pay-to-script-hash script.
    cstPayToScriptHash = 1

    // cstPayToPubKeyComp2 identifies a compressed pay-to-pubkey script to
    // a compressed pubkey. Bit 0 specifies which y-coordinate to use
    // to reconstruct the full uncompressed pubkey.
    cstPayToPubKeyComp2 = 2

    // cstPayToPubKeyComp3 identifies a compressed pay-to-pubkey script to
    // a compressed pubkey. Bit 0 specifies which y-coordinate to use
    // to reconstruct the full uncompressed pubkey.
    cstPayToPubKeyComp3 = 3

    // cstPayToPubKeyUncomp4 identifies a compressed pay-to-pubkey script to
    // an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
    // to reconstruct the full uncompressed pubkey.
    cstPayToPubKeyUncomp4 = 4

    // cstPayToPubKeyUncomp5 identifies a compressed pay-to-pubkey script to
    // an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
    // to reconstruct the full uncompressed pubkey.
    cstPayToPubKeyUncomp5 = 5

    // numSpecialScripts is the number of special scripts recognized by the
    // domain-specific script compression algorithm.
    numSpecialScripts = 6
)

// isPubKeyHash returns whether or not the passed public key script is a
// standard pay-to-pubkey-hash script along with the pubkey hash it is paying to
// if it is.
func isPubKeyHash(script []byte) (bool, []byte) {
    if len(script) == 25 && script[0] == txscript.OpDup &&
        script[1] == txscript.OpHash160 &&
        script[2] == txscript.OpData20 &&
        script[23] == txscript.OpEqualVerify &&
        script[24] == txscript.OpCheckSig {

        return true, script[3:23]
    }

    return false, nil
}

// isScriptHash returns whether or not the passed public key script is a
// standard pay-to-script-hash script along with the script hash it is paying to
// if it is.
func isScriptHash(script []byte) (bool, []byte) {
    if len(script) == 23 && script[0] == txscript.OpHash160 &&
        script[1] == txscript.OpData20 &&
        script[22] == txscript.OpEqual {

        return true, script[2:22]
    }

    return false, nil
}

// isPubKey returns whether or not the passed public key script is a standard
// pay-to-pubkey script that pays to a valid compressed or uncompressed public
// key along with the serialized pubkey it is paying to if it is.
//
// NOTE: This function ensures the public key is actually valid since the
// compression algorithm requires valid pubkeys. It does not support hybrid
// pubkeys. This means that even if the script has the correct form for a
// pay-to-pubkey script, this function will only return true when it is paying
// to a valid compressed or uncompressed pubkey.
func isPubKey(script []byte) (bool, []byte) {
    // Pay-to-compressed-pubkey script.
    if len(script) == 35 && script[0] == txscript.OpData33 &&
        script[34] == txscript.OpCheckSig && (script[1] == 0x02 ||
        script[1] == 0x03) {

        // Ensure the public key is valid.
        serializedPubKey := script[1:34]
        _, err := btcec.ParsePubKey(serializedPubKey, btcec.S256())
        if err == nil {
            return true, serializedPubKey
        }
    }

    // Pay-to-uncompressed-pubkey script.
    if len(script) == 67 && script[0] == txscript.OpData65 &&
        script[66] == txscript.OpCheckSig && script[1] == 0x04 {

        // Ensure the public key is valid.
        serializedPubKey := script[1:66]
        _, err := btcec.ParsePubKey(serializedPubKey, btcec.S256())
        if err == nil {
            return true, serializedPubKey
        }
    }

    return false, nil
}

// compressedScriptSize returns the number of bytes the passed script would take
// when encoded with the domain specific compression algorithm described above.
func compressedScriptSize(pkScript []byte) int {
    // Pay-to-pubkey-hash script.
    if valid, _ := isPubKeyHash(pkScript); valid {
        return 21
    }

    // Pay-to-script-hash script.
    if valid, _ := isScriptHash(pkScript); valid {
        return 21
    }

    // Pay-to-pubkey (compressed or uncompressed) script.
    if valid, _ := isPubKey(pkScript); valid {
        return 33
    }

    // When none of the above special cases apply, encode the script as is
    // preceded by the sum of its size and the number of special cases
    // encoded as a variable length quantity.
    return serializeSizeVLQ(uint64(len(pkScript)+numSpecialScripts)) +
        len(pkScript)
}

// decodeCompressedScriptSize treats the passed serialized bytes as a compressed
// script, possibly followed by other data, and returns the number of bytes it
// occupies taking into account the special encoding of the script size by the
// domain specific compression algorithm described above.
func decodeCompressedScriptSize(serialized []byte) int {
    scriptSize, bytesRead := deserializeVLQ(serialized)
    if bytesRead == 0 {
        return 0
    }

    switch scriptSize {
    case cstPayToPubKeyHash:
        return 21

    case cstPayToScriptHash:
        return 21

    case cstPayToPubKeyComp2, cstPayToPubKeyComp3, cstPayToPubKeyUncomp4,
        cstPayToPubKeyUncomp5:
        return 33
    }

    scriptSize -= numSpecialScripts
    scriptSize += uint64(bytesRead)
    return int(scriptSize)
}

// putCompressedScript compresses the passed script according to the domain
// specific compression algorithm described above directly into the passed
// target byte slice. The target byte slice must be at least large enough to
// handle the number of bytes returned by the compressedScriptSize function or
// it will panic.
func putCompressedScript(target, pkScript []byte) int {
    // Pay-to-pubkey-hash script.
    if valid, hash := isPubKeyHash(pkScript); valid {
        target[0] = cstPayToPubKeyHash
        copy(target[1:21], hash)
        return 21
    }

    // Pay-to-script-hash script.
    if valid, hash := isScriptHash(pkScript); valid {
        target[0] = cstPayToScriptHash
        copy(target[1:21], hash)
        return 21
    }

    // Pay-to-pubkey (compressed or uncompressed) script.
    if valid, serializedPubKey := isPubKey(pkScript); valid {
        pubKeyFormat := serializedPubKey[0]
        switch pubKeyFormat {
        case 0x02, 0x03:
            target[0] = pubKeyFormat
            copy(target[1:33], serializedPubKey[1:33])
            return 33
        case 0x04:
            // Encode the oddness of the serialized pubkey into the
            // compressed script type.
            target[0] = pubKeyFormat | (serializedPubKey[64] & 0x01)
            copy(target[1:33], serializedPubKey[1:33])
            return 33
        }
    }

    // When none of the above special cases apply, encode the unmodified
    // script preceded by the sum of its size and the number of special
    // cases encoded as a variable length quantity.
    encodedSize := uint64(len(pkScript) + numSpecialScripts)
    vlqSizeLen := putVLQ(target, encodedSize)
    copy(target[vlqSizeLen:], pkScript)
    return vlqSizeLen + len(pkScript)
}

// decompressScript returns the original script obtained by decompressing the
// passed compressed script according to the domain specific compression
// algorithm described above.
//
// NOTE: The script parameter must already have been proven to be long enough
// to contain the number of bytes returned by decodeCompressedScriptSize or it
// will panic. This is acceptable since it is only an internal function.
func decompressScript(compressedPkScript []byte) []byte {
    // In practice this function will not be called with a zero-length or
    // nil script since the nil script encoding includes the length, however
    // the code below assumes the length exists, so just return nil now if
    // the function ever ends up being called with a nil script in the
    // future.
    if len(compressedPkScript) == 0 {
        return nil
    }

    // Decode the script size and examine it for the special cases.
    encodedScriptSize, bytesRead := deserializeVLQ(compressedPkScript)
    switch encodedScriptSize {
    // Pay-to-pubkey-hash script. The resulting script is:
    // <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
    case cstPayToPubKeyHash:
        pkScript := make([]byte, 25)
        pkScript[0] = txscript.OpDup
        pkScript[1] = txscript.OpHash160
        pkScript[2] = txscript.OpData20
        copy(pkScript[3:], compressedPkScript[bytesRead:bytesRead+20])
        pkScript[23] = txscript.OpEqualVerify
        pkScript[24] = txscript.OpCheckSig
        return pkScript

    // Pay-to-script-hash script. The resulting script is:
    // <OP_HASH160><20 byte script hash><OP_EQUAL>
    case cstPayToScriptHash:
        pkScript := make([]byte, 23)
        pkScript[0] = txscript.OpHash160
        pkScript[1] = txscript.OpData20
        copy(pkScript[2:], compressedPkScript[bytesRead:bytesRead+20])
        pkScript[22] = txscript.OpEqual
        return pkScript

    // Pay-to-compressed-pubkey script. The resulting script is:
    // <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
    case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
        pkScript := make([]byte, 35)
        pkScript[0] = txscript.OpData33
        pkScript[1] = byte(encodedScriptSize)
        copy(pkScript[2:], compressedPkScript[bytesRead:bytesRead+32])
        pkScript[34] = txscript.OpCheckSig
        return pkScript

    // Pay-to-uncompressed-pubkey script. The resulting script is:
    // <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
    case cstPayToPubKeyUncomp4, cstPayToPubKeyUncomp5:
        // Change the leading byte to the appropriate compressed pubkey
        // identifier (0x02 or 0x03) so it can be decoded as a
        // compressed pubkey. This really should never fail since the
        // encoding ensures it is valid before compressing to this type.
        compressedKey := make([]byte, 33)
        compressedKey[0] = byte(encodedScriptSize - 2)
        copy(compressedKey[1:], compressedPkScript[1:])
        key, err := btcec.ParsePubKey(compressedKey, btcec.S256())
        if err != nil {
            return nil
        }

        pkScript := make([]byte, 67)
        pkScript[0] = txscript.OpData65
        copy(pkScript[1:], key.SerializeUncompressed())
        pkScript[66] = txscript.OpCheckSig
        return pkScript
    }

    // When none of the special cases apply, the script was encoded using
    // the general format, so reduce the script size by the number of
    // special cases and return the unmodified script.
    scriptSize := int(encodedScriptSize - numSpecialScripts)
    pkScript := make([]byte, scriptSize)
    copy(pkScript, compressedPkScript[bytesRead:bytesRead+scriptSize])
    return pkScript
}
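
As a quick sanity check of the compression path just defined, the following sketch (hypothetical, not in the original file; it assumes package blockdag plus a standard library "bytes" import in addition to the imports above) compresses a pay-to-pubkey-hash script into its 21-byte form and decompresses it back:

func exampleScriptCompressionRoundTrip() bool {
    // Build a standard 25-byte pay-to-pubkey-hash script:
    // OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG.
    // The hash bytes are left as zeros purely for illustration.
    pkScript := make([]byte, 25)
    pkScript[0] = txscript.OpDup
    pkScript[1] = txscript.OpHash160
    pkScript[2] = txscript.OpData20
    pkScript[23] = txscript.OpEqualVerify
    pkScript[24] = txscript.OpCheckSig

    // compressedScriptSize reports 21: <cstPayToPubKeyHash><20-byte hash>.
    compressed := make([]byte, compressedScriptSize(pkScript))
    putCompressedScript(compressed, pkScript)

    // Decompressing must reproduce the original script byte for byte.
    return bytes.Equal(decompressScript(compressed), pkScript)
}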

// -----------------------------------------------------------------------------
// In order to reduce the size of stored amounts, a domain specific compression
// algorithm is used which relies on there typically being a lot of zeroes at
// the end of the amounts. The compression algorithm used here was obtained
// from Bitcoin Core, so all credits for the algorithm go to it.
//
// While this is simply exchanging one uint64 for another, the resulting value
// for typical amounts has a much smaller magnitude which results in fewer bytes
// when encoded as variable length quantity. For example, consider the amount
// of 0.1 BTC which is 10000000 satoshi. Encoding 10000000 as a VLQ would take
// 4 bytes while encoding the compressed value of 8 as a VLQ only takes 1 byte.
//
// Essentially the compression is achieved by splitting the value into an
// exponent in the range [0-9] and a digit in the range [1-9], when possible,
// and encoding them in a way that can be decoded. More specifically, the
// encoding is as follows:
// - 0 is 0
// - Find the exponent, e, as the largest power of 10 that evenly divides the
//   value up to a maximum of 9
// - When e < 9, the final digit can't be 0 so store it as d and remove it by
//   dividing the value by 10 (call the result n). The encoded value is thus:
//   1 + 10*(9*n + d-1) + e
// - When e==9, the only thing known is the amount is not 0. The encoded value
//   is thus:
//   1 + 10*(n-1) + e == 10 + 10*(n-1)
//
// Example encodings:
// (The numbers in parenthesis are the number of bytes when serialized as a VLQ)
//            0 (1) -> 0         (1) *  0.00000000 BTC
//         1000 (2) -> 4         (1) *  0.00001000 BTC
//        10000 (2) -> 5         (1) *  0.00010000 BTC
//     12345678 (4) -> 111111101 (4) *  0.12345678 BTC
//     50000000 (4) -> 47        (1) *  0.50000000 BTC
//    100000000 (4) -> 9         (1) *  1.00000000 BTC
//    500000000 (5) -> 49        (1) *  5.00000000 BTC
//   1000000000 (5) -> 10        (1) * 10.00000000 BTC
// -----------------------------------------------------------------------------

// compressTxOutAmount compresses the passed amount according to the domain
// specific compression algorithm described above.
func compressTxOutAmount(amount uint64) uint64 {
    // No need to do any work if it's zero.
    if amount == 0 {
        return 0
    }

    // Find the largest power of 10 (max of 9) that evenly divides the
    // value.
    exponent := uint64(0)
    for amount%10 == 0 && exponent < 9 {
        amount /= 10
        exponent++
    }

    // The compressed result for exponents less than 9 is:
    // 1 + 10*(9*n + d-1) + e
    if exponent < 9 {
        lastDigit := amount % 10
        amount /= 10
        return 1 + 10*(9*amount+lastDigit-1) + exponent
    }

    // The compressed result for an exponent of 9 is:
    // 1 + 10*(n-1) + e == 10 + 10*(n-1)
    return 10 + 10*(amount-1)
}

// decompressTxOutAmount returns the original amount the passed compressed
// amount represents according to the domain specific compression algorithm
// described above.
func decompressTxOutAmount(amount uint64) uint64 {
    // No need to do any work if it's zero.
    if amount == 0 {
        return 0
    }

    // The decompressed amount is either of the following two equations:
    // x = 1 + 10*(9*n + d - 1) + e
    // x = 1 + 10*(n - 1) + 9
    amount--

    // The decompressed amount is now one of the following two equations:
    // x = 10*(9*n + d - 1) + e
    // x = 10*(n - 1) + 9
    exponent := amount % 10
    amount /= 10

    // The decompressed amount is now one of the following two equations:
    // x = 9*n + d - 1  | where e < 9
    // x = n - 1        | where e = 9
    n := uint64(0)
    if exponent < 9 {
        lastDigit := amount%9 + 1
        amount /= 9
        n = amount*10 + lastDigit
    } else {
        n = amount + 1
    }

    // Apply the exponent.
    for ; exponent > 0; exponent-- {
        n *= 10
    }

    return n
}
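
The 0.1 BTC example from the comment block can be verified by hand: 10000000 satoshi has seven trailing zeros, so e=7; the remaining digit is d=1 with n=0, giving 1 + 10*(9*0 + 1-1) + 7 = 8. A hypothetical sketch (not in the original file, assuming package blockdag) checking both directions:

func exampleAmountRoundTrip() {
    // 0.1 BTC = 10000000 satoshi compresses to 8, per the arithmetic above.
    if compressTxOutAmount(10000000) != 8 {
        panic("expected compressed amount 8")
    }
    // Decompression inverts the encoding exactly.
    if decompressTxOutAmount(8) != 10000000 {
        panic("expected decompressed amount 10000000")
    }
}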

// -----------------------------------------------------------------------------
// Compressed transaction outputs consist of an amount and a public key script
// both compressed using the domain specific compression algorithms previously
// described.
//
// The serialized format is:
//
//   <compressed amount><compressed script>
//
//   Field                Type     Size
//   compressed amount    VLQ      variable
//   compressed script    []byte   variable
// -----------------------------------------------------------------------------

// compressedTxOutSize returns the number of bytes the passed transaction output
// fields would take when encoded with the format described above.
func compressedTxOutSize(amount uint64, pkScript []byte) int {
    return serializeSizeVLQ(compressTxOutAmount(amount)) +
        compressedScriptSize(pkScript)
}

// putCompressedTxOut compresses the passed amount and script according to their
// domain specific compression algorithms and encodes them directly into the
// passed target byte slice with the format described above. The target byte
// slice must be at least large enough to handle the number of bytes returned by
// the compressedTxOutSize function or it will panic.
func putCompressedTxOut(target []byte, amount uint64, pkScript []byte) int {
    offset := putVLQ(target, compressTxOutAmount(amount))
    offset += putCompressedScript(target[offset:], pkScript)
    return offset
}

// decodeCompressedTxOut decodes the passed compressed txout, possibly followed
// by other data, into its uncompressed amount and script and returns them along
// with the number of bytes they occupied prior to decompression.
func decodeCompressedTxOut(serialized []byte) (uint64, []byte, int, error) {
    // Deserialize the compressed amount and ensure there are bytes
    // remaining for the compressed script.
    compressedAmount, bytesRead := deserializeVLQ(serialized)
    if bytesRead >= len(serialized) {
        return 0, nil, bytesRead, errDeserialize("unexpected end of " +
            "data after compressed amount")
    }

    // Decode the compressed script size and ensure there are enough bytes
    // left in the slice for it.
    scriptSize := decodeCompressedScriptSize(serialized[bytesRead:])
    if len(serialized[bytesRead:]) < scriptSize {
        return 0, nil, bytesRead, errDeserialize("unexpected end of " +
            "data after script size")
    }

    // Decompress and return the amount and script.
    amount := decompressTxOutAmount(compressedAmount)
    script := decompressScript(serialized[bytesRead : bytesRead+scriptSize])
    return amount, script, bytesRead + scriptSize, nil
}
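
Putting the two compressors together, a hypothetical round-trip helper (not part of the original file; assumes package blockdag plus "bytes" and "fmt" imports) mirrors how callers are expected to pair putCompressedTxOut with decodeCompressedTxOut:

func exampleTxOutRoundTrip(amount uint64, pkScript []byte) error {
    // Size the target buffer exactly, then encode the txout into it.
    target := make([]byte, compressedTxOutSize(amount, pkScript))
    putCompressedTxOut(target, amount, pkScript)

    // Decoding must return the original amount and script.
    gotAmount, gotScript, _, err := decodeCompressedTxOut(target)
    if err != nil {
        return err
    }
    if gotAmount != amount || !bytes.Equal(gotScript, pkScript) {
        return fmt.Errorf("txout round-trip mismatch")
    }
    return nil
}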
@@ -1,447 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
    "bytes"
    "encoding/hex"
    "testing"
)

// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
    b, err := hex.DecodeString(s)
    if err != nil {
        panic("invalid hex in source file: " + s)
    }
    return b
}

// TestVLQ ensures the variable length quantity serialization, deserialization,
// and size calculation works as expected.
func TestVLQ(t *testing.T) {
    t.Parallel()

    tests := []struct {
        val        uint64
        serialized []byte
    }{
        {0, hexToBytes("00")},
        {1, hexToBytes("01")},
        {127, hexToBytes("7f")},
        {128, hexToBytes("8000")},
        {129, hexToBytes("8001")},
        {255, hexToBytes("807f")},
        {256, hexToBytes("8100")},
        {16383, hexToBytes("fe7f")},
        {16384, hexToBytes("ff00")},
        {16511, hexToBytes("ff7f")}, // Max 2-byte value
        {16512, hexToBytes("808000")},
        {16513, hexToBytes("808001")},
        {16639, hexToBytes("80807f")},
        {32895, hexToBytes("80ff7f")},
        {2113663, hexToBytes("ffff7f")}, // Max 3-byte value
        {2113664, hexToBytes("80808000")},
        {270549119, hexToBytes("ffffff7f")}, // Max 4-byte value
        {270549120, hexToBytes("8080808000")},
        {2147483647, hexToBytes("86fefefe7f")},
        {2147483648, hexToBytes("86fefeff00")},
        {4294967295, hexToBytes("8efefefe7f")}, // Max uint32, 5 bytes
        // Max uint64, 10 bytes
        {18446744073709551615, hexToBytes("80fefefefefefefefe7f")},
    }

    for _, test := range tests {
        // Ensure the function that calculates the serialized size
        // without actually serializing the value works properly.
        gotSize := serializeSizeVLQ(test.val)
        if gotSize != len(test.serialized) {
            t.Errorf("serializeSizeVLQ: did not get expected size "+
                "for %d - got %d, want %d", test.val, gotSize,
                len(test.serialized))
            continue
        }

        // Ensure the value serializes to the expected bytes.
        gotBytes := make([]byte, gotSize)
        gotBytesWritten := putVLQ(gotBytes, test.val)
        if !bytes.Equal(gotBytes, test.serialized) {
            t.Errorf("putVLQ: did not get expected bytes "+
                "for %d - got %x, want %x", test.val, gotBytes,
                test.serialized)
            continue
        }
        if gotBytesWritten != len(test.serialized) {
            t.Errorf("putVLQ: did not get expected number "+
                "of bytes written for %d - got %d, want %d",
                test.val, gotBytesWritten, len(test.serialized))
            continue
        }

        // Ensure the serialized bytes deserialize to the expected
        // value.
        gotVal, gotBytesRead := deserializeVLQ(test.serialized)
        if gotVal != test.val {
            t.Errorf("deserializeVLQ: did not get expected value "+
                "for %x - got %d, want %d", test.serialized,
                gotVal, test.val)
            continue
        }
        if gotBytesRead != len(test.serialized) {
            t.Errorf("deserializeVLQ: did not get expected number "+
                "of bytes read for %x - got %d, want %d",
                test.serialized, gotBytesRead,
                len(test.serialized))
            continue
        }
    }
}

// TestScriptCompression ensures the domain-specific script compression and
// decompression works as expected.
func TestScriptCompression(t *testing.T) {
    t.Parallel()

    tests := []struct {
        name         string
        uncompressed []byte
        compressed   []byte
    }{
        {
            name:         "nil",
            uncompressed: nil,
            compressed:   hexToBytes("06"),
        },
        {
            name:         "pay-to-pubkey-hash 1",
            uncompressed: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
            compressed:   hexToBytes("001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
        },
        {
            name:         "pay-to-pubkey-hash 2",
            uncompressed: hexToBytes("76a914e34cce70c86373273efcc54ce7d2a491bb4a0e8488ac"),
            compressed:   hexToBytes("00e34cce70c86373273efcc54ce7d2a491bb4a0e84"),
        },
        {
            name:         "pay-to-script-hash 1",
            uncompressed: hexToBytes("a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87"),
            compressed:   hexToBytes("01da1745e9b549bd0bfa1a569971c77eba30cd5a4b"),
        },
        {
            name:         "pay-to-script-hash 2",
            uncompressed: hexToBytes("a914f815b036d9bbbce5e9f2a00abd1bf3dc91e9551087"),
            compressed:   hexToBytes("01f815b036d9bbbce5e9f2a00abd1bf3dc91e95510"),
        },
        {
            name:         "pay-to-pubkey compressed 0x02",
            uncompressed: hexToBytes("2102192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4ac"),
            compressed:   hexToBytes("02192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
        },
        {
            name:         "pay-to-pubkey compressed 0x03",
            uncompressed: hexToBytes("2103b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65ac"),
            compressed:   hexToBytes("03b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"),
        },
        {
            name:         "pay-to-pubkey uncompressed 0x04 even",
            uncompressed: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
            compressed:   hexToBytes("04192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
        },
        {
            name:         "pay-to-pubkey uncompressed 0x04 odd",
            uncompressed: hexToBytes("410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac"),
            compressed:   hexToBytes("0511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c"),
        },
        {
            name:         "pay-to-pubkey invalid pubkey",
            uncompressed: hexToBytes("3302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
            compressed:   hexToBytes("293302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
        },
        {
            name:         "null data",
            uncompressed: hexToBytes("6a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
            compressed:   hexToBytes("286a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
        },
        {
            name:         "requires 2 size bytes - data push 200 bytes",
            uncompressed: append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...),
            // [0x80, 0x50] = 208 as a variable length quantity
            // [0x4c, 0xc8] = OP_PUSHDATA1 200
            compressed: append(hexToBytes("80504cc8"), bytes.Repeat([]byte{0x00}, 200)...),
        },
    }

    for _, test := range tests {
        // Ensure the function that calculates the serialized size
        // without actually serializing the value works properly.
        gotSize := compressedScriptSize(test.uncompressed)
        if gotSize != len(test.compressed) {
            t.Errorf("compressedScriptSize (%s): did not get "+
                "expected size - got %d, want %d", test.name,
                gotSize, len(test.compressed))
            continue
        }

        // Ensure the script compresses to the expected bytes.
        gotCompressed := make([]byte, gotSize)
        gotBytesWritten := putCompressedScript(gotCompressed,
            test.uncompressed)
        if !bytes.Equal(gotCompressed, test.compressed) {
            t.Errorf("putCompressedScript (%s): did not get "+
                "expected bytes - got %x, want %x", test.name,
                gotCompressed, test.compressed)
            continue
        }
        if gotBytesWritten != len(test.compressed) {
            t.Errorf("putCompressedScript (%s): did not get "+
                "expected number of bytes written - got %d, "+
                "want %d", test.name, gotBytesWritten,
                len(test.compressed))
            continue
        }

        // Ensure the compressed script size is properly decoded from
        // the compressed script.
        gotDecodedSize := decodeCompressedScriptSize(test.compressed)
        if gotDecodedSize != len(test.compressed) {
            t.Errorf("decodeCompressedScriptSize (%s): did not get "+
                "expected size - got %d, want %d", test.name,
                gotDecodedSize, len(test.compressed))
            continue
        }

        // Ensure the script decompresses to the expected bytes.
        gotDecompressed := decompressScript(test.compressed)
        if !bytes.Equal(gotDecompressed, test.uncompressed) {
            t.Errorf("decompressScript (%s): did not get expected "+
                "bytes - got %x, want %x", test.name,
                gotDecompressed, test.uncompressed)
            continue
        }
    }
}

// TestScriptCompressionErrors ensures calling various functions related to
// script compression with incorrect data returns the expected results.
func TestScriptCompressionErrors(t *testing.T) {
    t.Parallel()

    // A nil script must result in a decoded size of 0.
    if gotSize := decodeCompressedScriptSize(nil); gotSize != 0 {
        t.Fatalf("decodeCompressedScriptSize with nil script did not "+
            "return 0 - got %d", gotSize)
    }

    // A nil script must result in a nil decompressed script.
    if gotScript := decompressScript(nil); gotScript != nil {
        t.Fatalf("decompressScript with nil script did not return nil "+
            "decompressed script - got %x", gotScript)
    }

    // A compressed script for a pay-to-pubkey (uncompressed) that results
    // in an invalid pubkey must result in a nil decompressed script.
    compressedScript := hexToBytes("04012d74d0cb94344c9569c2e77901573d8d" +
        "7903c3ebec3a957724895dca52c6b4")
    if gotScript := decompressScript(compressedScript); gotScript != nil {
        t.Fatalf("decompressScript with compressed pay-to-"+
            "uncompressed-pubkey that is invalid did not return "+
            "nil decompressed script - got %x", gotScript)
    }
}

// TestAmountCompression ensures the domain-specific transaction output amount
// compression and decompression works as expected.
func TestAmountCompression(t *testing.T) {
    t.Parallel()

    tests := []struct {
        name         string
        uncompressed uint64
        compressed   uint64
    }{
        {
            name:         "0 BTC (sometimes used in nulldata)",
            uncompressed: 0,
            compressed:   0,
        },
        {
            name:         "546 Satoshi (current network dust value)",
            uncompressed: 546,
            compressed:   4911,
        },
        {
            name:         "0.00001 BTC (typical transaction fee)",
            uncompressed: 1000,
            compressed:   4,
        },
        {
            name:         "0.0001 BTC (typical transaction fee)",
            uncompressed: 10000,
            compressed:   5,
        },
        {
            name:         "0.12345678 BTC",
            uncompressed: 12345678,
            compressed:   111111101,
        },
        {
            name:         "0.5 BTC",
            uncompressed: 50000000,
            compressed:   48,
        },
        {
            name:         "1 BTC",
            uncompressed: 100000000,
            compressed:   9,
        },
        {
            name:         "5 BTC",
            uncompressed: 500000000,
            compressed:   49,
        },
        {
            name:         "21000000 BTC (max minted coins)",
            uncompressed: 2100000000000000,
            compressed:   21000000,
        },
    }

    for _, test := range tests {
        // Ensure the amount compresses to the expected value.
        gotCompressed := compressTxOutAmount(test.uncompressed)
        if gotCompressed != test.compressed {
            t.Errorf("compressTxOutAmount (%s): did not get "+
                "expected value - got %d, want %d", test.name,
                gotCompressed, test.compressed)
            continue
        }

        // Ensure the value decompresses to the expected value.
        gotDecompressed := decompressTxOutAmount(test.compressed)
        if gotDecompressed != test.uncompressed {
            t.Errorf("decompressTxOutAmount (%s): did not get "+
                "expected value - got %d, want %d", test.name,
                gotDecompressed, test.uncompressed)
            continue
        }
    }
}

// TestCompressedTxOut ensures the transaction output serialization and
// deserialization works as expected.
func TestCompressedTxOut(t *testing.T) {
    t.Parallel()

    tests := []struct {
        name       string
        amount     uint64
        pkScript   []byte
        compressed []byte
    }{
        {
            name:       "nulldata with 0 BTC",
            amount:     0,
            pkScript:   hexToBytes("6a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
            compressed: hexToBytes("00286a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
        },
        {
            name:       "pay-to-pubkey-hash dust",
            amount:     546,
            pkScript:   hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
            compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
        },
        {
            name:       "pay-to-pubkey uncompressed 1 BTC",
            amount:     100000000,
            pkScript:   hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
            compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
        },
    }

    for _, test := range tests {
        // Ensure the function that calculates the serialized size
        // without actually serializing the txout works properly.
        gotSize := compressedTxOutSize(test.amount, test.pkScript)
        if gotSize != len(test.compressed) {
            t.Errorf("compressedTxOutSize (%s): did not get "+
                "expected size - got %d, want %d", test.name,
                gotSize, len(test.compressed))
            continue
        }

        // Ensure the txout compresses to the expected value.
        gotCompressed := make([]byte, gotSize)
        gotBytesWritten := putCompressedTxOut(gotCompressed,
            test.amount, test.pkScript)
        if !bytes.Equal(gotCompressed, test.compressed) {
            t.Errorf("putCompressedTxOut (%s): did not get expected "+
                "bytes - got %x, want %x", test.name,
                gotCompressed, test.compressed)
            continue
        }
        if gotBytesWritten != len(test.compressed) {
            t.Errorf("putCompressedTxOut (%s): did not get expected "+
                "number of bytes written - got %d, want %d",
                test.name, gotBytesWritten,
                len(test.compressed))
            continue
        }

        // Ensure the serialized bytes are decoded back to the expected
        // uncompressed values.
        gotAmount, gotScript, gotBytesRead, err := decodeCompressedTxOut(
            test.compressed)
        if err != nil {
            t.Errorf("decodeCompressedTxOut (%s): unexpected "+
                "error: %v", test.name, err)
            continue
        }
        if gotAmount != test.amount {
            t.Errorf("decodeCompressedTxOut (%s): did not get "+
                "expected amount - got %d, want %d",
                test.name, gotAmount, test.amount)
            continue
        }
        if !bytes.Equal(gotScript, test.pkScript) {
            t.Errorf("decodeCompressedTxOut (%s): did not get "+
                "expected script - got %x, want %x",
                test.name, gotScript, test.pkScript)
            continue
        }
        if gotBytesRead != len(test.compressed) {
            t.Errorf("decodeCompressedTxOut (%s): did not get "+
                "expected number of bytes read - got %d, want %d",
                test.name, gotBytesRead, len(test.compressed))
            continue
        }
    }
}

// TestTxOutCompressionErrors ensures calling various functions related to
// txout compression with incorrect data returns the expected results.
func TestTxOutCompressionErrors(t *testing.T) {
    t.Parallel()

    // A compressed txout with a missing compressed script must error.
    compressedTxOut := hexToBytes("00")
    _, _, _, err := decodeCompressedTxOut(compressedTxOut)
    if !isDeserializeErr(err) {
        t.Fatalf("decodeCompressedTxOut with missing compressed script "+
            "did not return expected error type - got %T, want "+
            "errDeserialize", err)
    }

    // A compressed txout with a short compressed script must error.
    compressedTxOut = hexToBytes("0010")
    _, _, _, err = decodeCompressedTxOut(compressedTxOut)
    if !isDeserializeErr(err) {
        t.Fatalf("decodeCompressedTxOut with short compressed script "+
            "did not return expected error type - got %T, want "+
            "errDeserialize", err)
    }
}
blockdag/dag.go (1739): File diff suppressed because it is too large
blockdag/dag_test.go (1240): File diff suppressed because it is too large
@@ -1,808 +0,0 @@
// Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
    "bytes"
    "encoding/binary"
    "encoding/json"
    "fmt"
    "sync"

    "github.com/daglabs/btcd/dagconfig/daghash"
    "github.com/daglabs/btcd/database"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/subnetworkid"
    "github.com/daglabs/btcd/wire"
)

const (
    // blockHdrSize is the size of a block header. This is simply the
    // constant from wire and is only provided here for convenience since
    // wire.MaxBlockHeaderPayload is quite long.
    blockHdrSize = wire.MaxBlockHeaderPayload

    // latestUTXOSetBucketVersion is the current version of the UTXO set
    // bucket that is used to track all unspent outputs.
    latestUTXOSetBucketVersion = 1
)

var (
    // blockIndexBucketName is the name of the db bucket used to house the
    // block headers and contextual information.
    blockIndexBucketName = []byte("blockheaderidx")

    // hashIndexBucketName is the name of the db bucket used to house the
    // block hash -> block height index.
    hashIndexBucketName = []byte("hashidx")

    // heightIndexBucketName is the name of the db bucket used to house
    // the block height -> block hash index.
    heightIndexBucketName = []byte("heightidx")

    // dagStateKeyName is the name of the db key used to store the DAG
    // tip hashes.
    dagStateKeyName = []byte("dagstate")

    // utxoSetVersionKeyName is the name of the db key used to store the
    // version of the utxo set currently in the database.
    utxoSetVersionKeyName = []byte("utxosetversion")

    // utxoSetBucketName is the name of the db bucket used to house the
    // unspent transaction output set.
    utxoSetBucketName = []byte("utxoset")

    // utxoDiffsBucketName is the name of the db bucket used to house the
    // diffs and diff children of blocks.
    utxoDiffsBucketName = []byte("utxodiffs")

    // subnetworksBucketName is the name of the db bucket used to store the
    // subnetwork registry.
    subnetworksBucketName = []byte("subnetworks")

    // localSubnetworkKeyName is the name of the db key used to store the
    // node's local subnetwork ID.
    localSubnetworkKeyName = []byte("localsubnetworkidkey")

    // byteOrder is the preferred byte order used for serializing numeric
    // fields for storage in the database.
    byteOrder = binary.LittleEndian
)

// errNotInDAG signifies that a block hash or height that is not in the
// DAG was requested.
type errNotInDAG string

// Error implements the error interface.
func (e errNotInDAG) Error() string {
    return string(e)
}

// isNotInDAGErr returns whether or not the passed error is an
// errNotInDAG error.
func isNotInDAGErr(err error) bool {
    _, ok := err.(errNotInDAG)
    return ok
}

// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string

// Error implements the error interface.
func (e errDeserialize) Error() string {
    return string(e)
}

// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
    _, ok := err.(errDeserialize)
    return ok
}

// dbPutVersion uses an existing database transaction to update the provided
// key in the metadata bucket to the given version. It is primarily used to
// track versions on entities such as buckets.
func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
    var serialized [4]byte
    byteOrder.PutUint32(serialized[:], version)
    return dbTx.Metadata().Put(key, serialized[:])
}

// -----------------------------------------------------------------------------
// The unspent transaction output (UTXO) set consists of an entry for each
// unspent output using a format that is optimized to reduce space using domain
// specific compression algorithms. This format is a slightly modified version
// of the format used in Bitcoin Core.
//
// Each entry is keyed by an outpoint as specified below. It is important to
// note that the key encoding uses a VLQ, which employs an MSB encoding so
// iteration of UTXOs when doing byte-wise comparisons will produce them in
// order.
//
// The serialized key format is:
//   <hash><output index>
//
//   Field           Type           Size
//   hash            daghash.Hash   daghash.HashSize
//   output index    VLQ            variable
//
// The serialized value format is:
//
//   <header code><compressed txout>
//
//   Field                 Type     Size
//   header code           VLQ      variable
//   compressed txout
//     compressed amount   VLQ      variable
//     compressed script   []byte   variable
//
// The serialized header code format is:
//   bit 0 - containing transaction is a block reward
//   bits 1-x - height of the block that contains the unspent txout
//
// Example 1:
// From tx in main blockchain:
// Blk 1, b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
//
//    03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52
//    <><------------------------------------------------------------------>
//     |                                   |
//   header code                  compressed txout
//
//    - header code: 0x03 (coinbase, height 1)
//    - compressed txout:
//      - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC)
//      - 0x04: special script type pay-to-pubkey
//      - 0x96...52: x-coordinate of the pubkey
//
// Example 2:
// From tx in main blockchain:
// Blk 113931, 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
//
//    8cf316800900b8025be1b3efc63b0ad48e7f9f10e87544528d58
//    <----><-------------------------------------------->
//      |                         |
//   header code          compressed txout
//
//    - header code: 0x8cf316 (not coinbase, height 113931)
//    - compressed txout:
//      - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 BTC)
//      - 0x00: special script type pay-to-pubkey-hash
//      - 0xb8...58: pubkey hash
//
// Example 3:
// From tx in main blockchain:
// Blk 338156, 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
//
//    a8a2588ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6
//    <----><-------------------------------------------------->
//      |                            |
//   header code            compressed txout
//
//    - header code: 0xa8a258 (not coinbase, height 338156)
//    - compressed txout:
//      - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 BTC)
//      - 0x01: special script type pay-to-script-hash
//      - 0x1d...e6: script hash
// -----------------------------------------------------------------------------

// maxUint32VLQSerializeSize is the maximum number of bytes a max uint32 takes
// to serialize as a VLQ.
var maxUint32VLQSerializeSize = serializeSizeVLQ(1<<32 - 1)

// outpointKeyPool defines a concurrent safe free list of byte slices used to
// provide temporary buffers for outpoint database keys.
var outpointKeyPool = sync.Pool{
    New: func() interface{} {
        b := make([]byte, daghash.HashSize+maxUint32VLQSerializeSize)
        return &b // Pointer to slice to avoid boxing alloc.
    },
}

// outpointKey returns a key suitable for use as a database key in the UTXO set
// while making use of a free list. A new buffer is allocated if there are not
// already any available on the free list. The returned byte slice should be
// returned to the free list by using the recycleOutpointKey function when the
// caller is done with it _unless_ the slice will need to live for longer than
// the caller can calculate such as when used to write to the database.
func outpointKey(outpoint wire.OutPoint) *[]byte {
    // A VLQ employs an MSB encoding, so they are useful not only to reduce
    // the amount of storage space, but also so iteration of UTXOs when
    // doing byte-wise comparisons will produce them in order.
    key := outpointKeyPool.Get().(*[]byte)
    idx := uint64(outpoint.Index)
    *key = (*key)[:daghash.HashSize+serializeSizeVLQ(idx)]
    copy(*key, outpoint.TxID[:])
    putVLQ((*key)[daghash.HashSize:], idx)
    return key
}

// recycleOutpointKey puts the provided byte slice, which should have been
// obtained via the outpointKey function, back on the free list.
func recycleOutpointKey(key *[]byte) {
    outpointKeyPool.Put(key)
}
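
The intended borrow/return discipline for these pooled keys, per the outpointKey comment above, looks roughly like this (a hypothetical sketch, not part of the original file; exampleOutpointKeyUsage is an illustrative name):

func exampleOutpointKeyUsage(dbTx database.Tx, outpoint wire.OutPoint) []byte {
    // Borrow a key buffer from the pool and use it for a read.
    key := outpointKey(outpoint)
    value := dbTx.Metadata().Bucket(utxoSetBucketName).Get(*key)

    // A read-only lookup is done with the key, so it can go straight back
    // on the free list. Keys passed to Put must NOT be recycled, as noted
    // in dbPutUTXODiff below, since the database may retain them.
    recycleOutpointKey(key)
    return value
}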

// utxoEntryHeaderCode returns the calculated header code to be used when
// serializing the provided utxo entry.
func utxoEntryHeaderCode(entry *UTXOEntry) uint64 {
    // As described in the serialization format comments, the header code
    // encodes the height shifted over one bit and the block reward flag in
    // the lowest bit.
    headerCode := uint64(entry.BlockHeight()) << 1
    if entry.IsBlockReward() {
        headerCode |= 0x01
    }

    return headerCode
}

// serializeUTXOEntry returns the entry serialized to a format that is suitable
// for long-term storage. The format is described in detail above.
func serializeUTXOEntry(entry *UTXOEntry) ([]byte, error) {
    // Encode the header code.
    headerCode := utxoEntryHeaderCode(entry)

    // Calculate the size needed to serialize the entry.
    size := serializeSizeVLQ(headerCode) +
        compressedTxOutSize(uint64(entry.Amount()), entry.PkScript())

    // Serialize the header code followed by the compressed unspent
    // transaction output.
    serialized := make([]byte, size)
    offset := putVLQ(serialized, headerCode)
    offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
        entry.PkScript())

    return serialized, nil
}
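
As a worked example of the header code layout (a hypothetical sketch, not in the original file): Example 1 in the format comment above has header code 0x03, and decoding it recovers the block-reward flag and the height:

func exampleHeaderCodeDecode() {
    code := uint64(0x03) // Header code from Example 1 above.

    // Bit 0 carries the block reward flag; the remaining bits carry the
    // block height, so 0x03 decodes to (block reward, height 1).
    isBlockReward := code&0x01 != 0
    blockHeight := int32(code >> 1)
    if !isBlockReward || blockHeight != 1 {
        panic("unexpected header code decoding")
    }
}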

// deserializeOutPoint decodes an outPoint from the passed serialized byte
// slice into a new wire.OutPoint using a format that is suitable for
// long-term storage. This format is described in detail above.
func deserializeOutPoint(serialized []byte) (*wire.OutPoint, error) {
    if len(serialized) <= daghash.HashSize {
        return nil, errDeserialize("unexpected end of data")
    }

    txID := daghash.TxID{}
    txID.SetBytes(serialized[:daghash.HashSize])
    index, _ := deserializeVLQ(serialized[daghash.HashSize:])
    return wire.NewOutPoint(&txID, uint32(index)), nil
}

// deserializeUTXOEntry decodes a UTXO entry from the passed serialized byte
// slice into a new UTXOEntry using a format that is suitable for long-term
// storage. The format is described in detail above.
func deserializeUTXOEntry(serialized []byte) (*UTXOEntry, error) {
    // Deserialize the header code.
    code, offset := deserializeVLQ(serialized)
    if offset >= len(serialized) {
        return nil, errDeserialize("unexpected end of data after header")
    }

    // Decode the header code.
    //
    // Bit 0 indicates whether the containing transaction is a block reward.
    // Bits 1-x encode height of containing transaction.
    isBlockReward := code&0x01 != 0
    blockHeight := int32(code >> 1)

    // Decode the compressed unspent transaction output.
    amount, pkScript, _, err := decodeCompressedTxOut(serialized[offset:])
    if err != nil {
        return nil, errDeserialize(fmt.Sprintf("unable to decode "+
            "UTXO: %s", err))
    }

    entry := &UTXOEntry{
        amount:      amount,
        pkScript:    pkScript,
        blockHeight: blockHeight,
        packedFlags: 0,
    }
    if isBlockReward {
        entry.packedFlags |= tfBlockReward
    }

    return entry, nil
}

// dbPutUTXODiff uses an existing database transaction to update the UTXO set
// in the database based on the provided UTXO diff. Entries in the diff's
// toRemove set are deleted from the database and entries in its toAdd set
// are serialized and stored.
func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
    utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
    for outPoint := range diff.toRemove {
        key := outpointKey(outPoint)
        err := utxoBucket.Delete(*key)
        recycleOutpointKey(key)
        if err != nil {
            return err
        }
    }

    for outPoint, entry := range diff.toAdd {
        // Serialize and store the UTXO entry.
        serialized, err := serializeUTXOEntry(entry)
        if err != nil {
            return err
        }

        key := outpointKey(outPoint)
        err = utxoBucket.Put(*key, serialized)
        // NOTE: The key is intentionally not recycled here since the
        // database interface contract prohibits modifications. It will
        // be garbage collected normally when the database is done with
        // it.
        if err != nil {
            return err
        }
    }

    return nil
}

// -----------------------------------------------------------------------------
// The block index consists of two buckets with an entry for every block in the
// main chain. One bucket is for the hash to height mapping and the other is
// for the height to hash mapping.
//
// The serialized format for values in the hash to height bucket is:
//   <height>
//
//   Field     Type     Size
//   height    uint32   4 bytes
//
// The serialized format for values in the height to hash bucket is:
//   <hash>
//
//   Field     Type           Size
//   hash      daghash.Hash   daghash.HashSize
// -----------------------------------------------------------------------------

// dbPutBlockIndex uses an existing database transaction to update or add the
// block index entries for the hash to height and height to hash mappings for
// the provided values.
func dbPutBlockIndex(dbTx database.Tx, hash *daghash.Hash, height int32) error {
    // Serialize the height for use in the index entries.
    var serializedHeight [4]byte
    byteOrder.PutUint32(serializedHeight[:], uint32(height))

    // Add the block hash to height mapping to the index.
    meta := dbTx.Metadata()
    hashIndex := meta.Bucket(hashIndexBucketName)
    if err := hashIndex.Put(hash[:], serializedHeight[:]); err != nil {
        return err
    }

    // Add the block height to hash mapping to the index.
    heightIndex := meta.Bucket(heightIndexBucketName)
    return heightIndex.Put(serializedHeight[:], hash[:])
}

// dbFetchHeightByHash uses an existing database transaction to retrieve the
// height for the provided hash from the index.
func dbFetchHeightByHash(dbTx database.Tx, hash *daghash.Hash) (int32, error) {
    meta := dbTx.Metadata()
    hashIndex := meta.Bucket(hashIndexBucketName)
    serializedHeight := hashIndex.Get(hash[:])
    if serializedHeight == nil {
        str := fmt.Sprintf("block %s is not in the main chain", hash)
        return 0, errNotInDAG(str)
    }

    return int32(byteOrder.Uint32(serializedHeight)), nil
}

type dagState struct {
    TipHashes         []*daghash.Hash
    LastFinalityPoint *daghash.Hash
}

// serializeDAGState returns the serialization of the DAG state.
// This is data to be stored in the DAG state bucket.
func serializeDAGState(state *dagState) ([]byte, error) {
    return json.Marshal(state)
}

// deserializeDAGState deserializes the passed serialized DAG state.
// This is data stored in the DAG state bucket and is updated after
// every block is connected to the DAG.
func deserializeDAGState(serializedData []byte) (*dagState, error) {
    var state *dagState
    err := json.Unmarshal(serializedData, &state)
    if err != nil {
        return nil, database.Error{
            ErrorCode:   database.ErrCorruption,
            Description: "corrupt DAG state",
        }
    }

    return state, nil
}
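
Since the DAG state is stored as plain JSON, a round trip is straightforward; the following hypothetical sketch (not part of the original file) serializes an empty state and reads it back, noting that a decode failure surfaces as a database corruption error:

func exampleDAGStateRoundTrip() error {
    state := &dagState{
        TipHashes:         []*daghash.Hash{},
        LastFinalityPoint: &daghash.Hash{},
    }

    // Serialize to JSON and deserialize again.
    serialized, err := serializeDAGState(state)
    if err != nil {
        return err
    }
    _, err = deserializeDAGState(serialized)
    return err
}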

// dbPutDAGState uses an existing database transaction to store the latest
// tip hashes of the DAG.
func dbPutDAGState(dbTx database.Tx, state *dagState) error {
    serializedData, err := serializeDAGState(state)
    if err != nil {
        return err
    }

    return dbTx.Metadata().Put(dagStateKeyName, serializedData)
}

// createDAGState initializes both the database and the DAG state to the
// genesis block. This includes creating the necessary buckets, so it
// must only be called on an uninitialized database.
func (dag *BlockDAG) createDAGState() error {
    // Create the initial database DAG state, including creating the
    // necessary index buckets and inserting the genesis block.
    err := dag.db.Update(func(dbTx database.Tx) error {
        meta := dbTx.Metadata()

        // Create the bucket that houses the block index data.
        _, err := meta.CreateBucket(blockIndexBucketName)
        if err != nil {
            return err
        }

        // Create the bucket that houses the chain block hash to height
        // index.
        _, err = meta.CreateBucket(hashIndexBucketName)
        if err != nil {
            return err
        }

        // Create the bucket that houses the chain block height to hash
        // index.
        _, err = meta.CreateBucket(heightIndexBucketName)
        if err != nil {
            return err
        }

        // Create the buckets that house the utxo set, the utxo diffs,
        // and their version.
        _, err = meta.CreateBucket(utxoSetBucketName)
        if err != nil {
            return err
        }

        _, err = meta.CreateBucket(utxoDiffsBucketName)
        if err != nil {
            return err
        }

        err = dbPutVersion(dbTx, utxoSetVersionKeyName,
            latestUTXOSetBucketVersion)
        if err != nil {
            return err
        }

        // Create the bucket that houses the registered subnetworks.
        _, err = meta.CreateBucket(subnetworksBucketName)
        if err != nil {
            return err
        }

        if err := dbPutLocalSubnetworkID(dbTx, dag.subnetworkID); err != nil {
            return err
        }
        return nil
    })
    if err != nil {
        return err
    }
    return nil
}

// dbPutLocalSubnetworkID stores the node's local subnetwork ID in the
// database, writing an empty value when no subnetwork ID is set.
func dbPutLocalSubnetworkID(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID) error {
    if subnetworkID == nil {
        return dbTx.Metadata().Put(localSubnetworkKeyName, []byte{})
    }
    return dbTx.Metadata().Put(localSubnetworkKeyName, subnetworkID[:])
}

// initDAGState attempts to load and initialize the DAG state from the
|
||||
// database. When the db does not yet contain any DAG state, both it and the
|
||||
// DAG state are initialized to the genesis block.
|
||||
func (dag *BlockDAG) initDAGState() error {
|
||||
// Determine the state of the DAG database. We may need to initialize
|
||||
// everything from scratch or upgrade certain buckets.
|
||||
var initialized bool
|
||||
err := dag.db.View(func(dbTx database.Tx) error {
|
||||
initialized = dbTx.Metadata().Get(dagStateKeyName) != nil
|
||||
if initialized {
|
||||
var localSubnetworkID *subnetworkid.SubnetworkID
|
||||
localSubnetworkIDBytes := dbTx.Metadata().Get(localSubnetworkKeyName)
|
||||
if len(localSubnetworkIDBytes) != 0 {
|
||||
localSubnetworkID = &subnetworkid.SubnetworkID{}
|
||||
localSubnetworkID.SetBytes(localSubnetworkIDBytes)
|
||||
}
|
||||
if !localSubnetworkID.IsEqual(dag.subnetworkID) {
|
||||
return fmt.Errorf("Cannot start btcd with subnetwork ID %s because"+
|
||||
" its database is already built with subnetwork ID %s. If you"+
|
||||
" want to switch to a new database, please reset the"+
|
||||
" database by starting btcd with --reset-db flag", dag.subnetworkID, localSubnetworkID)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !initialized {
|
||||
// At this point the database has not already been initialized, so
|
||||
// initialize both it and the chain state to the genesis block.
|
||||
return dag.createDAGState()
|
||||
}
|
||||
|
||||
// Attempt to load the DAG state from the database.
|
||||
return dag.db.View(func(dbTx database.Tx) error {
|
||||
// Fetch the stored DAG tipHashes from the database metadata.
|
||||
// When it doesn't exist, it means the database hasn't been
|
||||
// initialized for use with the DAG yet, so break out now to allow
|
||||
// that to happen under a writable database transaction.
|
||||
serializedData := dbTx.Metadata().Get(dagStateKeyName)
|
||||
log.Tracef("Serialized DAG tip hashes: %x", serializedData)
|
||||
state, err := deserializeDAGState(serializedData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Load all of the headers from the data for the known DAG
|
||||
// and construct the block index accordingly. Since the
|
||||
// number of nodes are already known, perform a single alloc
|
||||
// for them versus a whole bunch of little ones to reduce
|
||||
// pressure on the GC.
|
||||
log.Infof("Loading block index...")
|
||||
|
||||
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
|
||||
|
||||
// Determine how many blocks will be loaded into the index so we can
|
||||
// allocate the right amount.
|
||||
var blockCount int32
|
||||
cursor := blockIndexBucket.Cursor()
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
blockCount++
|
||||
}
|
||||
blockNodes := make([]blockNode, blockCount)
|
||||
|
||||
var i int32
|
||||
var lastNode *blockNode
|
||||
cursor = blockIndexBucket.Cursor()
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
header, status, err := deserializeBlockRow(cursor.Value())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parents := newSet()
|
||||
if lastNode == nil {
|
||||
blockHash := header.BlockHash()
|
||||
if !blockHash.IsEqual(dag.dagParams.GenesisHash) {
|
||||
return AssertError(fmt.Sprintf("initDAGState: Expected "+
|
||||
"first entry in block index to be genesis block, "+
|
||||
"found %s", blockHash))
|
||||
}
|
||||
} else {
|
||||
for _, hash := range header.ParentHashes {
|
||||
parent := dag.index.LookupNode(hash)
|
||||
if parent == nil {
|
||||
return AssertError(fmt.Sprintf("initDAGState: Could "+
|
||||
"not find parent %s for block %s", hash, header.BlockHash()))
|
||||
}
|
||||
parents.add(parent)
|
||||
}
|
||||
if len(parents) == 0 {
|
||||
return AssertError(fmt.Sprintf("initDAGState: Could "+
|
||||
"not find any parent for block %s", header.BlockHash()))
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize the block node for the block, connect it,
|
||||
// and add it to the block index.
|
||||
node := &blockNodes[i]
|
||||
initBlockNode(node, header, parents, dag.dagParams.K)
|
||||
node.status = status
|
||||
node.updateParentsChildren()
|
||||
dag.index.addNode(node)
|
||||
|
||||
if blockStatus(status).KnownValid() {
|
||||
dag.blockCount++
|
||||
}
|
||||
|
||||
lastNode = node
|
||||
i++
|
||||
}
|
||||
|
||||
// Load all of the known UTXO entries and construct the full
|
||||
// UTXO set accordingly. Since the number of entries is already
|
||||
// known, perform a single alloc for them versus a whole bunch
|
||||
// of little ones to reduce pressure on the GC.
|
||||
log.Infof("Loading UTXO set...")
|
||||
|
||||
utxoEntryBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
|
||||
|
||||
// Determine how many UTXO entries will be loaded into the index so we can
|
||||
// allocate the right amount.
|
||||
var utxoEntryCount int32
|
||||
cursor = utxoEntryBucket.Cursor()
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
utxoEntryCount++
|
||||
}
|
||||
|
||||
fullUTXOCollection := make(utxoCollection, utxoEntryCount)
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
// Deserialize the outPoint
|
||||
outPoint, err := deserializeOutPoint(cursor.Key())
|
||||
if err != nil {
|
||||
// Ensure any deserialization errors are returned as database
|
||||
// corruption errors.
|
||||
if isDeserializeErr(err) {
|
||||
return database.Error{
|
||||
ErrorCode: database.ErrCorruption,
|
||||
Description: fmt.Sprintf("corrupt outPoint: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Deserialize the utxo entry
|
||||
entry, err := deserializeUTXOEntry(cursor.Value())
|
||||
if err != nil {
|
||||
// Ensure any deserialization errors are returned as database
|
||||
// corruption errors.
|
||||
if isDeserializeErr(err) {
|
||||
return database.Error{
|
||||
ErrorCode: database.ErrCorruption,
|
||||
Description: fmt.Sprintf("corrupt utxo entry: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
fullUTXOCollection[*outPoint] = entry
|
||||
}
|
||||
|
||||
// Apply the loaded utxoCollection to the virtual block.
|
||||
dag.virtual.utxoSet.utxoCollection = fullUTXOCollection
|
||||
|
||||
// Apply the stored tips to the virtual block.
|
||||
tips := newSet()
|
||||
for _, tipHash := range state.TipHashes {
|
||||
tip := dag.index.LookupNode(tipHash)
|
||||
if tip == nil {
|
||||
return AssertError(fmt.Sprintf("initDAGState: cannot find "+
|
||||
"DAG tip %s in block index", state.TipHashes))
|
||||
}
|
||||
tips.add(tip)
|
||||
}
|
||||
dag.virtual.SetTips(tips)
|
||||
|
||||
// Set the last finality point
|
||||
dag.lastFinalityPoint = dag.index.LookupNode(state.LastFinalityPoint)
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// deserializeBlockRow parses a value in the block index bucket into a block
|
||||
// header and block status bitfield.
|
||||
func deserializeBlockRow(blockRow []byte) (*wire.BlockHeader, blockStatus, error) {
|
||||
buffer := bytes.NewReader(blockRow)
|
||||
|
||||
var header wire.BlockHeader
|
||||
err := header.Deserialize(buffer)
|
||||
if err != nil {
|
||||
return nil, statusNone, err
|
||||
}
|
||||
|
||||
statusByte, err := buffer.ReadByte()
|
||||
if err != nil {
|
||||
return nil, statusNone, err
|
||||
}
|
||||
|
||||
return &header, blockStatus(statusByte), nil
|
||||
}
|
||||
|
||||
// dbFetchBlockByNode uses an existing database transaction to retrieve the
|
||||
// raw block for the provided node, deserialize it, and return a util.Block
|
||||
// with the height set.
|
||||
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error) {
|
||||
// Load the raw block bytes from the database.
|
||||
blockBytes, err := dbTx.FetchBlock(node.hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create the encapsulated block and set the height appropriately.
|
||||
block, err := util.NewBlockFromBytes(blockBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
block.SetHeight(node.height)
|
||||
|
||||
return block, nil
|
||||
}
|
||||
|
||||
// dbStoreBlockNode stores the block header and validation status to the block
|
||||
// index bucket. This overwrites the current entry if there exists one.
|
||||
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
|
||||
// Serialize block data to be stored.
|
||||
w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
|
||||
header := node.Header()
|
||||
err := header.Serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = w.WriteByte(byte(node.status))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
value := w.Bytes()
|
||||
|
||||
// Write block header data to block index bucket.
|
||||
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
|
||||
key := blockIndexKey(node.hash, uint32(node.height))
|
||||
return blockIndexBucket.Put(key, value)
|
||||
}
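
// exampleBlockRowRoundTrip is a minimal sketch (not part of the original
// file) of the block index value format used by dbStoreBlockNode and
// deserializeBlockRow: a serialized header followed by a single status byte.
func exampleBlockRowRoundTrip(header *wire.BlockHeader, status blockStatus) error {
	// Serialize exactly as dbStoreBlockNode does.
	w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
	if err := header.Serialize(w); err != nil {
		return err
	}
	if err := w.WriteByte(byte(status)); err != nil {
		return err
	}

	// deserializeBlockRow recovers both parts of the row.
	_, gotStatus, err := deserializeBlockRow(w.Bytes())
	if err != nil {
		return err
	}
	if gotStatus != status {
		return fmt.Errorf("status mismatch: got %d, want %d", gotStatus, status)
	}
	return nil
}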

// dbStoreBlock stores the provided block in the database if it is not already
// there. The full block data is written to ffldb.
func dbStoreBlock(dbTx database.Tx, block *util.Block) error {
	hasBlock, err := dbTx.HasBlock(block.Hash())
	if err != nil {
		return err
	}
	if hasBlock {
		return nil
	}
	return dbTx.StoreBlock(block)
}

// blockIndexKey generates the binary key for an entry in the block index
// bucket. The key is composed of the block height encoded as a big-endian
// 32-bit unsigned int followed by the 32 byte block hash.
func blockIndexKey(blockHash *daghash.Hash, blockHeight uint32) []byte {
	indexKey := make([]byte, daghash.HashSize+4)
	binary.BigEndian.PutUint32(indexKey[0:4], blockHeight)
	copy(indexKey[4:daghash.HashSize+4], blockHash[:])
	return indexKey
}
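
// For example (hypothetical values), a block at height 257 produces a key
// whose first four bytes are the big-endian height, so keys in the bucket
// iterate in height order:
//
//	key := blockIndexKey(hash, 257)
//	// key[0:4] == []byte{0x00, 0x00, 0x01, 0x01}
//	// key[4:]  == hash[:]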

// BlockByHash returns the block from the DAG with the given hash with
// the appropriate height set.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
	// Look up the block hash in the block index and ensure it is in
	// the DAG.
	node := dag.index.LookupNode(hash)
	if node == nil {
		str := fmt.Sprintf("block %s is not in the DAG", hash)
		return nil, errNotInDAG(str)
	}

	// Load the block from the database and return it.
	var block *util.Block
	err := dag.db.View(func(dbTx database.Tx) error {
		var err error
		block, err = dbFetchBlockByNode(dbTx, node)
		return err
	})
	return block, err
}

@@ -1,169 +0,0 @@

// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"math/big"
	"time"

	"github.com/daglabs/btcd/util"
)

// calcEasiestDifficulty calculates the easiest possible difficulty that a block
// can have given starting difficulty bits and a duration. It is mainly used to
// verify that the proof of work claimed by a block is sane as compared to a
// known good checkpoint.
func (dag *BlockDAG) calcEasiestDifficulty(bits uint32, duration time.Duration) uint32 {
	// Convert types used in the calculations below.
	durationVal := int64(duration / time.Second)
	adjustmentFactor := big.NewInt(dag.dagParams.RetargetAdjustmentFactor)

	// The test network rules allow minimum difficulty blocks after more
	// than twice the desired amount of time needed to generate a block has
	// elapsed.
	if dag.dagParams.ReduceMinDifficulty {
		reductionTime := int64(dag.dagParams.MinDiffReductionTime /
			time.Second)
		if durationVal > reductionTime {
			return dag.dagParams.PowLimitBits
		}
	}

	// Since easier difficulty equates to higher numbers, the easiest
	// difficulty for a given duration is the largest value possible given
	// the number of retargets for the duration and starting difficulty
	// multiplied by the max adjustment factor.
	newTarget := util.CompactToBig(bits)
	for durationVal > 0 && newTarget.Cmp(dag.dagParams.PowLimit) < 0 {
		newTarget.Mul(newTarget, adjustmentFactor)
		durationVal -= dag.maxRetargetTimespan
	}

	// Limit new value to the proof of work limit.
	if newTarget.Cmp(dag.dagParams.PowLimit) > 0 {
		newTarget.Set(dag.dagParams.PowLimit)
	}

	return util.BigToCompact(newTarget)
}
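
// A usage sketch (hypothetical values): a header claiming less work than the
// easiest difficulty reachable since a checkpoint should be rejected. Recall
// that a numerically larger target is easier, so a claimed target above the
// easiest possible target cannot be valid:
//
//	easiestBits := dag.calcEasiestDifficulty(checkpointBits, 2*time.Hour)
//	if util.CompactToBig(claimedBits).Cmp(util.CompactToBig(easiestBits)) > 0 {
//		// The claimed difficulty is easier than possible: reject the header.
//	}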

// findPrevTestNetDifficulty returns the difficulty of the previous block which
// did not have the special testnet minimum difficulty rule applied.
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) findPrevTestNetDifficulty(startNode *blockNode) uint32 {
	// Search backwards through the chain for the last block without
	// the special rule applied.
	iterNode := startNode
	for iterNode != nil && iterNode.height%dag.blocksPerRetarget != 0 &&
		iterNode.bits == dag.dagParams.PowLimitBits {

		iterNode = iterNode.selectedParent
	}

	// Return the found difficulty or the minimum difficulty if no
	// appropriate block was found.
	lastBits := dag.dagParams.PowLimitBits
	if iterNode != nil {
		lastBits = iterNode.bits
	}
	return lastBits
}

// calcNextRequiredDifficulty calculates the required difficulty for the block
// after the passed previous block node based on the difficulty retarget rules.
// This function differs from the exported CalcNextRequiredDifficulty in that
// the exported version uses the current selected tip as the previous block
// node while this function accepts any block node.
func (dag *BlockDAG) calcNextRequiredDifficulty(bluestParent *blockNode, newBlockTime time.Time) (uint32, error) {
	// Genesis block.
	if bluestParent == nil {
		return dag.dagParams.PowLimitBits, nil
	}

	// Return the previous block's difficulty requirements if this block
	// is not at a difficulty retarget interval.
	if (bluestParent.height+1)%dag.blocksPerRetarget != 0 {
		// For networks that support it, allow special reduction of the
		// required difficulty once too much time has elapsed without
		// mining a block.
		if dag.dagParams.ReduceMinDifficulty {
			// Return minimum difficulty when more than the desired
			// amount of time has elapsed without mining a block.
			reductionTime := int64(dag.dagParams.MinDiffReductionTime /
				time.Second)
			allowMinTime := bluestParent.timestamp + reductionTime
			if newBlockTime.Unix() > allowMinTime {
				return dag.dagParams.PowLimitBits, nil
			}

			// The block was mined within the desired timeframe, so
			// return the difficulty for the last block which did
			// not have the special minimum difficulty rule applied.
			return dag.findPrevTestNetDifficulty(bluestParent), nil
		}

		// For the main network (or any unrecognized networks), simply
		// return the previous block's difficulty requirements.
		return bluestParent.bits, nil
	}

	// Get the block node at the previous retarget (targetTimespan days
	// worth of blocks).
	firstNode := bluestParent.RelativeAncestor(dag.blocksPerRetarget - 1)
	if firstNode == nil {
		return 0, AssertError("unable to obtain previous retarget block")
	}

	// Limit the amount of adjustment that can occur to the previous
	// difficulty.
	actualTimespan := bluestParent.timestamp - firstNode.timestamp
	adjustedTimespan := actualTimespan
	if actualTimespan < dag.minRetargetTimespan {
		adjustedTimespan = dag.minRetargetTimespan
	} else if actualTimespan > dag.maxRetargetTimespan {
		adjustedTimespan = dag.maxRetargetTimespan
	}

	// Calculate the new target difficulty as:
	//   currentDifficulty * (adjustedTimespan / targetTimespan)
	// The result uses integer division, which means it will be slightly
	// rounded down. Bitcoind also uses integer division to calculate this
	// result.
	oldTarget := util.CompactToBig(bluestParent.bits)
	newTarget := new(big.Int).Mul(oldTarget, big.NewInt(adjustedTimespan))
	targetTimeSpan := int64(dag.dagParams.TargetTimespan / time.Second)
	newTarget.Div(newTarget, big.NewInt(targetTimeSpan))

	// Limit the new value to the proof of work limit.
	if newTarget.Cmp(dag.dagParams.PowLimit) > 0 {
		newTarget.Set(dag.dagParams.PowLimit)
	}

	// Log the new target difficulty and return it. The new target logging is
	// intentionally converting the bits back to a number instead of using
	// newTarget since conversion to the compact representation loses
	// precision.
	newTargetBits := util.BigToCompact(newTarget)
	log.Debugf("Difficulty retarget at block height %d", bluestParent.height+1)
	log.Debugf("Old target %08x (%064x)", bluestParent.bits, oldTarget)
	log.Debugf("New target %08x (%064x)", newTargetBits, util.CompactToBig(newTargetBits))
	log.Debugf("Actual timespan %s, adjusted timespan %s, target timespan %s",
		time.Duration(actualTimespan)*time.Second,
		time.Duration(adjustedTimespan)*time.Second,
		dag.dagParams.TargetTimespan)

	return newTargetBits, nil
}
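
// A worked example of the retarget arithmetic above (hypothetical numbers):
// with a target timespan of 1209600 seconds (14 days) and an adjusted
// timespan of 604800 seconds (7 days, i.e. blocks arrived twice as fast as
// desired), the new target is halved, which doubles the difficulty:
//
//	newTarget = oldTarget * 604800 / 1209600 = oldTarget / 2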

// CalcNextRequiredDifficulty calculates the required difficulty for the block
// after the current selected tip based on the difficulty retarget rules.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, error) {
	difficulty, err := dag.calcNextRequiredDifficulty(dag.selectedTip(), timestamp)
	return difficulty, err
}

@@ -1,77 +0,0 @@

// Copyright (c) 2014-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"math/big"
	"testing"

	"github.com/daglabs/btcd/util"
)

// TestBigToCompact ensures BigToCompact converts big integers to the expected
// compact representation.
func TestBigToCompact(t *testing.T) {
	tests := []struct {
		in  int64
		out uint32
	}{
		{0, 0},
		{-1, 25231360},
	}

	for x, test := range tests {
		n := big.NewInt(test.in)
		r := util.BigToCompact(n)
		if r != test.out {
			t.Errorf("TestBigToCompact test #%d failed: got %d want %d\n",
				x, r, test.out)
			return
		}
	}
}

// TestCompactToBig ensures CompactToBig converts numbers using the compact
// representation to the expected big integers.
func TestCompactToBig(t *testing.T) {
	tests := []struct {
		in  uint32
		out int64
	}{
		{10000000, 0},
	}

	for x, test := range tests {
		n := util.CompactToBig(test.in)
		want := big.NewInt(test.out)
		if n.Cmp(want) != 0 {
			t.Errorf("TestCompactToBig test #%d failed: got %d want %d\n",
				x, n.Int64(), want.Int64())
			return
		}
	}
}

// TestCalcWork ensures CalcWork calculates the expected work value from values
// in compact representation.
func TestCalcWork(t *testing.T) {
	tests := []struct {
		in  uint32
		out int64
	}{
		{10000000, 0},
	}

	for x, test := range tests {
		bits := uint32(test.in)

		r := util.CalcWork(bits)
		if r.Int64() != test.out {
			t.Errorf("TestCalcWork test #%d failed: got %v want %d\n",
				x, r.Int64(), test.out)
			return
		}
	}
}

@@ -1,81 +0,0 @@

// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package blockdag implements bitcoin block handling and chain selection rules.

The bitcoin block handling and chain selection rules are an integral, and quite
likely the most important, part of bitcoin. Unfortunately, at the time of
this writing, these rules are also largely undocumented and had to be
ascertained from the bitcoind source code. At its core, bitcoin is a
distributed consensus of which blocks are valid and which ones will comprise the
main block chain (public ledger) that ultimately determines accepted
transactions, so it is extremely important that fully validating nodes agree on
all rules.

At a high level, this package provides support for inserting new blocks into
the block chain according to the aforementioned rules. It includes
functionality such as rejecting duplicate blocks, ensuring blocks and
transactions follow all rules, orphan handling, and best chain selection along
with reorganization.

Since this package does not deal with other bitcoin specifics such as network
communication or wallets, it provides a notification system which gives the
caller a high level of flexibility in how they want to react to certain events
such as orphan blocks which need their parents requested and newly connected
main chain blocks which might result in wallet updates.

Bitcoin Chain Processing Overview

Before a block is allowed into the block chain, it must go through an intensive
series of validation rules. The following list serves as a general outline of
those rules to provide some intuition into what is going on under the hood, but
is by no means exhaustive:

 - Reject duplicate blocks
 - Perform a series of sanity checks on the block and its transactions such as
   verifying proof of work, timestamps, number and character of transactions,
   transaction amounts, script complexity, and merkle root calculations
 - Compare the block against predetermined checkpoints for expected timestamps
   and difficulty based on elapsed time since the checkpoint
 - Save the most recent orphan blocks for a limited time in case their parent
   blocks become available
 - Stop processing if the block is an orphan as the rest of the processing
   depends on the block's position within the block chain
 - Perform a series of more thorough checks that depend on the block's position
   within the block chain such as verifying block difficulties adhere to
   difficulty retarget rules, timestamps are after the median of the last
   several blocks, all transactions are finalized, checkpoint blocks match, and
   block versions are in line with the previous blocks
 - Determine how the block fits into the chain and perform different actions
   accordingly in order to ensure any side chains which have higher difficulty
   than the main chain become the new main chain
 - When a block is being connected to the main chain (either through
   reorganization of a side chain to the main chain or just extending the
   main chain), perform further checks on the block's transactions such as
   verifying transaction duplicates, script complexity for the combination of
   connected scripts, coinbase maturity, double spends, and connected
   transaction values
 - Run the transaction scripts to verify the spender is allowed to spend the
   coins
 - Insert the block into the block database

Errors

Errors returned by this package are either the raw errors provided by underlying
calls or of type blockdag.RuleError. This allows the caller to differentiate
between unexpected errors, such as database errors, and errors due to rule
violations through type assertions. In addition, callers can programmatically
determine the specific rule violation by examining the ErrorCode field of the
type asserted blockdag.RuleError.
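
A minimal sketch of that pattern (the err variable is hypothetical):

	if rErr, ok := err.(blockdag.RuleError); ok {
		// React to the specific rule violation.
		fmt.Println("rule violation:", rErr.ErrorCode)
	}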

Bitcoin Improvement Proposals

This package includes spec changes outlined by the following BIPs:

	BIP0016 (https://en.bitcoin.it/wiki/BIP_0016)
	BIP0030 (https://en.bitcoin.it/wiki/BIP_0030)
	BIP0034 (https://en.bitcoin.it/wiki/BIP_0034)
*/
package blockdag

@@ -1,71 +0,0 @@

// Copyright (c) 2014-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag_test

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/database"
	_ "github.com/daglabs/btcd/database/ffldb"
	"github.com/daglabs/btcd/util"
)

// This example demonstrates how to create a new DAG instance and use
// ProcessBlock to attempt to add a block to the DAG. As the package
// overview documentation describes, this includes all of the Bitcoin consensus
// rules. This example intentionally attempts to insert a duplicate genesis
// block to illustrate how an invalid block is handled.
func ExampleBlockDAG_ProcessBlock() {
	// Create a new database to store the accepted blocks into. Typically
	// this would be opening an existing database and would not be deleting
	// and creating a new database like this, but it is done here so this is
	// a complete working example and does not leave temporary files laying
	// around.
	dbPath := filepath.Join(os.TempDir(), "exampleprocessblock")
	_ = os.RemoveAll(dbPath)
	db, err := database.Create("ffldb", dbPath, dagconfig.MainNetParams.Net)
	if err != nil {
		fmt.Printf("Failed to create database: %v\n", err)
		return
	}
	defer os.RemoveAll(dbPath)
	defer db.Close()

	// Create a new BlockDAG instance using the underlying database for
	// the main bitcoin network. This example does not demonstrate some
	// of the other available configuration options such as specifying a
	// notification callback and signature cache. Also, the caller would
	// ordinarily keep a reference to the median time source and add time
	// values obtained from other peers on the network so the local time is
	// adjusted to be in agreement with other peers.
	dag, err := blockdag.New(&blockdag.Config{
		DB:         db,
		DAGParams:  &dagconfig.MainNetParams,
		TimeSource: blockdag.NewMedianTime(),
	})
	if err != nil {
		fmt.Printf("Failed to create DAG instance: %v\n", err)
		return
	}

	// Process a block. For this example, we are going to intentionally
	// cause an error by trying to process the genesis block which already
	// exists.
	genesisBlock := util.NewBlock(dagconfig.MainNetParams.GenesisBlock)
	isOrphan, err := dag.ProcessBlock(genesisBlock,
		blockdag.BFNone)
	if err != nil {
		fmt.Printf("Failed to process block: %v\n", err)
		return
	}
	fmt.Printf("Block accepted. Is it an orphan?: %v", isOrphan)

	// Output:
	// Failed to process block: already have block 6477863f190fac902e556da4671c7537da4fe367022b1f00fa5270e0d073cc08
}

@@ -1,403 +0,0 @@

package blockdag_test

import (
	"fmt"
	"math"
	"testing"

	"github.com/daglabs/btcd/util/subnetworkid"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/util/testtools"

	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/mining"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/wire"
)

// TestFinality checks that the finality mechanism works as expected.
// This is how the flow goes:
// 1) We build a chain of blockdag.FinalityInterval blocks and call its tip altChainTip.
// 2) We build another chain (let's call it mainChain) of 2 * blockdag.FinalityInterval
//    blocks, which points to genesis, and then we check that the block in that
//    chain with height blockdag.FinalityInterval is marked as the finality point (this is
//    very predictable, because the blue score of each new block in a chain is its
//    parent's plus one).
// 3) We make a new child of the block at height (2 * blockdag.FinalityInterval - 1)
//    in mainChain, and we check that connecting it to the DAG
//    doesn't affect the last finality point.
// 4) We make a block that points to genesis, and check that it
//    gets rejected because its blue score is lower than the last finality
//    point.
// 5) We make a block that points to altChainTip, and check that it
//    gets rejected because it doesn't have the last finality point in
//    its selected parent chain.
func TestFinality(t *testing.T) {
	params := dagconfig.SimNetParams
	params.K = 1
	dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
		DAGParams: &params,
	})
	if err != nil {
		t.Fatalf("Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc()
	buildNodeToDag := func(parentHashes []*daghash.Hash) (*util.Block, error) {
		msgBlock, err := mining.PrepareBlockForTest(dag, &params, parentHashes, nil, false, 1)
		if err != nil {
			return nil, err
		}
		block := util.NewBlock(msgBlock)

		isOrphan, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
		if err != nil {
			return nil, err
		}
		if isOrphan {
			return nil, fmt.Errorf("ProcessBlock: unexpected returned orphan block")
		}

		return block, nil
	}

	genesis := util.NewBlock(params.GenesisBlock)
	currentNode := genesis

	// First we build a chain of blockdag.FinalityInterval blocks for future use
	for i := 0; i < blockdag.FinalityInterval; i++ {
		currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
		if err != nil {
			t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
		}
	}

	altChainTip := currentNode

	// Now we build a new chain of 2 * blockdag.FinalityInterval blocks, pointed to genesis, and
	// we expect the block with height 1 * blockdag.FinalityInterval to be the last finality point
	currentNode = genesis
	for i := 0; i < blockdag.FinalityInterval; i++ {
		currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
		if err != nil {
			t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
		}
	}

	expectedFinalityPoint := currentNode

	for i := 0; i < blockdag.FinalityInterval; i++ {
		currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
		if err != nil {
			t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
		}
	}

	if !dag.LastFinalityPointHash().IsEqual(expectedFinalityPoint.Hash()) {
		t.Errorf("TestFinality: dag.lastFinalityPoint expected to be %v but got %v", expectedFinalityPoint, dag.LastFinalityPointHash())
	}

	// Here we check that even if we create a parallel tip (a new tip with
	// the same parents as the current one) with the same blue score as the
	// current tip, it still won't affect the last finality point.
	_, err = buildNodeToDag(currentNode.MsgBlock().Header.ParentHashes)
	if err != nil {
		t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
	}
	if !dag.LastFinalityPointHash().IsEqual(expectedFinalityPoint.Hash()) {
		t.Errorf("TestFinality: dag.lastFinalityPoint was unexpectedly changed")
	}

	// Here we check that a block with a lower blue score than the last finality
	// point will get rejected
	_, err = buildNodeToDag([]*daghash.Hash{genesis.Hash()})
	if err == nil {
		t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
	}
	rErr, ok := err.(blockdag.RuleError)
	if ok {
		if rErr.ErrorCode != blockdag.ErrFinality {
			t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
		}
	} else {
		t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", err)
	}

	// Here we check that a block that doesn't have the last finality point in
	// its selected parent chain will get rejected
	_, err = buildNodeToDag([]*daghash.Hash{altChainTip.Hash()})
	if err == nil {
		t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
	}
	rErr, ok = err.(blockdag.RuleError)
	if ok {
		if rErr.ErrorCode != blockdag.ErrFinality {
			t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
		}
	} else {
		t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", err)
	}
}

// TestSubnetworkRegistry tests the full subnetwork registry flow
func TestSubnetworkRegistry(t *testing.T) {
	params := dagconfig.SimNetParams
	params.K = 1
	params.BlockRewardMaturity = 1
	dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
		DAGParams: &params,
	})
	if err != nil {
		t.Fatalf("Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc()

	gasLimit := uint64(12345)
	subnetworkID, err := testtools.RegisterSubnetworkForTest(dag, &params, gasLimit)
	if err != nil {
		t.Fatalf("could not register network: %s", err)
	}
	limit, err := dag.SubnetworkStore.GasLimit(subnetworkID)
	if err != nil {
		t.Fatalf("could not retrieve gas limit: %s", err)
	}
	if limit != gasLimit {
		t.Fatalf("unexpected gas limit. want: %d, got: %d", gasLimit, limit)
	}
}

func TestChainedTransactions(t *testing.T) {
	params := dagconfig.SimNetParams
	params.BlockRewardMaturity = 1
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", blockdag.Config{
		DAGParams: &params,
	})
	if err != nil {
		t.Fatalf("Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc()

	block1, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{params.GenesisHash}, nil, false, 1)
	if err != nil {
		t.Fatalf("PrepareBlockForTest: %v", err)
	}
	isOrphan, err := dag.ProcessBlock(util.NewBlock(block1), blockdag.BFNoPoWCheck)
	if err != nil {
		t.Fatalf("ProcessBlock: %v", err)
	}
	if isOrphan {
		t.Fatalf("ProcessBlock: block1 got unexpectedly orphaned")
	}
	cbTx := block1.Transactions[0]

	txIn := &wire.TxIn{
		PreviousOutPoint: wire.OutPoint{TxID: cbTx.TxID(), Index: 0},
		SignatureScript:  nil,
		Sequence:         wire.MaxTxInSequenceNum,
	}
	txOut := &wire.TxOut{
		PkScript: blockdag.OpTrueScript,
		Value:    uint64(1),
	}
	tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})

	chainedTxIn := &wire.TxIn{
		PreviousOutPoint: wire.OutPoint{TxID: tx.TxID(), Index: 0},
		SignatureScript:  nil,
		Sequence:         wire.MaxTxInSequenceNum,
	}
	chainedTxOut := &wire.TxOut{
		PkScript: blockdag.OpTrueScript,
		Value:    uint64(1),
	}
	chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})

	block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true, 1)
	if err != nil {
		t.Fatalf("PrepareBlockForTest: %v", err)
	}

	// Check that dag.ProcessBlock fails because we don't allow a transaction
	// to spend another transaction from the same block.
	isOrphan, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
	if err == nil {
		t.Errorf("ProcessBlock expected an error")
	} else if rErr, ok := err.(blockdag.RuleError); ok {
		if rErr.ErrorCode != blockdag.ErrMissingTxOut {
			t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, rErr.ErrorCode)
		}
	} else {
		t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
	}
	if isOrphan {
		t.Errorf("ProcessBlock: block2 got unexpectedly orphaned")
	}

	nonChainedTxIn := &wire.TxIn{
		PreviousOutPoint: wire.OutPoint{TxID: cbTx.TxID(), Index: 0},
		SignatureScript:  nil,
		Sequence:         wire.MaxTxInSequenceNum,
	}
	nonChainedTxOut := &wire.TxOut{
		PkScript: blockdag.OpTrueScript,
		Value:    uint64(1),
	}
	nonChainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{nonChainedTxIn}, []*wire.TxOut{nonChainedTxOut})

	block3, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false, 1)
	if err != nil {
		t.Fatalf("PrepareBlockForTest: %v", err)
	}

	// Check that dag.ProcessBlock doesn't fail because all of the block's
	// transactions depend on transactions from previous blocks.
	isOrphan, err = dag.ProcessBlock(util.NewBlock(block3), blockdag.BFNoPoWCheck)
	if err != nil {
		t.Errorf("ProcessBlock: %v", err)
	}
	if isOrphan {
		t.Errorf("ProcessBlock: block3 got unexpectedly orphaned")
	}
}

// TestGasLimit tests the gas limit rules
func TestGasLimit(t *testing.T) {
	params := dagconfig.SimNetParams
	params.K = 1
	params.BlockRewardMaturity = 1
	dag, teardownFunc, err := blockdag.DAGSetup("TestGasLimit", blockdag.Config{
		DAGParams: &params,
	})
	if err != nil {
		t.Fatalf("Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc()

	// First we prepare a subnetwork and a block with coinbase outputs to fund our tests
	gasLimit := uint64(12345)
	subnetworkID, err := testtools.RegisterSubnetworkForTest(dag, &params, gasLimit)
	if err != nil {
		t.Fatalf("could not register network: %s", err)
	}

	fundsBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), nil, false, 2)
	if err != nil {
		t.Fatalf("PrepareBlockForTest: %v", err)
	}
	isOrphan, err := dag.ProcessBlock(util.NewBlock(fundsBlock), blockdag.BFNoPoWCheck)
	if err != nil {
		t.Fatalf("ProcessBlock: %v", err)
	}
	if isOrphan {
		t.Fatalf("ProcessBlock: funds block got unexpectedly orphaned")
	}

	cbTxValue := fundsBlock.Transactions[0].TxOut[0].Value
	cbTxID := fundsBlock.Transactions[0].TxID()

	tx1In := &wire.TxIn{
		PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 0),
		Sequence:         wire.MaxTxInSequenceNum,
	}
	tx1Out := &wire.TxOut{
		Value:    cbTxValue,
		PkScript: blockdag.OpTrueScript,
	}
	tx1 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx1In}, []*wire.TxOut{tx1Out}, subnetworkID, 10000, []byte{})

	tx2In := &wire.TxIn{
		PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 1),
		Sequence:         wire.MaxTxInSequenceNum,
	}
	tx2Out := &wire.TxOut{
		Value:    cbTxValue,
		PkScript: blockdag.OpTrueScript,
	}
	tx2 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx2In}, []*wire.TxOut{tx2Out}, subnetworkID, 10000, []byte{})

	// Here we check that we can't process a block that has transactions that exceed the gas limit
	overLimitBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true, 1)
	if err != nil {
		t.Fatalf("PrepareBlockForTest: %v", err)
	}
	isOrphan, err = dag.ProcessBlock(util.NewBlock(overLimitBlock), blockdag.BFNoPoWCheck)
	if err == nil {
		t.Fatalf("ProcessBlock expected to have an error")
	}
	rErr, ok := err.(blockdag.RuleError)
	if !ok {
		t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
	} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
		t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
	}
	if isOrphan {
		t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphaned")
	}

	overflowGasTxIn := &wire.TxIn{
		PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 1),
		Sequence:         wire.MaxTxInSequenceNum,
	}
	overflowGasTxOut := &wire.TxOut{
		Value:    cbTxValue,
		PkScript: blockdag.OpTrueScript,
	}
	overflowGasTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{overflowGasTxIn}, []*wire.TxOut{overflowGasTxOut},
		subnetworkID, math.MaxUint64, []byte{})

	// Here we check that we can't process a block whose transactions' total gas overflows uint64
	overflowGasBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true, 1)
	if err != nil {
		t.Fatalf("PrepareBlockForTest: %v", err)
	}
	isOrphan, err = dag.ProcessBlock(util.NewBlock(overflowGasBlock), blockdag.BFNoPoWCheck)
	if err == nil {
		t.Fatalf("ProcessBlock expected to have an error")
	}
	rErr, ok = err.(blockdag.RuleError)
	if !ok {
		t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
	} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
		t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
	}
	if isOrphan {
		t.Fatalf("ProcessBlock: overflowGasBlock got unexpectedly orphaned")
	}

	nonExistentSubnetwork := &subnetworkid.SubnetworkID{123}
	nonExistentSubnetworkTxIn := &wire.TxIn{
		PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 0),
		Sequence:         wire.MaxTxInSequenceNum,
	}
	nonExistentSubnetworkTxOut := &wire.TxOut{
		Value:    cbTxValue,
		PkScript: blockdag.OpTrueScript,
	}
	nonExistentSubnetworkTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{nonExistentSubnetworkTxIn},
		[]*wire.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{})

	nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true, 1)
	if err != nil {
		t.Fatalf("PrepareBlockForTest: %v", err)
	}

	// Here we check that we can't process a block with a transaction from a non-existent subnetwork
	isOrphan, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
	expectedErrStr := fmt.Sprintf("subnetwork '%s' not found", nonExistentSubnetwork)
	if err == nil || err.Error() != expectedErrStr {
		t.Fatalf("ProcessBlock expected error %q but got %v", expectedErrStr, err)
	}

	// Here we check that we can process a block with a transaction that doesn't exceed the gas limit
	validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true, 1)
	if err != nil {
		t.Fatalf("PrepareBlockForTest: %v", err)
	}
	isOrphan, err = dag.ProcessBlock(util.NewBlock(validBlock), blockdag.BFNoPoWCheck)
	if err != nil {
		t.Fatalf("ProcessBlock: %v", err)
	}
	if isOrphan {
		t.Fatalf("ProcessBlock: validBlock got unexpectedly orphaned")
	}
}

blockdag/fees.go
@@ -1,219 +0,0 @@

package blockdag

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"math"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/txsort"
	"github.com/daglabs/btcd/wire"
)

// compactFeeData is a specialized data type to store a compact list of fees
// inside a block.
// Every transaction gets a single uint64 value, stored as a plain binary list.
// The transactions are ordered the same way they are ordered inside the block,
// making it easy to traverse every transaction in a block and extract its fee.
//
// compactFeeFactory is used to create such a list.
// compactFeeIterator is used to iterate over such a list.

type compactFeeData []byte

func (cfd compactFeeData) Len() int {
	return len(cfd) / 8
}

type compactFeeFactory struct {
	buffer *bytes.Buffer
	writer *bufio.Writer
}

func newCompactFeeFactory() *compactFeeFactory {
	buffer := bytes.NewBuffer([]byte{})
	return &compactFeeFactory{
		buffer: buffer,
		writer: bufio.NewWriter(buffer),
	}
}

func (cfw *compactFeeFactory) add(txFee uint64) error {
	return binary.Write(cfw.writer, binary.LittleEndian, txFee)
}

func (cfw *compactFeeFactory) data() (compactFeeData, error) {
	err := cfw.writer.Flush()
	return compactFeeData(cfw.buffer.Bytes()), err
}

type compactFeeIterator struct {
	reader io.Reader
}

func (cfd compactFeeData) iterator() *compactFeeIterator {
	return &compactFeeIterator{
		reader: bufio.NewReader(bytes.NewBuffer(cfd)),
	}
}

func (cfr *compactFeeIterator) next() (uint64, error) {
	var txFee uint64
	err := binary.Read(cfr.reader, binary.LittleEndian, &txFee)
	return txFee, err
}
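
// exampleCompactFeeRoundTrip is a minimal sketch (not part of the original
// file) of the compactFeeData layout described above: one little-endian
// uint64 per transaction, in block order. The fee values are hypothetical.
func exampleCompactFeeRoundTrip() error {
	factory := newCompactFeeFactory()
	for _, fee := range []uint64{1000, 0, 2500} {
		if err := factory.add(fee); err != nil {
			return err
		}
	}
	feeData, err := factory.data()
	if err != nil {
		return err
	}

	// Three fees at 8 bytes each: Len reports 3.
	if feeData.Len() != 3 {
		return fmt.Errorf("unexpected length %d", feeData.Len())
	}

	// The iterator yields the fees back in the same order.
	iterator := feeData.iterator()
	for i := 0; i < feeData.Len(); i++ {
		if _, err := iterator.next(); err != nil {
			return err
		}
	}
	return nil
}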

// The following functions relate to storing and retrieving fee data from the database
var feeBucket = []byte("fees")

// getBluesFeeData returns the compactFeeData for all of the node's blues,
// used to calculate the fees this blockNode needs to pay
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
	bluesFeeData := make(map[daghash.Hash]compactFeeData)

	err := dag.db.View(func(dbTx database.Tx) error {
		for _, blueBlock := range node.blues {
			feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
			if err != nil {
				return fmt.Errorf("error getting fee data for block %s: %s", blueBlock.hash, err)
			}

			bluesFeeData[*blueBlock.hash] = feeData
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	return bluesFeeData, nil
}

func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
	feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
	if err != nil {
		return fmt.Errorf("error creating or retrieving fee bucket: %s", err)
	}

	return feeBucket.Put(blockHash.CloneBytes(), feeData)
}

func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
	feeBucket := dbTx.Metadata().Bucket(feeBucket)
	if feeBucket == nil {
		return nil, errors.New("fee bucket does not exist")
	}

	feeData := feeBucket.Get(blockHash.CloneBytes())
	if feeData == nil {
		return nil, fmt.Errorf("no fee data found for block %s", blockHash)
	}

	return feeData, nil
}

// The following functions deal with building and validating the fee transaction

func (node *blockNode) validateFeeTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
	if node.isGenesis() {
		return nil
	}
	expectedFeeTransaction, err := node.buildFeeTransaction(dag, txsAcceptanceData)
	if err != nil {
		return err
	}

	if !expectedFeeTransaction.TxHash().IsEqual(block.FeeTransaction().Hash()) {
		return ruleError(ErrBadFeeTransaction, "fee transaction is not built as expected")
	}

	return nil
}

// buildFeeTransaction returns the expected fee transaction for the current block
func (node *blockNode) buildFeeTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData) (*wire.MsgTx, error) {
	bluesFeeData, err := node.getBluesFeeData(dag)
	if err != nil {
		return nil, err
	}

	txIns := []*wire.TxIn{}
	txOuts := []*wire.TxOut{}

	for _, blue := range node.blues {
		txIn, txOut, err := feeInputAndOutputForBlueBlock(blue, txsAcceptanceData, bluesFeeData)
		if err != nil {
			return nil, err
		}
		txIns = append(txIns, txIn)
		if txOut != nil {
			txOuts = append(txOuts, txOut)
		}
	}
	feeTx := wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
	return txsort.Sort(feeTx), nil
}

// feeInputAndOutputForBlueBlock calculates the input and output that should go
// into the fee transaction of blueBlock.
// If blueBlock gets no fee, it returns only a txIn and nil for the txOut.
func feeInputAndOutputForBlueBlock(blueBlock *blockNode, txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
	*wire.TxIn, *wire.TxOut, error) {

	blockTxsAcceptanceData, ok := txsAcceptanceData[*blueBlock.hash]
	if !ok {
		return nil, nil, fmt.Errorf("no txsAcceptanceData for block %s", blueBlock.hash)
	}
	blockFeeData, ok := feeData[*blueBlock.hash]
	if !ok {
		return nil, nil, fmt.Errorf("no feeData for block %s", blueBlock.hash)
	}

	if len(blockTxsAcceptanceData) != blockFeeData.Len() {
		return nil, nil, fmt.Errorf(
			"length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
			len(blockTxsAcceptanceData), blockFeeData.Len(), blueBlock.hash)
	}

	txIn := &wire.TxIn{
		SignatureScript: []byte{},
		PreviousOutPoint: wire.OutPoint{
			TxID:  daghash.TxID(*blueBlock.hash),
			Index: math.MaxUint32,
		},
		Sequence: wire.MaxTxInSequenceNum,
	}

	totalFees := uint64(0)
	feeIterator := blockFeeData.iterator()

	for _, txAcceptanceData := range blockTxsAcceptanceData {
		fee, err := feeIterator.next()
		if err != nil {
			return nil, nil, fmt.Errorf("error retrieving fee from compactFeeData iterator: %s", err)
		}
		if txAcceptanceData.IsAccepted {
			totalFees += fee
		}
	}

	if totalFees == 0 {
		return txIn, nil, nil
	}

	// The scriptPubKey for the fee is the same as the coinbase's first scriptPubKey.
	pkScript := blockTxsAcceptanceData[0].Tx.MsgTx().TxOut[0].PkScript

	txOut := &wire.TxOut{
		Value:    totalFees,
		PkScript: pkScript,
	}

	return txIn, txOut, nil
}
|
||||
@@ -1,305 +0,0 @@
// Copyright (c) 2016 The Decred developers
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag_test

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"testing"

	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/blockdag/fullblocktests"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	_ "github.com/daglabs/btcd/database/ffldb"
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/wire"
)

const (
	// testDbType is the database backend type to use for the tests.
	testDbType = "ffldb"

	// testDbRoot is the root directory used to create all test databases.
	testDbRoot = "testdbs"

	// blockDataNet is the expected network in the test block data.
	blockDataNet = wire.MainNet
)

// fileExists returns whether or not the named file or directory exists.
func fileExists(name string) bool {
	if _, err := os.Stat(name); err != nil {
		if os.IsNotExist(err) {
			return false
		}
	}
	return true
}

// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
	supportedDrivers := database.SupportedDrivers()
	for _, driver := range supportedDrivers {
		if dbType == driver {
			return true
		}
	}

	return false
}

// DAGSetup is used to create a new db and chain instance with the genesis
// block already inserted. In addition to the new chain instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func(), error) {
	if !isSupportedDbType(testDbType) {
		return nil, nil, fmt.Errorf("unsupported db type %v", testDbType)
	}

	// Handle memory database specially since it doesn't need the disk
	// specific handling.
	var db database.DB
	var teardown func()
	if testDbType == "memdb" {
		ndb, err := database.Create(testDbType)
		if err != nil {
			return nil, nil, fmt.Errorf("error creating db: %v", err)
		}
		db = ndb

		// Setup a teardown function for cleaning up. This function is
		// returned to the caller to be invoked when it is done testing.
		teardown = func() {
			db.Close()
		}
	} else {
		// Create the root directory for test databases.
		if !fileExists(testDbRoot) {
			if err := os.MkdirAll(testDbRoot, 0700); err != nil {
				err := fmt.Errorf("unable to create test db "+
					"root: %v", err)
				return nil, nil, err
			}
		}

		// Create a new database to store the accepted blocks into.
		dbPath := filepath.Join(testDbRoot, dbName)
		_ = os.RemoveAll(dbPath)
		ndb, err := database.Create(testDbType, dbPath, blockDataNet)
		if err != nil {
			return nil, nil, fmt.Errorf("error creating db: %v", err)
		}
		db = ndb

		// Setup a teardown function for cleaning up. This function is
		// returned to the caller to be invoked when it is done testing.
		teardown = func() {
			db.Close()
			os.RemoveAll(dbPath)
			os.RemoveAll(testDbRoot)
		}
	}

	// Copy the chain params to ensure any modifications the tests do to
	// the chain parameters do not affect the global instance.
	paramsCopy := *params

	// Create the main chain instance.
	chain, err := blockdag.New(&blockdag.Config{
		DB:          db,
		DAGParams:   &paramsCopy,
		Checkpoints: nil,
		TimeSource:  blockdag.NewMedianTime(),
		SigCache:    txscript.NewSigCache(1000),
	})
	if err != nil {
		teardown()
		err := fmt.Errorf("failed to create chain instance: %v", err)
		return nil, nil, err
	}
	return chain, teardown, nil
}
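A minimal usage sketch (not part of the original file; the test name is illustrative): a caller creates the DAG, checks the error, and defers the returned teardown so the test database is always cleaned up.

```go
func TestUsingDAGSetup(t *testing.T) {
	// DAGSetup creates the backing database and a DAG instance with the
	// genesis block inserted; teardownFunc removes the test database.
	dag, teardownFunc, err := DAGSetup("exampledagsetup",
		&dagconfig.RegressionNetParams)
	if err != nil {
		t.Fatalf("Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc()

	// Process blocks against dag here.
	_ = dag
}
```
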
// TestFullBlocks ensures all tests generated by the fullblocktests package
// have the expected result when processed via ProcessBlock.
func TestFullBlocks(t *testing.T) {
	// TODO: (Stas) This test was disabled until we have implemented Phantom
	// Ticket: https://daglabs.atlassian.net/browse/DEV-60
	t.SkipNow()

	tests, err := fullblocktests.Generate(false)
	if err != nil {
		t.Fatalf("failed to generate tests: %v", err)
	}

	// Create a new database and chain instance to run tests against.
	dag, teardownFunc, err := DAGSetup("fullblocktest",
		&dagconfig.RegressionNetParams)
	if err != nil {
		t.Errorf("Failed to setup chain instance: %v", err)
		return
	}
	defer teardownFunc()

	// testAcceptedBlock attempts to process the block in the provided test
	// instance and ensures that it was accepted according to the flags
	// specified in the test.
	testAcceptedBlock := func(item fullblocktests.AcceptedBlock) {
		blockHeight := item.Height
		block := util.NewBlock(item.Block)
		block.SetHeight(blockHeight)
		t.Logf("Testing block %s (hash %s, height %d)",
			item.Name, block.Hash(), blockHeight)

		isOrphan, err := dag.ProcessBlock(block,
			blockdag.BFNone)
		if err != nil {
			t.Fatalf("block %q (hash %s, height %d) should "+
				"have been accepted: %v", item.Name,
				block.Hash(), blockHeight, err)
		}

		if isOrphan != item.IsOrphan {
			t.Fatalf("block %q (hash %s, height %d) unexpected "+
				"orphan flag -- got %v, want %v", item.Name,
				block.Hash(), blockHeight, isOrphan,
				item.IsOrphan)
		}
	}

	// testRejectedBlock attempts to process the block in the provided test
	// instance and ensures that it was rejected with the reject code
	// specified in the test.
	testRejectedBlock := func(item fullblocktests.RejectedBlock) {
		blockHeight := item.Height
		block := util.NewBlock(item.Block)
		block.SetHeight(blockHeight)
		t.Logf("Testing block %s (hash %s, height %d)",
			item.Name, block.Hash(), blockHeight)

		_, err := dag.ProcessBlock(block, blockdag.BFNone)
		if err == nil {
			t.Fatalf("block %q (hash %s, height %d) should not "+
				"have been accepted", item.Name, block.Hash(),
				blockHeight)
		}

		// Ensure the error code is of the expected type and the reject
		// code matches the value specified in the test instance.
		rerr, ok := err.(blockdag.RuleError)
		if !ok {
			t.Fatalf("block %q (hash %s, height %d) returned "+
				"unexpected error type -- got %T, want "+
				"blockchain.RuleError", item.Name, block.Hash(),
				blockHeight, err)
		}
		if rerr.ErrorCode != item.RejectCode {
			t.Fatalf("block %q (hash %s, height %d) does not have "+
				"expected reject code -- got %v, want %v",
				item.Name, block.Hash(), blockHeight,
				rerr.ErrorCode, item.RejectCode)
		}
	}

	// testRejectedNonCanonicalBlock attempts to decode the block in the
	// provided test instance and ensures that it failed to decode with a
	// message error.
	testRejectedNonCanonicalBlock := func(item fullblocktests.RejectedNonCanonicalBlock) {
		headerLen := len(item.RawBlock)
		if headerLen > 80 {
			headerLen = 80
		}
		blockHash := daghash.DoubleHashH(item.RawBlock[0:headerLen])
		blockHeight := item.Height
		t.Logf("Testing block %s (hash %s, height %d)", item.Name,
			blockHash, blockHeight)

		// Ensure there is an error due to deserializing the block.
		var msgBlock wire.MsgBlock
		err := msgBlock.BtcDecode(bytes.NewReader(item.RawBlock), 0)
		if _, ok := err.(*wire.MessageError); !ok {
			t.Fatalf("block %q (hash %s, height %d) should have "+
				"failed to decode", item.Name, blockHash,
				blockHeight)
		}
	}

	// testOrphanOrRejectedBlock attempts to process the block in the
	// provided test instance and ensures that it was either accepted as an
	// orphan or rejected with a rule violation.
	testOrphanOrRejectedBlock := func(item fullblocktests.OrphanOrRejectedBlock) {
		blockHeight := item.Height
		block := util.NewBlock(item.Block)
		block.SetHeight(blockHeight)
		t.Logf("Testing block %s (hash %s, height %d)",
			item.Name, block.Hash(), blockHeight)

		isOrphan, err := dag.ProcessBlock(block, blockdag.BFNone)
		if err != nil {
			// Ensure the error code is of the expected type.
			if _, ok := err.(blockdag.RuleError); !ok {
				t.Fatalf("block %q (hash %s, height %d) "+
					"returned unexpected error type -- "+
					"got %T, want blockchain.RuleError",
					item.Name, block.Hash(), blockHeight,
					err)
			}
		}

		if !isOrphan {
			t.Fatalf("block %q (hash %s, height %d) was accepted, "+
				"but is not considered an orphan", item.Name,
				block.Hash(), blockHeight)
		}
	}

	// testExpectedTip ensures the current tip of the blockchain is the
	// block specified in the provided test instance.
	testExpectedTip := func(item fullblocktests.ExpectedTip) {
		blockHeight := item.Height
		block := util.NewBlock(item.Block)
		block.SetHeight(blockHeight)
		t.Logf("Testing tip for block %s (hash %s, height %d)",
			item.Name, block.Hash(), blockHeight)

		// Ensure hash and height match.
		if dag.HighestTipHash() != item.Block.BlockHash() ||
			dag.Height() != blockHeight { //TODO: (Ori) the use of dag.Height() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation

			t.Fatalf("block %q (hash %s, height %d) should be "+
				"the current tip -- got (hash %s, height %d)",
				item.Name, block.Hash(), blockHeight, dag.HighestTipHash(),
				dag.Height()) //TODO: (Ori) the use of dag.Height() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
		}
	}

	for testNum, test := range tests {
		for itemNum, item := range test {
			switch item := item.(type) {
			case fullblocktests.AcceptedBlock:
				testAcceptedBlock(item)
			case fullblocktests.RejectedBlock:
				testRejectedBlock(item)
			case fullblocktests.RejectedNonCanonicalBlock:
				testRejectedNonCanonicalBlock(item)
			case fullblocktests.OrphanOrRejectedBlock:
				testOrphanOrRejectedBlock(item)
			case fullblocktests.ExpectedTip:
				testExpectedTip(item)
			default:
				t.Fatalf("test #%d, item #%d is not one of "+
					"the supported test instance types -- "+
					"got type: %T", testNum, itemNum, item)
			}
		}
	}
}
@@ -1,29 +0,0 @@
fullblocktests
==============

[](https://travis-ci.org/btcsuite/btcd)
[](http://copyfree.org)
[](http://godoc.org/github.com/daglabs/btcd/blockchain/fullblocktests)

Package fullblocktests provides a set of full block tests to be used for testing
the consensus validation rules. The tests are intended to be flexible enough to
allow both unit-style tests directly against the blockchain code as well as
integration style tests over the peer-to-peer network. To achieve that goal,
each test contains additional information about the expected result; however,
that information can be ignored when doing comparison tests between two
independent versions over the peer-to-peer network.

This package has intentionally been designed so it can be used as a standalone
package for any projects needing to test their implementation against a full set
of blocks that exercise the consensus validation rules.

## Installation and Updating

```bash
$ go get -u github.com/daglabs/btcd/blockchain/fullblocktests
```

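A usage sketch (not in the original README; the import path follows the test file in this repository rather than the badge link): generate the test instances and dispatch on their concrete types, mirroring how TestFullBlocks consumes them.

```go
package main

import (
	"fmt"

	"github.com/daglabs/btcd/blockdag/fullblocktests"
)

func main() {
	// Generate returns a slice of test sequences; each item describes a
	// block together with the result a conforming implementation should
	// produce when processing it.
	tests, err := fullblocktests.Generate(false)
	if err != nil {
		panic(err)
	}
	for _, test := range tests {
		for _, item := range test {
			switch item := item.(type) {
			case fullblocktests.AcceptedBlock:
				fmt.Println("expect accept:", item.Name)
			case fullblocktests.RejectedBlock:
				fmt.Println("expect reject:", item.Name)
			default:
				// Other instance types carry their own expectations.
			}
		}
	}
}
```
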
## License

Package fullblocktests is licensed under the [copyfree](http://copyfree.org) ISC
License.
@@ -1,20 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package fullblocktests provides a set of block consensus validation tests.

All of the generated test instances involve full blocks that are to be used for
testing the consensus validation rules. The tests are intended to be flexible
enough to allow both unit-style tests directly against the blockchain code as
well as integration style tests over the peer-to-peer network. To achieve that
goal, each test contains additional information about the expected result;
however, that information can be ignored when doing comparison tests between two
independent versions over the peer-to-peer network.

This package has intentionally been designed so it can be used as a standalone
package for any projects needing to test their implementation against a full set
of blocks that exercise the consensus validation rules.
*/
package fullblocktests
File diff suppressed because it is too large
@@ -1,146 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package fullblocktests

import (
	"encoding/hex"
	"math"
	"math/big"
	"time"

	"github.com/daglabs/btcd/util/hdkeychain"
	"github.com/daglabs/btcd/util/subnetworkid"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/wire"
)

// newHashFromStr converts the passed big-endian hex string into a
// wire.Hash. It only differs from the one available in daghash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
	hash, err := daghash.NewHashFromStr(hexStr)
	if err != nil {
		panic(err)
	}
	return hash
}

// newTxIDFromStr converts the passed big-endian hex string into a
// wire.TxID. It only differs from the one available in daghash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newTxIDFromStr(hexStr string) *daghash.TxID {
	txID, err := daghash.NewTxIDFromStr(hexStr)
	if err != nil {
		panic(err)
	}
	return txID
}

// fromHex converts the passed hex string into a byte slice and will panic if
// there is an error. This is only provided for the hard-coded constants so
// errors in the source code can be detected. It will only (and must only) be
// called for initialization purposes.
func fromHex(s string) []byte {
	r, err := hex.DecodeString(s)
	if err != nil {
		panic("invalid hex in source file: " + s)
	}
	return r
}

var (
	// bigOne is 1 represented as a big.Int. It is defined here to avoid
	// the overhead of creating it multiple times.
	bigOne = big.NewInt(1)

	// regressionPowLimit is the highest proof of work value a Bitcoin block
	// can have for the regression test network. It is the value 2^255 - 1.
	regressionPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne)

	// regTestGenesisBlock defines the genesis block of the block chain which serves
	// as the public transaction ledger for the regression test network.
	regTestGenesisBlock = wire.MsgBlock{
		Header: wire.BlockHeader{
			Version:        1,
			ParentHashes:   []*daghash.Hash{},
			HashMerkleRoot: newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"),
			Timestamp:      time.Unix(0x5b28c636, 0), // 2018-06-19 09:00:38 +0000 UTC
			Bits:           0x207fffff,               // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
			Nonce:          1,
		},
		Transactions: []*wire.MsgTx{{
			Version: 1,
			TxIn: []*wire.TxIn{{
				PreviousOutPoint: wire.OutPoint{
					TxID:  daghash.TxID{},
					Index: 0xffffffff,
				},
				SignatureScript: fromHex("04ffff001d010445" +
					"5468652054696d65732030332f4a616e2f" +
					"32303039204368616e63656c6c6f72206f" +
					"6e206272696e6b206f66207365636f6e64" +
					"206261696c6f757420666f72206261686b73"),
				Sequence: math.MaxUint64,
			}},
			TxOut: []*wire.TxOut{{
				Value: 0,
				PkScript: fromHex("4104678afdb0fe5548271967f1" +
					"a67130b7105cd6a828e03909a67962e0ea1f" +
					"61deb649f6bc3f4cef38c4f35504e51ec138" +
					"c4f35504e51ec112de5c384df7ba0b8d578a" +
					"4c702b6bf11d5fac"),
			}},
			LockTime:     0,
			SubnetworkID: *subnetworkid.SubnetworkIDNative,
		}},
	}
)

// regressionNetParams defines the network parameters for the regression test
// network.
//
// NOTE: The test generator intentionally does not use the existing definitions
// in the dagconfig package since the intent is to be able to generate known
// good tests which exercise that code. Using the dagconfig parameters would
// allow them to change out from under the tests potentially invalidating them.
var regressionNetParams = &dagconfig.Params{
	Name:        "regtest",
	Net:         wire.TestNet,
	DefaultPort: "18444",

	// Chain parameters
	GenesisBlock:             &regTestGenesisBlock,
	GenesisHash:              newHashFromStr("5bec7567af40504e0994db3b573c186fffcc4edefe096ff2e58d00523bd7e8a6"),
	PowLimit:                 regressionPowLimit,
	PowLimitBits:             0x207fffff,
	BlockRewardMaturity:      100,
	SubsidyReductionInterval: 150,
	TargetTimespan:           time.Hour * 24 * 14, // 14 days
	TargetTimePerBlock:       time.Second * 10,    // 10 seconds
	RetargetAdjustmentFactor: 4,                   // 25% less, 400% more
	ReduceMinDifficulty:      true,
	MinDiffReductionTime:     time.Minute * 20, // TargetTimePerBlock * 2
	GenerateSupported:        true,

	// Checkpoints ordered from oldest to newest.
	Checkpoints: nil,

	// Mempool parameters
	RelayNonStdTxs: true,

	// Address encoding magics
	PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)

	// BIP32 hierarchical deterministic extended key magics
	HDKeyIDPair: hdkeychain.HDKeyPairRegressionNet,

	// BIP44 coin type used in the hierarchical deterministic path for
	// address generation.
	HDCoinType: 1,
}
@@ -1,929 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"errors"
	"fmt"
	"sync"

	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/wire"
)

const (
	// addrIndexName is the human-readable name for the index.
	addrIndexName = "address index"

	// level0MaxEntries is the maximum number of transactions that are
	// stored in level 0 of an address index entry. Subsequent levels store
	// 2^n * level0MaxEntries entries, or in words, double the maximum of
	// the previous level.
	level0MaxEntries = 8

	// addrKeySize is the number of bytes an address key consumes in the
	// index. It consists of 1 byte address type + 20 bytes hash160.
	addrKeySize = 1 + 20

	// levelKeySize is the number of bytes a level key in the address index
	// consumes. It consists of the address key + 1 byte for the level.
	levelKeySize = addrKeySize + 1

	// levelOffset is the offset in the level key which identifies the level.
	levelOffset = levelKeySize - 1

	// addrKeyTypePubKeyHash is the address type in an address key which
	// represents both a pay-to-pubkey-hash and a pay-to-pubkey address.
	// This is done because both are identical for the purposes of the
	// address index.
	addrKeyTypePubKeyHash = 0

	// addrKeyTypeScriptHash is the address type in an address key which
	// represents a pay-to-script-hash address. This is necessary because
	// the hash of a pubkey address might be the same as that of a script
	// hash.
	addrKeyTypeScriptHash = 1

	// Size of a transaction entry. It consists of 4 bytes block id + 4
	// bytes offset + 4 bytes length.
	txEntrySize = 4 + 4 + 4
)

var (
	// addrIndexKey is the key of the address index and the db bucket used
	// to house it.
	addrIndexKey = []byte("txbyaddridx")

	// errUnsupportedAddressType is an error that is used to signal an
	// unsupported address type has been used.
	errUnsupportedAddressType = errors.New("address type is not supported " +
		"by the address index")
)

// -----------------------------------------------------------------------------
// The address index maps addresses referenced in the blockchain to a list of
// all the transactions involving that address. Transactions are stored
// according to their order of appearance in the blockchain. That is to say
// first by block height and then by offset inside the block. It is also
// important to note that this implementation requires the transaction index
// since it is needed in order to catch up old blocks, because the spent
// outputs will already be pruned from the utxo set.
//
// The approach used to store the index is similar to a log-structured merge
// tree (LSM tree) and is thus similar to how leveldb works internally.
//
// Every address consists of one or more entries identified by a level starting
// from 0 where each level holds a maximum number of entries such that each
// subsequent level holds double the maximum of the previous one. In equation
// form, the number of entries each level holds is 2^n * firstLevelMaxSize.
//
// New transactions are appended to level 0 until it becomes full at which point
// the entire level 0 entry is appended to the level 1 entry and level 0 is
// cleared. This process continues until level 1 becomes full at which point it
// will be appended to level 2 and cleared and so on.
//
// The result of this is the lower levels contain newer transactions and the
// transactions within each level are ordered from oldest to newest.
//
// The intent of this approach is to provide a balance between space efficiency
// and indexing cost. Storing one entry per transaction would have the lowest
// indexing cost, but would waste a lot of space because the same address hash
// would be duplicated for every transaction key. On the other hand, storing a
// single entry with all transactions would be the most space efficient, but
// would cause indexing cost to grow quadratically with the number of
// transactions involving the same address. The approach used here provides
// logarithmic insertion and retrieval.
//
// The serialized key format is:
//
//   <addr type><addr hash><level>
//
//   Field           Type      Size
//   addr type       uint8     1 byte
//   addr hash       hash160   20 bytes
//   level           uint8     1 byte
//   -----
//   Total: 22 bytes
//
// The serialized value format is:
//
//   [<block id><start offset><tx length>,...]
//
//   Field           Type      Size
//   block id        uint32    4 bytes
//   start offset    uint32    4 bytes
//   tx length       uint32    4 bytes
//   -----
//   Total: 12 bytes per indexed tx
// -----------------------------------------------------------------------------
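A worked example of the level arithmetic described above (illustrative, standalone; the constant 8 mirrors level0MaxEntries and 12 mirrors txEntrySize as defined in this file): level n holds up to 2^n * 8 entries, so capacities grow 8, 16, 32, 64, ...

```go
package main

import "fmt"

func main() {
	const level0MaxEntries = 8 // mirrors the constant defined in this file
	const txEntrySize = 12     // 4-byte block id + 4-byte offset + 4-byte length
	for level := uint(0); level <= 4; level++ {
		// Level n holds up to 2^n * level0MaxEntries entries, i.e. each
		// level doubles the capacity of the one before it.
		entries := level0MaxEntries << level
		fmt.Printf("level %d: up to %d entries (%d value bytes)\n",
			level, entries, entries*txEntrySize)
	}
}
```
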
// fetchBlockHashFunc defines a callback function to use in order to convert a
// serialized block ID to an associated block hash.
type fetchBlockHashFunc func(serializedID []byte) (*daghash.Hash, error)

// serializeAddrIndexEntry serializes the provided block id and transaction
// location according to the format described in detail above.
func serializeAddrIndexEntry(blockID uint32, txLoc wire.TxLoc) []byte {
	// Serialize the entry.
	serialized := make([]byte, 12)
	byteOrder.PutUint32(serialized, blockID)
	byteOrder.PutUint32(serialized[4:], uint32(txLoc.TxStart))
	byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxLen))
	return serialized
}

// deserializeAddrIndexEntry decodes the passed serialized byte slice into the
// provided region struct according to the format described in detail above and
// uses the passed block hash fetching function in order to convert the block ID
// to the associated block hash.
func deserializeAddrIndexEntry(serialized []byte, region *database.BlockRegion, fetchBlockHash fetchBlockHashFunc) error {
	// Ensure there are enough bytes to decode.
	if len(serialized) < txEntrySize {
		return errDeserialize("unexpected end of data")
	}

	hash, err := fetchBlockHash(serialized[0:4])
	if err != nil {
		return err
	}
	region.Hash = hash
	region.Offset = byteOrder.Uint32(serialized[4:8])
	region.Len = byteOrder.Uint32(serialized[8:12])
	return nil
}
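A round-trip sketch, assuming it lives in this package (the helper name and the values are hypothetical): serialize an entry and decode it back through a stub hash-fetching callback.

```go
func exampleEntryRoundTrip() (*database.BlockRegion, error) {
	// Serialize a hypothetical entry: block ID 7, tx at offset 80, 250 bytes.
	serialized := serializeAddrIndexEntry(7, wire.TxLoc{TxStart: 80, TxLen: 250})

	// Stub callback; a real caller resolves the 4-byte serialized ID to the
	// block hash via the block index.
	fetchBlockHash := func(serializedID []byte) (*daghash.Hash, error) {
		return &daghash.Hash{}, nil
	}

	region := &database.BlockRegion{}
	if err := deserializeAddrIndexEntry(serialized, region, fetchBlockHash); err != nil {
		return nil, err
	}
	return region, nil // region.Offset == 80, region.Len == 250
}
```
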
// keyForLevel returns the key for a specific address and level in the address
// index entry.
func keyForLevel(addrKey [addrKeySize]byte, level uint8) [levelKeySize]byte {
	var key [levelKeySize]byte
	copy(key[:], addrKey[:])
	key[levelOffset] = level
	return key
}

// dbPutAddrIndexEntry updates the address index to include the provided entry
// according to the level-based scheme described in detail above.
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint32, txLoc wire.TxLoc) error {
	// Start with level 0 and its initial max number of entries.
	curLevel := uint8(0)
	maxLevelBytes := level0MaxEntries * txEntrySize

	// Simply append the new entry to level 0 and return now when it will
	// fit. This is the most common path.
	newData := serializeAddrIndexEntry(blockID, txLoc)
	level0Key := keyForLevel(addrKey, 0)
	level0Data := bucket.Get(level0Key[:])
	if len(level0Data)+len(newData) <= maxLevelBytes {
		mergedData := newData
		if len(level0Data) > 0 {
			mergedData = make([]byte, len(level0Data)+len(newData))
			copy(mergedData, level0Data)
			copy(mergedData[len(level0Data):], newData)
		}
		return bucket.Put(level0Key[:], mergedData)
	}

	// At this point, level 0 is full, so merge each level into higher
	// levels as many times as needed to free up level 0.
	prevLevelData := level0Data
	for {
		// Each new level holds twice as much as the previous one.
		curLevel++
		maxLevelBytes *= 2

		// Move to the next level as long as the current level is full.
		curLevelKey := keyForLevel(addrKey, curLevel)
		curLevelData := bucket.Get(curLevelKey[:])
		if len(curLevelData) == maxLevelBytes {
			prevLevelData = curLevelData
			continue
		}

		// The current level has room for the data in the previous one,
		// so merge the data from previous level into it.
		mergedData := prevLevelData
		if len(curLevelData) > 0 {
			mergedData = make([]byte, len(curLevelData)+
				len(prevLevelData))
			copy(mergedData, curLevelData)
			copy(mergedData[len(curLevelData):], prevLevelData)
		}
		err := bucket.Put(curLevelKey[:], mergedData)
		if err != nil {
			return err
		}

		// Move all of the levels before the previous one up a level.
		for mergeLevel := curLevel - 1; mergeLevel > 0; mergeLevel-- {
			mergeLevelKey := keyForLevel(addrKey, mergeLevel)
			prevLevelKey := keyForLevel(addrKey, mergeLevel-1)
			prevData := bucket.Get(prevLevelKey[:])
			err := bucket.Put(mergeLevelKey[:], prevData)
			if err != nil {
				return err
			}
		}
		break
	}

	// Finally, insert the new entry into level 0 now that it is empty.
	return bucket.Put(level0Key[:], newData)
}

// dbFetchAddrIndexEntries returns block regions for transactions referenced by
// the given address key and the number of entries skipped, since it could have
// been less in the case where there are fewer total entries than the requested
// number of entries to skip.
func dbFetchAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, numToSkip, numRequested uint32, reverse bool, fetchBlockHash fetchBlockHashFunc) ([]database.BlockRegion, uint32, error) {
	// When the reverse flag is not set, all levels need to be fetched
	// because numToSkip and numRequested are counted from the oldest
	// transactions (highest level) and thus the total count is needed.
	// However, when the reverse flag is set, only enough records to satisfy
	// the requested amount are needed.
	var level uint8
	var serialized []byte
	for !reverse || len(serialized) < int(numToSkip+numRequested)*txEntrySize {
		curLevelKey := keyForLevel(addrKey, level)
		levelData := bucket.Get(curLevelKey[:])
		if levelData == nil {
			// Stop when there are no more levels.
			break
		}

		// Higher levels contain older transactions, so prepend them.
		prepended := make([]byte, len(serialized)+len(levelData))
		copy(prepended, levelData)
		copy(prepended[len(levelData):], serialized)
		serialized = prepended
		level++
	}

	// When the requested number of entries to skip is larger than the
	// number available, skip them all and return now with the actual number
	// skipped.
	numEntries := uint32(len(serialized) / txEntrySize)
	if numToSkip >= numEntries {
		return nil, numEntries, nil
	}

	// Nothing more to do when there are no requested entries.
	if numRequested == 0 {
		return nil, numToSkip, nil
	}

	// Limit the number to load based on the number of available entries,
	// the number to skip, and the number requested.
	numToLoad := numEntries - numToSkip
	if numToLoad > numRequested {
		numToLoad = numRequested
	}

	// Start the offset after all skipped entries and load the calculated
	// number.
	results := make([]database.BlockRegion, numToLoad)
	for i := uint32(0); i < numToLoad; i++ {
		// Calculate the read offset according to the reverse flag.
		var offset uint32
		if reverse {
			offset = (numEntries - numToSkip - i - 1) * txEntrySize
		} else {
			offset = (numToSkip + i) * txEntrySize
		}

		// Deserialize and populate the result.
		err := deserializeAddrIndexEntry(serialized[offset:],
			&results[i], fetchBlockHash)
		if err != nil {
			// Ensure any deserialization errors are returned as
			// database corruption errors.
			if isDeserializeErr(err) {
				err = database.Error{
					ErrorCode: database.ErrCorruption,
					Description: fmt.Sprintf("failed to "+
						"deserialize address index "+
						"for key %x: %s", addrKey, err),
				}
			}

			return nil, 0, err
		}
	}

	return results, numToSkip, nil
}
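A worked call (hypothetical values; bucket, addrKey and fetchBlockHash as in the surrounding code): with 30 entries indexed for addrKey, skipping 25 and requesting 10 in oldest-first order returns the 5 remaining regions and reports 25 actually skipped.

```go
regions, skipped, err := dbFetchAddrIndexEntries(bucket, addrKey, 25, 10,
	false, fetchBlockHash)
if err == nil {
	// For the scenario above: len(regions) == 5 and skipped == 25.
	_ = regions
	_ = skipped
}
```
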
// minEntriesToReachLevel returns the minimum number of entries that are
// required to reach the given address index level.
func minEntriesToReachLevel(level uint8) int {
	maxEntriesForLevel := level0MaxEntries
	minRequired := 1
	for l := uint8(1); l <= level; l++ {
		minRequired += maxEntriesForLevel
		maxEntriesForLevel *= 2
	}
	return minRequired
}

// maxEntriesForLevel returns the maximum number of entries allowed for the
// given address index level.
func maxEntriesForLevel(level uint8) int {
	numEntries := level0MaxEntries
	for l := level; l > 0; l-- {
		numEntries *= 2
	}
	return numEntries
}
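A hypothetical sanity test for the two helpers, assuming level0MaxEntries = 8: maxEntriesForLevel(2) is 8 * 2 * 2 = 32, and reaching level 2 takes full lower levels plus one extra entry, i.e. 1 + 8 + 16 = 25.

```go
func TestLevelArithmeticExample(t *testing.T) {
	// maxEntriesForLevel doubles per level starting from level0MaxEntries.
	if got := maxEntriesForLevel(2); got != 32 {
		t.Fatalf("maxEntriesForLevel(2) = %d, want 32", got)
	}
	// Reaching level 2 requires filling level 0 (8) and level 1 (16) and
	// then inserting one more entry.
	if got := minEntriesToReachLevel(2); got != 25 {
		t.Fatalf("minEntriesToReachLevel(2) = %d, want 25", got)
	}
}
```
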
// dbRemoveAddrIndexEntries removes the specified number of entries from
// the address index for the provided key. An assertion error will be returned
// if the count exceeds the total number of entries in the index.
func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, count int) error {
	// Nothing to do if no entries are being deleted.
	if count <= 0 {
		return nil
	}

	// Make use of a local map to track pending updates and define a closure
	// to apply it to the database. This is done in order to reduce the
	// number of database reads and because there is more than one exit
	// path that needs to apply the updates.
	pendingUpdates := make(map[uint8][]byte)
	applyPending := func() error {
		for level, data := range pendingUpdates {
			curLevelKey := keyForLevel(addrKey, level)
			if len(data) == 0 {
				err := bucket.Delete(curLevelKey[:])
				if err != nil {
					return err
				}
				continue
			}
			err := bucket.Put(curLevelKey[:], data)
			if err != nil {
				return err
			}
		}
		return nil
	}

	// Loop forwards through the levels while removing entries until the
	// specified number has been removed. This will potentially result in
	// entirely empty lower levels which will be backfilled below.
	var highestLoadedLevel uint8
	numRemaining := count
	for level := uint8(0); numRemaining > 0; level++ {
		// Load the data for the level from the database.
		curLevelKey := keyForLevel(addrKey, level)
		curLevelData := bucket.Get(curLevelKey[:])
		if len(curLevelData) == 0 && numRemaining > 0 {
			return AssertError(fmt.Sprintf("dbRemoveAddrIndexEntries "+
				"not enough entries for address key %x to "+
				"delete %d entries", addrKey, count))
		}
		pendingUpdates[level] = curLevelData
		highestLoadedLevel = level

		// Delete the entire level as needed.
		numEntries := len(curLevelData) / txEntrySize
		if numRemaining >= numEntries {
			pendingUpdates[level] = nil
			numRemaining -= numEntries
			continue
		}

		// Remove remaining entries to delete from the level.
		offsetEnd := len(curLevelData) - (numRemaining * txEntrySize)
		pendingUpdates[level] = curLevelData[:offsetEnd]
		break
	}

	// When not all of the elements in level 0 were removed, there is
	// nothing left to do other than updating the database.
	if len(pendingUpdates[0]) != 0 {
		return applyPending()
	}

	// At this point there are one or more empty levels before the current
	// level which need to be backfilled and the current level might have
	// had some entries deleted from it as well. Since all levels after
	// level 0 are required to either be empty, half full, or completely
	// full, the current level must be adjusted accordingly by backfilling
	// each previous level in a way which satisfies the requirements. Any
	// entries that are left are assigned to level 0 after the loop as they
	// are guaranteed to fit by the logic in the loop. In other words, this
	// effectively squashes all remaining entries in the current level into
	// the lowest possible levels while following the level rules.
	//
	// Note that the level after the current level might also have entries
	// and gaps are not allowed, so this also keeps track of the lowest
	// empty level so the code below knows how far to backfill in case it is
	// required.
	lowestEmptyLevel := uint8(255)
	curLevelData := pendingUpdates[highestLoadedLevel]
	curLevelMaxEntries := maxEntriesForLevel(highestLoadedLevel)
	for level := highestLoadedLevel; level > 0; level-- {
		// When there are not enough entries left in the current level
		// for the number that would be required to reach it, clear the
		// current level, which effectively moves them all up to the
		// previous level on the next iteration. Otherwise, there are
		// sufficient entries, so update the current level to
		// contain as many entries as possible while still leaving
		// enough remaining entries required to reach the level.
		numEntries := len(curLevelData) / txEntrySize
		prevLevelMaxEntries := curLevelMaxEntries / 2
		minPrevRequired := minEntriesToReachLevel(level - 1)
		if numEntries < prevLevelMaxEntries+minPrevRequired {
			lowestEmptyLevel = level
			pendingUpdates[level] = nil
		} else {
			// This level can only be completely full or half full,
			// so choose the appropriate offset to ensure enough
			// entries remain to reach the level.
			var offset int
			if numEntries-curLevelMaxEntries >= minPrevRequired {
				offset = curLevelMaxEntries * txEntrySize
			} else {
				offset = prevLevelMaxEntries * txEntrySize
			}
			pendingUpdates[level] = curLevelData[:offset]
			curLevelData = curLevelData[offset:]
		}

		curLevelMaxEntries = prevLevelMaxEntries
	}
	pendingUpdates[0] = curLevelData
	if len(curLevelData) == 0 {
		lowestEmptyLevel = 0
	}

	// When the highest loaded level is empty, it's possible the level after
	// it still has data and thus that data needs to be backfilled as well.
	for len(pendingUpdates[highestLoadedLevel]) == 0 {
		// When the next level is empty too, there is no data left to
		// continue backfilling, so there is nothing left to do.
		// Otherwise, populate the pending updates map with the newly
		// loaded data and update the highest loaded level accordingly.
		level := highestLoadedLevel + 1
		curLevelKey := keyForLevel(addrKey, level)
		levelData := bucket.Get(curLevelKey[:])
		if len(levelData) == 0 {
			break
		}
		pendingUpdates[level] = levelData
		highestLoadedLevel = level

		// At this point the highest level is not empty, but it might
		// be half full. When that is the case, move it up a level to
		// simplify the code below which backfills all lower levels that
		// are still empty. This also means the current level will be
		// empty, so the loop will perform another iteration to
		// potentially backfill this level with data from the next one.
		curLevelMaxEntries := maxEntriesForLevel(level)
		if len(levelData)/txEntrySize != curLevelMaxEntries {
			pendingUpdates[level] = nil
			pendingUpdates[level-1] = levelData
			level--
			curLevelMaxEntries /= 2
		}

		// Backfill all lower levels that are still empty by iteratively
		// halving the data until the lowest empty level is filled.
		for level > lowestEmptyLevel {
			offset := (curLevelMaxEntries / 2) * txEntrySize
			pendingUpdates[level] = levelData[:offset]
			levelData = levelData[offset:]
			pendingUpdates[level-1] = levelData
			level--
			curLevelMaxEntries /= 2
		}

		// The lowest possible empty level is now the highest loaded
		// level.
		lowestEmptyLevel = highestLoadedLevel
	}

	// Apply the pending updates.
	return applyPending()
}
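A call sketch (values hypothetical): disconnecting a block that contributed three transactions for addrKey removes the three most recent entries; an AssertError comes back if the index holds fewer.

```go
if err := dbRemoveAddrIndexEntries(bucket, addrKey, 3); err != nil {
	// Fewer than 3 entries existed for addrKey, or a database error occurred.
	return err
}
```
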
// addrToKey converts known address types to an addrindex key. An error is
// returned for unsupported types.
func addrToKey(addr util.Address) ([addrKeySize]byte, error) {
	switch addr := addr.(type) {
	case *util.AddressPubKeyHash:
		var result [addrKeySize]byte
		result[0] = addrKeyTypePubKeyHash
		copy(result[1:], addr.Hash160()[:])
		return result, nil

	case *util.AddressScriptHash:
		var result [addrKeySize]byte
		result[0] = addrKeyTypeScriptHash
		copy(result[1:], addr.Hash160()[:])
		return result, nil

	case *util.AddressPubKey:
		var result [addrKeySize]byte
		result[0] = addrKeyTypePubKeyHash
		copy(result[1:], addr.AddressPubKeyHash().Hash160()[:])
		return result, nil
	}

	return [addrKeySize]byte{}, errUnsupportedAddressType
}

// AddrIndex implements a transaction by address index. That is to say, it
// supports querying all transactions that reference a given address because
// they are either crediting or debiting the address. The returned transactions
// are ordered according to their order of appearance in the blockchain. In
// other words, first by block height and then by offset inside the block.
//
// In addition, support is provided for a memory-only index of unconfirmed
// transactions such as those which are kept in the memory pool before inclusion
// in a block.
type AddrIndex struct {
	// The following fields are set when the instance is created and can't
	// be changed afterwards, so there is no need to protect them with a
	// separate mutex.
	db        database.DB
	dagParams *dagconfig.Params

	// The following fields are used to quickly link transactions and
	// addresses that have not been included into a block yet when an
	// address index is being maintained. They are protected by the
	// unconfirmedLock field.
	//
	// The txnsByAddr field is used to keep an index of all transactions
	// which either create an output to a given address or spend from a
	// previous output to it keyed by the address.
	//
	// The addrsByTx field is essentially the reverse and is used to
	// keep an index of all addresses which a given transaction involves.
	// This allows fairly efficient updates when transactions are removed
	// once they are included into a block.
	unconfirmedLock sync.RWMutex
	txnsByAddr      map[[addrKeySize]byte]map[daghash.TxID]*util.Tx
	addrsByTx       map[daghash.TxID]map[[addrKeySize]byte]struct{}
}

// Ensure the AddrIndex type implements the Indexer interface.
var _ Indexer = (*AddrIndex)(nil)

// Ensure the AddrIndex type implements the NeedsInputser interface.
var _ NeedsInputser = (*AddrIndex)(nil)

// NeedsInputs signals that the index requires the referenced inputs in order
// to properly create the index.
//
// This implements the NeedsInputser interface.
func (idx *AddrIndex) NeedsInputs() bool {
	return true
}

// Init is only provided to satisfy the Indexer interface as there is nothing to
// initialize for this index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Init(db database.DB) error {
	idx.db = db
	return nil
}

// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Key() []byte {
	return addrIndexKey
}

// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Name() string {
	return addrIndexName
}

// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the address
// index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Create(dbTx database.Tx) error {
	_, err := dbTx.Metadata().CreateBucket(addrIndexKey)
	return err
}

// writeIndexData represents the address index data to be written for one block.
// It consists of the address mapped to an ordered list of the transactions
// that involve the address in the block. It is ordered so the transactions can
// be stored in the order they appear in the block.
type writeIndexData map[[addrKeySize]byte][]int

// indexPkScript extracts all standard addresses from the passed public key
// script and maps each of them to the associated transaction using the passed
// map.
func (idx *AddrIndex) indexPkScript(data writeIndexData, pkScript []byte, txIdx int) {
	// Nothing to index if the script is non-standard or otherwise doesn't
	// contain any addresses.
	_, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript,
		idx.dagParams)
	if err != nil || len(addrs) == 0 {
		return
	}

	for _, addr := range addrs {
		addrKey, err := addrToKey(addr)
		if err != nil {
			// Ignore unsupported address types.
			continue
		}

		// Avoid inserting the transaction more than once. Since the
		// transactions are indexed serially any duplicates will be
		// indexed in a row, so checking the most recent entry for the
		// address is enough to detect duplicates.
		indexedTxns := data[addrKey]
		numTxns := len(indexedTxns)
		if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
			continue
		}
		indexedTxns = append(indexedTxns, txIdx)
		data[addrKey] = indexedTxns
	}
}

// indexBlock extracts all of the standard addresses from all of the transactions
// in the passed block and maps each of them to the associated transaction using
// the passed map.
func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *blockdag.BlockDAG) {
	for txIdx, tx := range block.Transactions() {
		// Coinbases do not reference any inputs. Since the block is
		// required to have already gone through full validation, it has
		// already been proven that the first transaction in the block is
		// a coinbase, and the second one is a fee transaction.
		if txIdx > 1 {
			for _, txIn := range tx.MsgTx().TxIn {
				// The UTXO should always have the input since
				// the index contract requires it, however, be
				// safe and simply ignore any missing entries.
				entry, ok := dag.GetUTXOEntry(txIn.PreviousOutPoint)
				if !ok {
					continue
				}

				idx.indexPkScript(data, entry.PkScript(), txIdx)
			}
		}

		for _, txOut := range tx.MsgTx().TxOut {
			idx.indexPkScript(data, txOut.PkScript, txIdx)
		}
	}
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData) error {
	// The offset and length of the transactions within the serialized
	// block.
	txLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	// Get the internal block ID associated with the block.
	blockID, err := dbFetchBlockIDByHash(dbTx, block.Hash())
	if err != nil {
		return err
	}

	// Build all of the address to transaction mappings in a local map.
	addrsToTxns := make(writeIndexData)
	idx.indexBlock(addrsToTxns, block, dag)

	// Add all of the index entries for each address.
	addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
	for addrKey, txIdxs := range addrsToTxns {
		for _, txIdx := range txIdxs {
			err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
				blockID, txLocs[txIdx])
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain. This indexer removes the address mappings
// each transaction in the block involves.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) DisconnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG) error {
	// Build all of the address to transaction mappings in a local map.
	addrsToTxns := make(writeIndexData)
	idx.indexBlock(addrsToTxns, block, dag)

	// Remove all of the index entries for each address.
	bucket := dbTx.Metadata().Bucket(addrIndexKey)
	for addrKey, txIdxs := range addrsToTxns {
		err := dbRemoveAddrIndexEntries(bucket, addrKey, len(txIdxs))
		if err != nil {
			return err
		}
	}

	return nil
}

// TxRegionsForAddress returns a slice of block regions which identify each
// transaction that involves the passed address according to the specified
// number to skip, number requested, and whether or not the results should be
// reversed. It also returns the number actually skipped since it could be less
// in the case where there are not enough entries.
//
// NOTE: These results only include transactions confirmed in blocks. See the
// UnconfirmedTxnsForAddress method for obtaining unconfirmed transactions
// that involve a given address.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr util.Address, numToSkip, numRequested uint32, reverse bool) ([]database.BlockRegion, uint32, error) {
	addrKey, err := addrToKey(addr)
	if err != nil {
		return nil, 0, err
	}

	var regions []database.BlockRegion
	var skipped uint32
	err = idx.db.View(func(dbTx database.Tx) error {
		// Create closure to lookup the block hash given the ID using
		// the database transaction.
		fetchBlockHash := func(id []byte) (*daghash.Hash, error) {
			// Deserialize and populate the result.
			return dbFetchBlockHashBySerializedID(dbTx, id)
		}

		var err error
		addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
		regions, skipped, err = dbFetchAddrIndexEntries(addrIdxBucket,
			addrKey, numToSkip, numRequested, reverse,
			fetchBlockHash)
		return err
	})

	return regions, skipped, err
}
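A caller-side sketch (variable names hypothetical; FetchBlockRegions is assumed to be part of this codebase's database.Tx interface, as in upstream btcd): fetch up to the 100 most recent confirmed regions for an address and load the raw transaction bytes.

```go
err := db.View(func(dbTx database.Tx) error {
	regions, skipped, err := addrIndex.TxRegionsForAddress(dbTx, addr, 0, 100, true)
	if err != nil {
		return err
	}
	_ = skipped

	// Each region identifies a serialized transaction inside its block.
	serializedTxns, err := dbTx.FetchBlockRegions(regions)
	if err != nil {
		return err
	}
	_ = serializedTxns // decode with wire.MsgTx as needed
	return nil
})
```
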
// indexUnconfirmedAddresses modifies the unconfirmed (memory-only) address
// index to include mappings for the addresses encoded by the passed public key
// script to the transaction.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) indexUnconfirmedAddresses(pkScript []byte, tx *util.Tx) {
	// The error is ignored here since the only reason it can fail is if the
	// script fails to parse and it was already validated before being
	// admitted to the mempool.
	_, addresses, _, _ := txscript.ExtractPkScriptAddrs(pkScript,
		idx.dagParams)
	for _, addr := range addresses {
		// Ignore unsupported address types.
		addrKey, err := addrToKey(addr)
		if err != nil {
			continue
		}

		// Add a mapping from the address to the transaction.
		idx.unconfirmedLock.Lock()
		addrIndexEntry := idx.txnsByAddr[addrKey]
		if addrIndexEntry == nil {
			addrIndexEntry = make(map[daghash.TxID]*util.Tx)
			idx.txnsByAddr[addrKey] = addrIndexEntry
		}
		addrIndexEntry[*tx.ID()] = tx

		// Add a mapping from the transaction to the address.
		addrsByTxEntry := idx.addrsByTx[*tx.ID()]
		if addrsByTxEntry == nil {
			addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
			idx.addrsByTx[*tx.ID()] = addrsByTxEntry
		}
		addrsByTxEntry[addrKey] = struct{}{}
		idx.unconfirmedLock.Unlock()
	}
}

// AddUnconfirmedTx adds all addresses related to the transaction to the
// unconfirmed (memory-only) address index.
//
// NOTE: This transaction MUST have already been validated by the memory pool
// before calling this function with it and have all of the inputs available in
// the provided utxo view. Failure to do so could result in some or all
// addresses not being indexed.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) AddUnconfirmedTx(tx *util.Tx, utxoSet blockdag.UTXOSet) {
	// Index addresses of all referenced previous transaction outputs.
	//
	// The existence checks are elided since this is only called after the
	// transaction has already been validated and thus all inputs are
	// already known to exist.
	for _, txIn := range tx.MsgTx().TxIn {
		entry, ok := utxoSet.Get(txIn.PreviousOutPoint)
		if !ok {
			// Ignore missing entries. This should never happen
			// in practice since the function comments specifically
			// call out all inputs must be available.
			continue
		}
		idx.indexUnconfirmedAddresses(entry.PkScript(), tx)
	}

	// Index addresses of all created outputs.
	for _, txOut := range tx.MsgTx().TxOut {
		idx.indexUnconfirmedAddresses(txOut.PkScript, tx)
	}
}

// RemoveUnconfirmedTx removes the passed transaction from the unconfirmed
// (memory-only) address index.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) RemoveUnconfirmedTx(txID *daghash.TxID) {
	idx.unconfirmedLock.Lock()
	defer idx.unconfirmedLock.Unlock()

	// Remove all address references to the transaction from the address
	// index and remove the entry for the address altogether if it no longer
	// references any transactions.
	for addrKey := range idx.addrsByTx[*txID] {
		delete(idx.txnsByAddr[addrKey], *txID)
		if len(idx.txnsByAddr[addrKey]) == 0 {
			delete(idx.txnsByAddr, addrKey)
		}
	}

	// Remove the entry from the transaction to address lookup map as well.
	delete(idx.addrsByTx, *txID)
}

// UnconfirmedTxnsForAddress returns all transactions currently in the
// unconfirmed (memory-only) address index that involve the passed address.
// Unsupported address types are ignored and will result in no results.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
	// Ignore unsupported address types.
	addrKey, err := addrToKey(addr)
	if err != nil {
		return nil
	}

	// Protect concurrent access.
	idx.unconfirmedLock.RLock()
	defer idx.unconfirmedLock.RUnlock()

	// Return a new slice with the results if there are any. This ensures
	// safe concurrency.
	if txns, exists := idx.txnsByAddr[addrKey]; exists {
		addressTxns := make([]*util.Tx, 0, len(txns))
		for _, tx := range txns {
			addressTxns = append(addressTxns, tx)
		}
		return addressTxns
	}

	return nil
}
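An illustrative mempool flow (variable names hypothetical): a validated transaction's addresses are indexed while unconfirmed, queried, and dropped once the transaction is mined.

```go
// After mempool validation, with all inputs available in utxoSet:
addrIndex.AddUnconfirmedTx(tx, utxoSet)

// Later, e.g. while serving a query:
unconfirmed := addrIndex.UnconfirmedTxnsForAddress(addr)
fmt.Printf("%d unconfirmed transactions involve this address\n", len(unconfirmed))

// Once the transaction is confirmed in a block:
addrIndex.RemoveUnconfirmedTx(tx.ID())
```
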
// NewAddrIndex returns a new instance of an indexer that is used to create a
// mapping of all addresses in the blockchain to the respective transactions
// that involve them.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewAddrIndex(dagParams *dagconfig.Params) *AddrIndex {
	return &AddrIndex{
		dagParams:  dagParams,
		txnsByAddr: make(map[[addrKeySize]byte]map[daghash.TxID]*util.Tx),
		addrsByTx:  make(map[daghash.TxID]map[[addrKeySize]byte]struct{}),
	}
}

// DropAddrIndex drops the address index from the provided database if it
// exists.
func DropAddrIndex(db database.DB, interrupt <-chan struct{}) error {
	return dropIndex(db, addrIndexKey, addrIndexName, interrupt)
}
@@ -1,276 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"bytes"
	"fmt"
	"testing"

	"github.com/daglabs/btcd/wire"
)

// addrIndexBucket provides a mock address index database bucket by implementing
// the internalBucket interface.
type addrIndexBucket struct {
	levels map[[levelKeySize]byte][]byte
}

// Clone returns a deep copy of the mock address index bucket.
func (b *addrIndexBucket) Clone() *addrIndexBucket {
	levels := make(map[[levelKeySize]byte][]byte)
	for k, v := range b.levels {
		vCopy := make([]byte, len(v))
		copy(vCopy, v)
		levels[k] = vCopy
	}
	return &addrIndexBucket{levels: levels}
}

// Get returns the value associated with the key from the mock address index
// bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Get(key []byte) []byte {
	var levelKey [levelKeySize]byte
	copy(levelKey[:], key)
	return b.levels[levelKey]
}

// Put stores the provided key/value pair to the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Put(key []byte, value []byte) error {
	var levelKey [levelKeySize]byte
	copy(levelKey[:], key)
	b.levels[levelKey] = value
	return nil
}

// Delete removes the provided key from the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Delete(key []byte) error {
	var levelKey [levelKeySize]byte
	copy(levelKey[:], key)
	delete(b.levels, levelKey)
	return nil
}
// printLevels returns a string with a visual representation of the provided
|
||||
// address key taking into account the max size of each level. It is useful
|
||||
// when creating and debugging test cases.
|
||||
func (b *addrIndexBucket) printLevels(addrKey [addrKeySize]byte) string {
|
||||
highestLevel := uint8(0)
|
||||
for k := range b.levels {
|
||||
if !bytes.Equal(k[:levelOffset], addrKey[:]) {
|
||||
continue
|
||||
}
|
||||
level := uint8(k[levelOffset])
|
||||
if level > highestLevel {
|
||||
highestLevel = level
|
||||
}
|
||||
}
|
||||
|
||||
var levelBuf bytes.Buffer
|
||||
_, _ = levelBuf.WriteString("\n")
|
||||
maxEntries := level0MaxEntries
|
||||
for level := uint8(0); level <= highestLevel; level++ {
|
||||
data := b.levels[keyForLevel(addrKey, level)]
|
||||
numEntries := len(data) / txEntrySize
|
||||
for i := 0; i < numEntries; i++ {
|
||||
start := i * txEntrySize
|
||||
num := byteOrder.Uint32(data[start:])
|
||||
_, _ = levelBuf.WriteString(fmt.Sprintf("%02d ", num))
|
||||
}
|
||||
for i := numEntries; i < maxEntries; i++ {
|
||||
_, _ = levelBuf.WriteString("_ ")
|
||||
}
|
||||
_, _ = levelBuf.WriteString("\n")
|
||||
maxEntries *= 2
|
||||
}
|
||||
|
||||
return levelBuf.String()
|
||||
}
|
||||
|
||||
// sanityCheck ensures that all data stored in the bucket for the given address
|
||||
// adheres to the level-based rules described by the address index
|
||||
// documentation.
|
||||
func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal int) error {
|
||||
// Find the highest level for the key.
|
||||
highestLevel := uint8(0)
|
||||
for k := range b.levels {
|
||||
if !bytes.Equal(k[:levelOffset], addrKey[:]) {
|
||||
continue
|
||||
}
|
||||
level := uint8(k[levelOffset])
|
||||
if level > highestLevel {
|
||||
highestLevel = level
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure the expected total number of entries are present and that
|
||||
// all levels adhere to the rules described in the address index
|
||||
// documentation.
|
||||
var totalEntries int
|
||||
maxEntries := level0MaxEntries
|
||||
for level := uint8(0); level <= highestLevel; level++ {
|
||||
// Level 0 can'have more entries than the max allowed if the
|
||||
// levels after it have data and it can't be empty. All other
|
||||
// levels must either be half full or full.
|
||||
data := b.levels[keyForLevel(addrKey, level)]
|
||||
numEntries := len(data) / txEntrySize
|
||||
totalEntries += numEntries
|
||||
if level == 0 {
|
||||
if (highestLevel != 0 && numEntries == 0) ||
|
||||
numEntries > maxEntries {
|
||||
|
||||
return fmt.Errorf("level %d has %d entries",
|
||||
level, numEntries)
|
||||
}
|
||||
} else if numEntries != maxEntries && numEntries != maxEntries/2 {
|
||||
return fmt.Errorf("level %d has %d entries", level,
|
||||
numEntries)
|
||||
}
|
||||
maxEntries *= 2
|
||||
}
|
||||
if totalEntries != expectedTotal {
|
||||
return fmt.Errorf("expected %d entries - got %d", expectedTotal,
|
||||
totalEntries)
|
||||
}
|
||||
|
||||
// Ensure all of the numbers are in order starting from the highest
|
||||
// level moving to the lowest level.
|
||||
expectedNum := uint32(0)
|
||||
for level := highestLevel + 1; level > 0; level-- {
|
||||
data := b.levels[keyForLevel(addrKey, level)]
|
||||
numEntries := len(data) / txEntrySize
|
||||
for i := 0; i < numEntries; i++ {
|
||||
start := i * txEntrySize
|
||||
num := byteOrder.Uint32(data[start:])
|
||||
if num != expectedNum {
|
||||
return fmt.Errorf("level %d offset %d does "+
|
||||
"not contain the expected number of "+
|
||||
"%d - got %d", level, i, num,
|
||||
expectedNum)
|
||||
}
|
||||
expectedNum++
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TestAddrIndexLevels ensures that adding and deleting entries to the address
|
||||
// index creates multiple levels as described by the address index
|
||||
// documentation.
|
||||
func TestAddrIndexLevels(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
key [addrKeySize]byte
|
||||
numInsert int
|
||||
printLevels bool // Set to help debug a specific test.
|
||||
}{
|
||||
{
|
||||
name: "level 0 not full",
|
||||
numInsert: level0MaxEntries - 1,
|
||||
},
|
||||
{
|
||||
name: "level 1 half",
|
||||
numInsert: level0MaxEntries + 1,
|
||||
},
|
||||
{
|
||||
name: "level 1 full",
|
||||
numInsert: level0MaxEntries*2 + 1,
|
||||
},
|
||||
{
|
||||
name: "level 2 half, level 1 half",
|
||||
numInsert: level0MaxEntries*3 + 1,
|
||||
},
|
||||
{
|
||||
name: "level 2 half, level 1 full",
|
||||
numInsert: level0MaxEntries*4 + 1,
|
||||
},
|
||||
{
|
||||
name: "level 2 full, level 1 half",
|
||||
numInsert: level0MaxEntries*5 + 1,
|
||||
},
|
||||
{
|
||||
name: "level 2 full, level 1 full",
|
||||
numInsert: level0MaxEntries*6 + 1,
|
||||
},
|
||||
{
|
||||
name: "level 3 half, level 2 half, level 1 half",
|
||||
numInsert: level0MaxEntries*7 + 1,
|
||||
},
|
||||
{
|
||||
name: "level 3 full, level 2 half, level 1 full",
|
||||
numInsert: level0MaxEntries*12 + 1,
|
||||
},
|
||||
}
|
||||
|
||||
nextTest:
|
||||
for testNum, test := range tests {
|
||||
// Insert entries in order.
|
||||
populatedBucket := &addrIndexBucket{
|
||||
levels: make(map[[levelKeySize]byte][]byte),
|
||||
}
|
||||
for i := 0; i < test.numInsert; i++ {
|
||||
txLoc := wire.TxLoc{TxStart: i * 2}
|
||||
err := dbPutAddrIndexEntry(populatedBucket, test.key,
|
||||
uint32(i), txLoc)
|
||||
if err != nil {
|
||||
t.Errorf("dbPutAddrIndexEntry #%d (%s) - "+
|
||||
"unexpected error: %v", testNum,
|
||||
test.name, err)
|
||||
continue nextTest
|
||||
}
|
||||
}
|
||||
if test.printLevels {
|
||||
t.Log(populatedBucket.printLevels(test.key))
|
||||
}
|
||||
|
||||
// Delete entries from the populated bucket until all entries
|
||||
// have been deleted. The bucket is reset to the fully
|
||||
// populated bucket on each iteration so every combination is
|
||||
// tested. Notice the upper limit purposes exceeds the number
|
||||
// of entries to ensure attempting to delete more entries than
|
||||
// there are works correctly.
|
||||
for numDelete := 0; numDelete <= test.numInsert+1; numDelete++ {
|
||||
// Clone populated bucket to run each delete against.
|
||||
bucket := populatedBucket.Clone()
|
||||
|
||||
// Remove the number of entries for this iteration.
|
||||
err := dbRemoveAddrIndexEntries(bucket, test.key,
|
||||
numDelete)
|
||||
if err != nil {
|
||||
if numDelete <= test.numInsert {
|
||||
t.Errorf("dbRemoveAddrIndexEntries (%s) "+
|
||||
" delete %d - unexpected error: "+
|
||||
"%v", test.name, numDelete, err)
|
||||
continue nextTest
|
||||
}
|
||||
}
|
||||
if test.printLevels {
|
||||
t.Log(bucket.printLevels(test.key))
|
||||
}
|
||||
|
||||
// Sanity check the levels to ensure the adhere to all
|
||||
// rules.
|
||||
numExpected := test.numInsert
|
||||
if numDelete <= test.numInsert {
|
||||
numExpected -= numDelete
|
||||
}
|
||||
err = bucket.sanityCheck(test.key, numExpected)
|
||||
if err != nil {
|
||||
t.Errorf("sanity check fail (%s) delete %d: %v",
|
||||
test.name, numDelete, err)
|
||||
continue nextTest
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
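
The test cases above lean on the doubling rule from the address index documentation: level n holds level0MaxEntries * 2^n entries, and every level above 0 is kept either half full or full. A small sketch of that capacity arithmetic (illustrative helpers, not part of the index):

// levelCapacity returns how many entries level n can hold under the
// doubling rule described in the address index documentation.
func levelCapacity(level uint8) int {
	return level0MaxEntries << level
}

// totalCapacity returns the combined capacity of levels 0 through n,
// i.e. level0MaxEntries * (2^(n+1) - 1).
func totalCapacity(highestLevel uint8) int {
	return level0MaxEntries * ((1 << (highestLevel + 1)) - 1)
}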
@@ -1,76 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"sync"
	"time"

	"github.com/btcsuite/btclog"
	"github.com/daglabs/btcd/util"
)

// blockProgressLogger provides periodic logging for other services in order
// to show users progress of certain "actions" involving some or all current
// blocks. Ex: syncing to best chain, indexing all blocks, etc.
type blockProgressLogger struct {
	receivedLogBlocks int64
	receivedLogTx     int64
	lastBlockLogTime  time.Time

	subsystemLogger btclog.Logger
	progressAction  string
	sync.Mutex
}

// newBlockProgressLogger returns a new block progress logger.
// The progress message is templated as follows:
//	{progressAction} {numProcessed} {blocks|block} in the last {timePeriod}
//	({numTxs}, height {lastBlockHeight}, {lastBlockTimeStamp})
func newBlockProgressLogger(progressMessage string, logger btclog.Logger) *blockProgressLogger {
	return &blockProgressLogger{
		lastBlockLogTime: time.Now(),
		progressAction:   progressMessage,
		subsystemLogger:  logger,
	}
}

// LogBlockHeight logs a new block height as an information message to show
// progress to the user. In order to prevent spam, it limits logging to one
// message every 10 seconds with duration and totals included.
func (b *blockProgressLogger) LogBlockHeight(block *util.Block) {
	b.Lock()
	defer b.Unlock()

	b.receivedLogBlocks++
	b.receivedLogTx += int64(len(block.MsgBlock().Transactions))

	now := time.Now()
	duration := now.Sub(b.lastBlockLogTime)
	if duration < time.Second*10 {
		return
	}

	// Truncate the duration to 10s of milliseconds.
	durationMillis := int64(duration / time.Millisecond)
	tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)

	// Log information about new block height.
	blockStr := "blocks"
	if b.receivedLogBlocks == 1 {
		blockStr = "block"
	}
	txStr := "transactions"
	if b.receivedLogTx == 1 {
		txStr = "transaction"
	}
	b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, height %d, %s)",
		b.progressAction, b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx,
		txStr, block.Height(), block.MsgBlock().Header.Timestamp)

	b.receivedLogBlocks = 0
	b.receivedLogTx = 0
	b.lastBlockLogTime = now
}
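
A usage sketch from inside this package (newBlockProgressLogger is unexported; the "Indexed" action and the blocks slice are illustrative, and the logger is obtained the same way this package's log.go does):

indexLog, _ := logger.Get(logger.SubsystemTags.INDX)
progress := newBlockProgressLogger("Indexed", indexLog)
for _, block := range blocks {
	// Prints at most one summary line every 10 seconds, e.g.:
	//   Indexed 1200 blocks in the last 10s (3500 transactions, height 45000, ...)
	progress.LogBlockHeight(block)
}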
@@ -1,352 +0,0 @@
// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"errors"

	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/gcs"
	"github.com/daglabs/btcd/util/gcs/builder"
	"github.com/daglabs/btcd/wire"
)

const (
	// cfIndexName is the human-readable name for the index.
	cfIndexName = "committed filter index"
)

// Committed filters come in two flavours: basic and extended. They are
// generated and dropped in pairs, and both are indexed by a block's hash.
// Besides holding different content, they also live in different buckets.
var (
	// cfIndexParentBucketKey is the name of the parent bucket used to house
	// the index. The rest of the buckets live below this bucket.
	cfIndexParentBucketKey = []byte("cfindexparentbucket")

	// cfIndexKeys is an array of db bucket names used to house indexes of
	// block hashes to cfilters.
	cfIndexKeys = [][]byte{
		[]byte("cf0byhashidx"),
		[]byte("cf1byhashidx"),
	}

	// cfHeaderKeys is an array of db bucket names used to house indexes of
	// block hashes to cf headers.
	cfHeaderKeys = [][]byte{
		[]byte("cf0headerbyhashidx"),
		[]byte("cf1headerbyhashidx"),
	}

	// cfHashKeys is an array of db bucket names used to house indexes of
	// block hashes to cf hashes.
	cfHashKeys = [][]byte{
		[]byte("cf0hashbyhashidx"),
		[]byte("cf1hashbyhashidx"),
	}

	maxFilterType = uint8(len(cfHeaderKeys) - 1)
)

// dbFetchFilterIdxEntry retrieves a data blob from the filter index database.
// An entry's absence is not considered an error.
func dbFetchFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash) ([]byte, error) {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Get(h[:]), nil
}

// dbStoreFilterIdxEntry stores a data blob in the filter index database.
func dbStoreFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash, f []byte) error {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Put(h[:], f)
}

// dbDeleteFilterIdxEntry deletes a data blob from the filter index database.
func dbDeleteFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash) error {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Delete(h[:])
}

// CfIndex implements a committed filter (cf) by hash index.
type CfIndex struct {
	db        database.DB
	dagParams *dagconfig.Params
}

// Ensure the CfIndex type implements the Indexer interface.
var _ Indexer = (*CfIndex)(nil)

// Init initializes the hash-based cf index. This is part of the Indexer
// interface.
func (idx *CfIndex) Init(db database.DB) error {
	idx.db = db
	return nil
}

// Key returns the database key to use for the index as a byte slice. This is
// part of the Indexer interface.
func (idx *CfIndex) Key() []byte {
	return cfIndexParentBucketKey
}

// Name returns the human-readable name of the index. This is part of the
// Indexer interface.
func (idx *CfIndex) Name() string {
	return cfIndexName
}

// Create is invoked when the indexer manager determines the index needs to
// be created for the first time. It creates buckets for the two hash-based cf
// indexes (simple, extended).
func (idx *CfIndex) Create(dbTx database.Tx) error {
	meta := dbTx.Metadata()

	cfIndexParentBucket, err := meta.CreateBucket(cfIndexParentBucketKey)
	if err != nil {
		return err
	}

	for _, bucketName := range cfIndexKeys {
		_, err = cfIndexParentBucket.CreateBucket(bucketName)
		if err != nil {
			return err
		}
	}

	for _, bucketName := range cfHeaderKeys {
		_, err = cfIndexParentBucket.CreateBucket(bucketName)
		if err != nil {
			return err
		}
	}

	for _, bucketName := range cfHashKeys {
		_, err = cfIndexParentBucket.CreateBucket(bucketName)
		if err != nil {
			return err
		}
	}

	return nil
}

// storeFilter stores a given filter, and performs the steps needed to
// generate the filter's header.
func storeFilter(dbTx database.Tx, block *util.Block, f *gcs.Filter,
	filterType wire.FilterType) error {
	if uint8(filterType) > maxFilterType {
		return errors.New("unsupported filter type")
	}

	// Figure out which buckets to use.
	fkey := cfIndexKeys[filterType]
	hkey := cfHeaderKeys[filterType]
	hashkey := cfHashKeys[filterType]

	// Start by storing the filter.
	h := block.Hash()
	filterBytes, err := f.NBytes()
	if err != nil {
		return err
	}
	err = dbStoreFilterIdxEntry(dbTx, fkey, h, filterBytes)
	if err != nil {
		return err
	}

	// Next store the filter hash.
	filterHash, err := builder.GetFilterHash(f)
	if err != nil {
		return err
	}
	err = dbStoreFilterIdxEntry(dbTx, hashkey, h, filterHash[:])
	if err != nil {
		return err
	}

	// Then fetch the previous block's filter header.
	var prevHeader *daghash.Hash
	header := block.MsgBlock().Header
	if header.IsGenesis() {
		prevHeader = &daghash.ZeroHash
	} else {
		ph := header.SelectedParentHash()
		pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph)
		if err != nil {
			return err
		}

		// Construct the new block's filter header, and store it.
		prevHeader, err = daghash.NewHash(pfh)
		if err != nil {
			return err
		}
	}

	fh, err := builder.MakeHeaderForFilter(f, prevHeader)
	if err != nil {
		return err
	}
	return dbStoreFilterIdxEntry(dbTx, hkey, h, fh[:])
}
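
storeFilter chains each block's filter header to the selected parent's header via builder.MakeHeaderForFilter. As a rough sketch of that construction (an assumption here: the BIP157-style rule and the existence of daghash.DoubleHashH, as in the upstream chainhash package, rather than this builder's exact code):

// chainedHeader illustrates the usual committed-filter header rule:
// header(block) = doubleSHA256(filterHash(block) || header(parent)).
func chainedHeader(filterHash, parentHeader *daghash.Hash) daghash.Hash {
	return daghash.DoubleHashH(append(filterHash[:], parentHeader[:]...))
}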
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a hash-to-cf mapping for
// every passed block. This is part of the Indexer interface.
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *util.Block,
	_ *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData) error {

	f, err := builder.BuildBasicFilter(block.MsgBlock())
	if err != nil {
		return err
	}

	err = storeFilter(dbTx, block, f, wire.GCSFilterRegular)
	if err != nil {
		return err
	}

	f, err = builder.BuildExtFilter(block.MsgBlock())
	if err != nil {
		return err
	}

	return storeFilter(dbTx, block, f, wire.GCSFilterExtended)
}

// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain. This indexer removes the hash-to-cf
// mapping for every passed block. This is part of the Indexer interface.
func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *util.Block,
	_ *blockdag.BlockDAG) error {

	for _, key := range cfIndexKeys {
		err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
		if err != nil {
			return err
		}
	}

	for _, key := range cfHeaderKeys {
		err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
		if err != nil {
			return err
		}
	}

	for _, key := range cfHashKeys {
		err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
		if err != nil {
			return err
		}
	}

	return nil
}

// entryByBlockHash fetches a filter index entry of a particular type
// (e.g. filter, filter header, etc.) for a filter type and block hash.
func (idx *CfIndex) entryByBlockHash(filterTypeKeys [][]byte,
	filterType wire.FilterType, h *daghash.Hash) ([]byte, error) {

	if uint8(filterType) > maxFilterType {
		return nil, errors.New("unsupported filter type")
	}
	key := filterTypeKeys[filterType]

	var entry []byte
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		entry, err = dbFetchFilterIdxEntry(dbTx, key, h)
		return err
	})
	return entry, err
}

// entriesByBlockHashes batch fetches a filter index entry of a particular type
// (e.g. filter, filter header, etc.) for a filter type and slice of block hashes.
func (idx *CfIndex) entriesByBlockHashes(filterTypeKeys [][]byte,
	filterType wire.FilterType, blockHashes []*daghash.Hash) ([][]byte, error) {

	if uint8(filterType) > maxFilterType {
		return nil, errors.New("unsupported filter type")
	}
	key := filterTypeKeys[filterType]

	entries := make([][]byte, 0, len(blockHashes))
	err := idx.db.View(func(dbTx database.Tx) error {
		for _, blockHash := range blockHashes {
			entry, err := dbFetchFilterIdxEntry(dbTx, key, blockHash)
			if err != nil {
				return err
			}
			entries = append(entries, entry)
		}
		return nil
	})
	return entries, err
}

// FilterByBlockHash returns the serialized contents of a block's basic or
// extended committed filter.
func (idx *CfIndex) FilterByBlockHash(h *daghash.Hash,
	filterType wire.FilterType) ([]byte, error) {
	return idx.entryByBlockHash(cfIndexKeys, filterType, h)
}

// FiltersByBlockHashes returns the serialized contents of a block's basic or
// extended committed filter for a set of blocks by hash.
func (idx *CfIndex) FiltersByBlockHashes(blockHashes []*daghash.Hash,
	filterType wire.FilterType) ([][]byte, error) {
	return idx.entriesByBlockHashes(cfIndexKeys, filterType, blockHashes)
}

// FilterHeaderByBlockHash returns the serialized contents of a block's basic
// or extended committed filter header.
func (idx *CfIndex) FilterHeaderByBlockHash(h *daghash.Hash,
	filterType wire.FilterType) ([]byte, error) {
	return idx.entryByBlockHash(cfHeaderKeys, filterType, h)
}

// FilterHeadersByBlockHashes returns the serialized contents of a block's basic
// or extended committed filter header for a set of blocks by hash.
func (idx *CfIndex) FilterHeadersByBlockHashes(blockHashes []*daghash.Hash,
	filterType wire.FilterType) ([][]byte, error) {
	return idx.entriesByBlockHashes(cfHeaderKeys, filterType, blockHashes)
}

// FilterHashByBlockHash returns the serialized contents of a block's basic
// or extended committed filter hash.
func (idx *CfIndex) FilterHashByBlockHash(h *daghash.Hash,
	filterType wire.FilterType) ([]byte, error) {
	return idx.entryByBlockHash(cfHashKeys, filterType, h)
}

// FilterHashesByBlockHashes returns the serialized contents of a block's basic
// or extended committed filter hash for a set of blocks by hash.
func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*daghash.Hash,
	filterType wire.FilterType) ([][]byte, error) {
	return idx.entriesByBlockHashes(cfHashKeys, filterType, blockHashes)
}

// NewCfIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all blocks in the blockchain to their respective
// committed filters.
//
// It implements the Indexer interface which plugs into the IndexManager that
// in turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewCfIndex(dagParams *dagconfig.Params) *CfIndex {
	return &CfIndex{dagParams: dagParams}
}

// DropCfIndex drops the CF index from the provided database if it exists.
func DropCfIndex(db database.DB, interrupt <-chan struct{}) error {
	return dropIndex(db, cfIndexParentBucketKey, cfIndexName, interrupt)
}
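
A minimal query sketch using only the accessors above (assumes an initialized *CfIndex and a known block hash):

// fetchBasicFilter returns the serialized regular (basic) filter and its
// header for the given block.
func fetchBasicFilter(cfIdx *indexers.CfIndex, h *daghash.Hash) ([]byte, []byte, error) {
	filter, err := cfIdx.FilterByBlockHash(h, wire.GCSFilterRegular)
	if err != nil {
		return nil, nil, err
	}
	header, err := cfIdx.FilterHeaderByBlockHash(h, wire.GCSFilterRegular)
	if err != nil {
		return nil, nil, err
	}
	return filter, header, nil
}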
@@ -1,104 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package indexers implements optional block chain indexes.
*/
package indexers

import (
	"encoding/binary"
	"errors"

	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
)

var (
	// byteOrder is the preferred byte order used for serializing numeric
	// fields for storage in the database.
	byteOrder = binary.LittleEndian

	// errInterruptRequested indicates that an operation was cancelled due
	// to a user-requested interrupt.
	errInterruptRequested = errors.New("interrupt requested")
)

// NeedsInputser provides a generic interface for an indexer to specify that it
// requires the ability to look up inputs for a transaction.
type NeedsInputser interface {
	NeedsInputs() bool
}

// Indexer provides a generic interface for an indexer that is managed by an
// index manager such as the Manager type provided by this package.
type Indexer interface {
	// Key returns the key of the index as a byte slice.
	Key() []byte

	// Name returns the human-readable name of the index.
	Name() string

	// Create is invoked when the indexer manager determines the index needs
	// to be created for the first time.
	Create(dbTx database.Tx) error

	// Init is invoked when the index manager is first initializing the
	// index. This differs from the Create method in that it is called on
	// every load, including the case the index was just created.
	Init(db database.DB) error

	// ConnectBlock is invoked when the index manager is notified that a new
	// block has been connected to the DAG.
	ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData) error
}

// AssertError identifies an error that indicates an internal code consistency
// issue and should be treated as a critical and unrecoverable error.
type AssertError string

// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e AssertError) Error() string {
	return "assertion failed: " + string(e)
}

// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string

// Error implements the error interface.
func (e errDeserialize) Error() string {
	return string(e)
}

// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
	_, ok := err.(errDeserialize)
	return ok
}

// internalBucket is an abstraction over a database bucket. It is used to make
// the code easier to test since it allows mock objects in the tests to only
// implement these functions instead of everything a database.Bucket supports.
type internalBucket interface {
	Get(key []byte) []byte
	Put(key []byte, value []byte) error
	Delete(key []byte) error
}

// interruptRequested returns true when the provided channel has been closed.
// This simplifies early shutdown slightly since the caller can just use an if
// statement instead of a select.
func interruptRequested(interrupted <-chan struct{}) bool {
	select {
	case <-interrupted:
		return true
	default:
	}

	return false
}
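
Implementing Indexer only requires the five methods above. This no-op sketch (all names illustrative) shows the minimum wiring a custom index needs before it can be handed to the Manager:

type noopIndex struct{}

// Key and Name identify the index in the database and in log output.
func (idx *noopIndex) Key() []byte  { return []byte("noopidx") }
func (idx *noopIndex) Name() string { return "noop index" }

// Create runs once, inside the transaction that creates the index tip.
func (idx *noopIndex) Create(dbTx database.Tx) error {
	_, err := dbTx.Metadata().CreateBucket([]byte("noopidx"))
	return err
}

// Init runs on every load, including right after Create.
func (idx *noopIndex) Init(db database.DB) error { return nil }

// ConnectBlock is called for each block connected to the DAG.
func (idx *noopIndex) ConnectBlock(dbTx database.Tx, block *util.Block,
	dag *blockdag.BlockDAG, acceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	return nil
}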
@@ -1,20 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"github.com/btcsuite/btclog"
	"github.com/daglabs/btcd/logger"
)

// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger

// The default amount of logging is none.
func init() {
	log, _ = logger.Get(logger.SubsystemTags.INDX)
}
@@ -1,343 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
)

var (
	// indexTipsBucketName is the name of the db bucket used to house the
	// current tip of each index.
	indexTipsBucketName = []byte("idxtips")
)

// Manager defines an index manager that manages multiple optional indexes and
// implements the blockchain.IndexManager interface so it can be seamlessly
// plugged into normal chain processing.
type Manager struct {
	db             database.DB
	enabledIndexes []Indexer
}

// Ensure the Manager type implements the blockchain.IndexManager interface.
var _ blockdag.IndexManager = (*Manager)(nil)

// indexDropKey returns the key for an index which indicates it is in the
// process of being dropped.
func indexDropKey(idxKey []byte) []byte {
	dropKey := make([]byte, len(idxKey)+1)
	dropKey[0] = 'd'
	copy(dropKey[1:], idxKey)
	return dropKey
}

// maybeFinishDrops determines if each of the enabled indexes are in the middle
// of being dropped and finishes dropping them when they are. This is necessary
// because dropping an index has to be done in several atomic steps rather than
// one big atomic step due to the massive number of entries.
func (m *Manager) maybeFinishDrops(interrupt <-chan struct{}) error {
	indexNeedsDrop := make([]bool, len(m.enabledIndexes))
	err := m.db.View(func(dbTx database.Tx) error {
		// None of the indexes needs to be dropped if the index tips
		// bucket hasn't been created yet.
		indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
		if indexesBucket == nil {
			return nil
		}

		// Mark the indexer as requiring a drop if one is already in
		// progress.
		for i, indexer := range m.enabledIndexes {
			dropKey := indexDropKey(indexer.Key())
			if indexesBucket.Get(dropKey) != nil {
				indexNeedsDrop[i] = true
			}
		}

		return nil
	})
	if err != nil {
		return err
	}

	if interruptRequested(interrupt) {
		return errInterruptRequested
	}

	// Finish dropping any of the enabled indexes that are already in the
	// middle of being dropped.
	for i, indexer := range m.enabledIndexes {
		if !indexNeedsDrop[i] {
			continue
		}

		log.Infof("Resuming %s drop", indexer.Name())
		err := dropIndex(m.db, indexer.Key(), indexer.Name(), interrupt)
		if err != nil {
			return err
		}
	}

	return nil
}
// maybeCreateIndexes determines if each of the enabled indexes have already
// been created and creates them if not.
func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error {
	indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
	for _, indexer := range m.enabledIndexes {
		// Nothing to do if the index tip already exists.
		idxKey := indexer.Key()
		if indexesBucket.Get(idxKey) != nil {
			continue
		}

		// The tip for the index does not exist, so create it and
		// invoke the create callback for the index so it can perform
		// any one-time initialization it requires.
		if err := indexer.Create(dbTx); err != nil {
			return err
		}

		// TODO (Mike): this is a temporary solution to prevent the node
		// from refusing to start because it thinks the indexers are not
		// initialized. Indexers, however, do not work properly, and a
		// general solution for their operation is required.
		if err := indexesBucket.Put(idxKey, []byte{0}); err != nil {
			return err
		}
	}

	return nil
}
// Init initializes the enabled indexes. This is called during chain
// initialization and primarily consists of catching up all indexes to the
// current best chain tip. This is necessary since each index can be disabled
// and re-enabled at any time and attempting to catch-up indexes at the same
// time new blocks are being downloaded would lead to an overall longer time to
// catch up due to the I/O contention.
//
// This is part of the blockchain.IndexManager interface.
func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) error {
	// Nothing to do when no indexes are enabled.
	if len(m.enabledIndexes) == 0 {
		return nil
	}

	if interruptRequested(interrupt) {
		return errInterruptRequested
	}

	m.db = db

	// Finish any drops that were previously interrupted.
	if err := m.maybeFinishDrops(interrupt); err != nil {
		return err
	}

	// Create the initial state for the indexes as needed.
	err := m.db.Update(func(dbTx database.Tx) error {
		// Create the bucket for the current tips as needed.
		meta := dbTx.Metadata()
		_, err := meta.CreateBucketIfNotExists(indexTipsBucketName)
		if err != nil {
			return err
		}

		return m.maybeCreateIndexes(dbTx)
	})
	if err != nil {
		return err
	}

	// Initialize each of the enabled indexes.
	for _, indexer := range m.enabledIndexes {
		if err := indexer.Init(db); err != nil {
			return err
		}
	}

	return nil
}
// ConnectBlock must be invoked when a block is extending the main chain. It
// keeps track of the state of each index it is managing, performs some sanity
// checks, and invokes each indexer.
//
// This is part of the blockchain.IndexManager interface.
func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	// Call each of the currently active optional indexes with the block
	// being connected so they can update accordingly.
	for _, index := range m.enabledIndexes {
		// Notify the indexer with the connected block so it can index it.
		if err := index.ConnectBlock(dbTx, block, dag, txsAcceptanceData); err != nil {
			return err
		}
	}
	return nil
}

// NewManager returns a new index manager with the provided indexes enabled.
//
// The manager returned satisfies the blockchain.IndexManager interface and thus
// cleanly plugs into the normal blockchain processing path.
func NewManager(enabledIndexes []Indexer) *Manager {
	return &Manager{
		enabledIndexes: enabledIndexes,
	}
}
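
A wiring sketch using the constructors shown in this diff. The blockdag.Config fields (DB, DAGParams, IndexManager) are an assumption here, mirroring the upstream btcd blockchain.Config; they are not shown in this changeset:

addrIndex := indexers.NewAddrIndex(dagParams)
cfIndex := indexers.NewCfIndex(dagParams)
indexManager := indexers.NewManager([]indexers.Indexer{addrIndex, cfIndex})

// The manager's Init and ConnectBlock are then driven by the blockdag
// package as blocks are processed.
dag, err := blockdag.New(&blockdag.Config{
	DB:           db,
	DAGParams:    dagParams,
	IndexManager: indexManager,
})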
// dropIndex drops the passed index from the database. Since indexes can be
// massive, it deletes the index in multiple database transactions in order to
// keep memory usage to reasonable levels. It also marks the drop in progress
// so that, if the drop is stopped before it is done, it can be resumed before
// the index is used again.
func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan struct{}) error {
	// Nothing to do if the index doesn't already exist.
	var needsDelete bool
	err := db.View(func(dbTx database.Tx) error {
		indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
		if indexesBucket != nil && indexesBucket.Get(idxKey) != nil {
			needsDelete = true
		}
		return nil
	})
	if err != nil {
		return err
	}
	if !needsDelete {
		log.Infof("Not dropping %s because it does not exist", idxName)
		return nil
	}

	// Mark that the index is in the process of being dropped so that it
	// can be resumed on the next start if interrupted before the process is
	// complete.
	log.Infof("Dropping all %s entries. This might take a while...",
		idxName)
	err = db.Update(func(dbTx database.Tx) error {
		indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
		return indexesBucket.Put(indexDropKey(idxKey), idxKey)
	})
	if err != nil {
		return err
	}

	// Since the indexes can be so large, attempting to simply delete
	// the bucket in a single database transaction would result in massive
	// memory usage and likely crash many systems due to ulimits. In order
	// to avoid this, use a cursor to delete a maximum number of entries out
	// of the bucket at a time. Recurse buckets depth-first to delete any
	// sub-buckets.
	const maxDeletions = 2000000
	var totalDeleted uint64

	// Recurse through all buckets in the index, cataloging each for
	// later deletion.
	var subBuckets [][][]byte
	var subBucketClosure func(database.Tx, []byte, [][]byte) error
	subBucketClosure = func(dbTx database.Tx,
		subBucket []byte, tlBucket [][]byte) error {
		// Get full bucket name and append to subBuckets for later
		// deletion.
		var bucketName [][]byte
		if len(tlBucket) == 0 {
			bucketName = append(bucketName, subBucket)
		} else {
			bucketName = append(tlBucket, subBucket)
		}
		subBuckets = append(subBuckets, bucketName)
		// Recurse sub-buckets to append to subBuckets slice.
		bucket := dbTx.Metadata()
		for _, subBucketName := range bucketName {
			bucket = bucket.Bucket(subBucketName)
		}
		return bucket.ForEachBucket(func(k []byte) error {
			return subBucketClosure(dbTx, k, bucketName)
		})
	}

	// Call subBucketClosure with top-level bucket.
	err = db.View(func(dbTx database.Tx) error {
		return subBucketClosure(dbTx, idxKey, nil)
	})
	if err != nil {
		return err
	}

	// Iterate through each sub-bucket in reverse, deepest-first, deleting
	// all keys inside them and then dropping the buckets themselves.
	for i := range subBuckets {
		bucketName := subBuckets[len(subBuckets)-1-i]
		// Delete maxDeletions key/value pairs at a time.
		for numDeleted := maxDeletions; numDeleted == maxDeletions; {
			numDeleted = 0
			err := db.Update(func(dbTx database.Tx) error {
				subBucket := dbTx.Metadata()
				for _, subBucketName := range bucketName {
					subBucket = subBucket.Bucket(subBucketName)
				}
				cursor := subBucket.Cursor()
				for ok := cursor.First(); ok; ok = cursor.Next() &&
					numDeleted < maxDeletions {

					if err := cursor.Delete(); err != nil {
						return err
					}
					numDeleted++
				}
				return nil
			})
			if err != nil {
				return err
			}

			if numDeleted > 0 {
				totalDeleted += uint64(numDeleted)
				log.Infof("Deleted %d keys (%d total) from %s",
					numDeleted, totalDeleted, idxName)
			}
		}

		if interruptRequested(interrupt) {
			return errInterruptRequested
		}

		// Drop the bucket itself.
		err = db.Update(func(dbTx database.Tx) error {
			bucket := dbTx.Metadata()
			for j := 0; j < len(bucketName)-1; j++ {
				bucket = bucket.Bucket(bucketName[j])
			}
			return bucket.DeleteBucket(bucketName[len(bucketName)-1])
		})
		if err != nil {
			return err
		}
	}

	// Call extra index specific deinitialization for the transaction index.
	if idxName == txIndexName {
		if err := dropBlockIDIndex(db); err != nil {
			return err
		}
	}

	// Remove the index tip, index bucket, and in-progress drop flag now
	// that all index entries have been removed.
	err = db.Update(func(dbTx database.Tx) error {
		meta := dbTx.Metadata()
		indexesBucket := meta.Bucket(indexTipsBucketName)
		if err := indexesBucket.Delete(idxKey); err != nil {
			return err
		}

		return indexesBucket.Delete(indexDropKey(idxKey))
	})
	if err != nil {
		return err
	}

	log.Infof("Dropped %s", idxName)
	return nil
}
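
A caller-side sketch of an interruptible drop, using DropAddrIndex from earlier in this diff. The signal wiring (imports os and os/signal) is an assumption about the surrounding program, not part of this package:

// Close the interrupt channel on Ctrl+C so a long-running drop can stop
// cleanly; the in-progress marker lets it resume on the next start.
interrupt := make(chan struct{})
go func() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	<-c
	close(interrupt)
}()

if err := indexers.DropAddrIndex(db, interrupt); err != nil {
	log.Errorf("drop failed: %s", err)
}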
@@ -1,572 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"fmt"

	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/wire"
)

const (
	// txIndexName is the human-readable name for the index.
	txIndexName = "transaction index"

	includingBlocksIndexKeyEntrySize = 8 // 4 bytes for offset + 4 bytes for transaction length

	acceptingBlocksIndexKeyEntrySize = 4 // 4 bytes for accepting block ID
)

var (
	includingBlocksIndexKey = []byte("includingblocksidx")

	acceptingBlocksIndexKey = []byte("acceptingblocksidx")

	// idByHashIndexBucketName is the name of the db bucket used to house
	// the block id -> block hash index.
	idByHashIndexBucketName = []byte("idbyhashidx")

	// hashByIDIndexBucketName is the name of the db bucket used to house
	// the block hash -> block id index.
	hashByIDIndexBucketName = []byte("hashbyididx")
)
// -----------------------------------------------------------------------------
// The transaction index consists of an entry for every transaction in the DAG.
// In order to significantly optimize the space requirements, a separate index
// is used which provides an internal mapping between each block that has been
// indexed and a unique ID for use within the hash to location mappings. The ID
// is simply a sequentially incremented uint32. This is useful because it is
// only 4 bytes versus 32-byte hashes and thus saves a ton of space in the
// index.
//
// There are four buckets used in total. The first bucket maps the hash of
// each transaction to its location in each block it's included in. The second
// bucket contains, for every block from whose viewpoint the transaction has
// been accepted (i.e. the transaction is found in its blue set without double
// spends), the blue block (or the block itself) that included the transaction.
// The third bucket maps the hash of each block to the unique ID and the fourth
// maps that ID back to the block hash.
//
// NOTE: Although it is technically possible for multiple transactions to have
// the same hash as long as the previous transaction with the same hash is fully
// spent, this code only stores the most recent one because doing otherwise
// would add a non-trivial amount of space and overhead for something that will
// realistically never happen per the probability and even if it did, the old
// one must be fully spent and so the most likely transaction a caller would
// want for a given hash is the most recent one anyways.
//
// The including blocks index contains a sub-bucket for each transaction hash
// (32 bytes each), whose serialized format is:
//
//   <block id> = <start offset><tx length>
//
//   Field          Type    Size
//   block id       uint32  4 bytes
//   start offset   uint32  4 bytes
//   tx length      uint32  4 bytes
//   -----
//   Total: 12 bytes
//
// The accepting blocks index contains a sub-bucket for each transaction hash
// (32 bytes each), whose serialized format is:
//
//   <accepting block id> = <including block id>
//
//   Field               Type    Size
//   accepting block id  uint32  4 bytes
//   including block id  uint32  4 bytes
//   -----
//   Total: 8 bytes
//
// The serialized format for keys and values in the block hash to ID bucket is:
//   <hash> = <ID>
//
//   Field  Type          Size
//   hash   daghash.Hash  32 bytes
//   ID     uint32        4 bytes
//   -----
//   Total: 36 bytes
//
// The serialized format for keys and values in the ID to block hash bucket is:
//   <ID> = <hash>
//
//   Field  Type          Size
//   ID     uint32        4 bytes
//   hash   daghash.Hash  32 bytes
//   -----
//   Total: 36 bytes
//
// -----------------------------------------------------------------------------
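
A worked example of the including-blocks layout above (values illustrative): for including block ID 7 and a transaction at byte offset 80 with length 300, the sub-bucket key and value are built as follows.

key := make([]byte, 4)                                  // <block id>
value := make([]byte, includingBlocksIndexKeyEntrySize) // <start offset><tx length>
byteOrder.PutUint32(key, 7)
byteOrder.PutUint32(value[0:4], 80)
byteOrder.PutUint32(value[4:8], 300)
// key   = 07 00 00 00            (little-endian)
// value = 50 00 00 00 2c 01 00 00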
// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
// the index entries for the hash to id and id to hash mappings for the provided
// values.
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, id uint32) error {
	// Serialize the ID for use in the index entries.
	var serializedID [4]byte
	byteOrder.PutUint32(serializedID[:], id)

	// Add the block hash to ID mapping to the index.
	meta := dbTx.Metadata()
	hashIndex := meta.Bucket(idByHashIndexBucketName)
	if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
		return err
	}

	// Add the block ID to hash mapping to the index.
	idIndex := meta.Bucket(hashByIDIndexBucketName)
	return idIndex.Put(serializedID[:], hash[:])
}

// dbFetchBlockIDByHash uses an existing database transaction to retrieve the
// block id for the provided hash from the index.
func dbFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint32, error) {
	hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
	serializedID := hashIndex.Get(hash[:])
	if serializedID == nil {
		return 0, fmt.Errorf("no entry in the block ID index for block with hash %s", hash)
	}

	return byteOrder.Uint32(serializedID), nil
}

// dbFetchBlockHashBySerializedID uses an existing database transaction to
// retrieve the hash for the provided serialized block id from the index.
func dbFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
	idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
	hashBytes := idIndex.Get(serializedID)
	if hashBytes == nil {
		return nil, fmt.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint32(serializedID))
	}

	var hash daghash.Hash
	copy(hash[:], hashBytes)
	return &hash, nil
}

// dbFetchBlockHashByID uses an existing database transaction to retrieve the
// hash for the provided block id from the index.
func dbFetchBlockHashByID(dbTx database.Tx, id uint32) (*daghash.Hash, error) {
	var serializedID [4]byte
	byteOrder.PutUint32(serializedID[:], id)
	return dbFetchBlockHashBySerializedID(dbTx, serializedID[:])
}

func putIncludingBlocksEntry(target []byte, txLoc wire.TxLoc) {
	byteOrder.PutUint32(target, uint32(txLoc.TxStart))
	byteOrder.PutUint32(target[4:], uint32(txLoc.TxLen))
}

func putAcceptingBlocksEntry(target []byte, includingBlockID uint32) {
	byteOrder.PutUint32(target, includingBlockID)
}
func dbPutIncludingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint32, serializedData []byte) error {
	bucket, err := dbTx.Metadata().Bucket(includingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
	if err != nil {
		return err
	}
	blockIDBytes := make([]byte, 4)
	byteOrder.PutUint32(blockIDBytes, blockID)
	return bucket.Put(blockIDBytes, serializedData)
}

func dbPutAcceptingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint32, serializedData []byte) error {
	bucket, err := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
	if err != nil {
		return err
	}
	blockIDBytes := make([]byte, 4)
	byteOrder.PutUint32(blockIDBytes, blockID)
	return bucket.Put(blockIDBytes, serializedData)
}

// dbFetchFirstTxRegion uses an existing database transaction to fetch the block
// region for the provided transaction hash from the transaction index. When
// there is no entry for the provided hash, nil will be returned for both
// the region and the error.
//
// Note: because the transaction can be found in multiple blocks, this function
// arbitrarily returns the first block region that is stored in the txindex.
func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.BlockRegion, error) {
	// Load the record from the database and return now if it doesn't exist.
	txBucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txID[:])
	if txBucket == nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("no block region "+
				"was found for %s", txID),
		}
	}
	cursor := txBucket.Cursor()
	if ok := cursor.First(); !ok {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("no block region "+
				"was found for %s", txID),
		}
	}
	blockIDBytes := cursor.Key()
	serializedData := cursor.Value()
	if len(serializedData) == 0 {
		return nil, nil
	}

	// Ensure the serialized data has enough bytes to properly deserialize.
	if len(serializedData) < includingBlocksIndexKeyEntrySize {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("corrupt transaction index "+
				"entry for %s", txID),
		}
	}

	// Load the block hash associated with the block ID.
	hash, err := dbFetchBlockHashBySerializedID(dbTx, blockIDBytes)
	if err != nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("corrupt transaction index "+
				"entry for %s: %s", txID, err),
		}
	}

	// Deserialize the final entry.
	region := database.BlockRegion{Hash: &daghash.Hash{}}
	copy(region.Hash[:], hash[:])
	region.Offset = byteOrder.Uint32(serializedData[:4])
	region.Len = byteOrder.Uint32(serializedData[4:])

	return &region, nil
}
// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint32, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	// The offset and length of the transactions within the serialized
	// block.
	txLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	// As an optimization, allocate a single slice big enough to hold all
	// of the serialized transaction index entries for the block and
	// serialize them directly into the slice. Then, pass the appropriate
	// subslice to the database to be written. This approach significantly
	// cuts down on the number of required allocations.
	includingBlocksOffset := 0
	serializedIncludingBlocksValues := make([]byte, len(block.Transactions())*includingBlocksIndexKeyEntrySize)
	for i, tx := range block.Transactions() {
		putIncludingBlocksEntry(serializedIncludingBlocksValues[includingBlocksOffset:], txLocs[i])
		endOffset := includingBlocksOffset + includingBlocksIndexKeyEntrySize
		err := dbPutIncludingBlocksEntry(dbTx, tx.ID(), blockID,
			serializedIncludingBlocksValues[includingBlocksOffset:endOffset:endOffset])
		if err != nil {
			return err
		}
		includingBlocksOffset += includingBlocksIndexKeyEntrySize
	}

	for includingBlockHash, blockTxsAcceptanceData := range txsAcceptanceData {
		var includingBlockID uint32
		if includingBlockHash.IsEqual(block.Hash()) {
			includingBlockID = blockID
		} else {
			includingBlockID, err = dbFetchBlockIDByHash(dbTx, &includingBlockHash)
			if err != nil {
				return err
			}
		}

		includingBlockIDBytes := make([]byte, 4)
		byteOrder.PutUint32(includingBlockIDBytes, includingBlockID)

		for _, txAcceptanceData := range blockTxsAcceptanceData {
			err = dbPutAcceptingBlocksEntry(dbTx, txAcceptanceData.Tx.ID(), blockID, includingBlockIDBytes)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// TxIndex implements a transaction by hash index. That is to say, it supports
// querying all transactions by their hash.
type TxIndex struct {
	db         database.DB
	curBlockID uint32
}

// Ensure the TxIndex type implements the Indexer interface.
var _ Indexer = (*TxIndex)(nil)

// Init initializes the hash-based transaction index. In particular, it finds
// the highest used block ID and stores it for later use when connecting or
// disconnecting blocks.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Init(db database.DB) error {
	idx.db = db

	// Find the latest known block id field for the internal block id
	// index and initialize it. This is done because it's a lot more
	// efficient to do a single search at initialization time than it is to
	// write another value to the database on every update.
	err := idx.db.View(func(dbTx database.Tx) error {
		// Scan forward in large gaps to find a block id that doesn't
		// exist yet to serve as an upper bound for the binary search
		// below.
		var highestKnown, nextUnknown uint32
		testBlockID := uint32(1)
		increment := uint32(100000)
		for {
			_, err := dbFetchBlockHashByID(dbTx, testBlockID)
			if err != nil {
				nextUnknown = testBlockID
				break
			}

			highestKnown = testBlockID
			testBlockID += increment
		}
		log.Tracef("Forward scan (highest known %d, next unknown %d)",
			highestKnown, nextUnknown)

		// No used block IDs due to new database.
		if nextUnknown == 1 {
			return nil
		}

		// Use a binary search to find the final highest used block id.
		// This will take at most ceil(log_2(increment)) attempts.
		for {
			testBlockID = (highestKnown + nextUnknown) / 2
			_, err := dbFetchBlockHashByID(dbTx, testBlockID)
			if err != nil {
				nextUnknown = testBlockID
			} else {
				highestKnown = testBlockID
			}
			log.Tracef("Binary scan (highest known %d, next "+
				"unknown %d)", highestKnown, nextUnknown)
			if highestKnown+1 == nextUnknown {
				break
			}
		}

		idx.curBlockID = highestKnown
		return nil
	})
	if err != nil {
		return err
	}

	log.Debugf("Current internal block ID: %d", idx.curBlockID)
	return nil
}
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Key() []byte {
	return includingBlocksIndexKey
}

// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Name() string {
	return txIndexName
}

// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the buckets for the hash-based
// transaction index and the internal block ID indexes.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Create(dbTx database.Tx) error {
	meta := dbTx.Metadata()
	if _, err := meta.CreateBucket(idByHashIndexBucketName); err != nil {
		return err
	}
	if _, err := meta.CreateBucket(hashByIDIndexBucketName); err != nil {
		return err
	}
	if _, err := meta.CreateBucket(includingBlocksIndexKey); err != nil {
		return err
	}
	_, err := meta.CreateBucket(acceptingBlocksIndexKey)
	return err
}
// ConnectBlock is invoked by the index manager when a new block has been
|
||||
// connected to the DAG. This indexer adds a hash-to-transaction mapping
|
||||
// for every transaction in the passed block.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, _ *blockdag.BlockDAG, acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error {
|
||||
// Increment the internal block ID to use for the block being connected
|
||||
// and add all of the transactions in the block to the index.
|
||||
newBlockID := idx.curBlockID + 1
|
||||
if block.MsgBlock().Header.IsGenesis() {
|
||||
newBlockID = 0
|
||||
}
|
||||
if err := dbAddTxIndexEntries(dbTx, block, newBlockID, acceptedTxsData); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add the new block ID index entry for the block being connected and
|
||||
// update the current internal block ID accordingly.
|
||||
err := dbPutBlockIDIndexEntry(dbTx, block.Hash(), newBlockID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
idx.curBlockID = newBlockID
|
||||
return nil
|
||||
}
|
||||
|
||||
// TxFirstBlockRegion returns the first block region for the provided transaction hash
|
||||
// from the transaction index. The block region can in turn be used to load the
|
||||
// raw transaction bytes. When there is no entry for the provided hash, nil
|
||||
// will be returned for both the entry and the error.
//
// This function is safe for concurrent access.
func (idx *TxIndex) TxFirstBlockRegion(txID *daghash.TxID) (*database.BlockRegion, error) {
	var region *database.BlockRegion
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		region, err = dbFetchFirstTxRegion(dbTx, txID)
		return err
	})
	return region, err
}
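
// An illustrative lookup flow (variable names invented for the example):
// the returned region is typically handed back to the database layer to
// read the raw transaction bytes.
//
//	region, err := txIndex.TxFirstBlockRegion(txID)
//	if err == nil && region != nil {
//		err = db.View(func(dbTx database.Tx) error {
//			txBytes, err := dbTx.FetchBlockRegion(region)
//			_ = txBytes // deserialize as needed
//			return err
//		})
//	}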

// TxBlocks returns the hashes of the blocks where the transaction exists
func (idx *TxIndex) TxBlocks(txHash *daghash.Hash) ([]*daghash.Hash, error) {
	blockHashes := make([]*daghash.Hash, 0)
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		blockHashes, err = dbFetchTxBlocks(dbTx, txHash)
		if err != nil {
			return err
		}
		return nil
	})
	return blockHashes, err
}

func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]*daghash.Hash, error) {
	blockHashes := make([]*daghash.Hash, 0)
	bucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txHash[:])
	if bucket == nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No including blocks "+
				"were found for %s", txHash),
		}
	}
	err := bucket.ForEach(func(blockIDBytes, _ []byte) error {
		blockID := byteOrder.Uint32(blockIDBytes)
		blockHash, err := dbFetchBlockHashByID(dbTx, blockID)
		if err != nil {
			return err
		}
		blockHashes = append(blockHashes, blockHash)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return blockHashes, nil
}

// BlockThatAcceptedTx returns the hash of the block where the transaction got accepted (from the virtual block point of view)
func (idx *TxIndex) BlockThatAcceptedTx(dag *blockdag.BlockDAG, txID *daghash.TxID) (*daghash.Hash, error) {
	var acceptingBlock *daghash.Hash
	err := idx.db.View(func(dbTx database.Tx) error {
		var err error
		acceptingBlock, err = dbFetchTxAcceptingBlock(dbTx, txID, dag)
		return err
	})
	return acceptingBlock, err
}

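// dbFetchTxAcceptingBlock returns the hash of the accepting block for txID
// that lies on the DAG's selected path chain, or nil if none of the recorded
// accepting blocks is on it.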
func dbFetchTxAcceptingBlock(dbTx database.Tx, txID *daghash.TxID, dag *blockdag.BlockDAG) (*daghash.Hash, error) {
	bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txID[:])
	if bucket == nil {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No accepting blocks "+
				"were found for %s", txID),
		}
	}
	cursor := bucket.Cursor()
	if !cursor.First() {
		return nil, database.Error{
			ErrorCode: database.ErrCorruption,
			Description: fmt.Sprintf("No accepting blocks "+
				"were found for %s", txID),
		}
	}
	for ; cursor.Key() != nil; cursor.Next() {
		blockID := byteOrder.Uint32(cursor.Key())
		blockHash, err := dbFetchBlockHashByID(dbTx, blockID)
		if err != nil {
			return nil, err
		}
		if dag.IsInSelectedPathChain(blockHash) {
			return blockHash, nil
		}
	}
	return nil, nil
}

// NewTxIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all transactions in the blockchain to the respective
// block, location within the block, and size of the transaction.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewTxIndex() *TxIndex {
	return &TxIndex{}
}

// dropBlockIDIndex drops the internal block id index.
func dropBlockIDIndex(db database.DB) error {
	return db.Update(func(dbTx database.Tx) error {
		meta := dbTx.Metadata()
		err := meta.DeleteBucket(idByHashIndexBucketName)
		if err != nil {
			return err
		}

		return meta.DeleteBucket(hashByIDIndexBucketName)
	})
}

// DropTxIndex drops the transaction index from the provided database if it
// exists. Since the address index relies on it, the address index will also be
// dropped when it exists.
func DropTxIndex(db database.DB, interrupt <-chan struct{}) error {
	err := dropIndex(db, addrIndexKey, addrIndexName, interrupt)
	if err != nil {
		return err
	}

	err = dropIndex(db, includingBlocksIndexKey, txIndexName, interrupt)
	if err != nil {
		return err
	}

	return dropIndex(db, acceptingBlocksIndexKey, txIndexName, interrupt)
}
@@ -1,124 +0,0 @@
package indexers

import (
	"bytes"
	"reflect"
	"testing"

	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/mining"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/wire"
)

func createTransaction(value uint64, originTx *wire.MsgTx, outputIndex uint32) *wire.MsgTx {
	txIn := &wire.TxIn{
		PreviousOutPoint: wire.OutPoint{
			TxID:  originTx.TxID(),
			Index: outputIndex,
		},
		Sequence: wire.MaxTxInSequenceNum,
	}
	txOut := wire.NewTxOut(value, blockdag.OpTrueScript)
	tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})

	return tx
}

func TestTxIndexConnectBlock(t *testing.T) {
	blocks := make(map[daghash.Hash]*util.Block)

	txIndex := NewTxIndex()
	indexManager := NewManager([]Indexer{txIndex})

	params := dagconfig.SimNetParams
	params.BlockRewardMaturity = 1
	params.K = 1

	config := blockdag.Config{
		IndexManager: indexManager,
		DAGParams:    &params,
	}

	dag, teardown, err := blockdag.DAGSetup("TestTxIndexConnectBlock", config)
	if err != nil {
		t.Fatalf("TestTxIndexConnectBlock: Failed to setup DAG instance: %v", err)
	}
	if teardown != nil {
		defer teardown()
	}

	prepareAndProcessBlock := func(parentHashes []*daghash.Hash, transactions []*wire.MsgTx, blockName string) *wire.MsgBlock {
		block, err := mining.PrepareBlockForTest(dag, &params, parentHashes, transactions, false, 1)
		if err != nil {
			t.Fatalf("TestTxIndexConnectBlock: block %v got unexpected error from PrepareBlockForTest: %v", blockName, err)
		}
		utilBlock := util.NewBlock(block)
		blocks[*block.BlockHash()] = utilBlock
		isOrphan, err := dag.ProcessBlock(utilBlock, blockdag.BFNoPoWCheck)
		if err != nil {
			t.Fatalf("TestTxIndexConnectBlock: dag.ProcessBlock got unexpected error for block %v: %v", blockName, err)
		}
		if isOrphan {
			t.Fatalf("TestTxIndexConnectBlock: block %v was unexpectedly orphan", blockName)
		}
		return block
	}

	block1 := prepareAndProcessBlock([]*daghash.Hash{params.GenesisHash}, nil, "1")
	block2Tx := createTransaction(block1.Transactions[0].TxOut[0].Value, block1.Transactions[0], 0)
	block2 := prepareAndProcessBlock([]*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{block2Tx}, "2")
	block3Tx := createTransaction(block2.Transactions[0].TxOut[0].Value, block2.Transactions[0], 0)
	block3 := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3")

	block3TxID := block3Tx.TxID()
	block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, &block3TxID)
	if err != nil {
		t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
	}
	block3Hash := block3.BlockHash()
	if !block3TxNewAcceptedBlock.IsEqual(block3Hash) {
		t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
			"been accepted in block %v but instead got accepted in block %v", block3Hash, block3TxNewAcceptedBlock)
	}

	block3A := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3A")
	block4 := prepareAndProcessBlock([]*daghash.Hash{block3.BlockHash()}, nil, "4")
	prepareAndProcessBlock([]*daghash.Hash{block3A.BlockHash(), block4.BlockHash()}, nil, "5")

	block3TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, &block3TxID)
	if err != nil {
		t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
	}
	block3AHash := block3A.BlockHash()
	if !block3TxAcceptedBlock.IsEqual(block3AHash) {
		t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
			"been accepted in block %v but instead got accepted in block %v", block3AHash, block3TxAcceptedBlock)
	}

	region, err := txIndex.TxFirstBlockRegion(&block3TxID)
	if err != nil {
t.Fatalf("TestTxIndexConnectBlock: no block region was found for block3Tx")
|
||||
	}
	regionBlock, ok := blocks[*region.Hash]
	if !ok {
		t.Fatalf("TestTxIndexConnectBlock: couldn't find block with hash %v", region.Hash)
	}

	regionBlockBytes, err := regionBlock.Bytes()
	if err != nil {
		t.Fatalf("TestTxIndexConnectBlock: Couldn't serialize block to bytes")
	}
	block3TxInBlock := regionBlockBytes[region.Offset : region.Offset+region.Len]

	block3TxBuf := bytes.NewBuffer(make([]byte, 0, block3Tx.SerializeSize()))
	err = block3Tx.BtcEncode(block3TxBuf, 0)
	if err != nil {
		t.Fatalf("TestTxIndexConnectBlock: Couldn't serialize block3Tx to bytes: %v", err)
	}
	blockTxBytes := block3TxBuf.Bytes()

	if !reflect.DeepEqual(blockTxBytes, block3TxInBlock) {
		t.Errorf("TestTxIndexConnectBlock: the block region that was in the bucket doesn't match block3Tx")
	}
}
@@ -1,20 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"github.com/btcsuite/btclog"
	"github.com/daglabs/btcd/logger"
)

// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger

// The default amount of logging is none.
func init() {
	log, _ = logger.Get(logger.SubsystemTags.CHAN)
}
@@ -1,218 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"math"
	"sort"
	"sync"
	"time"
)

const (
	// maxAllowedOffsetSecs is the maximum number of seconds in either
	// direction that the local clock will be adjusted. When the median time
	// of the network is outside of this range, no offset will be applied.
	maxAllowedOffsetSecs = 70 * 60 // 1 hour 10 minutes

	// similarTimeSecs is the number of seconds in either direction from the
	// local clock that is used to determine that it is likely wrong and
	// hence to show a warning.
	similarTimeSecs = 5 * 60 // 5 minutes
)

var (
	// maxMedianTimeEntries is the maximum number of entries allowed in the
	// median time data. This is a variable as opposed to a constant so the
	// test code can modify it.
	maxMedianTimeEntries = 200
)

// MedianTimeSource provides a mechanism to add several time samples which are
// used to determine a median time which is then used as an offset to the local
// clock.
type MedianTimeSource interface {
	// AdjustedTime returns the current time adjusted by the median time
	// offset as calculated from the time samples added by AddTimeSample.
	AdjustedTime() time.Time

	// AddTimeSample adds a time sample that is used when determining the
	// median time of the added samples.
	AddTimeSample(id string, timeVal time.Time)

	// Offset returns the number of seconds to adjust the local clock based
	// upon the median of the time samples added by AddTimeSample.
	Offset() time.Duration
}

// int64Sorter implements sort.Interface to allow a slice of 64-bit integers to
// be sorted.
type int64Sorter []int64

// Len returns the number of 64-bit integers in the slice. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Len() int {
	return len(s)
}

// Swap swaps the 64-bit integers at the passed indices. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less returns whether the 64-bit integer with index i should sort before the
// 64-bit integer with index j. It is part of the sort.Interface
// implementation.
func (s int64Sorter) Less(i, j int) bool {
	return s[i] < s[j]
}

// medianTime provides an implementation of the MedianTimeSource interface.
// It is limited to maxMedianTimeEntries and includes the same buggy behavior as
// the time offset mechanism in Bitcoin Core. This is necessary because it is
// used in the consensus code.
type medianTime struct {
	mtx                sync.Mutex
	knownIDs           map[string]struct{}
	offsets            []int64
	offsetSecs         int64
	invalidTimeChecked bool
}

// Ensure the medianTime type implements the MedianTimeSource interface.
var _ MedianTimeSource = (*medianTime)(nil)

// AdjustedTime returns the current time adjusted by the median time offset as
// calculated from the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AdjustedTime() time.Time {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	// Limit the adjusted time to 1 second precision.
	now := time.Unix(time.Now().Unix(), 0)
	return now.Add(time.Duration(m.offsetSecs) * time.Second)
}

// AddTimeSample adds a time sample that is used when determining the median
// time of the added samples.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AddTimeSample(sourceID string, timeVal time.Time) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	// Don't add time data from the same source.
	if _, exists := m.knownIDs[sourceID]; exists {
		return
	}
	m.knownIDs[sourceID] = struct{}{}

	// Truncate the provided offset to seconds and append it to the slice
	// of offsets while respecting the maximum number of allowed entries by
	// replacing the oldest entry with the new entry once the maximum number
	// of entries is reached.
	now := time.Unix(time.Now().Unix(), 0)
	offsetSecs := int64(timeVal.Sub(now).Seconds())
	numOffsets := len(m.offsets)
	if numOffsets == maxMedianTimeEntries && maxMedianTimeEntries > 0 {
		m.offsets = m.offsets[1:]
		numOffsets--
	}
	m.offsets = append(m.offsets, offsetSecs)
	numOffsets++

	// Sort the offsets so the median can be obtained as needed later.
	sortedOffsets := make([]int64, numOffsets)
	copy(sortedOffsets, m.offsets)
	sort.Sort(int64Sorter(sortedOffsets))

	offsetDuration := time.Duration(offsetSecs) * time.Second
	log.Debugf("Added time sample of %s (total: %d)", offsetDuration,
		numOffsets)

	// NOTE: The following code intentionally has a bug to mirror the
	// buggy behavior in Bitcoin Core since the median time is used in the
	// consensus rules.
	//
	// In particular, the offset is only updated when the number of entries
	// is odd, but the max number of entries is 200, an even number. Thus,
	// the offset will never be updated again once the max number of entries
	// is reached.
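	//
	// For example, with the default maxMedianTimeEntries of 200, the
	// 199th sample can still move the offset, but from the 200th sample
	// onward the slice stays pinned at 200 entries, numOffsets is always
	// even, and the odd-count check below never passes again.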

	// The median offset is only updated when there are enough offsets and
	// the number of offsets is odd so the middle value is the true median.
	// Thus, there is nothing to do when those conditions are not met.
	if numOffsets < 5 || numOffsets&0x01 != 1 {
		return
	}

	// At this point the number of offsets in the list is odd, so the
	// middle value of the sorted offsets is the median.
	median := sortedOffsets[numOffsets/2]

	// Set the new offset when the median offset is within the allowed
	// offset range.
	if math.Abs(float64(median)) < maxAllowedOffsetSecs {
		m.offsetSecs = median
	} else {
		// The median offset of all added time data is larger than the
		// maximum allowed offset, so don't use an offset. This
		// effectively limits how far the local clock can be skewed.
		m.offsetSecs = 0

		if !m.invalidTimeChecked {
			m.invalidTimeChecked = true

			// Find if any time samples have a time that is close
			// to the local time.
			var remoteHasCloseTime bool
			for _, offset := range sortedOffsets {
				if math.Abs(float64(offset)) < similarTimeSecs {
					remoteHasCloseTime = true
					break
				}
			}

			// Warn if none of the time samples are close.
			if !remoteHasCloseTime {
				log.Warnf("Please check your date and time " +
					"are correct! btcd will not work " +
					"properly with an invalid time")
			}
		}
	}

	medianDuration := time.Duration(m.offsetSecs) * time.Second
log.Debugf("New time offset: %d", medianDuration)
|
||||
}

// Offset returns the number of seconds to adjust the local clock based upon the
// median of the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) Offset() time.Duration {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	return time.Duration(m.offsetSecs) * time.Second
}

// NewMedianTime returns a new instance of a concurrency-safe implementation of
// the MedianTimeSource interface. The returned implementation contains the
// rules necessary for proper time handling in the chain consensus rules and
// expects the time samples to be added from the timestamp field of the version
// message received from remote peers that successfully connect and negotiate.
func NewMedianTime() MedianTimeSource {
	return &medianTime{
		knownIDs: make(map[string]struct{}),
		offsets:  make([]int64, 0, maxMedianTimeEntries),
	}
}
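
// A short usage sketch (peer ids and offsets are illustrative): samples are
// typically fed from the timestamps of peers' version messages, and the
// offset only starts to apply once at least five samples from distinct
// sources have been added (and their count is odd).
//
//	medianTime := NewMedianTime()
//	for i, secs := range []int64{10, -5, 2, 7, -1} {
//		peerID := fmt.Sprintf("peer%d", i)
//		medianTime.AddTimeSample(peerID, time.Now().Add(time.Duration(secs)*time.Second))
//	}
//	adjustedNow := medianTime.AdjustedTime() // local clock + median offset (2s here)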
@@ -1,104 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"strconv"
	"testing"
	"time"
)

// TestMedianTime tests the medianTime implementation.
func TestMedianTime(t *testing.T) {
	tests := []struct {
		in         []int64
		wantOffset int64
		useDupID   bool
	}{
		// Not enough samples must result in an offset of 0.
		{in: []int64{1}, wantOffset: 0},
		{in: []int64{1, 2}, wantOffset: 0},
		{in: []int64{1, 2, 3}, wantOffset: 0},
		{in: []int64{1, 2, 3, 4}, wantOffset: 0},

		// Various number of entries. The expected offset is only
		// updated on odd number of elements.
		{in: []int64{-13, 57, -4, -23, -12}, wantOffset: -12},
		{in: []int64{55, -13, 61, -52, 39, 55}, wantOffset: 39},
		{in: []int64{-62, -58, -30, -62, 51, -30, 15}, wantOffset: -30},
		{in: []int64{29, -47, 39, 54, 42, 41, 8, -33}, wantOffset: 39},
		{in: []int64{37, 54, 9, -21, -56, -36, 5, -11, -39}, wantOffset: -11},
		{in: []int64{57, -28, 25, -39, 9, 63, -16, 19, -60, 25}, wantOffset: 9},
		{in: []int64{-5, -4, -3, -2, -1}, wantOffset: -3, useDupID: true},

		// The offset stops being updated once the max number of entries
		// has been reached. This is actually a bug from Bitcoin Core,
		// but since the time is ultimately used as a part of the
		// consensus rules, it must be mirrored.
		{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52}, wantOffset: 17},
		{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45}, wantOffset: 17},
		{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45, 4}, wantOffset: 17},

		// Offsets that are too far away from the local time should
		// be ignored.
		{in: []int64{-4201, 4202, -4203, 4204, -4205}, wantOffset: 0},

		// Exercise the condition where the median offset is greater
		// than the max allowed adjustment, but there is at least one
		// sample that is close enough to the current time to avoid
		// triggering a warning about an invalid local clock.
		{in: []int64{4201, 4202, 4203, 4204, -299}, wantOffset: 0},
	}

	// Modify the max number of allowed median time entries for these tests.
	maxMedianTimeEntries = 10
	defer func() { maxMedianTimeEntries = 200 }()

	for i, test := range tests {
		filter := NewMedianTime()
		for j, offset := range test.in {
			id := strconv.Itoa(j)
			now := time.Unix(time.Now().Unix(), 0)
			tOffset := now.Add(time.Duration(offset) * time.Second)
			filter.AddTimeSample(id, tOffset)

			// Ensure the duplicate IDs are ignored.
			if test.useDupID {
				// Modify the offsets to ensure the final median
				// would be different if the duplicate is added.
				tOffset = tOffset.Add(time.Duration(offset) *
					time.Second)
				filter.AddTimeSample(id, tOffset)
			}
		}

		// Since it is possible that the time.Now call in AddTimeSample
		// and the time.Now calls here in the tests will be off by one
		// second, allow a fudge factor to compensate.
		gotOffset := filter.Offset()
		wantOffset := time.Duration(test.wantOffset) * time.Second
		wantOffset2 := time.Duration(test.wantOffset-1) * time.Second
		if gotOffset != wantOffset && gotOffset != wantOffset2 {
			t.Errorf("Offset #%d: unexpected offset -- got %v, "+
				"want %v or %v", i, gotOffset, wantOffset,
				wantOffset2)
			continue
		}

		// Since it is possible that the time.Now call in AdjustedTime
		// and the time.Now call here in the tests will be off by one
		// second, allow a fudge factor to compensate.
		adjustedTime := filter.AdjustedTime()
		now := time.Unix(time.Now().Unix(), 0)
		wantTime := now.Add(filter.Offset())
		wantTime2 := now.Add(filter.Offset() - time.Second)
		if !adjustedTime.Equal(wantTime) && !adjustedTime.Equal(wantTime2) {
			t.Errorf("AdjustedTime #%d: unexpected result -- got %v, "+
				"want %v or %v", i, adjustedTime, wantTime,
				wantTime2)
			continue
		}
	}
}
@@ -1,105 +0,0 @@
package blockdag

import (
	"github.com/daglabs/btcd/dagconfig/daghash"
)

// phantom calculates and returns the block's blue set, selected parent and blue score.
// The chain start is determined by going down the DAG through the selected path
// (following the selected parent of each block) k + 1 steps.
// The blue set of a block is the set of all blue blocks in its past.
// To optimize memory usage, for each block we store only the blue blocks in
// its selected parent's anticone that are in the future of the chain start,
// as well as the selected parent itself - the rest of the
// blue set can be restored by traversing the selected parent chain and combining
// the .blues of all blocks in it.
// The blue score is the number of blocks in this block's blue set plus the
// blue score of its selected parent. (the blue score of the genesis block is defined as 0)
// The selected parent is chosen by determining which parent will give this block the highest blue score.
func phantom(block *blockNode, k uint32) (blues []*blockNode, selectedParent *blockNode, score uint64) {
	bestScore := uint64(0)
	var bestParent *blockNode
	var bestBlues []*blockNode
	var bestHash *daghash.Hash
	for _, parent := range block.parents {
		chainStart := digToChainStart(parent, k)
		candidates := blueCandidates(chainStart)
		blues := traverseCandidates(block, candidates, parent)
		score := uint64(len(blues)) + parent.blueScore

		if score > bestScore || (score == bestScore && (bestHash == nil || daghash.Less(parent.hash, bestHash))) {
			bestScore = score
			bestBlues = blues
			bestParent = parent
			bestHash = parent.hash
		}
	}

	return bestBlues, bestParent, bestScore
}
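
// A worked example of the score rule above (numbers are illustrative):
// if a candidate parent has blueScore 7 and traverseCandidates finds 3
// blue blocks for it, the block's score through that parent is 3 + 7 = 10.
// phantom picks the parent that maximizes this score, breaking ties in
// favor of the lower hash.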

// digToChainStart digs through the selected path and returns the block in depth k+1
func digToChainStart(parent *blockNode, k uint32) *blockNode {
	current := parent

	for i := uint32(0); i < k; i++ {
		if current.isGenesis() {
			break
		}
		current = current.selectedParent
	}

	return current
}

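// blueCandidates returns a set containing chainStart and every block in
// its future, collected with a breadth-first traversal over child pointers.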
func blueCandidates(chainStart *blockNode) blockSet {
	candidates := newSet()
	candidates.add(chainStart)

	queue := []*blockNode{chainStart}
	for len(queue) > 0 {
		var current *blockNode
		current, queue = queue[0], queue[1:]

		children := current.children
		for _, child := range children {
			if !candidates.contains(child) {
				candidates.add(child)
				queue = append(queue, child)
			}
		}
	}

	return candidates
}

// traverseCandidates returns all the blocks that are in the future of the chain start and in the anticone of the selected parent
func traverseCandidates(newBlock *blockNode, candidates blockSet, selectedParent *blockNode) []*blockNode {
	blues := []*blockNode{}
	selectedParentPast := newSet()
	queue := NewDownHeap()
	visited := newSet()

	for _, parent := range newBlock.parents {
		queue.Push(parent)
	}

	for queue.Len() > 0 {
		current := queue.pop()
		if candidates.contains(current) {
			if current == selectedParent || selectedParentPast.anyChildInSet(current) {
				selectedParentPast.add(current)
			} else {
				blues = append(blues, current)
			}
			for _, parent := range current.parents {
				if !visited.contains(parent) {
					visited.add(parent)
					queue.Push(parent)
				}
			}
		}
	}

	return append(blues, selectedParent)
}
@@ -1,897 +0,0 @@
package blockdag

import (
	"fmt"
	"reflect"
	"sort"
	"testing"
	"time"

	"github.com/daglabs/btcd/dagconfig/daghash"

	"github.com/daglabs/btcd/dagconfig"
)

type testBlockData struct {
	parents                []string
	id                     string // id is a virtual entity that is used only for tests so we can define relations between blocks without knowing their hash
	expectedScore          uint64
	expectedSelectedParent string
	expectedBlues          []string
}

type hashIDPair struct {
	hash *daghash.Hash
	id   string
}

// TestPhantom iterates over several DAG simulations and checks that the
// blue score, blue set and selected parent of each block are calculated
// as expected.
func TestPhantom(t *testing.T) {
	netParams := dagconfig.SimNetParams

	blockVersion := int32(0x10000000)

	tests := []struct {
		k              uint32
		dagData        []*testBlockData
		virtualBlockID string
		expectedReds   []string
	}{
		{
			// Block hash order: AKJIHGFEDCB
			k:              1,
			virtualBlockID: "K",
			expectedReds:   []string{"D"},
			dagData: []*testBlockData{
				{
					parents:                []string{"A"},
					id:                     "B",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"A"},
					id:                     "C",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"B"},
					id:                     "D",
					expectedScore:          2,
					expectedSelectedParent: "B",
					expectedBlues:          []string{"B"},
				},
				{
					parents:                []string{"B"},
					id:                     "E",
					expectedScore:          2,
					expectedSelectedParent: "B",
					expectedBlues:          []string{"B"},
				},
				{
					parents:                []string{"C"},
					id:                     "F",
					expectedScore:          2,
					expectedSelectedParent: "C",
					expectedBlues:          []string{"C"},
				},
				{
					parents:                []string{"C", "D"},
					id:                     "G",
					expectedScore:          4,
					expectedSelectedParent: "C",
					expectedBlues:          []string{"D", "B", "C"},
				},
				{
					parents:                []string{"C", "E"},
					id:                     "H",
					expectedScore:          4,
					expectedSelectedParent: "C",
					expectedBlues:          []string{"E", "B", "C"},
				},
				{
					parents:                []string{"E", "G"},
					id:                     "I",
					expectedScore:          5,
					expectedSelectedParent: "E",
					expectedBlues:          []string{"G", "D", "E"},
				},
				{
					parents:                []string{"F"},
					id:                     "J",
					expectedScore:          3,
					expectedSelectedParent: "F",
					expectedBlues:          []string{"F"},
				},
				{
					parents:                []string{"H", "I", "J"},
					id:                     "K",
					expectedScore:          9,
					expectedSelectedParent: "H",
					expectedBlues:          []string{"I", "J", "G", "F", "H"},
				},
			},
		},
		{
			// Block hash order: AVUTSRQPONMLKJIHGFEDCB
			k:              2,
			virtualBlockID: "V",
			expectedReds:   []string{"D", "J", "P"},
			dagData: []*testBlockData{
				{
					parents:                []string{"A"},
					id:                     "B",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"A"},
					id:                     "C",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"B"},
					id:                     "D",
					expectedScore:          2,
					expectedSelectedParent: "B",
					expectedBlues:          []string{"B"},
				},
				{
					parents:                []string{"B"},
					id:                     "E",
					expectedScore:          2,
					expectedSelectedParent: "B",
					expectedBlues:          []string{"B"},
				},
				{
					parents:                []string{"C"},
					id:                     "F",
					expectedScore:          2,
					expectedSelectedParent: "C",
					expectedBlues:          []string{"C"},
				},
				{
					parents:                []string{"C"},
					id:                     "G",
					expectedScore:          2,
					expectedSelectedParent: "C",
					expectedBlues:          []string{"C"},
				},
				{
					parents:                []string{"G"},
					id:                     "H",
					expectedScore:          3,
					expectedSelectedParent: "G",
					expectedBlues:          []string{"G"},
				},
				{
					parents:                []string{"E"},
					id:                     "I",
					expectedScore:          3,
					expectedSelectedParent: "E",
					expectedBlues:          []string{"E"},
				},
				{
					parents:                []string{"E"},
					id:                     "J",
					expectedScore:          3,
					expectedSelectedParent: "E",
					expectedBlues:          []string{"E"},
				},
				{
					parents:                []string{"I"},
					id:                     "K",
					expectedScore:          4,
					expectedSelectedParent: "I",
					expectedBlues:          []string{"I"},
				},
				{
					parents:                []string{"K", "H"},
					id:                     "L",
					expectedScore:          5,
					expectedSelectedParent: "K",
					expectedBlues:          []string{"K"},
				},
				{
					parents:                []string{"F", "L"},
					id:                     "M",
					expectedScore:          10,
					expectedSelectedParent: "F",
					expectedBlues:          []string{"L", "K", "I", "H", "G", "E", "B", "F"},
				},
				{
					parents:                []string{"G", "K"},
					id:                     "N",
					expectedScore:          7,
					expectedSelectedParent: "G",
					expectedBlues:          []string{"K", "I", "E", "B", "G"},
				},
				{
					parents:                []string{"J", "N"},
					id:                     "O",
					expectedScore:          8,
					expectedSelectedParent: "N",
					expectedBlues:          []string{"N"},
				},
				{
					parents:                []string{"D"},
					id:                     "P",
					expectedScore:          3,
					expectedSelectedParent: "D",
					expectedBlues:          []string{"D"},
				},
				{
					parents:                []string{"O", "P"},
					id:                     "Q",
					expectedScore:          10,
					expectedSelectedParent: "P",
					expectedBlues:          []string{"O", "N", "K", "J", "I", "E", "P"},
				},
				{
					parents:                []string{"L", "Q"},
					id:                     "R",
					expectedScore:          11,
					expectedSelectedParent: "Q",
					expectedBlues:          []string{"Q"},
				},
				{
					parents:                []string{"M", "R"},
					id:                     "S",
					expectedScore:          15,
					expectedSelectedParent: "M",
					expectedBlues:          []string{"R", "Q", "O", "N", "M"},
				},
				{
					parents:                []string{"H", "F"},
					id:                     "T",
					expectedScore:          5,
					expectedSelectedParent: "F",
					expectedBlues:          []string{"H", "G", "F"},
				},
				{
					parents:                []string{"M", "T"},
					id:                     "U",
					expectedScore:          12,
					expectedSelectedParent: "M",
					expectedBlues:          []string{"T", "M"},
				},
				{
					parents:                []string{"S", "U"},
					id:                     "V",
					expectedScore:          18,
					expectedSelectedParent: "S",
					expectedBlues:          []string{"U", "T", "S"},
				},
			},
		},
		{
			// Block hash order: AXWVUTSRQPONMLKJIHGFEDCB
			k:              1,
			virtualBlockID: "X",
			expectedReds:   []string{"D", "F", "G", "H", "J", "K", "L", "N", "O", "Q", "R", "S", "U", "V"},
			dagData: []*testBlockData{
				{
					parents:                []string{"A"},
					id:                     "B",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"A"},
					id:                     "C",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"A"},
					id:                     "D",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"A"},
					id:                     "E",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"B"},
					id:                     "F",
					expectedScore:          2,
					expectedSelectedParent: "B",
					expectedBlues:          []string{"B"},
				},
				{
					parents:                []string{"B"},
					id:                     "G",
					expectedScore:          2,
					expectedSelectedParent: "B",
					expectedBlues:          []string{"B"},
				},
				{
					parents:                []string{"C"},
					id:                     "H",
					expectedScore:          2,
					expectedSelectedParent: "C",
					expectedBlues:          []string{"C"},
				},
				{
					parents:                []string{"C"},
					id:                     "I",
					expectedScore:          2,
					expectedSelectedParent: "C",
					expectedBlues:          []string{"C"},
				},
				{
					parents:                []string{"B"},
					id:                     "J",
					expectedScore:          2,
					expectedSelectedParent: "B",
					expectedBlues:          []string{"B"},
				},
				{
					parents:                []string{"D"},
					id:                     "K",
					expectedScore:          2,
					expectedSelectedParent: "D",
					expectedBlues:          []string{"D"},
				},
				{
					parents:                []string{"D"},
					id:                     "L",
					expectedScore:          2,
					expectedSelectedParent: "D",
					expectedBlues:          []string{"D"},
				},
				{
					parents:                []string{"E"},
					id:                     "M",
					expectedScore:          2,
					expectedSelectedParent: "E",
					expectedBlues:          []string{"E"},
				},
				{
					parents:                []string{"E"},
					id:                     "N",
					expectedScore:          2,
					expectedSelectedParent: "E",
					expectedBlues:          []string{"E"},
				},
				{
					parents:                []string{"F", "G", "J"},
					id:                     "O",
					expectedScore:          5,
					expectedSelectedParent: "F",
					expectedBlues:          []string{"J", "G", "F"},
				},
				{
					parents:                []string{"B", "M", "I"},
					id:                     "P",
					expectedScore:          6,
					expectedSelectedParent: "B",
					expectedBlues:          []string{"M", "I", "E", "C", "B"},
				},
				{
					parents:                []string{"K", "E"},
					id:                     "Q",
					expectedScore:          4,
					expectedSelectedParent: "E",
					expectedBlues:          []string{"K", "D", "E"},
				},
				{
					parents:                []string{"L", "N"},
					id:                     "R",
					expectedScore:          3,
					expectedSelectedParent: "L",
					expectedBlues:          []string{"L"},
				},
				{
					parents:                []string{"I", "Q"},
					id:                     "S",
					expectedScore:          5,
					expectedSelectedParent: "Q",
					expectedBlues:          []string{"Q"},
				},
				{
					parents:                []string{"K", "P"},
					id:                     "T",
					expectedScore:          7,
					expectedSelectedParent: "P",
					expectedBlues:          []string{"P"},
				},
				{
					parents:                []string{"K", "L"},
					id:                     "U",
					expectedScore:          4,
					expectedSelectedParent: "K",
					expectedBlues:          []string{"L", "K"},
				},
				{
					parents:                []string{"U", "R"},
					id:                     "V",
					expectedScore:          5,
					expectedSelectedParent: "R",
					expectedBlues:          []string{"U", "R"},
				},
				{
					parents:                []string{"S", "U", "T"},
					id:                     "W",
					expectedScore:          8,
					expectedSelectedParent: "T",
					expectedBlues:          []string{"T"},
				},
				{
					parents:                []string{"V", "W", "H"},
					id:                     "X",
					expectedScore:          9,
					expectedSelectedParent: "W",
					expectedBlues:          []string{"W"},
				},
			},
		},
		{
			// Secret mining attack: the attacker mines blocks
			// B, C, D, E, F, G and T in secret without propagating
			// them, so all of the attacker's blocks except T should
			// be red, because they don't follow the rules of PHANTOM
			// that require you to point to all the parents that you
			// know of, and to propagate your block as soon as it's
			// mined.

			// Block hash order: AYXWVUTSRQPONMLKJIHGFEDCB
			k:              1,
			virtualBlockID: "Y",
			expectedReds:   []string{"B", "C", "D", "E", "F", "G", "L"},
			dagData: []*testBlockData{
				{
					parents:                []string{"A"},
					id:                     "B",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"B"},
					id:                     "C",
					expectedScore:          2,
					expectedSelectedParent: "B",
					expectedBlues:          []string{"B"},
				},
				{
					parents:                []string{"C"},
					id:                     "D",
					expectedScore:          3,
					expectedSelectedParent: "C",
					expectedBlues:          []string{"C"},
				},
				{
					parents:                []string{"D"},
					id:                     "E",
					expectedScore:          4,
					expectedSelectedParent: "D",
					expectedBlues:          []string{"D"},
				},
				{
					parents:                []string{"E"},
					id:                     "F",
					expectedScore:          5,
					expectedSelectedParent: "E",
					expectedBlues:          []string{"E"},
				},
				{
					parents:                []string{"F"},
					id:                     "G",
					expectedScore:          6,
					expectedSelectedParent: "F",
					expectedBlues:          []string{"F"},
				},
				{
					parents:                []string{"A"},
					id:                     "H",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"A"},
					id:                     "I",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"H", "I"},
					id:                     "J",
					expectedScore:          3,
					expectedSelectedParent: "H",
					expectedBlues:          []string{"I", "H"},
				},
				{
					parents:                []string{"H", "I"},
					id:                     "K",
					expectedScore:          3,
					expectedSelectedParent: "H",
					expectedBlues:          []string{"I", "H"},
				},
				{
					parents:                []string{"I"},
					id:                     "L",
					expectedScore:          2,
					expectedSelectedParent: "I",
					expectedBlues:          []string{"I"},
				},
				{
					parents:                []string{"J", "K", "L"},
					id:                     "M",
					expectedScore:          5,
					expectedSelectedParent: "J",
					expectedBlues:          []string{"K", "J"},
				},
				{
					parents:                []string{"J", "K", "L"},
					id:                     "N",
					expectedScore:          5,
					expectedSelectedParent: "J",
					expectedBlues:          []string{"K", "J"},
				},
				{
					parents:                []string{"N", "M"},
					id:                     "O",
					expectedScore:          7,
					expectedSelectedParent: "M",
					expectedBlues:          []string{"N", "M"},
				},
				{
					parents:                []string{"N", "M"},
					id:                     "P",
					expectedScore:          7,
					expectedSelectedParent: "M",
					expectedBlues:          []string{"N", "M"},
				},
				{
					parents:                []string{"N", "M"},
					id:                     "Q",
					expectedScore:          7,
					expectedSelectedParent: "M",
					expectedBlues:          []string{"N", "M"},
				},
				{
					parents:                []string{"O", "P", "Q"},
					id:                     "R",
					expectedScore:          10,
					expectedSelectedParent: "O",
					expectedBlues:          []string{"Q", "P", "O"},
				},
				{
					parents:                []string{"O", "P", "Q"},
					id:                     "S",
					expectedScore:          10,
					expectedSelectedParent: "O",
					expectedBlues:          []string{"Q", "P", "O"},
				},
				{
					parents:                []string{"G", "S", "R"},
					id:                     "T",
					expectedScore:          12,
					expectedSelectedParent: "R",
					expectedBlues:          []string{"S", "R"},
				},
				{
					parents:                []string{"S", "R"},
					id:                     "U",
					expectedScore:          12,
					expectedSelectedParent: "R",
					expectedBlues:          []string{"S", "R"},
				},
				{
					parents:                []string{"T", "U"},
					id:                     "V",
					expectedScore:          14,
					expectedSelectedParent: "T",
					expectedBlues:          []string{"U", "T"},
				},
				{
					parents:                []string{"T", "U"},
					id:                     "W",
					expectedScore:          14,
					expectedSelectedParent: "T",
					expectedBlues:          []string{"U", "T"},
				},
				{
					parents:                []string{"U", "T"},
					id:                     "X",
					expectedScore:          14,
					expectedSelectedParent: "T",
					expectedBlues:          []string{"U", "T"},
				},
				{
					parents:                []string{"V", "W", "X"},
					id:                     "Y",
					expectedScore:          17,
					expectedSelectedParent: "V",
					expectedBlues:          []string{"X", "W", "V"},
				},
			},
		},
		{
			// Censorship mining attack: the attacker mines blocks
			// B, C, D, E, F and G in secret without propagating them,
			// so all of the attacker's blocks except B and C should be
			// red, because they don't follow the rules of PHANTOM that
			// require you to point to all the parents that you know of.

			// Block hash order: AYXWVUTSRQPONMLKJIHGFEDCB
			k:              1,
			virtualBlockID: "Y",
			expectedReds:   []string{"D", "E", "F", "G", "L"},
			dagData: []*testBlockData{
				{
					parents:                []string{"A"},
					id:                     "B",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"B"},
					id:                     "C",
					expectedScore:          2,
					expectedSelectedParent: "B",
					expectedBlues:          []string{"B"},
				},
				{
					parents:                []string{"C"},
					id:                     "D",
					expectedScore:          3,
					expectedSelectedParent: "C",
					expectedBlues:          []string{"C"},
				},
				{
					parents:                []string{"D"},
					id:                     "E",
					expectedScore:          4,
					expectedSelectedParent: "D",
					expectedBlues:          []string{"D"},
				},
				{
					parents:                []string{"E"},
					id:                     "F",
					expectedScore:          5,
					expectedSelectedParent: "E",
					expectedBlues:          []string{"E"},
				},
				{
					parents:                []string{"F"},
					id:                     "G",
					expectedScore:          6,
					expectedSelectedParent: "F",
					expectedBlues:          []string{"F"},
				},
				{
					parents:                []string{"A"},
					id:                     "H",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"A"},
					id:                     "I",
					expectedScore:          1,
					expectedSelectedParent: "A",
					expectedBlues:          []string{"A"},
				},
				{
					parents:                []string{"H", "I", "B"},
					id:                     "J",
					expectedScore:          4,
					expectedSelectedParent: "B",
					expectedBlues:          []string{"I", "H", "B"},
				},
				{
					parents:                []string{"H", "I", "B"},
					id:                     "K",
					expectedScore:          4,
					expectedSelectedParent: "B",
					expectedBlues:          []string{"I", "H", "B"},
				},
				{
					parents:                []string{"I"},
					id:                     "L",
					expectedScore:          2,
					expectedSelectedParent: "I",
					expectedBlues:          []string{"I"},
				},
				{
					parents:                []string{"J", "K", "L", "C"},
					id:                     "M",
					expectedScore:          7,
					expectedSelectedParent: "J",
					expectedBlues:          []string{"K", "C", "J"},
				},
				{
					parents:                []string{"J", "K", "L", "C"},
					id:                     "N",
					expectedScore:          7,
					expectedSelectedParent: "J",
					expectedBlues:          []string{"K", "C", "J"},
				},
				{
					parents:                []string{"N", "M", "D"},
					id:                     "O",
					expectedScore:          9,
					expectedSelectedParent: "M",
					expectedBlues:          []string{"N", "M"},
				},
				{
					parents:                []string{"N", "M", "D"},
					id:                     "P",
					expectedScore:          9,
					expectedSelectedParent: "M",
					expectedBlues:          []string{"N", "M"},
				},
				{
					parents:                []string{"N", "M", "D"},
					id:                     "Q",
					expectedScore:          9,
					expectedSelectedParent: "M",
					expectedBlues:          []string{"N", "M"},
				},
				{
					parents:                []string{"O", "P", "Q", "E"},
					id:                     "R",
					expectedScore:          12,
					expectedSelectedParent: "O",
					expectedBlues:          []string{"Q", "P", "O"},
				},
				{
					parents:                []string{"O", "P", "Q", "E"},
					id:                     "S",
					expectedScore:          12,
					expectedSelectedParent: "O",
					expectedBlues:          []string{"Q", "P", "O"},
				},
				{
					parents:                []string{"G", "S", "R"},
					id:                     "T",
					expectedScore:          14,
					expectedSelectedParent: "R",
					expectedBlues:          []string{"S", "R"},
				},
				{
					parents:                []string{"S", "R", "F"},
					id:                     "U",
					expectedScore:          14,
					expectedSelectedParent: "R",
					expectedBlues:          []string{"S", "R"},
				},
				{
					parents:                []string{"T", "U"},
					id:                     "V",
					expectedScore:          16,
					expectedSelectedParent: "T",
					expectedBlues:          []string{"U", "T"},
				},
				{
					parents:                []string{"T", "U"},
					id:                     "W",
					expectedScore:          16,
					expectedSelectedParent: "T",
					expectedBlues:          []string{"U", "T"},
				},
				{
					parents:                []string{"T", "U"},
					id:                     "X",
					expectedScore:          16,
					expectedSelectedParent: "T",
					expectedBlues:          []string{"U", "T"},
				},
				{
					parents:                []string{"V", "W", "X"},
					id:                     "Y",
					expectedScore:          19,
					expectedSelectedParent: "V",
					expectedBlues:          []string{"X", "W", "V"},
				},
			},
		},
	}

	for i, test := range tests {
		netParams.K = test.k
		// Generate enough synthetic blocks for the rest of the test
		blockDAG := newTestDAG(&netParams)
		genesisNode := blockDAG.genesis
		blockTime := genesisNode.Header().Timestamp
		blockByIDMap := make(map[string]*blockNode)
		idByBlockMap := make(map[*blockNode]string)
		blockByIDMap["A"] = genesisNode
		idByBlockMap[genesisNode] = "A"

		for _, blockData := range test.dagData {
			blockTime = blockTime.Add(time.Second)
			parents := blockSet{}
			for _, parentID := range blockData.parents {
				parent := blockByIDMap[parentID]
				parents.add(parent)
			}
			node := newTestNode(parents, blockVersion, 0, blockTime, test.k)
			node.hash = &daghash.Hash{} // It helps to predict hash order
			for i, char := range blockData.id {
				node.hash[i] = byte(char)
			}

			blockDAG.index.AddNode(node)
			addNodeAsChildToParents(node)

			blockByIDMap[blockData.id] = node
			idByBlockMap[node] = blockData.id

			bluesIDs := make([]string, 0, len(node.blues))
			for _, blue := range node.blues {
				bluesIDs = append(bluesIDs, idByBlockMap[blue])
			}
			selectedParentID := idByBlockMap[node.selectedParent]
			fullDataStr := fmt.Sprintf("blues: %v, selectedParent: %v, score: %v",
				bluesIDs, selectedParentID, node.blueScore)
			if blockData.expectedScore != node.blueScore {
				t.Errorf("Test %d: Block %v expected to have score %v but got %v (fulldata: %v)",
					i, blockData.id, blockData.expectedScore, node.blueScore, fullDataStr)
			}
			if blockData.expectedSelectedParent != selectedParentID {
				t.Errorf("Test %d: Block %v expected to have selected parent %v but got %v (fulldata: %v)",
					i, blockData.id, blockData.expectedSelectedParent, selectedParentID, fullDataStr)
			}
			if !reflect.DeepEqual(blockData.expectedBlues, bluesIDs) {
				t.Errorf("Test %d: Block %v expected to have blues %v but got %v (fulldata: %v)",
					i, blockData.id, blockData.expectedBlues, bluesIDs, fullDataStr)
			}
		}

		reds := make(map[string]bool)

		for id := range blockByIDMap {
			reds[id] = true
		}

		for tip := blockByIDMap[test.virtualBlockID]; tip.selectedParent != nil; tip = tip.selectedParent {
			tipID := idByBlockMap[tip]
			delete(reds, tipID)
			for _, blue := range tip.blues {
				blueID := idByBlockMap[blue]
				delete(reds, blueID)
			}
		}
		if !checkReds(test.expectedReds, reds) {
			redsIDs := make([]string, 0, len(reds))
			for id := range reds {
				redsIDs = append(redsIDs, id)
			}
			sort.Strings(redsIDs)
			sort.Strings(test.expectedReds)
			t.Errorf("Test %d: Expected reds %v but got %v", i, test.expectedReds, redsIDs)
		}
	}
}

func checkReds(expectedReds []string, reds map[string]bool) bool {
	if len(expectedReds) != len(reds) {
		return false
	}
	for _, redID := range expectedReds {
		if !reds[redID] {
			return false
		}
	}
	return true
}
@@ -1,250 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"fmt"
	"time"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
)

// BehaviorFlags is a bitmask defining tweaks to the normal behavior when
// performing chain processing and consensus rules checks.
type BehaviorFlags uint32

const (
	// BFFastAdd may be set to indicate that several checks can be avoided
	// for the block since it is already known to fit into the chain due to
	// having already proven that it correctly links into the chain up to a
	// known checkpoint. This is primarily used for headers-first mode.
BFFastAdd BehaviorFlags = 1 << iota
|
||||
|
||||
// BFNoPoWCheck may be set to indicate the proof of work check which
|
||||
// ensures a block hashes to a value less than the required target will
|
||||
// not be performed.
|
||||
BFNoPoWCheck
|
||||
|
||||
// BFNone is a convenience value to specifically indicate no flags.
|
||||
BFNone BehaviorFlags = 0
|
||||
)
// BlockExists determines whether a block with the given hash exists in
// the DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockExists(hash *daghash.Hash) (bool, error) {
	// Check block index first (could be main chain or side chain blocks).
	if dag.index.HaveBlock(hash) {
		return true, nil
	}

	// Check in the database.
	var exists bool
	err := dag.db.View(func(dbTx database.Tx) error {
		var err error
		exists, err = dbTx.HasBlock(hash)
		if err != nil || !exists {
			return err
		}

		// Ignore side chain blocks in the database. This is necessary
		// because there is not currently any record of the associated
		// block index data such as its block height, so it's not yet
		// possible to efficiently load the block and do anything useful
		// with it.
		//
		// Ultimately the entire block index should be serialized
		// instead of only the current main chain so it can be consulted
		// directly.
		_, err = dbFetchHeightByHash(dbTx, hash)
		if isNotInDAGErr(err) {
			exists = false
			return nil
		}
		return err
	})
	return exists, err
}

// processOrphans determines if there are any orphans which depend on the passed
// block hash (they are no longer orphans if true) and potentially accepts them.
// It repeats the process for the newly accepted blocks (to detect further
// orphans which may no longer be orphans) until there are no more.
//
// The flags do not modify the behavior of this function directly, however they
// are needed to pass along to maybeAcceptBlock.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) error {
	// Start with processing at least the passed hash. Leave a little room
	// for additional orphan blocks that need to be processed without
	// needing to grow the array in the common case.
	processHashes := make([]*daghash.Hash, 0, 10)
	processHashes = append(processHashes, hash)
	for len(processHashes) > 0 {
		// Pop the first hash to process from the slice.
		processHash := processHashes[0]
		processHashes[0] = nil // Prevent GC leak.
		processHashes = processHashes[1:]

		// Look up all orphans that are parented by the block we just
		// accepted. This will typically only be one, but it could
		// be multiple if multiple blocks are mined and broadcast
		// around the same time. The one with the most proof of work
		// will eventually win out. An indexing for loop is
		// intentionally used over a range here as range does not
		// reevaluate the slice on each iteration nor does it adjust the
		// index for the modified slice.
		for i := 0; i < len(dag.prevOrphans[*processHash]); i++ {
			orphan := dag.prevOrphans[*processHash][i]
			if orphan == nil {
				log.Warnf("Found a nil entry at index %d in the "+
					"orphan dependency list for block %s", i,
					processHash)
				continue
			}

			// Remove the orphan from the orphan pool.
			orphanHash := orphan.block.Hash()
			dag.removeOrphanBlock(orphan)
			i--

			// Potentially accept the block into the block DAG.
			err := dag.maybeAcceptBlock(orphan.block, flags)
			if err != nil {
				return err
			}

			// Add this block to the list of blocks to process so
			// any orphan blocks that depend on this block are
			// handled too.
			processHashes = append(processHashes, orphanHash)
		}
	}
	return nil
}
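processOrphans drains a slice-backed FIFO queue, nil-ing each popped pointer so the backing array doesn't keep accepted blocks alive. A self-contained sketch of that idiom under hypothetical names (the string items stand in for orphan blocks):

package main

import "fmt"

// Slice-backed FIFO, as in processOrphans above: pop from the front and
// nil the vacated slot so the backing array doesn't pin the item.
func main() {
	queue := []*string{ptr("a"), ptr("b"), ptr("c")}
	for len(queue) > 0 {
		item := queue[0]
		queue[0] = nil // Prevent GC leak: drop the reference held by the backing array.
		queue = queue[1:]
		fmt.Println(*item)
		// Work discovered while processing an item would be appended
		// here, exactly like newly de-orphaned blocks.
	}
}

func ptr(s string) *string { return &s }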
// ProcessBlock is the main workhorse for handling insertion of new blocks into
// the block DAG. It includes functionality such as rejecting duplicate
// blocks, ensuring blocks follow all rules, and orphan handling.
//
// When no errors occurred during processing, the first return value indicates
// whether or not the block is an orphan.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) ProcessBlock(block *util.Block, flags BehaviorFlags) (bool, error) {
	dag.dagLock.Lock()
	defer dag.dagLock.Unlock()

	fastAdd := flags&BFFastAdd == BFFastAdd

	blockHash := block.Hash()
	log.Tracef("Processing block %s", blockHash)

	// The block must not already exist in the main chain or side chains.
	exists, err := dag.BlockExists(blockHash)
	if err != nil {
		return false, err
	}
	if exists {
		str := fmt.Sprintf("already have block %s", blockHash)
		return false, ruleError(ErrDuplicateBlock, str)
	}

	// The block must not already exist as an orphan.
	if _, exists := dag.orphans[*blockHash]; exists {
		str := fmt.Sprintf("already have block (orphan) %s", blockHash)
		return false, ruleError(ErrDuplicateBlock, str)
	}

	// Perform preliminary sanity checks on the block and its transactions.
	err = dag.checkBlockSanity(block, flags)
	if err != nil {
		return false, err
	}

	// Find the previous checkpoint and perform some additional checks based
	// on the checkpoint. This provides a few nice properties such as
	// preventing old side chain blocks before the last checkpoint,
	// rejecting easy to mine, but otherwise bogus, blocks that could be
	// used to eat memory, and ensuring expected (versus claimed) proof of
	// work requirements since the previous checkpoint are met.
	blockHeader := &block.MsgBlock().Header
	checkpointNode, err := dag.findPreviousCheckpoint()
	if err != nil {
		return false, err
	}
	if checkpointNode != nil {
		// Ensure the block timestamp is after the checkpoint timestamp.
		checkpointTime := time.Unix(checkpointNode.timestamp, 0)
		if blockHeader.Timestamp.Before(checkpointTime) {
			str := fmt.Sprintf("block %s has timestamp %s before "+
				"last checkpoint timestamp %s", blockHash,
				blockHeader.Timestamp, checkpointTime)
			return false, ruleError(ErrCheckpointTimeTooOld, str)
		}
		if !fastAdd {
			// Even though the checks prior to now have already ensured the
			// proof of work exceeds the claimed amount, the claimed amount
			// is a field in the block header which could be forged. This
			// check ensures the proof of work is at least the minimum
			// expected based on elapsed time since the last checkpoint and
			// maximum adjustment allowed by the retarget rules.
			duration := blockHeader.Timestamp.Sub(checkpointTime)
			requiredTarget := util.CompactToBig(dag.calcEasiestDifficulty(
				checkpointNode.bits, duration))
			currentTarget := util.CompactToBig(blockHeader.Bits)
			if currentTarget.Cmp(requiredTarget) > 0 {
				str := fmt.Sprintf("block target difficulty of %064x "+
					"is too low when compared to the previous "+
					"checkpoint", currentTarget)
				return false, ruleError(ErrDifficultyTooLow, str)
			}
		}
	}

	// Handle orphan blocks.
	allParentsExist := true
	for _, parentHash := range blockHeader.ParentHashes {
		parentExists, err := dag.BlockExists(parentHash)
		if err != nil {
			return false, err
		}

		if !parentExists {
			log.Infof("Adding orphan block %s with parent %s", blockHash, parentHash)
			dag.addOrphanBlock(block)

			allParentsExist = false
		}
	}

	if !allParentsExist {
		return true, nil
	}

	// The block has passed all context independent checks and appears sane
	// enough to potentially accept it into the block DAG.
	err = dag.maybeAcceptBlock(block, flags)
	if err != nil {
		return false, err
	}

	// Accept any orphan blocks that depend on this block (they are
	// no longer orphans) and repeat for those accepted blocks until
	// there are no more.
	err = dag.processOrphans(blockHash, flags)
	if err != nil {
		return false, err
	}

	log.Debugf("Accepted block %s", blockHash)

	return false, nil
}
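A hedged sketch of the return-value contract documented above; handleIncomingBlock is a hypothetical caller, not part of this package.

// Illustrative only: interpreting ProcessBlock's return values.
// handleIncomingBlock is hypothetical.
func handleIncomingBlock(dag *BlockDAG, block *util.Block) error {
	isOrphan, err := dag.ProcessBlock(block, BFNone)
	if err != nil {
		// The block was rejected: duplicate, rule violation, etc.
		return err
	}
	if isOrphan {
		// The block was queued in the orphan pool; processOrphans will
		// revisit it once its missing parents arrive.
		return nil
	}
	// The block was fully accepted into the DAG.
	return nil
}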
@@ -1,200 +0,0 @@
package blockdag

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/daglabs/btcd/util"

	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util/subnetworkid"
	"github.com/daglabs/btcd/wire"
)

// SubnetworkStore stores the subnetworks data
type SubnetworkStore struct {
	db database.DB
}

func newSubnetworkStore(db database.DB) *SubnetworkStore {
	return &SubnetworkStore{
		db: db,
	}
}

// registerSubnetworks scans a list of accepted transactions, singles out
// subnetwork registry transactions, validates them, and registers a new
// subnetwork based on each of them.
// This function returns an error if one or more transactions are invalid.
func registerSubnetworks(dbTx database.Tx, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
	validSubnetworkRegistryTxs := make([]*wire.MsgTx, 0)

	for _, txs := range txsAcceptanceData {
		for _, txData := range txs {
			if !txData.IsAccepted {
				continue
			}

			tx := txData.Tx.MsgTx()
			if tx.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDRegistry) {
				err := validateSubnetworkRegistryTransaction(tx)
				if err != nil {
					return err
				}
				validSubnetworkRegistryTxs = append(validSubnetworkRegistryTxs, tx)
			}

			if subnetworkid.Less(subnetworkid.SubnetworkIDRegistry, &tx.SubnetworkID) {
				// Transactions are ordered by subnetwork, so we can safely assume
				// that the rest of the transactions will not be subnetwork registry
				// transactions.
				break
			}
		}
	}

	for _, registryTx := range validSubnetworkRegistryTxs {
		subnetworkID, err := TxToSubnetworkID(registryTx)
		if err != nil {
			return err
		}
		sNet, err := dbGetSubnetwork(dbTx, subnetworkID)
		if err != nil {
			return err
		}
		if sNet == nil {
			createdSubnetwork := newSubnetwork(registryTx)
			err := dbRegisterSubnetwork(dbTx, subnetworkID, createdSubnetwork)
			if err != nil {
				return fmt.Errorf("failed registering subnetwork "+
					"for tx '%s': %s", registryTx.TxHash(), err)
			}
		}
	}

	return nil
}

// validateSubnetworkRegistryTransaction makes sure that a given subnetwork registry
// transaction is valid. Such a transaction is valid iff:
// - Its entire payload is a uint64 (8 bytes)
func validateSubnetworkRegistryTransaction(tx *wire.MsgTx) error {
	if len(tx.Payload) != 8 {
		return ruleError(ErrSubnetworkRegistry, fmt.Sprintf("validation failed: subnetwork registry "+
			"tx '%s' has an invalid payload", tx.TxHash()))
	}

	return nil
}

// TxToSubnetworkID creates a subnetwork ID from a subnetwork registry transaction
func TxToSubnetworkID(tx *wire.MsgTx) (*subnetworkid.SubnetworkID, error) {
	txHash := tx.TxHash()
	return subnetworkid.New(util.Hash160(txHash[:]))
}

// subnetwork returns a registered subnetwork. If the subnetwork does not exist
// this method returns an error.
func (s *SubnetworkStore) subnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
	var sNet *subnetwork
	var err error
	dbErr := s.db.View(func(dbTx database.Tx) error {
		sNet, err = dbGetSubnetwork(dbTx, subnetworkID)
		return nil
	})
	if dbErr != nil {
		return nil, fmt.Errorf("could not retrieve subnetwork '%s': %s", subnetworkID, dbErr)
	}
	if err != nil {
		return nil, fmt.Errorf("could not retrieve subnetwork '%s': %s", subnetworkID, err)
	}

	return sNet, nil
}

// GasLimit returns the gas limit of a registered subnetwork. If the subnetwork does not
// exist this method returns an error.
func (s *SubnetworkStore) GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
	sNet, err := s.subnetwork(subnetworkID)
	if err != nil {
		return 0, err
	}
	if sNet == nil {
		return 0, fmt.Errorf("subnetwork '%s' not found", subnetworkID)
	}

	return sNet.gasLimit, nil
}

// dbRegisterSubnetwork stores mappings from ID of the subnetwork to the subnetwork data.
func dbRegisterSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
	// Serialize the subnetwork
	serializedSubnetwork, err := serializeSubnetwork(network)
	if err != nil {
		return fmt.Errorf("failed to serialize subnetwork '%s': %s", subnetworkID, err)
	}

	// Store the subnetwork
	subnetworksBucket := dbTx.Metadata().Bucket(subnetworksBucketName)
	err = subnetworksBucket.Put(subnetworkID[:], serializedSubnetwork)
	if err != nil {
		return fmt.Errorf("failed to write subnetwork '%s': %s", subnetworkID, err)
	}

	return nil
}

// dbGetSubnetwork returns the subnetwork associated with subnetworkID or nil if the subnetwork was not found.
func dbGetSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
	bucket := dbTx.Metadata().Bucket(subnetworksBucketName)
	serializedSubnetwork := bucket.Get(subnetworkID[:])
	if serializedSubnetwork == nil {
		return nil, nil
	}

	return deserializeSubnetwork(serializedSubnetwork)
}

type subnetwork struct {
	gasLimit uint64
}

func newSubnetwork(tx *wire.MsgTx) *subnetwork {
	gasLimit := binary.LittleEndian.Uint64(tx.Payload[:8])

	return &subnetwork{
		gasLimit: gasLimit,
	}
}

// serializeSubnetwork serializes a subnetwork into the following binary format:
// | gasLimit (8 bytes) |
func serializeSubnetwork(sNet *subnetwork) ([]byte, error) {
	serializedSNet := bytes.NewBuffer(make([]byte, 0, 8))

	// Write the gas limit
	err := binary.Write(serializedSNet, byteOrder, sNet.gasLimit)
	if err != nil {
		return nil, fmt.Errorf("failed to serialize subnetwork: %s", err)
	}

	return serializedSNet.Bytes(), nil
}

// deserializeSubnetwork deserializes a byte slice into a subnetwork.
// See serializeSubnetwork for the binary format.
func deserializeSubnetwork(serializedSNetBytes []byte) (*subnetwork, error) {
	serializedSNet := bytes.NewBuffer(serializedSNetBytes)

	// Read the gas limit
	var gasLimit uint64
	err := binary.Read(serializedSNet, byteOrder, &gasLimit)
	if err != nil {
		return nil, fmt.Errorf("failed to deserialize subnetwork: %s", err)
	}

	return &subnetwork{
		gasLimit: gasLimit,
	}, nil
}
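A self-contained sketch of the 8-byte wire format documented above. Little-endian is assumed here because newSubnetwork reads the payload with binary.LittleEndian; the real code writes through the package-level byteOrder.

package main

import (
	"encoding/binary"
	"fmt"
)

// Round-trip of the subnetwork format: a single little-endian uint64
// gas limit, nothing else.
func main() {
	const gasLimit uint64 = 12345

	serialized := make([]byte, 8)
	binary.LittleEndian.PutUint64(serialized, gasLimit)

	roundTripped := binary.LittleEndian.Uint64(serialized)
	fmt.Println(roundTripped == gasLimit) // true
}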
@@ -1,209 +0,0 @@
package blockdag

// This file's functions are not considered safe for regular use, and should be used for test purposes only.

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/daglabs/btcd/util/subnetworkid"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	_ "github.com/daglabs/btcd/database/ffldb" // blank import ffldb so that its init() function runs before tests
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/wire"
)

const (
	// testDbType is the database backend type to use for the tests.
	testDbType = "ffldb"

	// testDbRoot is the root directory used to create all test databases.
	testDbRoot = "testdbs"

	// blockDataNet is the expected network in the test block data.
	blockDataNet = wire.MainNet
)

// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
	supportedDrivers := database.SupportedDrivers()
	for _, driver := range supportedDrivers {
		if dbType == driver {
			return true
		}
	}

	return false
}

// fileExists returns whether or not the named file or directory exists.
func fileExists(name string) bool {
	if _, err := os.Stat(name); err != nil {
		if os.IsNotExist(err) {
			return false
		}
	}
	return true
}

// DAGSetup is used to create a new db and DAG instance with the genesis
// block already inserted. In addition to the new DAG instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
	if !isSupportedDbType(testDbType) {
		return nil, nil, fmt.Errorf("unsupported db type %s", testDbType)
	}

	var teardown func()

	if config.DB == nil {
		// Create the root directory for test databases.
		if !fileExists(testDbRoot) {
			if err := os.MkdirAll(testDbRoot, 0700); err != nil {
				err := fmt.Errorf("unable to create test db "+
					"root: %s", err)
				return nil, nil, err
			}
		}

		dbPath := filepath.Join(testDbRoot, dbName)
		_ = os.RemoveAll(dbPath)
		var err error
		config.DB, err = database.Create(testDbType, dbPath, blockDataNet)
		if err != nil {
			return nil, nil, fmt.Errorf("error creating db: %s", err)
		}

		// Setup a teardown function for cleaning up. This function is
		// returned to the caller to be invoked when it is done testing.
		teardown = func() {
			config.DB.Close()
			os.RemoveAll(dbPath)
			os.RemoveAll(testDbRoot)
		}
	}

	config.TimeSource = NewMedianTime()
	config.SigCache = txscript.NewSigCache(1000)

	// Create the DAG instance.
	dag, err := New(&config)
	if err != nil {
		teardown()
		err := fmt.Errorf("failed to create dag instance: %s", err)
		return nil, nil, err
	}
	return dag, teardown, nil
}
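A usage sketch of the setup/teardown contract, mirroring how TestUTXODiffStore below calls DAGSetup; it assumes a _test.go context (the "testing" import) and the function and test names are hypothetical.

// Illustrative only: the DAGSetup call pattern used by the tests below.
func exampleDAGSetupUsage(t *testing.T) {
	dag, teardownFunc, err := DAGSetup("ExampleTest", Config{
		DAGParams: &dagconfig.SimNetParams,
	})
	if err != nil {
		t.Fatalf("Failed to setup DAG instance: %v", err)
	}
	// Always invoke the returned teardown so the test db is removed.
	defer teardownFunc()
	_ = dag // exercise the DAG here
}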
// OpTrueScript is a script that returns TRUE
var OpTrueScript = []byte{txscript.OpTrue}

type txSubnetworkData struct {
	subnetworkID *subnetworkid.SubnetworkID
	Gas          uint64
	Payload      []byte
}

func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, subnetworkData *txSubnetworkData) *wire.MsgTx {
	txIns := []*wire.TxIn{}
	txOuts := []*wire.TxOut{}

	for i := uint32(0); i < numInputs; i++ {
		txIns = append(txIns, &wire.TxIn{
			PreviousOutPoint: *wire.NewOutPoint(&daghash.TxID{}, i),
			SignatureScript:  []byte{},
			Sequence:         wire.MaxTxInSequenceNum,
		})
	}

	for i := uint32(0); i < numOutputs; i++ {
		txOuts = append(txOuts, &wire.TxOut{
			PkScript: OpTrueScript,
			Value:    outputValue,
		})
	}

	if subnetworkData != nil {
		return wire.NewSubnetworkMsgTx(wire.TxVersion, txIns, txOuts, subnetworkData.subnetworkID, subnetworkData.Gas, subnetworkData.Payload)
	}

	return wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
}

// createCoinbaseTxForTest returns a coinbase transaction with the requested number of
// outputs paying an appropriate subsidy based on the passed block height. It
// automatically uses a standard signature script that starts with the block
// height.
func createCoinbaseTxForTest(blockHeight int32, numOutputs uint32, extraNonce int64, params *dagconfig.Params) (*wire.MsgTx, error) {
	// Create standard coinbase script.
	coinbaseScript, err := txscript.NewScriptBuilder().
		AddInt64(int64(blockHeight)).AddInt64(extraNonce).Script()
	if err != nil {
		return nil, err
	}

	txIns := []*wire.TxIn{&wire.TxIn{
		// Coinbase transactions have no inputs, so previous outpoint is
		// zero hash and max index.
		PreviousOutPoint: *wire.NewOutPoint(&daghash.TxID{},
			wire.MaxPrevOutIndex),
		SignatureScript: coinbaseScript,
		Sequence:        wire.MaxTxInSequenceNum,
	}}

	txOuts := []*wire.TxOut{}

	totalInput := CalcBlockSubsidy(blockHeight, params)
	amountPerOutput := totalInput / uint64(numOutputs)
	remainder := totalInput - amountPerOutput*uint64(numOutputs)
	for i := uint32(0); i < numOutputs; i++ {
		// Ensure the final output accounts for any remainder that might
		// be left from splitting the input amount.
		amount := amountPerOutput
		if i == numOutputs-1 {
			amount = amountPerOutput + remainder
		}
		txOuts = append(txOuts, &wire.TxOut{
			PkScript: OpTrueScript,
			Value:    amount,
		})
	}

	return wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts), nil
}
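The subsidy split above gives the last output the integer-division remainder, so the outputs always sum back to the subsidy. A tiny self-contained check of that arithmetic with made-up numbers:

package main

import "fmt"

// Self-contained check of the split used by createCoinbaseTxForTest.
func main() {
	const totalInput uint64 = 1000
	const numOutputs uint64 = 3

	amountPerOutput := totalInput / numOutputs           // 333
	remainder := totalInput - amountPerOutput*numOutputs // 1

	var sum uint64
	for i := uint64(0); i < numOutputs; i++ {
		amount := amountPerOutput
		if i == numOutputs-1 {
			amount += remainder // last output absorbs the remainder
		}
		sum += amount
	}
	fmt.Println(sum == totalInput) // true
}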
// SetVirtualForTest replaces the dag's virtual block. This function is used for test purposes only
func SetVirtualForTest(dag *BlockDAG, virtual *virtualBlock) *virtualBlock {
	oldVirtual := dag.virtual
	dag.virtual = virtual
	return oldVirtual
}

// GetVirtualFromParentsForTest generates a virtual block with the given parents.
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (*virtualBlock, error) {
	parents := newSet()
	for _, hash := range parentHashes {
		parent := dag.index.LookupNode(hash)
		if parent == nil {
			return nil, fmt.Errorf("GetVirtualFromParentsForTest: couldn't find node for hash %s", hash)
		}
		parents.add(parent)
	}
	virtual := newVirtualBlock(parents, dag.dagParams.K)

	pastUTXO, _, err := virtual.pastUTXO(dag)
	if err != nil {
		return nil, err
	}
	diffPastUTXO := pastUTXO.clone().(*DiffUTXOSet)
	diffPastUTXO.meldToBase()
	virtual.utxoSet = diffPastUTXO.base

	return virtual, nil
}
@@ -1,56 +0,0 @@
package blockdag

import (
	"errors"
	"os"
	"strings"
	"testing"

	"bou.ke/monkey"
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/database"
)

func TestIsSupportedDbType(t *testing.T) {
	if !isSupportedDbType("ffldb") {
		t.Errorf("ffldb should be a supported DB driver")
	}
	if isSupportedDbType("madeUpDb") {
		t.Errorf("madeUpDb should not be a supported DB driver")
	}
}

// TestDAGSetupErrors tests all error cases in DAGSetup.
// The non-error cases are tested in the more general tests.
func TestDAGSetupErrors(t *testing.T) {
	os.RemoveAll(testDbRoot)
	testDAGSetupErrorThroughPatching(t, "unable to create test db root: ", os.MkdirAll, func(path string, perm os.FileMode) error {
		return errors.New("Made up error")
	})

	testDAGSetupErrorThroughPatching(t, "failed to create dag instance: ", New, func(config *Config) (*BlockDAG, error) {
		return nil, errors.New("Made up error")
	})

	testDAGSetupErrorThroughPatching(t, "unsupported db type ", isSupportedDbType, func(dbType string) bool {
		return false
	})

	testDAGSetupErrorThroughPatching(t, "error creating db: ", database.Create, func(dbType string, args ...interface{}) (database.DB, error) {
		return nil, errors.New("Made up error")
	})
}

func testDAGSetupErrorThroughPatching(t *testing.T, expectedErrorMessage string, targetFunction interface{}, replacementFunction interface{}) {
	guard := monkey.Patch(targetFunction, replacementFunction)
	defer guard.Unpatch()
	_, tearDown, err := DAGSetup("TestDAGSetup", Config{
		DAGParams: &dagconfig.MainNetParams,
	})
	if tearDown != nil {
		defer tearDown()
	}
	if err == nil || !strings.HasPrefix(err.Error(), expectedErrorMessage) {
		t.Errorf("DAGSetup: expected error to have prefix '%s' but got error '%v'", expectedErrorMessage, err)
	}
}
BIN blockdag/testdata/blk_0_to_4.dat (vendored, binary file not shown)
BIN blockdag/testdata/blk_3A.dat (vendored, binary file not shown)
BIN blockdag/testdata/blk_3B.dat (vendored, binary file not shown)
BIN blockdag/testdata/blk_3C.dat (vendored, binary file not shown)
BIN blockdag/testdata/blk_3D.dat (vendored, binary file not shown)
@@ -1,356 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"fmt"

	"github.com/daglabs/btcd/dagconfig/daghash"
)

// ThresholdState defines the various threshold states used when voting on
// consensus changes.
type ThresholdState byte

// These constants are used to identify specific threshold states.
const (
	// ThresholdDefined is the first state for each deployment and is the
	// state the genesis block has by definition for all deployments.
	ThresholdDefined ThresholdState = iota

	// ThresholdStarted is the state for a deployment once its start time
	// has been reached.
	ThresholdStarted

	// ThresholdLockedIn is the state for a deployment during the retarget
	// period which is after the ThresholdStarted state period and the
	// number of blocks that have voted for the deployment equal or exceed
	// the required number of votes for the deployment.
	ThresholdLockedIn

	// ThresholdActive is the state for a deployment for all blocks after a
	// retarget period in which the deployment was in the ThresholdLockedIn
	// state.
	ThresholdActive

	// ThresholdFailed is the state for a deployment once its expiration
	// time has been reached and it did not reach the ThresholdLockedIn
	// state.
	ThresholdFailed

	// numThresholdsStates is the maximum number of threshold states used in
	// tests.
	numThresholdsStates
)

// thresholdStateStrings is a map of ThresholdState values back to their
// constant names for pretty printing.
var thresholdStateStrings = map[ThresholdState]string{
	ThresholdDefined:  "ThresholdDefined",
	ThresholdStarted:  "ThresholdStarted",
	ThresholdLockedIn: "ThresholdLockedIn",
	ThresholdActive:   "ThresholdActive",
	ThresholdFailed:   "ThresholdFailed",
}

// String returns the ThresholdState as a human-readable name.
func (t ThresholdState) String() string {
	if s := thresholdStateStrings[t]; s != "" {
		return s
	}
	return fmt.Sprintf("Unknown ThresholdState (%d)", int(t))
}

// thresholdConditionChecker provides a generic interface that is invoked to
// determine when a consensus rule change threshold should be changed.
type thresholdConditionChecker interface {
	// BeginTime returns the unix timestamp for the median block time after
	// which voting on a rule change starts (at the next window).
	BeginTime() uint64

	// EndTime returns the unix timestamp for the median block time after
	// which an attempted rule change fails if it has not already been
	// locked in or activated.
	EndTime() uint64

	// RuleChangeActivationThreshold is the number of blocks for which the
	// condition must be true in order to lock in a rule change.
	RuleChangeActivationThreshold() uint32

	// MinerConfirmationWindow is the number of blocks in each threshold
	// state retarget window.
	MinerConfirmationWindow() uint32

	// Condition returns whether or not the rule change activation condition
	// has been met. This typically involves checking whether or not the
	// bit associated with the condition is set, but can be more complex as
	// needed.
	Condition(*blockNode) (bool, error)
}

// thresholdStateCache provides a type to cache the threshold states of each
// threshold window for a set of IDs.
type thresholdStateCache struct {
	entries map[daghash.Hash]ThresholdState
}

// Lookup returns the threshold state associated with the given hash along with
// a boolean that indicates whether or not it is valid.
func (c *thresholdStateCache) Lookup(hash *daghash.Hash) (ThresholdState, bool) {
	state, ok := c.entries[*hash]
	return state, ok
}

// Update updates the cache to contain the provided hash to threshold state
// mapping.
func (c *thresholdStateCache) Update(hash *daghash.Hash, state ThresholdState) {
	c.entries[*hash] = state
}

// newThresholdCaches returns a new array of caches to be used when calculating
// threshold states.
func newThresholdCaches(numCaches uint32) []thresholdStateCache {
	caches := make([]thresholdStateCache, numCaches)
	for i := 0; i < len(caches); i++ {
		caches[i] = thresholdStateCache{
			entries: make(map[daghash.Hash]ThresholdState),
		}
	}
	return caches
}
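A condensed sketch of the Update/Lookup pattern, drawn from TestThresholdStateCache in the test file below; exampleCacheUsage is hypothetical and not part of the package.

// Illustrative only: creating a cache, checking for a miss, then
// inserting and reading back a state.
func exampleCacheUsage() ThresholdState {
	cache := &newThresholdCaches(1)[0]

	var hash daghash.Hash
	hash[0] = 0x01

	if _, ok := cache.Lookup(&hash); !ok {
		// A fresh cache has no entry for the hash.
		cache.Update(&hash, ThresholdStarted)
	}
	state, _ := cache.Lookup(&hash) // ThresholdStarted
	return state
}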
// thresholdState returns the current rule change threshold state for the block
// AFTER the given node and deployment ID. The cache is used to ensure the
// threshold states for previous windows are only calculated once.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdConditionChecker, cache *thresholdStateCache) (ThresholdState, error) {
	// The threshold state for the window that contains the genesis block is
	// defined by definition.
	confirmationWindow := int32(checker.MinerConfirmationWindow())
	if prevNode == nil || (prevNode.height+1) < confirmationWindow {
		return ThresholdDefined, nil
	}

	// Get the ancestor that is the last block of the previous confirmation
	// window in order to get its threshold state. This can be done because
	// the state is the same for all blocks within a given window.
	prevNode = prevNode.SelectedAncestor(prevNode.height -
		(prevNode.height+1)%confirmationWindow)

	// Iterate backwards through each of the previous confirmation windows
	// to find the most recently cached threshold state.
	var neededStates []*blockNode
	for prevNode != nil {
		// Nothing more to do if the state of the block is already
		// cached.
		if _, ok := cache.Lookup(prevNode.hash); ok {
			break
		}

		// The start and expiration times are based on the median block
		// time, so calculate it now.
		medianTime := prevNode.PastMedianTime()

		// The state is simply defined if the start time hasn't been
		// reached yet.
		if uint64(medianTime.Unix()) < checker.BeginTime() {
			cache.Update(prevNode.hash, ThresholdDefined)
			break
		}

		// Add this node to the list of nodes that need the state
		// calculated and cached.
		neededStates = append(neededStates, prevNode)

		// Get the ancestor that is the last block of the previous
		// confirmation window.
		prevNode = prevNode.RelativeAncestor(confirmationWindow)
	}

	// Start with the threshold state for the most recent confirmation
	// window that has a cached state.
	state := ThresholdDefined
	if prevNode != nil {
		var ok bool
		state, ok = cache.Lookup(prevNode.hash)
		if !ok {
			return ThresholdFailed, AssertError(fmt.Sprintf(
				"thresholdState: cache lookup failed for %s",
				prevNode.hash))
		}
	}

	// Since each threshold state depends on the state of the previous
	// window, iterate starting from the oldest unknown window.
	for neededNum := len(neededStates) - 1; neededNum >= 0; neededNum-- {
		prevNode := neededStates[neededNum]

		switch state {
		case ThresholdDefined:
			// The deployment of the rule change fails if it expires
			// before it is accepted and locked in.
			medianTime := prevNode.PastMedianTime()
			medianTimeUnix := uint64(medianTime.Unix())
			if medianTimeUnix >= checker.EndTime() {
				state = ThresholdFailed
				break
			}

			// The state for the rule moves to the started state
			// once its start time has been reached (and it hasn't
			// already expired per the above).
			if medianTimeUnix >= checker.BeginTime() {
				state = ThresholdStarted
			}

		case ThresholdStarted:
			// The deployment of the rule change fails if it expires
			// before it is accepted and locked in.
			medianTime := prevNode.PastMedianTime()
			if uint64(medianTime.Unix()) >= checker.EndTime() {
				state = ThresholdFailed
				break
			}

			// At this point, the rule change is still being voted
			// on by the miners, so iterate backwards through the
			// confirmation window to count all of the votes in it.
			var count uint32
			countNode := prevNode
			for i := int32(0); i < confirmationWindow; i++ {
				condition, err := checker.Condition(countNode)
				if err != nil {
					return ThresholdFailed, err
				}
				if condition {
					count++
				}

				// Get the previous block node.
				countNode = countNode.selectedParent
			}

			// The state is locked in if the number of blocks in the
			// period that voted for the rule change meets the
			// activation threshold.
			if count >= checker.RuleChangeActivationThreshold() {
				state = ThresholdLockedIn
			}

		case ThresholdLockedIn:
			// The new rule becomes active when its previous state
			// was locked in.
			state = ThresholdActive

		// Nothing to do if the previous state is active or failed since
		// they are both terminal states.
		case ThresholdActive:
		case ThresholdFailed:
		}

		// Update the cache to avoid recalculating the state in the
		// future.
		cache.Update(prevNode.hash, state)
	}

	return state, nil
}

// ThresholdState returns the current rule change threshold state of the given
// deployment ID for the block AFTER the current selected tip.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) ThresholdState(deploymentID uint32) (ThresholdState, error) {
	dag.dagLock.Lock()
	state, err := dag.deploymentState(dag.selectedTip(), deploymentID)
	dag.dagLock.Unlock()

	return state, err
}

// IsDeploymentActive returns true if the target deploymentID is active, and
// false otherwise.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
	dag.dagLock.Lock()
	state, err := dag.deploymentState(dag.selectedTip(), deploymentID)
	dag.dagLock.Unlock()
	if err != nil {
		return false, err
	}

	return state == ThresholdActive, nil
}

// deploymentState returns the current rule change threshold for a given
// deploymentID. The threshold is evaluated from the point of view of the block
// node passed in as the first argument to this method.
//
// It is important to note that, as the variable name indicates, this function
// expects the block node prior to the block for which the deployment state is
// desired. In other words, the returned deployment state is for the block
// AFTER the passed node.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) {
	if deploymentID > uint32(len(dag.dagParams.Deployments)) {
		return ThresholdFailed, DeploymentError(deploymentID)
	}

	deployment := &dag.dagParams.Deployments[deploymentID]
	checker := deploymentChecker{deployment: deployment, chain: dag}
	cache := &dag.deploymentCaches[deploymentID]

	return dag.thresholdState(prevNode, checker, cache)
}

// initThresholdCaches initializes the threshold state caches for each warning
// bit and defined deployment and provides warnings if the DAG is current per
// the warnUnknownVersions and warnUnknownRuleActivations functions.
func (dag *BlockDAG) initThresholdCaches() error {
	// Initialize the warning and deployment caches by calculating the
	// threshold state for each of them. This will ensure the caches are
	// populated and any states that need to be recalculated due to
	// definition changes are recalculated now.
	prevNode := dag.selectedTip().selectedParent
	for bit := uint32(0); bit < vbNumBits; bit++ {
		checker := bitConditionChecker{bit: bit, chain: dag}
		cache := &dag.warningCaches[bit]
		_, err := dag.thresholdState(prevNode, checker, cache)
		if err != nil {
			return err
		}
	}
	for id := 0; id < len(dag.dagParams.Deployments); id++ {
		deployment := &dag.dagParams.Deployments[id]
		cache := &dag.deploymentCaches[id]
		checker := deploymentChecker{deployment: deployment, chain: dag}
		_, err := dag.thresholdState(prevNode, checker, cache)
		if err != nil {
			return err
		}
	}

	// No warnings about unknown rules or versions until the DAG is
	// current.
	if dag.isCurrent() {
		// Warn if a high enough percentage of the last blocks have
		// unexpected versions.
		bestNode := dag.selectedTip()
		if err := dag.warnUnknownVersions(bestNode); err != nil {
			return err
		}

		// Warn if any unknown new rules are either about to activate or
		// have already been activated.
		if err := dag.warnUnknownRuleActivations(bestNode); err != nil {
			return err
		}
	}

	return nil
}
@@ -1,134 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"testing"

	"github.com/daglabs/btcd/dagconfig/daghash"
)

// TestThresholdStateStringer tests the stringized output for the
// ThresholdState type.
func TestThresholdStateStringer(t *testing.T) {
	t.Parallel()

	tests := []struct {
		in   ThresholdState
		want string
	}{
		{ThresholdDefined, "ThresholdDefined"},
		{ThresholdStarted, "ThresholdStarted"},
		{ThresholdLockedIn, "ThresholdLockedIn"},
		{ThresholdActive, "ThresholdActive"},
		{ThresholdFailed, "ThresholdFailed"},
		{0xff, "Unknown ThresholdState (255)"},
	}

	// Detect additional threshold states that don't have the stringer added.
	if len(tests)-1 != int(numThresholdsStates) {
		t.Errorf("It appears a threshold state was added without " +
			"adding an associated stringer test")
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		result := test.in.String()
		if result != test.want {
			t.Errorf("String #%d\n got: %s want: %s", i, result,
				test.want)
			continue
		}
	}
}

// TestThresholdStateCache ensures the threshold state cache works as intended,
// including adding entries, updating existing entries, and flushing.
func TestThresholdStateCache(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name       string
		numEntries int
		state      ThresholdState
	}{
		{name: "2 entries defined", numEntries: 2, state: ThresholdDefined},
		{name: "7 entries started", numEntries: 7, state: ThresholdStarted},
		{name: "10 entries active", numEntries: 10, state: ThresholdActive},
		{name: "5 entries locked in", numEntries: 5, state: ThresholdLockedIn},
		{name: "3 entries failed", numEntries: 3, state: ThresholdFailed},
	}

nextTest:
	for _, test := range tests {
		cache := &newThresholdCaches(1)[0]
		for i := 0; i < test.numEntries; i++ {
			var hash daghash.Hash
			hash[0] = uint8(i + 1)

			// Ensure the hash isn't available in the cache already.
			_, ok := cache.Lookup(&hash)
			if ok {
				t.Errorf("Lookup (%s): has entry for hash %v",
					test.name, hash)
				continue nextTest
			}

			// Ensure a hash that was added to the cache reports it's
			// available and the state is the expected value.
			cache.Update(&hash, test.state)
			state, ok := cache.Lookup(&hash)
			if !ok {
				t.Errorf("Lookup (%s): missing entry for hash "+
					"%v", test.name, hash)
				continue nextTest
			}
			if state != test.state {
				t.Errorf("Lookup (%s): state mismatch - got "+
					"%v, want %v", test.name, state,
					test.state)
				continue nextTest
			}

			// Ensure adding an existing hash with the same state
			// doesn't break the existing entry.
			cache.Update(&hash, test.state)
			state, ok = cache.Lookup(&hash)
			if !ok {
				t.Errorf("Lookup (%s): missing entry after "+
					"second add for hash %v", test.name,
					hash)
				continue nextTest
			}
			if state != test.state {
				t.Errorf("Lookup (%s): state mismatch after "+
					"second add - got %v, want %v",
					test.name, state, test.state)
				continue nextTest
			}

			// Ensure adding an existing hash with a different state
			// updates the existing entry.
			newState := ThresholdFailed
			if newState == test.state {
				newState = ThresholdStarted
			}
			cache.Update(&hash, newState)
			state, ok = cache.Lookup(&hash)
			if !ok {
				t.Errorf("Lookup (%s): missing entry after "+
					"state change for hash %v", test.name,
					hash)
				continue nextTest
			}
			if state != newState {
				t.Errorf("Lookup (%s): state mismatch after "+
					"state change - got %v, want %v",
					test.name, state, newState)
				continue nextTest
			}
		}
	}
}
@@ -1,321 +0,0 @@
package blockdag

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"sync"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/wire"
)

type blockUTXODiffData struct {
	diff      *UTXODiff
	diffChild *blockNode
}

type utxoDiffStore struct {
	dag    *BlockDAG
	dirty  map[daghash.Hash]struct{}
	loaded map[daghash.Hash]*blockUTXODiffData
	mtx    sync.RWMutex
}

func newUTXODiffStore(dag *BlockDAG) *utxoDiffStore {
	return &utxoDiffStore{
		dag:    dag,
		dirty:  make(map[daghash.Hash]struct{}),
		loaded: make(map[daghash.Hash]*blockUTXODiffData),
	}
}

func (diffStore *utxoDiffStore) setBlockDiff(node *blockNode, diff *UTXODiff) error {
	diffStore.mtx.Lock()
	defer diffStore.mtx.Unlock()
	// Load the diff data from the DB to diffStore.loaded.
	_, exists, err := diffStore.diffDataByHash(node.hash)
	if err != nil {
		return err
	}
	if !exists {
		diffStore.loaded[*node.hash] = &blockUTXODiffData{}
	}

	diffStore.loaded[*node.hash].diff = diff
	diffStore.setBlockAsDirty(node.hash)
	return nil
}

func (diffStore *utxoDiffStore) setBlockDiffChild(node *blockNode, diffChild *blockNode) error {
	diffStore.mtx.Lock()
	defer diffStore.mtx.Unlock()
	// Load the diff data from the DB to diffStore.loaded.
	_, exists, err := diffStore.diffDataByHash(node.hash)
	if err != nil {
		return err
	}
	if !exists {
		return diffNotFoundError(node)
	}

	diffStore.loaded[*node.hash].diffChild = diffChild
	diffStore.setBlockAsDirty(node.hash)
	return nil
}

func (diffStore *utxoDiffStore) setBlockAsDirty(blockHash *daghash.Hash) {
	diffStore.dirty[*blockHash] = struct{}{}
}

func (diffStore *utxoDiffStore) diffDataByHash(hash *daghash.Hash) (*blockUTXODiffData, bool, error) {
	if diffData, ok := diffStore.loaded[*hash]; ok {
		return diffData, true, nil
	}
	diffData, err := diffStore.diffDataFromDB(hash)
	if err != nil {
		return nil, false, err
	}
	exists := diffData != nil
	if exists {
		diffStore.loaded[*hash] = diffData
	}
	return diffData, exists, nil
}

func diffNotFoundError(node *blockNode) error {
	return fmt.Errorf("Couldn't find diff data for block %s", node.hash)
}

func (diffStore *utxoDiffStore) diffByNode(node *blockNode) (*UTXODiff, error) {
	diffStore.mtx.RLock()
	defer diffStore.mtx.RUnlock()
	diffData, exists, err := diffStore.diffDataByHash(node.hash)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, diffNotFoundError(node)
	}
	return diffData.diff, nil
}

func (diffStore *utxoDiffStore) diffChildByNode(node *blockNode) (*blockNode, error) {
	diffStore.mtx.RLock()
	defer diffStore.mtx.RUnlock()
	diffData, exists, err := diffStore.diffDataByHash(node.hash)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, diffNotFoundError(node)
	}
	return diffData.diffChild, nil
}

func (diffStore *utxoDiffStore) diffDataFromDB(hash *daghash.Hash) (*blockUTXODiffData, error) {
	var diffData *blockUTXODiffData
	err := diffStore.dag.db.View(func(dbTx database.Tx) error {
		bucket := dbTx.Metadata().Bucket(utxoDiffsBucketName)
		serializedBlockDiffData := bucket.Get(hash[:])
		if serializedBlockDiffData != nil {
			var err error
			diffData, err = diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
			return err
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return diffData, nil
}

func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffDataBytes []byte) (*blockUTXODiffData, error) {
	diffData := &blockUTXODiffData{}
	serializedDiffData := bytes.NewBuffer(serializedDiffDataBytes)

	var hasDiffChild bool
	err := wire.ReadElement(serializedDiffData, &hasDiffChild)
	if err != nil {
		return nil, err
	}

	if hasDiffChild {
		hash := &daghash.Hash{}
		err := wire.ReadElement(serializedDiffData, hash)
		if err != nil {
			return nil, err
		}
		diffData.diffChild = diffStore.dag.index.LookupNode(hash)
	}

	diffData.diff = &UTXODiff{}

	diffData.diff.toAdd, err = deserializeDiffEntries(serializedDiffData)
	if err != nil {
		return nil, err
	}

	diffData.diff.toRemove, err = deserializeDiffEntries(serializedDiffData)
	if err != nil {
		return nil, err
	}

	return diffData, nil
}

func deserializeDiffEntries(r io.Reader) (utxoCollection, error) {
	count, err := wire.ReadVarInt(r)
	if err != nil {
		return nil, err
	}
	collection := utxoCollection{}
	for i := uint64(0); i < count; i++ {
		outPointSize, err := wire.ReadVarInt(r)
		if err != nil {
			return nil, err
		}

		serializedOutPoint := make([]byte, outPointSize)
		err = binary.Read(r, byteOrder, serializedOutPoint)
		if err != nil {
			return nil, err
		}
		outPoint, err := deserializeOutPoint(serializedOutPoint)
		if err != nil {
			return nil, err
		}

		utxoEntrySize, err := wire.ReadVarInt(r)
		if err != nil {
			return nil, err
		}
		serializedEntry := make([]byte, utxoEntrySize)
		err = binary.Read(r, byteOrder, serializedEntry)
		if err != nil {
			return nil, err
		}
		utxoEntry, err := deserializeUTXOEntry(serializedEntry)
		if err != nil {
			return nil, err
		}
		collection.add(*outPoint, utxoEntry)
	}
	return collection, nil
}

// serializeBlockUTXODiffData serializes diff data in the following format:
// Name         | Data type | Description
// ------------ | --------- | -----------
// hasDiffChild | Boolean   | Indicates if a diff child exists
// diffChild    | Hash      | The diffChild's hash. Written only if hasDiffChild is true.
// diff         | UTXODiff  | The diff data's diff
func serializeBlockUTXODiffData(diffData *blockUTXODiffData) ([]byte, error) {
	w := &bytes.Buffer{}
	hasDiffChild := diffData.diffChild != nil
	err := wire.WriteElement(w, hasDiffChild)
	if err != nil {
		return nil, err
	}
	if hasDiffChild {
		err := wire.WriteElement(w, diffData.diffChild.hash)
		if err != nil {
			return nil, err
		}
	}

	err = serializeUTXODiff(w, diffData.diff)
	if err != nil {
		return nil, err
	}

	return w.Bytes(), nil
}
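A self-contained sketch of the length-prefixed framing used throughout the format above: each field is written as a varint length followed by the raw bytes, so the reader can allocate exactly before reading. The real code uses the package's wire.WriteVarInt/ReadVarInt helpers; this sketch substitutes the standard library's uvarint encoding, and the field contents are placeholders.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeLengthPrefixed emits a uvarint length followed by the raw bytes.
func writeLengthPrefixed(w *bytes.Buffer, data []byte) {
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(data)))
	w.Write(lenBuf[:n])
	w.Write(data)
}

// readLengthPrefixed reads the uvarint length, then exactly that many bytes.
func readLengthPrefixed(r *bytes.Buffer) ([]byte, error) {
	size, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, err
	}
	data := make([]byte, size)
	_, err = io.ReadFull(r, data)
	return data, err
}

func main() {
	buf := &bytes.Buffer{}
	writeLengthPrefixed(buf, []byte("serialized outpoint"))
	writeLengthPrefixed(buf, []byte("serialized utxo entry"))

	for i := 0; i < 2; i++ {
		field, err := readLengthPrefixed(buf)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", field)
	}
}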
// serializeUTXODiff serializes a UTXODiff by serializing
// UTXODiff.toAdd and UTXODiff.toRemove one after the other.
func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
	err := serializeUTXOCollection(w, diff.toAdd)
	if err != nil {
		return err
	}

	err = serializeUTXOCollection(w, diff.toRemove)
	if err != nil {
		return err
	}
	return nil
}

// serializeUTXOCollection serializes a utxoCollection by iterating over
// the utxo entries and serializing them and their corresponding outpoint,
// each prefixed by a varint that indicates its size.
func serializeUTXOCollection(w io.Writer, collection utxoCollection) error {
	err := wire.WriteVarInt(w, uint64(len(collection)))
	if err != nil {
		return err
	}
	for outPoint, utxoEntry := range collection {
		serializedOutPoint := *outpointKey(outPoint)
		err = wire.WriteVarInt(w, uint64(len(serializedOutPoint)))
		if err != nil {
			return err
		}

		err := binary.Write(w, byteOrder, serializedOutPoint)
		if err != nil {
			return err
		}

		serializedUTXOEntry, err := serializeUTXOEntry(utxoEntry)
		if err != nil {
			return err
		}
		err = wire.WriteVarInt(w, uint64(len(serializedUTXOEntry)))
		if err != nil {
			return err
		}
		err = binary.Write(w, byteOrder, serializedUTXOEntry)
		if err != nil {
			return err
		}
	}
	return nil
}

// flushToDB writes all dirty diff data to the database. The dirty set is
// cleared separately, via clearDirtyEntries, once the writes are known to
// have succeeded.
func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
	diffStore.mtx.Lock()
	defer diffStore.mtx.Unlock()
	if len(diffStore.dirty) == 0 {
		return nil
	}

	for hash := range diffStore.dirty {
		diffData := diffStore.loaded[hash]
		err := dbStoreDiffData(dbTx, &hash, diffData)
		if err != nil {
			return err
		}
	}
	return nil
}

func (diffStore *utxoDiffStore) clearDirtyEntries() {
	diffStore.dirty = make(map[daghash.Hash]struct{})
}

// dbStoreDiffData stores the UTXO diff data to the database.
// This overwrites the current entry if one exists.
func dbStoreDiffData(dbTx database.Tx, hash *daghash.Hash, diffData *blockUTXODiffData) error {
	serializedDiffData, err := serializeBlockUTXODiffData(diffData)
	if err != nil {
		return err
	}

	return dbTx.Metadata().Bucket(utxoDiffsBucketName).Put(hash[:], serializedDiffData)
}
@@ -1,87 +0,0 @@
package blockdag

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/wire"
)

func TestUTXODiffStore(t *testing.T) {
	// Create a new database and DAG instance to run tests against.
	dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", Config{
		DAGParams: &dagconfig.SimNetParams,
	})
	if err != nil {
		t.Fatalf("TestUTXODiffStore: Failed to setup DAG instance: %v", err)
	}
	defer teardownFunc()

	nodeCounter := byte(0)
	createNode := func() *blockNode {
		nodeCounter++
		node := &blockNode{hash: &daghash.Hash{nodeCounter}}
		dag.index.AddNode(node)
		return node
	}

	// Check that an error is returned when asking for a non-existing node.
	nonExistingNode := createNode()
	_, err = dag.utxoDiffStore.diffByNode(nonExistingNode)
	expectedErrString := fmt.Sprintf("Couldn't find diff data for block %s", nonExistingNode.hash)
	if err == nil || err.Error() != expectedErrString {
		t.Errorf("diffByNode: expected error %s but got %s", expectedErrString, err)
	}

	// Add a node's diff data to the utxoDiffStore and check that it's stored correctly.
	node := createNode()
	diff := NewUTXODiff()
	diff.toAdd.add(wire.OutPoint{TxID: daghash.TxID{0x01}, Index: 0}, &UTXOEntry{amount: 1, pkScript: []byte{0x01}})
	diff.toRemove.add(wire.OutPoint{TxID: daghash.TxID{0x02}, Index: 0}, &UTXOEntry{amount: 2, pkScript: []byte{0x02}})
	if err := dag.utxoDiffStore.setBlockDiff(node, diff); err != nil {
		t.Fatalf("setBlockDiff: unexpected error: %s", err)
	}
	diffChild := createNode()
	if err := dag.utxoDiffStore.setBlockDiffChild(node, diffChild); err != nil {
		t.Fatalf("setBlockDiffChild: unexpected error: %s", err)
	}

	if storeDiff, err := dag.utxoDiffStore.diffByNode(node); err != nil {
		t.Fatalf("diffByNode: unexpected error: %s", err)
	} else if !reflect.DeepEqual(storeDiff, diff) {
		t.Errorf("Expected diff and storeDiff to be equal")
	}

	if storeDiffChild, err := dag.utxoDiffStore.diffChildByNode(node); err != nil {
		t.Fatalf("diffChildByNode: unexpected error: %s", err)
	} else if !reflect.DeepEqual(storeDiffChild, diffChild) {
		t.Errorf("Expected diffChild and storeDiffChild to be equal")
	}

	// Flush changes to the db, delete them from the dag.utxoDiffStore.loaded
	// map, and check that the diff data is re-fetched from the database.
	err = dag.db.Update(func(dbTx database.Tx) error {
		return dag.utxoDiffStore.flushToDB(dbTx)
	})
	if err != nil {
		t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
	}
	delete(dag.utxoDiffStore.loaded, *node.hash)

	if storeDiff, err := dag.utxoDiffStore.diffByNode(node); err != nil {
		t.Fatalf("diffByNode: unexpected error: %s", err)
	} else if !reflect.DeepEqual(storeDiff, diff) {
		t.Errorf("Expected diff and storeDiff to be equal")
	}

	// Check that diffByNode caches the result in dag.utxoDiffStore.loaded.
	if loadedDiffData, ok := dag.utxoDiffStore.loaded[*node.hash]; !ok {
		t.Errorf("the diff data wasn't added to the loaded map after requesting it")
	} else if !reflect.DeepEqual(loadedDiffData.diff, diff) {
		t.Errorf("Expected diff and loadedDiff to be equal")
	}
}
@@ -1,587 +0,0 @@
package blockdag

import (
	"errors"
	"fmt"
	"sort"
	"strings"

	"github.com/daglabs/btcd/wire"
)

// UTXOEntry houses details about an individual transaction output in a utxo
// set such as whether or not it was contained in a block reward tx, the height of
// the block that contains the tx, its public key script, and how much it pays.
type UTXOEntry struct {
	// NOTE: Additions, deletions, or modifications to the order of the
	// definitions in this struct should not be changed without considering
	// how it affects alignment on 64-bit platforms. The current order is
	// specifically crafted to result in minimal padding. There will be a
	// lot of these in memory, so a few extra bytes of padding adds up.

	amount      uint64
	pkScript    []byte // The public key script for the output.
	blockHeight int32  // Height of block containing tx.

	// packedFlags contains additional info about the output, such as whether
	// it is a block reward. This approach is used in order to reduce memory
	// usage since there will be a lot of these in memory.
	packedFlags txoFlags
}

// IsBlockReward returns whether or not the output was contained in a block
// reward transaction.
func (entry *UTXOEntry) IsBlockReward() bool {
	return entry.packedFlags&tfBlockReward == tfBlockReward
}

// BlockHeight returns the height of the block containing the output.
func (entry *UTXOEntry) BlockHeight() int32 {
	return entry.blockHeight
}

// Amount returns the amount of the output.
func (entry *UTXOEntry) Amount() uint64 {
	return entry.amount
}

// PkScript returns the public key script for the output.
func (entry *UTXOEntry) PkScript() []byte {
	return entry.pkScript
}

// txoFlags is a bitmask defining additional information and state for a
// transaction output in a UTXO set.
type txoFlags uint8

const (
	// tfBlockReward indicates that a txout was contained in a block reward tx (coinbase or fee transaction).
	tfBlockReward txoFlags = 1 << iota
)

// utxoCollection represents a set of UTXOs indexed by their outPoints
type utxoCollection map[wire.OutPoint]*UTXOEntry

func (uc utxoCollection) String() string {
	utxoStrings := make([]string, len(uc))

	i := 0
	for outPoint, utxoEntry := range uc {
		utxoStrings[i] = fmt.Sprintf("(%s, %d) => %d", outPoint.TxID, outPoint.Index, utxoEntry.amount)
		i++
	}

	// Sort strings for determinism.
	sort.Strings(utxoStrings)

	return fmt.Sprintf("[ %s ]", strings.Join(utxoStrings, ", "))
}
|
||||
|
||||
// add adds a new UTXO entry to this collection
|
||||
func (uc utxoCollection) add(outPoint wire.OutPoint, entry *UTXOEntry) {
|
||||
uc[outPoint] = entry
|
||||
}
|
||||
|
||||
// remove removes a UTXO entry from this collection if it exists
|
||||
func (uc utxoCollection) remove(outPoint wire.OutPoint) {
|
||||
delete(uc, outPoint)
|
||||
}
|
||||
|
||||
// get returns the UTXOEntry represented by provided outPoint,
|
||||
// and a boolean value indicating if said UTXOEntry is in the set or not
|
||||
func (uc utxoCollection) get(outPoint wire.OutPoint) (*UTXOEntry, bool) {
|
||||
entry, ok := uc[outPoint]
|
||||
return entry, ok
|
||||
}
|
||||
|
||||
// contains returns a boolean value indicating whether a UTXO entry is in the set
|
||||
func (uc utxoCollection) contains(outPoint wire.OutPoint) bool {
|
||||
_, ok := uc[outPoint]
|
||||
return ok
|
||||
}
|
||||
|
||||
// clone returns a clone of this collection
|
||||
func (uc utxoCollection) clone() utxoCollection {
|
||||
clone := utxoCollection{}
|
||||
for outPoint, entry := range uc {
|
||||
clone.add(outPoint, entry)
|
||||
}
|
||||
|
||||
return clone
|
||||
}
|
||||
|
||||
// UTXODiff represents a diff between two UTXO Sets.
|
||||
type UTXODiff struct {
|
||||
toAdd utxoCollection
|
||||
toRemove utxoCollection
|
||||
}
|
||||
|
||||
// NewUTXODiff creates a new, empty utxoDiff
|
||||
func NewUTXODiff() *UTXODiff {
|
||||
return &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{},
|
||||
}
|
||||
}
|
||||
|
||||
// diffFrom returns a new utxoDiff with the difference between this utxoDiff and another
|
||||
// Assumes that:
|
||||
// Both utxoDiffs are from the same base
|
||||
// If a txOut exists in both utxoDiffs, its underlying values would be the same
|
||||
//
|
||||
// diffFrom follows a set of rules represented by the following 3 by 3 table:
|
||||
//
|
||||
// | | this | |
|
||||
// ---------+-----------+-----------+-----------+-----------
|
||||
// | | toAdd | toRemove | None
|
||||
// ---------+-----------+-----------+-----------+-----------
|
||||
// other | toAdd | - | X | toAdd
|
||||
// ---------+-----------+-----------+-----------+-----------
|
||||
// | toRemove | X | - | toRemove
|
||||
// ---------+-----------+-----------+-----------+-----------
|
||||
// | None | toRemove | toAdd | -
|
||||
//
|
||||
// Key:
|
||||
// - Don't add anything to the result
|
||||
// X Return an error
|
||||
// toAdd Add the UTXO into the toAdd collection of the result
|
||||
// toRemove Add the UTXO into the toRemove collection of the result
|
||||
//
|
||||
// Examples:
|
||||
// 1. This diff contains a UTXO in toAdd, and the other diff contains it in toRemove
|
||||
// diffFrom results in an error
|
||||
// 2. This diff contains a UTXO in toRemove, and the other diff does not contain it
|
||||
// diffFrom results in the UTXO being added to toAdd
|
||||
func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
|
||||
result := NewUTXODiff()
|
||||
|
||||
// Note that the following cases are not accounted for, as they are impossible
|
||||
// as long as the base utxoSet is the same:
|
||||
// - if utxoEntry is in d.toAdd and other.toRemove
|
||||
// - if utxoEntry is in d.toRemove and other.toAdd
|
||||
|
||||
// All transactions in d.toAdd:
|
||||
// If they are not in other.toAdd - should be added in result.toRemove
|
||||
// If they are in other.toRemove - base utxoSet is not the same
|
||||
for outPoint, utxoEntry := range d.toAdd {
|
||||
if !other.toAdd.contains(outPoint) {
|
||||
result.toRemove.add(outPoint, utxoEntry)
|
||||
}
|
||||
if other.toRemove.contains(outPoint) {
|
||||
return nil, fmt.Errorf("diffFrom: outpoint %s both in d.toAdd and in other.toRemove", outPoint)
|
||||
}
|
||||
}
|
||||
|
||||
// All transactions in d.toRemove:
|
||||
// If they are not in other.toRemove - should be added in result.toAdd
|
||||
// If they are in other.toAdd - base utxoSet is not the same
|
||||
for outPoint, utxoEntry := range d.toRemove {
|
||||
if !other.toRemove.contains(outPoint) {
|
||||
result.toAdd.add(outPoint, utxoEntry)
|
||||
}
|
||||
if other.toAdd.contains(outPoint) {
|
||||
return nil, errors.New("diffFrom: transaction both in d.toRemove and in other.toAdd")
|
||||
}
|
||||
}
|
||||
|
||||
// All transactions in other.toAdd:
|
||||
// If they are not in d.toAdd - should be added in result.toAdd
|
||||
for outPoint, utxoEntry := range other.toAdd {
|
||||
if !d.toAdd.contains(outPoint) {
|
||||
result.toAdd.add(outPoint, utxoEntry)
|
||||
}
|
||||
}
|
||||
|
||||
// All transactions in other.toRemove:
|
||||
// If they are not in d.toRemove - should be added in result.toRemove
|
||||
for outPoint, utxoEntry := range other.toRemove {
|
||||
if !d.toRemove.contains(outPoint) {
|
||||
result.toRemove.add(outPoint, utxoEntry)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
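
// An illustrative sketch of the rule table above, assuming two diffs built
// over the same base UTXO set: if this diff adds an outpoint that the other
// diff never mentions, diffFrom places that outpoint in the result's
// toRemove, since undoing the add is what it takes to get from this diff
// to the other. The outpoint and entry below are made up for illustration.
func exampleDiffFrom() (*UTXODiff, error) {
	outPoint := wire.OutPoint{Index: 0} // zero TxID, for illustration only
	entry := &UTXOEntry{amount: 10, pkScript: []byte{0x01}}

	thisDiff := NewUTXODiff()
	thisDiff.toAdd.add(outPoint, entry)
	otherDiff := NewUTXODiff()

	// The result's toRemove will contain outPoint; applying the result on
	// top of thisDiff reproduces otherDiff.
	return thisDiff.diffFrom(otherDiff)
}
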
// WithDiff applies the provided diff to this diff, creating a new utxoDiff
// that is the result of first applying d, and then applying diff, to the
// same base.
//
// WithDiff follows a set of rules represented by the following 3 by 3 table:
//
//          |           |   this    |           |
// ---------+-----------+-----------+-----------+-----------
//          |           | toAdd     | toRemove  | None
// ---------+-----------+-----------+-----------+-----------
// other    | toAdd     | X         | -         | toAdd
// ---------+-----------+-----------+-----------+-----------
//          | toRemove  | -         | X         | toRemove
// ---------+-----------+-----------+-----------+-----------
//          | None      | toAdd     | toRemove  | -
//
// Key:
// -        Don't add anything to the result
// X        Return an error
// toAdd    Add the UTXO into the toAdd collection of the result
// toRemove Add the UTXO into the toRemove collection of the result
//
// Examples:
// 1. This diff contains a UTXO in toAdd, and the other diff contains it in toRemove
//    WithDiff results in nothing being added
// 2. This diff contains a UTXO in toRemove, and the other diff does not contain it
//    WithDiff results in the UTXO being added to toRemove
func (d *UTXODiff) WithDiff(diff *UTXODiff) (*UTXODiff, error) {
	result := NewUTXODiff()

	// All transactions in d.toAdd:
	// If they are not in diff.toRemove - should be added in result.toAdd
	// If they are in diff.toAdd - should return an error
	// Otherwise - should be ignored
	for outPoint, utxoEntry := range d.toAdd {
		if !diff.toRemove.contains(outPoint) {
			result.toAdd.add(outPoint, utxoEntry)
		}
		if diff.toAdd.contains(outPoint) {
			return nil, ruleError(ErrWithDiff, fmt.Sprintf("WithDiff: outpoint %s both in d.toAdd and in other.toAdd", outPoint))
		}
	}

	// All transactions in d.toRemove:
	// If they are not in diff.toAdd - should be added in result.toRemove
	// If they are in diff.toRemove - should return an error
	// Otherwise - should be ignored
	for outPoint, utxoEntry := range d.toRemove {
		if !diff.toAdd.contains(outPoint) {
			result.toRemove.add(outPoint, utxoEntry)
		}
		if diff.toRemove.contains(outPoint) {
			return nil, ruleError(ErrWithDiff, "WithDiff: transaction both in d.toRemove and in other.toRemove")
		}
	}

	// All transactions in diff.toAdd:
	// If they are not in d.toRemove - should be added in result.toAdd
	for outPoint, utxoEntry := range diff.toAdd {
		if !d.toRemove.contains(outPoint) {
			result.toAdd.add(outPoint, utxoEntry)
		}
	}

	// All transactions in diff.toRemove:
	// If they are not in d.toAdd - should be added in result.toRemove
	for outPoint, utxoEntry := range diff.toRemove {
		if !d.toAdd.contains(outPoint) {
			result.toRemove.add(outPoint, utxoEntry)
		}
	}

	return result, nil
}
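
// An illustrative sketch of WithDiff composition, assuming the second diff is
// meant to be applied on top of the first: adding an outpoint and then
// removing it in the follow-up diff cancels out, so the combined diff is
// empty. The outpoint and entry below are made up for illustration.
func exampleWithDiff() (*UTXODiff, error) {
	outPoint := wire.OutPoint{Index: 0} // zero TxID, for illustration only
	entry := &UTXOEntry{amount: 10}

	first := NewUTXODiff()
	first.toAdd.add(outPoint, entry)
	second := NewUTXODiff()
	second.toRemove.add(outPoint, entry)

	// The combined diff's toAdd and toRemove are both empty: the add and
	// the remove cancel each other out.
	return first.WithDiff(second)
}
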
// clone returns a clone of this utxoDiff
func (d *UTXODiff) clone() *UTXODiff {
	return &UTXODiff{
		toAdd:    d.toAdd.clone(),
		toRemove: d.toRemove.clone(),
	}
}

// RemoveTxOuts marks the transaction's outputs for removal
func (d *UTXODiff) RemoveTxOuts(tx *wire.MsgTx) {
	for idx := range tx.TxOut {
		hash := tx.TxID()
		d.toRemove.add(*wire.NewOutPoint(&hash, uint32(idx)), nil)
	}
}

// AddEntry adds a UTXOEntry to the diff
func (d *UTXODiff) AddEntry(outpoint wire.OutPoint, entry *UTXOEntry) {
	d.toAdd.add(outpoint, entry)
}

func (d UTXODiff) String() string {
	return fmt.Sprintf("toAdd: %s; toRemove: %s", d.toAdd, d.toRemove)
}

// NewUTXOEntry creates a new utxoEntry representing the given txOut
func NewUTXOEntry(txOut *wire.TxOut, isBlockReward bool, blockHeight int32) *UTXOEntry {
	entry := &UTXOEntry{
		amount:      txOut.Value,
		pkScript:    txOut.PkScript,
		blockHeight: blockHeight,
	}

	if isBlockReward {
		entry.packedFlags |= tfBlockReward
	}

	return entry
}

// UTXOSet represents a set of unspent transaction outputs.
// Every DAG has exactly one fullUTXOSet.
// When a new block arrives, it is validated and applied to the fullUTXOSet in the following manner:
// 1. Get the block's PastUTXO
// 2. Add all the block's transactions to the block's PastUTXO
// 3. For each of the block's parents,
//    3.1. Rebuild their utxoDiff
//    3.2. Set the block as their diffChild
// 4. Create and initialize a new virtual block
// 5. Get the new virtual's PastUTXO
// 6. Rebuild the utxoDiff for all the tips
// 7. Convert (meld) the new virtual's diffUTXOSet into a fullUTXOSet. This updates the DAG's fullUTXOSet
type UTXOSet interface {
	fmt.Stringer
	diffFrom(other UTXOSet) (*UTXODiff, error)
	WithDiff(utxoDiff *UTXODiff) (UTXOSet, error)
	diffFromTx(tx *wire.MsgTx, node *blockNode) (*UTXODiff, error)
	AddTx(tx *wire.MsgTx, blockHeight int32) (ok bool)
	clone() UTXOSet
	Get(outPoint wire.OutPoint) (*UTXOEntry, bool)
}
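
// An illustrative sketch of the fullUTXOSet/diffUTXOSet life cycle described
// above, assuming a hand-built diff rather than one produced by block
// validation: start from a full set, stage changes in a diff layer via
// WithDiff, and finally meld the staged changes into the base.
func exampleUTXOSetLifeCycle() error {
	fullSet := NewFullUTXOSet()

	diff := NewUTXODiff()
	diff.toAdd.add(wire.OutPoint{Index: 0}, &UTXOEntry{amount: 10})

	layered, err := fullSet.WithDiff(diff)
	if err != nil {
		return err
	}

	// Melding folds the staged diff into the base full set and resets the
	// diff layer; fullSet now contains the added outpoint directly.
	diffSet := layered.(*DiffUTXOSet)
	diffSet.meldToBase()
	return nil
}
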
// diffFromTx is a common implementation for diffFromTx that works
// for both diff-based and full UTXO sets.
// It returns a diff that is equivalent to the provided transaction,
// or an error if the transaction is not valid in the context of this UTXOSet.
func diffFromTx(u UTXOSet, tx *wire.MsgTx, containingNode *blockNode) (*UTXODiff, error) {
	diff := NewUTXODiff()
	isBlockReward := tx.IsBlockReward()
	if !isBlockReward {
		for _, txIn := range tx.TxIn {
			if entry, ok := u.Get(txIn.PreviousOutPoint); ok {
				diff.toRemove.add(txIn.PreviousOutPoint, entry)
			} else {
				return nil, ruleError(ErrMissingTxOut, fmt.Sprintf(
					"Transaction %s is invalid because it spends outpoint %s that is not in the UTXO set",
					tx.TxID(), txIn.PreviousOutPoint))
			}
		}
	}
	for i, txOut := range tx.TxOut {
		hash := tx.TxID()
		entry := NewUTXOEntry(txOut, isBlockReward, containingNode.height)
		outPoint := *wire.NewOutPoint(&hash, uint32(i))
		diff.toAdd.add(outPoint, entry)
	}
	return diff, nil
}
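
// An illustrative sketch of diffFromTx, assuming the caller passes a block
// reward transaction: no inputs are consumed, so the input checks are
// skipped and the resulting diff simply adds the transaction's outputs at
// the containing node's height. The blockNode literal below is a stand-in
// for a real DAG node.
func exampleDiffFromTx(rewardTx *wire.MsgTx) (*UTXODiff, error) {
	set := NewFullUTXOSet()
	fakeNode := &blockNode{height: 1}
	// For a block reward tx, only diff.toAdd is populated.
	return diffFromTx(set, rewardTx, fakeNode)
}
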
// FullUTXOSet represents a full list of transaction outputs and their values
type FullUTXOSet struct {
	utxoCollection
}

// NewFullUTXOSet creates a new utxoSet with a full list of transaction outputs and their values
func NewFullUTXOSet() *FullUTXOSet {
	return &FullUTXOSet{
		utxoCollection: utxoCollection{},
	}
}

// diffFrom returns the difference between this utxoSet and another.
// diffFrom can only work when other is a diffUTXOSet whose base is this fullUTXOSet.
func (fus *FullUTXOSet) diffFrom(other UTXOSet) (*UTXODiff, error) {
	otherDiffSet, ok := other.(*DiffUTXOSet)
	if !ok {
		return nil, errors.New("can't diffFrom two fullUTXOSets")
	}

	if otherDiffSet.base != fus {
		return nil, errors.New("can diffFrom only with a diffUTXOSet where this fullUTXOSet is the base")
	}

	return otherDiffSet.UTXODiff, nil
}

// WithDiff returns a utxoSet which is the result of applying the given diff to this utxoSet
func (fus *FullUTXOSet) WithDiff(other *UTXODiff) (UTXOSet, error) {
	return NewDiffUTXOSet(fus, other.clone()), nil
}

// AddTx adds a transaction to this utxoSet and returns true iff it's valid in this UTXO's context
func (fus *FullUTXOSet) AddTx(tx *wire.MsgTx, blockHeight int32) bool {
	isBlockReward := tx.IsBlockReward()
	if !isBlockReward {
		if !fus.containsInputs(tx) {
			return false
		}

		for _, txIn := range tx.TxIn {
			outPoint := *wire.NewOutPoint(&txIn.PreviousOutPoint.TxID, txIn.PreviousOutPoint.Index)
			fus.remove(outPoint)
		}
	}

	for i, txOut := range tx.TxOut {
		hash := tx.TxID()
		outPoint := *wire.NewOutPoint(&hash, uint32(i))
		entry := NewUTXOEntry(txOut, isBlockReward, blockHeight)

		fus.add(outPoint, entry)
	}

	return true
}
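
// An illustrative sketch of AddTx on a full set, assuming a regular (non
// block reward) transaction with at least one input: the tx is rejected
// until its input outpoints exist in the set, and once accepted the spent
// outpoints are removed while the new outputs are added.
func exampleFullSetAddTx(tx *wire.MsgTx) bool {
	set := NewFullUTXOSet()

	// An empty set can't satisfy the tx's inputs, so AddTx returns false.
	if set.AddTx(tx, 1) {
		return true
	}

	// Seed the set with the tx's input outpoints; AddTx should now succeed,
	// removing the spent outpoints and adding the new outputs.
	for _, txIn := range tx.TxIn {
		set.add(txIn.PreviousOutPoint, &UTXOEntry{amount: 10})
	}
	return set.AddTx(tx, 1)
}
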
// diffFromTx returns a diff that is equivalent to the provided transaction,
// or an error if the transaction is not valid in the context of this UTXOSet
func (fus *FullUTXOSet) diffFromTx(tx *wire.MsgTx, node *blockNode) (*UTXODiff, error) {
	return diffFromTx(fus, tx, node)
}

func (fus *FullUTXOSet) containsInputs(tx *wire.MsgTx) bool {
	for _, txIn := range tx.TxIn {
		outPoint := *wire.NewOutPoint(&txIn.PreviousOutPoint.TxID, txIn.PreviousOutPoint.Index)
		if !fus.contains(outPoint) {
			return false
		}
	}

	return true
}

// clone returns a clone of this utxoSet
func (fus *FullUTXOSet) clone() UTXOSet {
	return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone()}
}

// Get returns the UTXOEntry associated with the given OutPoint, and a boolean indicating whether such an entry was found
func (fus *FullUTXOSet) Get(outPoint wire.OutPoint) (*UTXOEntry, bool) {
	utxoEntry, ok := fus.utxoCollection[outPoint]
	return utxoEntry, ok
}

// DiffUTXOSet represents a utxoSet with a base fullUTXOSet and a UTXODiff
type DiffUTXOSet struct {
	base     *FullUTXOSet
	UTXODiff *UTXODiff
}

// NewDiffUTXOSet creates a new utxoSet based on a base fullUTXOSet and a UTXODiff
func NewDiffUTXOSet(base *FullUTXOSet, diff *UTXODiff) *DiffUTXOSet {
	return &DiffUTXOSet{
		base:     base,
		UTXODiff: diff,
	}
}

// diffFrom returns the difference between this utxoSet and another.
// diffFrom can work if other is this utxoSet's base fullUTXOSet, or a diffUTXOSet with the same base as this one
func (dus *DiffUTXOSet) diffFrom(other UTXOSet) (*UTXODiff, error) {
	otherDiffSet, ok := other.(*DiffUTXOSet)
	if !ok {
		return nil, errors.New("can't diffFrom diffUTXOSet with fullUTXOSet")
	}

	if otherDiffSet.base != dus.base {
		return nil, errors.New("can't diffFrom with another diffUTXOSet with a different base")
	}

	return dus.UTXODiff.diffFrom(otherDiffSet.UTXODiff)
}

// WithDiff returns a new utxoSet which is the result of applying the given diff on top of this utxoSet
func (dus *DiffUTXOSet) WithDiff(other *UTXODiff) (UTXOSet, error) {
	diff, err := dus.UTXODiff.WithDiff(other)
	if err != nil {
		return nil, err
	}

	return NewDiffUTXOSet(dus.base, diff), nil
}

// AddTx adds a transaction to this utxoSet and returns true iff it's valid in this UTXO's context
func (dus *DiffUTXOSet) AddTx(tx *wire.MsgTx, blockHeight int32) bool {
	isBlockReward := tx.IsBlockReward()
	if !isBlockReward && !dus.containsInputs(tx) {
		return false
	}

	dus.appendTx(tx, blockHeight, isBlockReward)

	return true
}

func (dus *DiffUTXOSet) appendTx(tx *wire.MsgTx, blockHeight int32, isBlockReward bool) {
	if !isBlockReward {
		for _, txIn := range tx.TxIn {
			outPoint := *wire.NewOutPoint(&txIn.PreviousOutPoint.TxID, txIn.PreviousOutPoint.Index)
			if dus.UTXODiff.toAdd.contains(outPoint) {
				dus.UTXODiff.toAdd.remove(outPoint)
			} else {
				prevUTXOEntry := dus.base.utxoCollection[outPoint]
				dus.UTXODiff.toRemove.add(outPoint, prevUTXOEntry)
			}
		}
	}

	for i, txOut := range tx.TxOut {
		hash := tx.TxID()
		outPoint := *wire.NewOutPoint(&hash, uint32(i))
		entry := NewUTXOEntry(txOut, isBlockReward, blockHeight)

		if dus.UTXODiff.toRemove.contains(outPoint) {
			dus.UTXODiff.toRemove.remove(outPoint)
		} else {
			dus.UTXODiff.toAdd.add(outPoint, entry)
		}
	}
}
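
// An illustrative sketch of appendTx's bookkeeping, assuming the spent
// outpoints were staged in this diff layer rather than in the base:
// spending an outpoint that the diff itself added simply cancels the add,
// instead of recording a remove against the base set, keeping the diff
// minimal.
func exampleAppendTxCancel(tx *wire.MsgTx) {
	diffSet := NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff())
	// Stage the tx's input outpoints in the diff layer.
	for _, txIn := range tx.TxIn {
		diffSet.UTXODiff.toAdd.add(txIn.PreviousOutPoint, &UTXOEntry{amount: 10})
	}
	// appendTx removes those staged entries from toAdd (a cancel-out)
	// instead of adding them to toRemove.
	diffSet.appendTx(tx, 1, false)
}
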
func (dus *DiffUTXOSet) containsInputs(tx *wire.MsgTx) bool {
	for _, txIn := range tx.TxIn {
		outPoint := *wire.NewOutPoint(&txIn.PreviousOutPoint.TxID, txIn.PreviousOutPoint.Index)
		isInBase := dus.base.contains(outPoint)
		isInDiffToAdd := dus.UTXODiff.toAdd.contains(outPoint)
		isInDiffToRemove := dus.UTXODiff.toRemove.contains(outPoint)
		if (!isInBase && !isInDiffToAdd) || isInDiffToRemove {
			return false
		}
	}

	return true
}

// meldToBase updates the base fullUTXOSet with all changes in diff
func (dus *DiffUTXOSet) meldToBase() {
	for outPoint := range dus.UTXODiff.toRemove {
		dus.base.remove(outPoint)
	}

	for outPoint, utxoEntry := range dus.UTXODiff.toAdd {
		dus.base.add(outPoint, utxoEntry)
	}

	dus.UTXODiff = NewUTXODiff()
}
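
// An illustrative restatement of the visibility rule that containsInputs
// enforces, written as a standalone predicate: an outpoint is spendable in
// a diffUTXOSet if it is in the base or in toAdd, and not in toRemove.
func exampleOutPointVisible(dus *DiffUTXOSet, outPoint wire.OutPoint) bool {
	inBase := dus.base.contains(outPoint)
	inToAdd := dus.UTXODiff.toAdd.contains(outPoint)
	inToRemove := dus.UTXODiff.toRemove.contains(outPoint)
	return (inBase || inToAdd) && !inToRemove
}
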
// diffFromTx returns a diff that is equivalent to the provided transaction,
// or an error if the transaction is not valid in the context of this UTXOSet
func (dus *DiffUTXOSet) diffFromTx(tx *wire.MsgTx, node *blockNode) (*UTXODiff, error) {
	return diffFromTx(dus, tx, node)
}

func (dus *DiffUTXOSet) String() string {
	return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove)
}

// clone returns a clone of this UTXO Set
func (dus *DiffUTXOSet) clone() UTXOSet {
	return NewDiffUTXOSet(dus.base.clone().(*FullUTXOSet), dus.UTXODiff.clone())
}

// Get returns the UTXOEntry associated with the provided outPoint in this UTXOSet.
// It returns false in the second return value if the UTXOEntry was not found.
func (dus *DiffUTXOSet) Get(outPoint wire.OutPoint) (*UTXOEntry, bool) {
	if dus.UTXODiff.toRemove.contains(outPoint) {
		return nil, false
	}
	if txOut, ok := dus.base.get(outPoint); ok {
		return txOut, true
	}
	txOut, ok := dus.UTXODiff.toAdd.get(outPoint)
	return txOut, ok
}
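
// An illustrative sketch of Get's lookup order, assuming a hand-built set:
// toRemove shadows everything, then the base is consulted, then toAdd. Here
// the outpoint exists in the base but is shadowed by toRemove, so Get
// reports it as missing and this returns false.
func exampleGetShadowing() bool {
	base := NewFullUTXOSet()
	outPoint := wire.OutPoint{Index: 0} // zero TxID, for illustration only
	base.add(outPoint, &UTXOEntry{amount: 10})

	diff := NewUTXODiff()
	diff.toRemove.add(outPoint, &UTXOEntry{amount: 10})

	_, ok := NewDiffUTXOSet(base, diff).Get(outPoint)
	return ok
}
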
@@ -1,970 +0,0 @@
package blockdag

import (
	"math"
	"reflect"
	"testing"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/wire"
)

// TestUTXOCollection makes sure that utxoCollection cloning and string representations work as expected.
func TestUTXOCollection(t *testing.T) {
	txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
	txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
	outPoint0 := *wire.NewOutPoint(txID0, 0)
	outPoint1 := *wire.NewOutPoint(txID1, 0)
	utxoEntry0 := NewUTXOEntry(&wire.TxOut{PkScript: []byte{}, Value: 10}, true, 0)
	utxoEntry1 := NewUTXOEntry(&wire.TxOut{PkScript: []byte{}, Value: 20}, false, 1)

	// For each of the following test cases, we will:
	// .String() the given collection and compare it to expectedString
	// .clone() the given collection and compare its value to itself (expected: equal) and its reference to itself (expected: not equal)
	tests := []struct {
		name           string
		collection     utxoCollection
		expectedString string
	}{
		{
			name:           "empty collection",
			collection:     utxoCollection{},
			expectedString: "[ ]",
		},
		{
			name: "one member",
			collection: utxoCollection{
				outPoint0: utxoEntry1,
			},
			expectedString: "[ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 20 ]",
		},
		{
			name: "two members",
			collection: utxoCollection{
				outPoint0: utxoEntry0,
				outPoint1: utxoEntry1,
			},
			expectedString: "[ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20 ]",
		},
	}

	for _, test := range tests {
		// Test utxoCollection string representation
		collectionString := test.collection.String()
		if collectionString != test.expectedString {
			t.Errorf("unexpected string in test \"%s\". "+
				"Expected: \"%s\", got: \"%s\".", test.name, test.expectedString, collectionString)
		}

		// Test utxoCollection cloning
		collectionClone := test.collection.clone()
		if reflect.ValueOf(collectionClone).Pointer() == reflect.ValueOf(test.collection).Pointer() {
			t.Errorf("collection is reference-equal to its clone in test \"%s\".", test.name)
		}
		if !reflect.DeepEqual(test.collection, collectionClone) {
			t.Errorf("collection is not equal to its clone in test \"%s\". "+
				"Expected: \"%s\", got: \"%s\".", test.name, collectionString, collectionClone.String())
		}
	}
}
// TestUTXODiff makes sure that utxoDiff creation, cloning, and string representations work as expected.
func TestUTXODiff(t *testing.T) {
	txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
	txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
	outPoint0 := *wire.NewOutPoint(txID0, 0)
	outPoint1 := *wire.NewOutPoint(txID1, 0)
	utxoEntry0 := NewUTXOEntry(&wire.TxOut{PkScript: []byte{}, Value: 10}, true, 0)
	utxoEntry1 := NewUTXOEntry(&wire.TxOut{PkScript: []byte{}, Value: 20}, false, 1)
	diff := UTXODiff{
		toAdd:    utxoCollection{outPoint0: utxoEntry0},
		toRemove: utxoCollection{outPoint1: utxoEntry1},
	}

	// Test utxoDiff creation
	newDiff := NewUTXODiff()
	if len(newDiff.toAdd) != 0 || len(newDiff.toRemove) != 0 {
		t.Errorf("new diff is not empty")
	}

	// Test utxoDiff cloning
	clonedDiff := *diff.clone()
	if &clonedDiff == &diff {
		t.Errorf("cloned diff is reference-equal to the original")
	}
	if !reflect.DeepEqual(clonedDiff, diff) {
		t.Errorf("cloned diff is not equal to the original. "+
			"Original: \"%v\", cloned: \"%v\".", diff, clonedDiff)
	}

	// Test utxoDiff string representation
	expectedDiffString := "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20 ]"
	diffString := clonedDiff.String()
	if diffString != expectedDiffString {
		t.Errorf("unexpected diff string. "+
			"Expected: \"%s\", got: \"%s\".", expectedDiffString, diffString)
	}
}
// TestUTXODiffRules makes sure that all diffFrom and WithDiff rules are followed.
// Each test case represents a cell in the two tables outlined in the documentation for utxoDiff.
func TestUTXODiffRules(t *testing.T) {
	txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
	outPoint0 := *wire.NewOutPoint(txID0, 0)
	utxoEntry0 := NewUTXOEntry(&wire.TxOut{PkScript: []byte{}, Value: 10}, true, 0)

	// For each of the following test cases, we will:
	// this.diffFrom(other) and compare it to expectedDiffFromResult
	// this.WithDiff(other) and compare it to expectedWithDiffResult
	//
	// Note: an expected nil result means that we expect the respective operation to fail
	tests := []struct {
		name                   string
		this                   *UTXODiff
		other                  *UTXODiff
		expectedDiffFromResult *UTXODiff
		expectedWithDiffResult *UTXODiff
	}{
		{
			name: "one toAdd in this, one toAdd in other",
			this: &UTXODiff{
				toAdd:    utxoCollection{outPoint0: utxoEntry0},
				toRemove: utxoCollection{},
			},
			other: &UTXODiff{
				toAdd:    utxoCollection{outPoint0: utxoEntry0},
				toRemove: utxoCollection{},
			},
			expectedDiffFromResult: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{},
			},
			expectedWithDiffResult: nil,
		},
		{
			name: "one toAdd in this, one toRemove in other",
			this: &UTXODiff{
				toAdd:    utxoCollection{outPoint0: utxoEntry0},
				toRemove: utxoCollection{},
			},
			other: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{outPoint0: utxoEntry0},
			},
			expectedDiffFromResult: nil,
			expectedWithDiffResult: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{},
			},
		},
		{
			name: "one toAdd in this, empty other",
			this: &UTXODiff{
				toAdd:    utxoCollection{outPoint0: utxoEntry0},
				toRemove: utxoCollection{},
			},
			other: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{},
			},
			expectedDiffFromResult: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{outPoint0: utxoEntry0},
			},
			expectedWithDiffResult: &UTXODiff{
				toAdd:    utxoCollection{outPoint0: utxoEntry0},
				toRemove: utxoCollection{},
			},
		},
		{
			name: "one toRemove in this, one toAdd in other",
			this: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{outPoint0: utxoEntry0},
			},
			other: &UTXODiff{
				toAdd:    utxoCollection{outPoint0: utxoEntry0},
				toRemove: utxoCollection{},
			},
			expectedDiffFromResult: nil,
			expectedWithDiffResult: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{},
			},
		},
		{
			name: "one toRemove in this, one toRemove in other",
			this: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{outPoint0: utxoEntry0},
			},
			other: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{outPoint0: utxoEntry0},
			},
			expectedDiffFromResult: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{},
			},
			expectedWithDiffResult: nil,
		},
		{
			name: "one toRemove in this, empty other",
			this: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{outPoint0: utxoEntry0},
			},
			other: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{},
			},
			expectedDiffFromResult: &UTXODiff{
				toAdd:    utxoCollection{outPoint0: utxoEntry0},
				toRemove: utxoCollection{},
			},
			expectedWithDiffResult: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{outPoint0: utxoEntry0},
			},
		},
		{
			name: "empty this, one toAdd in other",
			this: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{},
			},
			other: &UTXODiff{
				toAdd:    utxoCollection{outPoint0: utxoEntry0},
				toRemove: utxoCollection{},
			},
			expectedDiffFromResult: &UTXODiff{
				toAdd:    utxoCollection{outPoint0: utxoEntry0},
				toRemove: utxoCollection{},
			},
			expectedWithDiffResult: &UTXODiff{
				toAdd:    utxoCollection{outPoint0: utxoEntry0},
				toRemove: utxoCollection{},
			},
		},
		{
			name: "empty this, one toRemove in other",
			this: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{},
			},
			other: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{outPoint0: utxoEntry0},
			},
			expectedDiffFromResult: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{outPoint0: utxoEntry0},
			},
			expectedWithDiffResult: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{outPoint0: utxoEntry0},
			},
		},
		{
			name: "empty this, empty other",
			this: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{},
			},
			other: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{},
			},
			expectedDiffFromResult: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{},
			},
			expectedWithDiffResult: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{},
			},
		},
	}

	for _, test := range tests {
		// diffFrom from this to other
		diffResult, err := test.this.diffFrom(test.other)

		// Test whether diffFrom returned an error
		isDiffFromOk := err == nil
		expectedIsDiffFromOk := test.expectedDiffFromResult != nil
		if isDiffFromOk != expectedIsDiffFromOk {
			t.Errorf("unexpected diffFrom error in test \"%s\". "+
				"Expected: \"%t\", got: \"%t\".", test.name, expectedIsDiffFromOk, isDiffFromOk)
		}

		// If there was no error, test the diffFrom result
		if isDiffFromOk && !reflect.DeepEqual(diffResult, test.expectedDiffFromResult) {
			t.Errorf("unexpected diffFrom result in test \"%s\". "+
				"Expected: \"%v\", got: \"%v\".", test.name, test.expectedDiffFromResult, diffResult)
		}

		// WithDiff from this to other
		withDiffResult, err := test.this.WithDiff(test.other)

		// Test whether WithDiff returned an error
		isWithDiffOk := err == nil
		expectedIsWithDiffOk := test.expectedWithDiffResult != nil
		if isWithDiffOk != expectedIsWithDiffOk {
			t.Errorf("unexpected WithDiff error in test \"%s\". "+
				"Expected: \"%t\", got: \"%t\".", test.name, expectedIsWithDiffOk, isWithDiffOk)
		}

		// If there was no error, test the WithDiff result
		if isWithDiffOk && !reflect.DeepEqual(withDiffResult, test.expectedWithDiffResult) {
			t.Errorf("unexpected WithDiff result in test \"%s\". "+
				"Expected: \"%v\", got: \"%v\".", test.name, test.expectedWithDiffResult, withDiffResult)
		}
	}
}
// TestFullUTXOSet makes sure that fullUTXOSet is working as expected.
func TestFullUTXOSet(t *testing.T) {
	txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
	txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
	outPoint0 := *wire.NewOutPoint(txID0, 0)
	outPoint1 := *wire.NewOutPoint(txID1, 0)
	txOut0 := &wire.TxOut{PkScript: []byte{}, Value: 10}
	txOut1 := &wire.TxOut{PkScript: []byte{}, Value: 20}
	utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
	utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
	diff := &UTXODiff{
		toAdd:    utxoCollection{outPoint0: utxoEntry0},
		toRemove: utxoCollection{outPoint1: utxoEntry1},
	}

	// Test fullUTXOSet creation
	emptySet := NewFullUTXOSet()
	if len(emptySet.collection()) != 0 {
		t.Errorf("new set is not empty")
	}

	// Test fullUTXOSet WithDiff
	withDiffResult, err := emptySet.WithDiff(diff)
	if err != nil {
		t.Errorf("WithDiff unexpectedly failed")
	}
	withDiffUTXOSet, ok := withDiffResult.(*DiffUTXOSet)
	if !ok {
		t.Errorf("WithDiff is of unexpected type")
	}
	if !reflect.DeepEqual(withDiffUTXOSet.base, emptySet) || !reflect.DeepEqual(withDiffUTXOSet.UTXODiff, diff) {
		t.Errorf("WithDiff is of unexpected composition")
	}

	// Test fullUTXOSet addTx
	txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutPoint: wire.OutPoint{TxID: *txID0, Index: 0}, Sequence: 0}
	transaction0 := wire.NewNativeMsgTx(1, []*wire.TxIn{txIn0}, []*wire.TxOut{txOut0})
	if ok = emptySet.AddTx(transaction0, 0); ok {
		t.Errorf("addTx unexpectedly succeeded")
	}
	emptySet = &FullUTXOSet{utxoCollection: utxoCollection{outPoint0: utxoEntry0}}
	if ok = emptySet.AddTx(transaction0, 0); !ok {
		t.Errorf("addTx unexpectedly failed")
	}

	// Test fullUTXOSet collection
	if !reflect.DeepEqual(emptySet.collection(), emptySet.utxoCollection) {
		t.Errorf("collection does not equal the set's utxoCollection")
	}

	// Test fullUTXOSet cloning
	clonedEmptySet := emptySet.clone().(*FullUTXOSet)
	if !reflect.DeepEqual(clonedEmptySet, emptySet) {
		t.Errorf("clone does not equal the original set")
	}
	if clonedEmptySet == emptySet {
		t.Errorf("cloned set is reference-equal to the original")
	}
}
// TestDiffUTXOSet makes sure that diffUTXOSet is working as expected.
func TestDiffUTXOSet(t *testing.T) {
	txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
	txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
	outPoint0 := *wire.NewOutPoint(txID0, 0)
	outPoint1 := *wire.NewOutPoint(txID1, 0)
	txOut0 := &wire.TxOut{PkScript: []byte{}, Value: 10}
	txOut1 := &wire.TxOut{PkScript: []byte{}, Value: 20}
	utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
	utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
	diff := &UTXODiff{
		toAdd:    utxoCollection{outPoint0: utxoEntry0},
		toRemove: utxoCollection{outPoint1: utxoEntry1},
	}

	// Test diffUTXOSet creation
	emptySet := NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff())
	if len(emptySet.collection()) != 0 {
		t.Errorf("new set is not empty")
	}

	// Test diffUTXOSet WithDiff
	withDiffResult, err := emptySet.WithDiff(diff)
	if err != nil {
		t.Errorf("WithDiff unexpectedly failed")
	}
	withDiffUTXOSet, ok := withDiffResult.(*DiffUTXOSet)
	if !ok {
		t.Errorf("WithDiff is of unexpected type")
	}
	withDiff, _ := NewUTXODiff().WithDiff(diff)
	if !reflect.DeepEqual(withDiffUTXOSet.base, emptySet.base) || !reflect.DeepEqual(withDiffUTXOSet.UTXODiff, withDiff) {
		t.Errorf("WithDiff is of unexpected composition")
	}
	_, err = NewDiffUTXOSet(NewFullUTXOSet(), diff).WithDiff(diff)
	if err == nil {
		t.Errorf("WithDiff unexpectedly succeeded")
	}

	// Given a diffSet, each case tests that meldToBase, String, collection, and cloning work as expected
	// For each of the following test cases, we will:
	// .meldToBase() the given diffSet and compare it to expectedMeldSet
	// .String() the given diffSet and compare it to expectedString
	// .collection() the given diffSet and compare it to expectedCollection
	// .clone() the given diffSet and compare its value to itself (expected: equal) and its reference to itself (expected: not equal)
	tests := []struct {
		name               string
		diffSet            *DiffUTXOSet
		expectedMeldSet    *DiffUTXOSet
		expectedString     string
		expectedCollection utxoCollection
	}{
		{
			name: "empty base, empty diff",
			diffSet: &DiffUTXOSet{
				base: NewFullUTXOSet(),
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{},
					toRemove: utxoCollection{},
				},
			},
			expectedMeldSet: &DiffUTXOSet{
				base: NewFullUTXOSet(),
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{},
					toRemove: utxoCollection{},
				},
			},
			expectedString:     "{Base: [ ], To Add: [ ], To Remove: [ ]}",
			expectedCollection: utxoCollection{},
		},
		{
			name: "empty base, one member in diff toAdd",
			diffSet: &DiffUTXOSet{
				base: NewFullUTXOSet(),
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{outPoint0: utxoEntry0},
					toRemove: utxoCollection{},
				},
			},
			expectedMeldSet: &DiffUTXOSet{
				base: &FullUTXOSet{utxoCollection: utxoCollection{outPoint0: utxoEntry0}},
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{},
					toRemove: utxoCollection{},
				},
			},
			expectedString:     "{Base: [ ], To Add: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10 ], To Remove: [ ]}",
			expectedCollection: utxoCollection{outPoint0: utxoEntry0},
		},
		{
			name: "empty base, one member in diff toRemove",
			diffSet: &DiffUTXOSet{
				base: NewFullUTXOSet(),
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{},
					toRemove: utxoCollection{outPoint0: utxoEntry0},
				},
			},
			expectedMeldSet: &DiffUTXOSet{
				base: &FullUTXOSet{utxoCollection: utxoCollection{}},
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{},
					toRemove: utxoCollection{},
				},
			},
			expectedString:     "{Base: [ ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10 ]}",
			expectedCollection: utxoCollection{},
		},
		{
			name: "one member in base toAdd, one member in diff toAdd",
			diffSet: &DiffUTXOSet{
				base: &FullUTXOSet{utxoCollection: utxoCollection{outPoint0: utxoEntry0}},
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{outPoint1: utxoEntry1},
					toRemove: utxoCollection{},
				},
			},
			expectedMeldSet: &DiffUTXOSet{
				base: &FullUTXOSet{
					utxoCollection: utxoCollection{
						outPoint0: utxoEntry0,
						outPoint1: utxoEntry1,
					},
				},
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{},
					toRemove: utxoCollection{},
				},
			},
			expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10 ], To Add: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20 ], To Remove: [ ]}",
			expectedCollection: utxoCollection{
				outPoint0: utxoEntry0,
				outPoint1: utxoEntry1,
			},
		},
		{
			name: "one member in base toAdd, same one member in diff toRemove",
			diffSet: &DiffUTXOSet{
				base: &FullUTXOSet{utxoCollection: utxoCollection{outPoint0: utxoEntry0}},
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{},
					toRemove: utxoCollection{outPoint0: utxoEntry0},
				},
			},
			expectedMeldSet: &DiffUTXOSet{
				base: &FullUTXOSet{
					utxoCollection: utxoCollection{},
				},
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{},
					toRemove: utxoCollection{},
				},
			},
			expectedString:     "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10 ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10 ]}",
			expectedCollection: utxoCollection{},
		},
	}

	for _, test := range tests {
		// Test meldToBase
		meldSet := test.diffSet.clone().(*DiffUTXOSet)
		meldSet.meldToBase()
		if !reflect.DeepEqual(meldSet, test.expectedMeldSet) {
			t.Errorf("unexpected melded set in test \"%s\". "+
				"Expected: \"%v\", got: \"%v\".", test.name, test.expectedMeldSet, meldSet)
		}

		// Test string representation
		setString := test.diffSet.String()
		if setString != test.expectedString {
			t.Errorf("unexpected string in test \"%s\". "+
				"Expected: \"%s\", got: \"%s\".", test.name, test.expectedString, setString)
		}

		// Test collection
		setCollection := test.diffSet.collection()
		if !reflect.DeepEqual(setCollection, test.expectedCollection) {
			t.Errorf("unexpected set collection in test \"%s\". "+
				"Expected: \"%v\", got: \"%v\".", test.name, test.expectedCollection, setCollection)
		}

		// Test cloning
		clonedSet := test.diffSet.clone().(*DiffUTXOSet)
		if !reflect.DeepEqual(clonedSet, test.diffSet) {
			t.Errorf("unexpected set clone in test \"%s\". "+
				"Expected: \"%v\", got: \"%v\".", test.name, test.diffSet, clonedSet)
		}
		if clonedSet == test.diffSet {
			t.Errorf("cloned set is reference-equal to the original")
		}
	}
}
// TestUTXOSetDiffRules makes sure that utxoSet diffFrom rules are followed.
// The rules are:
// 1. Neither fullUTXOSet nor diffUTXOSet can diffFrom a fullUTXOSet.
// 2. fullUTXOSet cannot diffFrom a diffUTXOSet with a base other than itself.
// 3. diffUTXOSet cannot diffFrom a diffUTXOSet with a different base.
func TestUTXOSetDiffRules(t *testing.T) {
	fullSet := NewFullUTXOSet()
	diffSet := NewDiffUTXOSet(fullSet, NewUTXODiff())

	// For each of the following test cases, we will call utxoSet.diffFrom(diffSet) and compare
	// whether the function succeeded with expectedSuccess
	//
	// Note: since the test cases are similar for both fullUTXOSet and diffUTXOSet, we run both against the same test cases
	run := func(set UTXOSet) {
		tests := []struct {
			name            string
			diffSet         UTXOSet
			expectedSuccess bool
		}{
			{
				name:            "diff from fullSet",
				diffSet:         NewFullUTXOSet(),
				expectedSuccess: false,
			},
			{
				name:            "diff from diffSet with different base",
				diffSet:         NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff()),
				expectedSuccess: false,
			},
			{
				name:            "diff from diffSet with same base",
				diffSet:         NewDiffUTXOSet(fullSet, NewUTXODiff()),
				expectedSuccess: true,
			},
		}

		for _, test := range tests {
			_, err := set.diffFrom(test.diffSet)
			success := err == nil
			if success != test.expectedSuccess {
				t.Errorf("unexpected diffFrom success in test \"%s\". "+
					"Expected: \"%t\", got: \"%t\".", test.name, test.expectedSuccess, success)
			}
		}
	}

	run(fullSet) // Perform the test cases above on a fullUTXOSet
	run(diffSet) // Perform the test cases above on a diffUTXOSet
}
// TestDiffUTXOSet_addTx makes sure that diffUTXOSet addTx works as expected
func TestDiffUTXOSet_addTx(t *testing.T) {
	// transaction0 is coinbase. As such, it has exactly one input with a zero hash and a MaxUint32 index
	txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
	txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutPoint: wire.OutPoint{TxID: *txID0, Index: math.MaxUint32}, Sequence: 0}
	txOut0 := &wire.TxOut{PkScript: []byte{0}, Value: 10}
	utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
	transaction0 := wire.NewNativeMsgTx(1, []*wire.TxIn{txIn0}, []*wire.TxOut{txOut0})

	// transaction1 spends transaction0
	id1 := transaction0.TxID()
	outPoint1 := *wire.NewOutPoint(&id1, 0)
	txIn1 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutPoint: wire.OutPoint{TxID: id1, Index: 0}, Sequence: 0}
	txOut1 := &wire.TxOut{PkScript: []byte{1}, Value: 20}
	utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
	transaction1 := wire.NewNativeMsgTx(1, []*wire.TxIn{txIn1}, []*wire.TxOut{txOut1})

	// transaction2 spends transaction1
	id2 := transaction1.TxID()
	outPoint2 := *wire.NewOutPoint(&id2, 0)
	txIn2 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutPoint: wire.OutPoint{TxID: id2, Index: 0}, Sequence: 0}
	txOut2 := &wire.TxOut{PkScript: []byte{2}, Value: 30}
	utxoEntry2 := NewUTXOEntry(txOut2, false, 2)
	transaction2 := wire.NewNativeMsgTx(1, []*wire.TxIn{txIn2}, []*wire.TxOut{txOut2})

	// outPoint3 is the outpoint for transaction2
	id3 := transaction2.TxID()
	outPoint3 := *wire.NewOutPoint(&id3, 0)

	// For each of the following test cases, we will:
	// 1. startSet.addTx() all the transactions in toAdd, in order, with the initial block height startHeight
	// 2. Compare the result set with expectedSet
	tests := []struct {
		name        string
		startSet    *DiffUTXOSet
		startHeight int32
		toAdd       []*wire.MsgTx
		expectedSet *DiffUTXOSet
	}{
		{
			name:        "add coinbase transaction to empty set",
			startSet:    NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff()),
			startHeight: 0,
			toAdd:       []*wire.MsgTx{transaction0},
			expectedSet: &DiffUTXOSet{
				base: &FullUTXOSet{utxoCollection: utxoCollection{}},
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{outPoint1: utxoEntry0},
					toRemove: utxoCollection{},
				},
			},
		},
		{
			name:        "add regular transaction to empty set",
			startSet:    NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff()),
			startHeight: 0,
			toAdd:       []*wire.MsgTx{transaction1},
			expectedSet: &DiffUTXOSet{
				base: &FullUTXOSet{utxoCollection: utxoCollection{}},
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{},
					toRemove: utxoCollection{},
				},
			},
		},
		{
			name: "add transaction to set with its input in base",
			startSet: &DiffUTXOSet{
				base: &FullUTXOSet{utxoCollection: utxoCollection{outPoint1: utxoEntry0}},
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{},
					toRemove: utxoCollection{},
				},
			},
			startHeight: 1,
			toAdd:       []*wire.MsgTx{transaction1},
			expectedSet: &DiffUTXOSet{
				base: &FullUTXOSet{utxoCollection: utxoCollection{outPoint1: utxoEntry0}},
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{outPoint2: utxoEntry1},
					toRemove: utxoCollection{outPoint1: utxoEntry0},
				},
			},
		},
		{
			name: "add transaction to set with its input in diff toAdd",
			startSet: &DiffUTXOSet{
				base: NewFullUTXOSet(),
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{outPoint1: utxoEntry0},
					toRemove: utxoCollection{},
				},
			},
			startHeight: 1,
			toAdd:       []*wire.MsgTx{transaction1},
			expectedSet: &DiffUTXOSet{
				base: NewFullUTXOSet(),
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{outPoint2: utxoEntry1},
					toRemove: utxoCollection{},
				},
			},
		},
		{
			name: "add transaction to set with its input in diff toAdd and its output in diff toRemove",
			startSet: &DiffUTXOSet{
				base: NewFullUTXOSet(),
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{outPoint1: utxoEntry0},
					toRemove: utxoCollection{outPoint2: utxoEntry1},
				},
			},
			startHeight: 1,
			toAdd:       []*wire.MsgTx{transaction1},
			expectedSet: &DiffUTXOSet{
				base: NewFullUTXOSet(),
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{},
					toRemove: utxoCollection{},
				},
			},
		},
		{
			name: "add two transactions, one spending the other, to set with the first input in base",
			startSet: &DiffUTXOSet{
				base: &FullUTXOSet{utxoCollection: utxoCollection{outPoint1: utxoEntry0}},
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{},
					toRemove: utxoCollection{},
				},
			},
			startHeight: 1,
			toAdd:       []*wire.MsgTx{transaction1, transaction2},
			expectedSet: &DiffUTXOSet{
				base: &FullUTXOSet{utxoCollection: utxoCollection{outPoint1: utxoEntry0}},
				UTXODiff: &UTXODiff{
					toAdd:    utxoCollection{outPoint3: utxoEntry2},
					toRemove: utxoCollection{outPoint1: utxoEntry0},
				},
			},
		},
	}

	for _, test := range tests {
		diffSet := test.startSet.clone()

		// Apply all the transactions to diffSet, in order, with the initial block height startHeight
		for i, transaction := range test.toAdd {
			diffSet.AddTx(transaction, test.startHeight+int32(i))
		}

		// Make sure that the result diffSet equals the expectedSet
		if !reflect.DeepEqual(diffSet, test.expectedSet) {
			t.Errorf("unexpected diffSet in test \"%s\". "+
				"Expected: \"%v\", got: \"%v\".", test.name, test.expectedSet, diffSet)
		}
	}
}
func TestDiffFromTx(t *testing.T) {
	fus := &FullUTXOSet{
		utxoCollection: utxoCollection{},
	}
	cbTx, err := createCoinbaseTxForTest(1, 1, 0, &dagconfig.SimNetParams)
	if err != nil {
		t.Errorf("createCoinbaseTxForTest: %v", err)
	}
	fus.AddTx(cbTx, 1)
	node := &blockNode{height: 2} // Fake node
	cbOutpoint := wire.OutPoint{TxID: cbTx.TxID(), Index: 0}
	txIns := []*wire.TxIn{{
		PreviousOutPoint: cbOutpoint,
		SignatureScript:  nil,
		Sequence:         wire.MaxTxInSequenceNum,
	}}
	txOuts := []*wire.TxOut{{
		PkScript: OpTrueScript,
		Value:    uint64(1),
	}}
	tx := wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
	diff, err := fus.diffFromTx(tx, node)
	if err != nil {
		t.Errorf("diffFromTx: %v", err)
	}
	if !reflect.DeepEqual(diff.toAdd, utxoCollection{
		wire.OutPoint{TxID: tx.TxID(), Index: 0}: NewUTXOEntry(tx.TxOut[0], false, 2),
	}) {
		t.Errorf("diff.toAdd doesn't have the expected values")
	}

	if !reflect.DeepEqual(diff.toRemove, utxoCollection{
		wire.OutPoint{TxID: cbTx.TxID(), Index: 0}: NewUTXOEntry(cbTx.TxOut[0], true, 1),
	}) {
		t.Errorf("diff.toRemove doesn't have the expected values")
	}

	// Test that we get an error if the spent outpoint is not inside the UTXO set
	invalidTxIns := []*wire.TxIn{{
		PreviousOutPoint: wire.OutPoint{TxID: daghash.TxID{}, Index: 0},
		SignatureScript:  nil,
		Sequence:         wire.MaxTxInSequenceNum,
	}}
	invalidTxOuts := []*wire.TxOut{{
		PkScript: OpTrueScript,
		Value:    uint64(1),
	}}
	invalidTx := wire.NewNativeMsgTx(wire.TxVersion, invalidTxIns, invalidTxOuts)
	_, err = fus.diffFromTx(invalidTx, node)
	if err == nil {
		t.Errorf("diffFromTx: expected an error but got <nil>")
	}

	// Test that we get an error if the outpoint is inside the diffUTXOSet's toRemove
	dus := NewDiffUTXOSet(fus, &UTXODiff{
		toAdd:    utxoCollection{},
		toRemove: utxoCollection{},
	})
	dus.AddTx(tx, 2)
	_, err = dus.diffFromTx(tx, node)
	if err == nil {
		t.Errorf("diffFromTx: expected an error but got <nil>")
	}
}
// collection returns a collection of all UTXOs in this set
func (fus *FullUTXOSet) collection() utxoCollection {
	return fus.utxoCollection.clone()
}

// collection returns a collection of all UTXOs in this set
func (dus *DiffUTXOSet) collection() utxoCollection {
	clone := dus.clone().(*DiffUTXOSet)
	clone.meldToBase()

	return clone.base.collection()
}
func TestUTXOSetAddEntry(t *testing.T) {
	hash0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
	hash1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
	outPoint0 := wire.NewOutPoint(hash0, 0)
	outPoint1 := wire.NewOutPoint(hash1, 0)
	utxoEntry0 := NewUTXOEntry(&wire.TxOut{PkScript: []byte{}, Value: 10}, true, 0)
	utxoEntry1 := NewUTXOEntry(&wire.TxOut{PkScript: []byte{}, Value: 20}, false, 1)

	utxoDiff := NewUTXODiff()

	tests := []struct {
		name             string
		outPointToAdd    *wire.OutPoint
		utxoEntryToAdd   *UTXOEntry
		expectedUTXODiff *UTXODiff
	}{
		{
			name:           "add an entry",
			outPointToAdd:  outPoint0,
			utxoEntryToAdd: utxoEntry0,
			expectedUTXODiff: &UTXODiff{
				toAdd:    utxoCollection{*outPoint0: utxoEntry0},
				toRemove: utxoCollection{},
			},
		},
		{
			name:           "add another entry",
			outPointToAdd:  outPoint1,
			utxoEntryToAdd: utxoEntry1,
			expectedUTXODiff: &UTXODiff{
				toAdd:    utxoCollection{*outPoint0: utxoEntry0, *outPoint1: utxoEntry1},
				toRemove: utxoCollection{},
			},
		},
		{
			name:           "add first entry again",
			outPointToAdd:  outPoint0,
			utxoEntryToAdd: utxoEntry0,
			expectedUTXODiff: &UTXODiff{
				toAdd:    utxoCollection{*outPoint0: utxoEntry0, *outPoint1: utxoEntry1},
				toRemove: utxoCollection{},
			},
		},
	}

	for _, test := range tests {
		utxoDiff.AddEntry(*test.outPointToAdd, test.utxoEntryToAdd)
		if !reflect.DeepEqual(utxoDiff, test.expectedUTXODiff) {
			t.Fatalf("utxoDiff.AddEntry: unexpected utxoDiff in test '%s'. "+
				"Expected: %v, got: %v", test.name, test.expectedUTXODiff, utxoDiff)
		}
	}
}
func TestUTXOSetRemoveTxOuts(t *testing.T) {
	tx0 := wire.NewNativeMsgTx(1, nil, []*wire.TxOut{{PkScript: []byte{1}, Value: 10}})
	tx1 := wire.NewNativeMsgTx(1, nil, []*wire.TxOut{{PkScript: []byte{2}, Value: 20}})
	hash0 := tx0.TxID()
	hash1 := tx1.TxID()
	outPoint0 := wire.NewOutPoint(&hash0, 0)
	outPoint1 := wire.NewOutPoint(&hash1, 0)

	utxoDiff := NewUTXODiff()

	tests := []struct {
		name             string
		txToRemove       *wire.MsgTx
		expectedUTXODiff *UTXODiff
	}{
		{
			name:       "remove a transaction",
			txToRemove: tx0,
			expectedUTXODiff: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{*outPoint0: nil},
			},
		},
		{
			name:       "remove another transaction",
			txToRemove: tx1,
			expectedUTXODiff: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{*outPoint0: nil, *outPoint1: nil},
			},
		},
		{
			name:       "remove first entry again",
			txToRemove: tx0,
			expectedUTXODiff: &UTXODiff{
				toAdd:    utxoCollection{},
				toRemove: utxoCollection{*outPoint0: nil, *outPoint1: nil},
			},
		},
	}

	for _, test := range tests {
		utxoDiff.RemoveTxOuts(test.txToRemove)
		if !reflect.DeepEqual(utxoDiff, test.expectedUTXODiff) {
			t.Fatalf("utxoDiff.RemoveTxOuts: unexpected utxoDiff in test '%s'. "+
				"Expected: %v, got: %v", test.name, test.expectedUTXODiff, utxoDiff)
		}
	}
}

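The tests above pivot on the toAdd/toRemove bookkeeping of a UTXO diff: newly created outputs land in toAdd, spent outpoints land in toRemove, and applying the diff to a full set replays both. As a rough stand-alone illustration of those semantics (the map and types here are simplified stand-ins, not kaspad's actual utxoCollection):

```go
package main

import "fmt"

type outPoint struct {
	txID  string
	index uint32
}

type diff struct {
	toAdd    map[outPoint]uint64 // outpoint -> value of the new entry
	toRemove map[outPoint]uint64
}

// apply returns a copy of base with the diff's removals and additions applied.
func apply(base map[outPoint]uint64, d diff) map[outPoint]uint64 {
	result := make(map[outPoint]uint64, len(base))
	for op, v := range base {
		result[op] = v
	}
	for op := range d.toRemove {
		delete(result, op) // spent outputs leave the set
	}
	for op, v := range d.toAdd {
		result[op] = v // newly created outputs enter the set
	}
	return result
}

func main() {
	base := map[outPoint]uint64{{"aa", 0}: 10}
	d := diff{
		toAdd:    map[outPoint]uint64{{"bb", 0}: 20},
		toRemove: map[outPoint]uint64{{"aa", 0}: 10},
	}
	fmt.Println(apply(base, d)) // map[{bb 0}:20]
}
```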
1228	blockdag/validate.go (diff suppressed because it is too large)
@@ -1,295 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"math"

	"github.com/daglabs/btcd/dagconfig"
)

const (
	// vbTopBits defines the bits to set in the version to signal that the
	// version bits scheme is being used.
	vbTopBits = 0x10000000

	// vbTopMask is the bitmask to use to determine whether or not the
	// version bits scheme is in use.
	vbTopMask = 0xe0000000

	// vbNumBits is the total number of bits available for use with the
	// version bits scheme.
	vbNumBits = 29

	// unknownVerNumToCheck is the number of previous blocks to consider
	// when checking for a threshold of unknown block versions for the
	// purposes of warning the user.
	unknownVerNumToCheck = 100

	// unknownVerWarnNum is the threshold of previous blocks that have an
	// unknown version to use for the purposes of warning the user.
	unknownVerWarnNum = unknownVerNumToCheck / 2
)

// bitConditionChecker provides a thresholdConditionChecker which can be used to
// test whether or not a specific bit is set when it's not supposed to be
// according to the expected version based on the known deployments and the
// current state of the chain. This is useful for detecting and warning about
// unknown rule activations.
type bitConditionChecker struct {
	bit   uint32
	chain *BlockDAG
}

// Ensure the bitConditionChecker type implements the thresholdConditionChecker
// interface.
var _ thresholdConditionChecker = bitConditionChecker{}

// BeginTime returns the unix timestamp for the median block time after which
// voting on a rule change starts (at the next window).
//
// Since this implementation checks for unknown rules, it returns 0 so the rule
// is always treated as active.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) BeginTime() uint64 {
	return 0
}

// EndTime returns the unix timestamp for the median block time after which an
// attempted rule change fails if it has not already been locked in or
// activated.
//
// Since this implementation checks for unknown rules, it returns the maximum
// possible timestamp so the rule is always treated as active.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) EndTime() uint64 {
	return math.MaxUint64
}

// RuleChangeActivationThreshold is the number of blocks for which the condition
// must be true in order to lock in a rule change.
//
// This implementation returns the value defined by the chain params the checker
// is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) RuleChangeActivationThreshold() uint32 {
	return c.chain.dagParams.RuleChangeActivationThreshold
}

// MinerConfirmationWindow is the number of blocks in each threshold state
// retarget window.
//
// This implementation returns the value defined by the chain params the checker
// is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) MinerConfirmationWindow() uint32 {
	return c.chain.dagParams.MinerConfirmationWindow
}

// Condition returns true when the specific bit associated with the checker is
// set and it's not supposed to be according to the expected version based on
// the known deployments and the current state of the chain.
//
// This function MUST be called with the chain state lock held (for writes).
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) Condition(node *blockNode) (bool, error) {
	conditionMask := uint32(1) << c.bit
	version := uint32(node.version)
	if version&vbTopMask != vbTopBits {
		return false, nil
	}
	if version&conditionMask == 0 {
		return false, nil
	}

	expectedVersion, err := c.chain.calcNextBlockVersion(node.selectedParent)
	if err != nil {
		return false, err
	}
	return uint32(expectedVersion)&conditionMask == 0, nil
}

// deploymentChecker provides a thresholdConditionChecker which can be used to
// test a specific deployment rule. This is required for properly detecting
// and activating consensus rule changes.
type deploymentChecker struct {
	deployment *dagconfig.ConsensusDeployment
	chain      *BlockDAG
}

// Ensure the deploymentChecker type implements the thresholdConditionChecker
// interface.
var _ thresholdConditionChecker = deploymentChecker{}

// BeginTime returns the unix timestamp for the median block time after which
// voting on a rule change starts (at the next window).
//
// This implementation returns the value defined by the specific deployment the
// checker is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) BeginTime() uint64 {
	return c.deployment.StartTime
}

// EndTime returns the unix timestamp for the median block time after which an
// attempted rule change fails if it has not already been locked in or
// activated.
//
// This implementation returns the value defined by the specific deployment the
// checker is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) EndTime() uint64 {
	return c.deployment.ExpireTime
}

// RuleChangeActivationThreshold is the number of blocks for which the condition
// must be true in order to lock in a rule change.
//
// This implementation returns the value defined by the chain params the checker
// is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) RuleChangeActivationThreshold() uint32 {
	return c.chain.dagParams.RuleChangeActivationThreshold
}

// MinerConfirmationWindow is the number of blocks in each threshold state
// retarget window.
//
// This implementation returns the value defined by the chain params the checker
// is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) MinerConfirmationWindow() uint32 {
	return c.chain.dagParams.MinerConfirmationWindow
}

// Condition returns true when the specific bit defined by the deployment
// associated with the checker is set.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) Condition(node *blockNode) (bool, error) {
	conditionMask := uint32(1) << c.deployment.BitNumber
	version := uint32(node.version)
	return (version&vbTopMask == vbTopBits) && (version&conditionMask != 0),
		nil
}

// calcNextBlockVersion calculates the expected version of the block after the
// passed previous block node based on the state of started and locked in
// rule change deployments.
//
// This function differs from the exported CalcNextBlockVersion in that the
// exported version uses the current best chain as the previous block node
// while this function accepts any block node.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) calcNextBlockVersion(prevNode *blockNode) (int32, error) {
	// Set the appropriate bits for each actively defined rule deployment
	// that is either in the process of being voted on, or locked in for the
	// activation at the next threshold window change.
	expectedVersion := uint32(vbTopBits)
	for id := 0; id < len(dag.dagParams.Deployments); id++ {
		deployment := &dag.dagParams.Deployments[id]
		cache := &dag.deploymentCaches[id]
		checker := deploymentChecker{deployment: deployment, chain: dag}
		state, err := dag.thresholdState(prevNode, checker, cache)
		if err != nil {
			return 0, err
		}
		if state == ThresholdStarted || state == ThresholdLockedIn {
			expectedVersion |= uint32(1) << deployment.BitNumber
		}
	}
	return int32(expectedVersion), nil
}

// CalcNextBlockVersion calculates the expected version of the block after the
// end of the current best chain based on the state of started and locked in
// rule change deployments.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) CalcNextBlockVersion() (int32, error) {
	version, err := dag.calcNextBlockVersion(dag.selectedTip())
	return version, err
}

// warnUnknownRuleActivations displays a warning when any unknown new rules are
// either about to activate or have been activated. This will only happen once
// when new rules have been activated and every block for those about to be
// activated.
//
// This function MUST be called with the chain state lock held (for writes)
func (dag *BlockDAG) warnUnknownRuleActivations(node *blockNode) error {
	// Warn if any unknown new rules are either about to activate or have
	// already been activated.
	for bit := uint32(0); bit < vbNumBits; bit++ {
		checker := bitConditionChecker{bit: bit, chain: dag}
		cache := &dag.warningCaches[bit]
		state, err := dag.thresholdState(node.selectedParent, checker, cache)
		if err != nil {
			return err
		}

		switch state {
		case ThresholdActive:
			if !dag.unknownRulesWarned {
				log.Warnf("Unknown new rules activated (bit %d)",
					bit)
				dag.unknownRulesWarned = true
			}

		case ThresholdLockedIn:
			window := int32(checker.MinerConfirmationWindow())
			activationHeight := window - (node.height % window)
			log.Warnf("Unknown new rules are about to activate in "+
				"%d blocks (bit %d)", activationHeight, bit)
		}
	}

	return nil
}

// warnUnknownVersions logs a warning if a high enough percentage of the last
// blocks have unexpected versions.
//
// This function MUST be called with the chain state lock held (for writes)
func (dag *BlockDAG) warnUnknownVersions(node *blockNode) error {
	// Nothing to do if already warned.
	if dag.unknownVersionsWarned {
		return nil
	}

	// Warn if enough previous blocks have unexpected versions.
	numUpgraded := uint32(0)
	for i := uint32(0); i < unknownVerNumToCheck && node != nil; i++ {
		expectedVersion, err := dag.calcNextBlockVersion(node.selectedParent)
		if err != nil {
			return err
		}
		if (node.version & ^expectedVersion) != 0 {
			numUpgraded++
		}

		node = node.selectedParent
	}
	if numUpgraded > unknownVerWarnNum {
		log.Warn("Unknown block versions are being mined, so new " +
			"rules might be in effect. Are you running the " +
			"latest version of the software?")
		dag.unknownVersionsWarned = true
	}

	return nil
}

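The heart of the version-bits machinery above is two mask tests: the version's top bits must match the scheme marker, and the deployment's own bit must be set. For the marker test to ever pass, vbTopBits must lie within vbTopMask, as in BIP 9 where the top three bits are the literal pattern 001 (0x20000000); note that the file above uses 0x10000000, which falls outside the 0xe0000000 mask, so this sketch uses the BIP 9 constants for illustration rather than the file's:

```go
package main

import "fmt"

// BIP 9 style constants, used here purely for illustration.
const (
	vbTopBits uint32 = 0x20000000 // top three bits are 001
	vbTopMask uint32 = 0xe0000000 // selects the top three bits
)

// signalsBit reports whether a block version both uses the version-bits
// scheme and has the given deployment bit set.
func signalsBit(version uint32, bit uint32) bool {
	if version&vbTopMask != vbTopBits {
		return false // not a version-bits style version at all
	}
	return version&(uint32(1)<<bit) != 0
}

func main() {
	fmt.Println(signalsBit(0x20000002, 1)) // true: scheme in use, bit 1 set
	fmt.Println(signalsBit(0x40000002, 1)) // false: top bits are 010, not 001
}
```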
@@ -1,126 +0,0 @@
// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"sync"
)

// virtualBlock is a virtual block whose parents are the tips of the DAG.
type virtualBlock struct {
	mtx      sync.Mutex
	phantomK uint32
	utxoSet  *FullUTXOSet
	blockNode

	// selectedPathChainSet is a block set that includes all the blocks
	// that belong to the chain of selected parents from the virtual block.
	selectedPathChainSet blockSet
}

// newVirtualBlock creates and returns a new VirtualBlock.
func newVirtualBlock(tips blockSet, phantomK uint32) *virtualBlock {
	// The mutex is intentionally not held since this is a constructor.
	var virtual virtualBlock
	virtual.phantomK = phantomK
	virtual.utxoSet = NewFullUTXOSet()
	virtual.selectedPathChainSet = newSet()
	virtual.setTips(tips)

	return &virtual
}

// clone creates and returns a clone of the virtual block.
func (v *virtualBlock) clone() *virtualBlock {
	return &virtualBlock{
		phantomK:             v.phantomK,
		utxoSet:              v.utxoSet,
		blockNode:            v.blockNode,
		selectedPathChainSet: v.selectedPathChainSet,
	}
}

// setTips replaces the tips of the virtual block with the blocks in the
// given blockSet. This only differs from the exported version in that it
// is up to the caller to ensure the lock is held.
//
// This function MUST be called with the view mutex locked (for writes).
func (v *virtualBlock) setTips(tips blockSet) {
	oldSelectedParent := v.selectedParent
	v.blockNode = *newBlockNode(nil, tips, v.phantomK)
	v.updateSelectedPathSet(oldSelectedParent)
}

// updateSelectedPathSet updates the selectedPathSet to match the
// new selected parent of the virtual block.
// Every time the new selected parent is not a child of
// the old one, it updates the selected path by removing from
// the path blocks that are selected ancestors of the old selected
// parent and are not selected ancestors of the new one, and adding
// blocks that are selected ancestors of the new selected parent
// and aren't selected ancestors of the old one.
func (v *virtualBlock) updateSelectedPathSet(oldSelectedParent *blockNode) {
	var intersectionNode *blockNode
	for node := v.blockNode.selectedParent; intersectionNode == nil && node != nil; node = node.selectedParent {
		if v.selectedPathChainSet.contains(node) {
			intersectionNode = node
		} else {
			v.selectedPathChainSet.add(node)
		}
	}

	if intersectionNode == nil && oldSelectedParent != nil {
		panic("updateSelectedPathSet: Cannot find intersection node. The block index may be corrupted.")
	}

	if intersectionNode != nil {
		for node := oldSelectedParent; !node.hash.IsEqual(intersectionNode.hash); node = node.selectedParent {
			v.selectedPathChainSet.remove(node)
		}
	}
}

// SetTips replaces the tips of the virtual block with the blocks in the
// given blockSet.
//
// This function is safe for concurrent access.
func (v *virtualBlock) SetTips(tips blockSet) {
	v.mtx.Lock()
	v.setTips(tips)
	v.mtx.Unlock()
}

// addTip adds the given tip to the set of tips in the virtual block.
// All former tips that happen to be the given tip's parents are removed
// from the set. This only differs from the exported version in that it
// is up to the caller to ensure the lock is held.
//
// This function MUST be called with the view mutex locked (for writes).
func (v *virtualBlock) addTip(newTip *blockNode) {
	updatedTips := v.tips().clone()
	for _, parent := range newTip.parents {
		updatedTips.remove(parent)
	}

	updatedTips.add(newTip)
	v.setTips(updatedTips)
}

// AddTip adds the given tip to the set of tips in the virtual block.
// All former tips that happen to be the given tip's parents are removed
// from the set.
//
// This function is safe for concurrent access.
func (v *virtualBlock) AddTip(newTip *blockNode) {
	v.mtx.Lock()
	v.addTip(newTip)
	v.mtx.Unlock()
}

// tips returns the current tip block nodes for the DAG. It will return
// an empty blockSet if there is no tip.
//
// This function is safe for concurrent access.
func (v *virtualBlock) tips() blockSet {
	return v.parents
}

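addTip's bookkeeping is simple set surgery: every parent of the incoming block stops being a tip, and the new block becomes one. A minimal stand-alone illustration of just that step (the map-based set here is an assumption for brevity, not kaspad's blockSet type):

```go
package main

import "fmt"

type block struct {
	id      string
	parents []*block
}

// addTip returns the updated tip set after block b arrives: b's parents
// are no longer tips, and b itself becomes one.
func addTip(tips map[*block]bool, b *block) map[*block]bool {
	updated := make(map[*block]bool, len(tips)+1)
	for t := range tips {
		updated[t] = true
	}
	for _, p := range b.parents {
		delete(updated, p) // a referenced parent can no longer be a tip
	}
	updated[b] = true
	return updated
}

func main() {
	genesis := &block{id: "genesis"}
	child := &block{id: "child", parents: []*block{genesis}}
	tips := map[*block]bool{genesis: true}
	tips = addTip(tips, child)
	for t := range tips {
		fmt.Println(t.id) // prints only "child"
	}
}
```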
@@ -1,160 +0,0 @@
// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockdag

import (
	"reflect"
	"testing"
)

// TestVirtualBlock ensures that VirtualBlock works as expected.
func TestVirtualBlock(t *testing.T) {
	phantomK := uint32(1)
	buildNode := buildNodeGenerator(phantomK, false)

	// Create a DAG as follows:
	// 0 <- 1 <- 2
	//  \
	//   <- 3 <- 5
	//    \    X
	//     <- 4 <- 6
	node0 := buildNode(setFromSlice())
	node1 := buildNode(setFromSlice(node0))
	node2 := buildNode(setFromSlice(node1))
	node3 := buildNode(setFromSlice(node0))
	node4 := buildNode(setFromSlice(node0))
	node5 := buildNode(setFromSlice(node3, node4))
	node6 := buildNode(setFromSlice(node3, node4))

	// Given an empty VirtualBlock, each of the following test cases will:
	// Set its tips to tipsToSet
	// Add to it all the tips in tipsToAdd, one after the other
	// Call .Tips() on it and compare the result to expectedTips
	// Call .selectedTip() on it and compare the result to expectedSelectedParent
	tests := []struct {
		name                   string
		tipsToSet              []*blockNode
		tipsToAdd              []*blockNode
		expectedTips           blockSet
		expectedSelectedParent *blockNode
	}{
		{
			name:                   "empty virtual",
			tipsToSet:              []*blockNode{},
			tipsToAdd:              []*blockNode{},
			expectedTips:           newSet(),
			expectedSelectedParent: nil,
		},
		{
			name:                   "virtual with genesis tip",
			tipsToSet:              []*blockNode{node0},
			tipsToAdd:              []*blockNode{},
			expectedTips:           setFromSlice(node0),
			expectedSelectedParent: node0,
		},
		{
			name:                   "virtual with genesis tip, add child of genesis",
			tipsToSet:              []*blockNode{node0},
			tipsToAdd:              []*blockNode{node1},
			expectedTips:           setFromSlice(node1),
			expectedSelectedParent: node1,
		},
		{
			name:                   "empty virtual, add a full DAG",
			tipsToSet:              []*blockNode{},
			tipsToAdd:              []*blockNode{node0, node1, node2, node3, node4, node5, node6},
			expectedTips:           setFromSlice(node2, node5, node6),
			expectedSelectedParent: node5,
		},
	}

	for _, test := range tests {
		// Create an empty VirtualBlock
		virtual := newVirtualBlock(nil, phantomK)

		// Set the tips. This will be the initial state
		virtual.SetTips(setFromSlice(test.tipsToSet...))

		// Add all blockNodes in tipsToAdd in order
		for _, tipToAdd := range test.tipsToAdd {
			addNodeAsChildToParents(tipToAdd)
			virtual.AddTip(tipToAdd)
		}

		// Ensure that the virtual block's tips are now equal to expectedTips
		resultTips := virtual.tips()
		if !reflect.DeepEqual(resultTips, test.expectedTips) {
			t.Errorf("unexpected tips in test \"%s\". "+
				"Expected: %v, got: %v.", test.name, test.expectedTips, resultTips)
		}

		// Ensure that the virtual block's selectedParent is now equal to expectedSelectedParent
		resultSelectedTip := virtual.selectedParent
		if !reflect.DeepEqual(resultSelectedTip, test.expectedSelectedParent) {
			t.Errorf("unexpected selected tip in test \"%s\". "+
				"Expected: %v, got: %v.", test.name, test.expectedSelectedParent, resultSelectedTip)
		}
	}
}

func TestSelectedPath(t *testing.T) {
	phantomK := uint32(1)
	buildNode := buildNodeGenerator(phantomK, false)

	// Create an empty VirtualBlock
	virtual := newVirtualBlock(nil, phantomK)

	tip := buildNode(setFromSlice())
	virtual.AddTip(tip)
	initialPath := setFromSlice(tip)
	for i := 0; i < 5; i++ {
		tip = buildNode(setFromSlice(tip))
		initialPath.add(tip)
		virtual.AddTip(tip)
	}
	initialTip := tip

	firstPath := initialPath.clone()
	for i := 0; i < 5; i++ {
		tip = buildNode(setFromSlice(tip))
		firstPath.add(tip)
		virtual.AddTip(tip)
	}
	// For now we don't have any DAG, just a chain, so the selected path should include all the blocks on the chain.
	if !reflect.DeepEqual(virtual.selectedPathChainSet, firstPath) {
		t.Fatalf("TestSelectedPath: selectedPathChainSet doesn't include the expected values. got %v, want %v", virtual.selectedPathChainSet, firstPath)
	}

	secondPath := initialPath.clone()
	tip = initialTip
	for i := 0; i < 100; i++ {
		tip = buildNode(setFromSlice(tip))
		secondPath.add(tip)
		virtual.AddTip(tip)
	}
	// Because we added a chain that is much longer than the previous chain, the selected path should be re-organized.
	if !reflect.DeepEqual(virtual.selectedPathChainSet, secondPath) {
		t.Fatalf("TestSelectedPath: selectedPathChainSet didn't handle the re-org as expected. got %v, want %v", virtual.selectedPathChainSet, secondPath)
	}

	tip = initialTip
	for i := 0; i < 3; i++ {
		tip = buildNode(setFromSlice(tip))
		virtual.AddTip(tip)
	}
	// Because we added a very short chain, the selected path should not be affected.
	if !reflect.DeepEqual(virtual.selectedPathChainSet, secondPath) {
		t.Fatalf("TestSelectedPath: selectedPathChainSet did an unexpected re-org. got %v, want %v", virtual.selectedPathChainSet, secondPath)
	}

	// We call updateSelectedPathSet manually without updating the tips, to check that it panics
	virtual2 := newVirtualBlock(nil, phantomK)
	defer func() {
		if r := recover(); r == nil {
			t.Fatalf("updateSelectedPathSet didn't panic")
		}
	}()
	virtual2.updateSelectedPathSet(buildNode(setFromSlice()))
}

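The re-org behavior exercised above boils down to walking back from the new selected parent until hitting a block already on the path (the intersection), then trimming the old branch. A rough sketch of that walk over a plain parent-pointer chain (all names here are illustrative, not kaspad's):

```go
package main

import "fmt"

type node struct {
	id     string
	parent *node
}

// updatePath mutates path so it tracks newTip's ancestor chain: ancestors of
// newTip are added until one is already in path (the intersection), then the
// old branch between oldTip and the intersection is removed.
func updatePath(path map[*node]bool, oldTip, newTip *node) {
	var intersection *node
	for n := newTip; n != nil && intersection == nil; n = n.parent {
		if path[n] {
			intersection = n
		} else {
			path[n] = true
		}
	}
	if intersection != nil {
		for n := oldTip; n != intersection; n = n.parent {
			delete(path, n)
		}
	}
}

func main() {
	root := &node{id: "root"}
	a := &node{id: "a", parent: root}
	b := &node{id: "b", parent: root} // competing branch
	path := map[*node]bool{root: true, a: true}
	updatePath(path, a, b)
	fmt.Println(path[a], path[b]) // false true: the path re-organized onto b
}
```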
354	btcd.go
@@ -1,354 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package main

import (
	"fmt"
	"net"
	"net/http"
	_ "net/http/pprof"
	"os"
	"path/filepath"
	"runtime"
	"runtime/debug"
	"runtime/pprof"
	"strings"

	"github.com/daglabs/btcd/blockdag/indexers"
	"github.com/daglabs/btcd/config"
	"github.com/daglabs/btcd/database"
	_ "github.com/daglabs/btcd/database/ffldb"
	"github.com/daglabs/btcd/limits"
	"github.com/daglabs/btcd/logger"
	"github.com/daglabs/btcd/server"
	"github.com/daglabs/btcd/signal"
	"github.com/daglabs/btcd/util/fs"
	"github.com/daglabs/btcd/version"
)

const (
	// blockDbNamePrefix is the prefix for the block database name. The
	// database type is appended to this value to form the full block
	// database name.
	blockDbNamePrefix = "blocks"
)

var (
	cfg *config.Config
)

// winServiceMain is only invoked on Windows. It detects when btcd is running
// as a service and reacts accordingly.
var winServiceMain func() (bool, error)

// btcdMain is the real main function for btcd. It is necessary to work around
// the fact that deferred functions do not run when os.Exit() is called. The
// optional serverChan parameter is mainly used by the service code to be
// notified with the server once it is set up so it can gracefully stop it when
// requested from the service control manager.
func btcdMain(serverChan chan<- *server.Server) error {
	// Load configuration and parse command line. This function also
	// initializes logging and configures it accordingly.
	err := config.LoadAndSetMainConfig()
	if err != nil {
		return err
	}
	cfg = config.MainConfig()
	defer func() {
		if logger.LogRotator != nil {
			logger.LogRotator.Close()
		}
	}()

	// Get a channel that will be closed when a shutdown signal has been
	// triggered either from an OS signal such as SIGINT (Ctrl+C) or from
	// another subsystem such as the RPC server.
	interrupt := signal.InterruptListener()
	defer btcdLog.Info("Shutdown complete")

	// Show version at startup.
	btcdLog.Infof("Version %s", version.Version())

	// Enable http profiling server if requested.
	if cfg.Profile != "" {
		go func() {
			listenAddr := net.JoinHostPort("", cfg.Profile)
			btcdLog.Infof("Profile server listening on %s", listenAddr)
			profileRedirect := http.RedirectHandler("/debug/pprof",
				http.StatusSeeOther)
			http.Handle("/", profileRedirect)
			btcdLog.Errorf("%s", http.ListenAndServe(listenAddr, nil))
		}()
	}

	// Write cpu profile if requested.
	if cfg.CPUProfile != "" {
		f, err := os.Create(cfg.CPUProfile)
		if err != nil {
			btcdLog.Errorf("Unable to create cpu profile: %s", err)
			return err
		}
		pprof.StartCPUProfile(f)
		defer f.Close()
		defer pprof.StopCPUProfile()
	}

	// Perform upgrades to btcd as new versions require it.
	if err := doUpgrades(); err != nil {
		btcdLog.Errorf("%s", err)
		return err
	}

	// Return now if an interrupt signal was triggered.
	if signal.InterruptRequested(interrupt) {
		return nil
	}

	if cfg.ResetDatabase {
		err := removeDatabase()
		if err != nil {
			btcdLog.Errorf("%s", err)
			return err
		}
	}

	// Load the block database.
	db, err := loadBlockDB()
	if err != nil {
		btcdLog.Errorf("%s", err)
		return err
	}
	defer func() {
		// Ensure the database is sync'd and closed on shutdown.
		btcdLog.Infof("Gracefully shutting down the database...")
		db.Close()
	}()

	// Return now if an interrupt signal was triggered.
	if signal.InterruptRequested(interrupt) {
		return nil
	}

	// Drop indexes and exit if requested.
	//
	// NOTE: The order is important here because dropping the tx index also
	// drops the address index since it relies on it.
	if cfg.DropAddrIndex {
		if err := indexers.DropAddrIndex(db, interrupt); err != nil {
			btcdLog.Errorf("%s", err)
			return err
		}

		return nil
	}
	if cfg.DropTxIndex {
		if err := indexers.DropTxIndex(db, interrupt); err != nil {
			btcdLog.Errorf("%s", err)
			return err
		}

		return nil
	}
	if cfg.DropCfIndex {
		if err := indexers.DropCfIndex(db, interrupt); err != nil {
			btcdLog.Errorf("%s", err)
			return err
		}

		return nil
	}

	// Create server and start it.
	server, err := server.NewServer(cfg.Listeners, db, config.ActiveNetParams(),
		interrupt)
	if err != nil {
		// TODO: this logging could do with some beautifying.
		btcdLog.Errorf("Unable to start server on %s: %s",
			strings.Join(cfg.Listeners, ", "), err)
		return err
	}
	defer func() {
		btcdLog.Infof("Gracefully shutting down the server...")
		server.Stop()
		server.WaitForShutdown()
		srvrLog.Infof("Server shutdown complete")
	}()
	server.Start()
	if serverChan != nil {
		serverChan <- server
	}

	// Wait until the interrupt signal is received from an OS signal or
	// shutdown is requested through one of the subsystems such as the RPC
	// server.
	<-interrupt
	return nil
}

func removeDatabase() error {
	dbPath := blockDbPath(cfg.DbType)
	return os.RemoveAll(dbPath)
}

// removeRegressionDB removes the existing regression test database if running
// in regression test mode and it already exists.
func removeRegressionDB(dbPath string) error {
	// Don't do anything if not in regression test mode.
	if !cfg.RegressionTest {
		return nil
	}

	// Remove the old regression test database if it already exists.
	fi, err := os.Stat(dbPath)
	if err == nil {
		btcdLog.Infof("Removing regression test database from '%s'", dbPath)
		if fi.IsDir() {
			err := os.RemoveAll(dbPath)
			if err != nil {
				return err
			}
		} else {
			err := os.Remove(dbPath)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// blockDbPath returns the path to the block database given a database type.
func blockDbPath(dbType string) string {
	// The database name is based on the database type.
	dbName := blockDbNamePrefix + "_" + dbType
	if dbType == "sqlite" {
		dbName = dbName + ".db"
	}
	dbPath := filepath.Join(cfg.DataDir, dbName)
	return dbPath
}

// warnMultipleDBs shows a warning if multiple block database types are detected.
// This is not a situation most users want. It is handy for development, however,
// to support multiple side-by-side databases.
func warnMultipleDBs() {
	// This is intentionally not using the known db types which depend
	// on the database types compiled into the binary since we want to
	// detect legacy db types as well.
	dbTypes := []string{"ffldb", "leveldb", "sqlite"}
	duplicateDbPaths := make([]string, 0, len(dbTypes)-1)
	for _, dbType := range dbTypes {
		if dbType == cfg.DbType {
			continue
		}

		// Store db path as a duplicate db if it exists.
		dbPath := blockDbPath(dbType)
		if fs.FileExists(dbPath) {
			duplicateDbPaths = append(duplicateDbPaths, dbPath)
		}
	}

	// Warn if there are extra databases.
	if len(duplicateDbPaths) > 0 {
		selectedDbPath := blockDbPath(cfg.DbType)
		btcdLog.Warnf("WARNING: There are multiple block chain databases "+
			"using different database types.\nYou probably don't "+
			"want to waste disk space by having more than one.\n"+
			"Your current database is located at [%s].\nThe "+
			"additional database is located at %s", selectedDbPath,
			strings.Join(duplicateDbPaths, ", "))
	}
}

// loadBlockDB loads (or creates when needed) the block database taking into
// account the selected database backend and returns a handle to it. It also
// contains additional logic such as warning the user if there are multiple
// databases which consume space on the file system and ensuring the regression
// test database is clean when in regression test mode.
func loadBlockDB() (database.DB, error) {
	// The memdb backend does not have a file path associated with it, so
	// handle it uniquely. We also don't want to worry about the multiple
	// database type warnings when running with the memory database.
	if cfg.DbType == "memdb" {
		btcdLog.Infof("Creating block database in memory.")
		db, err := database.Create(cfg.DbType)
		if err != nil {
			return nil, err
		}
		return db, nil
	}

	warnMultipleDBs()

	// The database name is based on the database type.
	dbPath := blockDbPath(cfg.DbType)

	// The regression test is special in that it needs a clean database for
	// each run, so remove it now if it already exists.
	removeRegressionDB(dbPath)

	btcdLog.Infof("Loading block database from '%s'", dbPath)
	db, err := database.Open(cfg.DbType, dbPath, config.ActiveNetParams().Net)
	if err != nil {
		// Return the error if it's not because the database doesn't
		// exist.
		if dbErr, ok := err.(database.Error); !ok || dbErr.ErrorCode !=
			database.ErrDbDoesNotExist {

			return nil, err
		}

		// Create the db if it does not exist.
		err = os.MkdirAll(cfg.DataDir, 0700)
		if err != nil {
			return nil, err
		}
		db, err = database.Create(cfg.DbType, dbPath, config.ActiveNetParams().Net)
		if err != nil {
			return nil, err
		}
	}

	btcdLog.Info("Block database loaded")
	return db, nil
}

func main() {
	// Use all processor cores.
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Block and transaction processing can cause bursty allocations. This
	// limits the garbage collector from excessively overallocating during
	// bursts. This value was arrived at with the help of profiling live
	// usage.
	debug.SetGCPercent(10)

	// Up some limits.
	if err := limits.SetLimits(); err != nil {
		fmt.Fprintf(os.Stderr, "failed to set limits: %s\n", err)
		os.Exit(1)
	}

	// Call serviceMain on Windows to handle running as a service. When
	// the return isService flag is true, exit now since we ran as a
	// service. Otherwise, just fall through to normal operation.
	if runtime.GOOS == "windows" {
		isService, err := winServiceMain()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		if isService {
			os.Exit(0)
		}
	}

	// Work around defer not working after os.Exit()
	if err := btcdMain(nil); err != nil {
		os.Exit(1)
	}
}

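loadBlockDB's error handling is a common open-or-create pattern: try to open, and fall back to creating only when the failure is specifically "does not exist"; any other error propagates. A generic stand-alone sketch of the same pattern using only the standard library (os.Open/os.Create stand in for the database calls above):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// openOrCreate opens path, creating it only when the open failed because
// the file does not exist; any other error is returned untouched.
func openOrCreate(path string) (*os.File, error) {
	f, err := os.Open(path)
	if err != nil {
		// Only the "does not exist" case may fall through to creation,
		// mirroring the database.ErrDbDoesNotExist check above.
		if !errors.Is(err, fs.ErrNotExist) {
			return nil, err
		}
		return os.Create(path)
	}
	return f, nil
}

func main() {
	f, err := openOrCreate("example.dat")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()
	fmt.Println("opened", f.Name())
}
```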
@@ -1,68 +0,0 @@
btcec
=====

[](https://travis-ci.org/btcsuite/btcec)
[](http://copyfree.org)
[](http://godoc.org/github.com/daglabs/btcd/btcec)

Package btcec implements elliptic curve cryptography needed for working with
Bitcoin (secp256k1 only for now). It is designed so that it may be used with the
standard crypto/ecdsa packages provided with go. A comprehensive suite of tests
is provided to ensure proper functionality. Package btcec was originally based
on work from ThePiachu which is licensed under the same terms as Go, but it has
significantly diverged since then. The btcsuite developers' original work is
licensed under the liberal ISC license.

Although this package was primarily written for btcd, it has intentionally been
designed so it can be used as a standalone package for any projects needing to
use secp256k1 elliptic curve cryptography.

## Installation and Updating

```bash
$ go get -u github.com/daglabs/btcd/btcec
```

## Examples

* [Sign Message](http://godoc.org/github.com/daglabs/btcd/btcec#example-package--SignMessage)
  Demonstrates signing a message with a secp256k1 private key that is first
  parsed from raw bytes and serializing the generated signature.

* [Verify Signature](http://godoc.org/github.com/daglabs/btcd/btcec#example-package--VerifySignature)
  Demonstrates verifying a secp256k1 signature against a public key that is
  first parsed from raw bytes. The signature is also parsed from raw bytes.

* [Encryption](http://godoc.org/github.com/daglabs/btcd/btcec#example-package--EncryptMessage)
  Demonstrates encrypting a message for a public key that is first parsed from
  raw bytes, then decrypting it using the corresponding private key.

* [Decryption](http://godoc.org/github.com/daglabs/btcd/btcec#example-package--DecryptMessage)
  Demonstrates decrypting a message using a private key that is first parsed
  from raw bytes.

## GPG Verification Key

All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:

- Download the public key from the Conformal website at
  https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt

- Import the public key into your GPG keyring:
  ```bash
  gpg --import GIT-GPG-KEY-conformal.txt
  ```

- Verify the release tag with the following command where `TAG_NAME` is a
  placeholder for the specific tag:
  ```bash
  git tag -v TAG_NAME
  ```

## License

Package btcec is licensed under the [copyfree](http://copyfree.org) ISC License
except for btcec.go and btcec_test.go which are under the same license as Go.

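The README's example links point at godoc pages, so as a compact, stand-alone illustration of the sign/verify flow it describes, here is the same shape of API using the standard library's crypto/ecdsa with the P-256 curve (secp256k1 itself is not in the Go standard library, so this is a stand-in for btcec's curve, not its API):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	// Generate a key pair; btcec would supply its secp256k1 curve here
	// instead of P256().
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Sign the hash of a message, never the raw message.
	hash := sha256.Sum256([]byte("test message"))
	r, s, err := ecdsa.Sign(rand.Reader, priv, hash[:])
	if err != nil {
		panic(err)
	}

	// Verify against the public key.
	fmt.Println("valid:", ecdsa.Verify(&priv.PublicKey, hash[:], r, s))
}
```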
@@ -1,123 +0,0 @@
// Copyright 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcec

import "testing"

// BenchmarkAddJacobian benchmarks the secp256k1 curve addJacobian function with
// Z values of 1 so that the associated optimizations are used.
func BenchmarkAddJacobian(b *testing.B) {
	b.StopTimer()
	x1 := new(fieldVal).SetHex("34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6")
	y1 := new(fieldVal).SetHex("0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232")
	z1 := new(fieldVal).SetHex("1")
	x2 := new(fieldVal).SetHex("34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6")
	y2 := new(fieldVal).SetHex("0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232")
	z2 := new(fieldVal).SetHex("1")
	x3, y3, z3 := new(fieldVal), new(fieldVal), new(fieldVal)
	curve := S256()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		curve.addJacobian(x1, y1, z1, x2, y2, z2, x3, y3, z3)
	}
}

// BenchmarkAddJacobianNotZOne benchmarks the secp256k1 curve addJacobian
// function with Z values other than one so the optimizations associated with
// Z=1 aren't used.
func BenchmarkAddJacobianNotZOne(b *testing.B) {
	b.StopTimer()
	x1 := new(fieldVal).SetHex("d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718")
	y1 := new(fieldVal).SetHex("5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190")
	z1 := new(fieldVal).SetHex("2")
	x2 := new(fieldVal).SetHex("91abba6a34b7481d922a4bd6a04899d5a686f6cf6da4e66a0cb427fb25c04bd4")
	y2 := new(fieldVal).SetHex("03fede65e30b4e7576a2abefc963ddbf9fdccbf791b77c29beadefe49951f7d1")
	z2 := new(fieldVal).SetHex("3")
	x3, y3, z3 := new(fieldVal), new(fieldVal), new(fieldVal)
	curve := S256()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		curve.addJacobian(x1, y1, z1, x2, y2, z2, x3, y3, z3)
	}
}

// BenchmarkScalarBaseMult benchmarks the secp256k1 curve ScalarBaseMult
// function.
func BenchmarkScalarBaseMult(b *testing.B) {
	k := fromHex("d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575")
	curve := S256()
	for i := 0; i < b.N; i++ {
		curve.ScalarBaseMult(k.Bytes())
	}
}

// BenchmarkScalarBaseMultLarge benchmarks the secp256k1 curve ScalarBaseMult
// function with abnormally large k values.
func BenchmarkScalarBaseMultLarge(b *testing.B) {
	k := fromHex("d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c005751111111011111110")
	curve := S256()
	for i := 0; i < b.N; i++ {
		curve.ScalarBaseMult(k.Bytes())
	}
}

// BenchmarkScalarMult benchmarks the secp256k1 curve ScalarMult function.
func BenchmarkScalarMult(b *testing.B) {
	x := fromHex("34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6")
	y := fromHex("0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232")
	k := fromHex("d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575")
	curve := S256()
	for i := 0; i < b.N; i++ {
		curve.ScalarMult(x, y, k.Bytes())
	}
}

// BenchmarkNAF benchmarks the NAF function.
func BenchmarkNAF(b *testing.B) {
	k := fromHex("d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575")
	for i := 0; i < b.N; i++ {
		NAF(k.Bytes())
	}
}

// BenchmarkSigVerify benchmarks how long it takes the secp256k1 curve to
// verify signatures.
func BenchmarkSigVerify(b *testing.B) {
	b.StopTimer()
	// Randomly generated keypair.
	// Private key: 9e0699c91ca1e3b7e3c9ba71eb71c89890872be97576010fe593fbf3fd57e66d
	pubKey := PublicKey{
		Curve: S256(),
		X:     fromHex("d2e670a19c6d753d1a6d8b20bd045df8a08fb162cf508956c31268c6d81ffdab"),
		Y:     fromHex("ab65528eefbb8057aa85d597258a3fbd481a24633bc9b47a9aa045c91371de52"),
	}

	// Double sha256 of []byte{0x01, 0x02, 0x03, 0x04}
	msgHash := fromHex("8de472e2399610baaa7f84840547cd409434e31f5d3bd71e4d947f283874f9c0")
	sig := Signature{
		R: fromHex("fef45d2892953aa5bbcdb057b5e98b208f1617a7498af7eb765574e29b5d9c2c"),
		S: fromHex("d47563f52aac6b04b55de236b7c515eb9311757db01e02cff079c3ca6efb063f"),
	}

	if !sig.Verify(msgHash.Bytes(), &pubKey) {
		b.Errorf("Signature failed to verify")
		return
	}
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		sig.Verify(msgHash.Bytes(), &pubKey)
	}
}

// BenchmarkFieldNormalize benchmarks how long it takes the internal field
// to perform normalization (which includes modular reduction).
func BenchmarkFieldNormalize(b *testing.B) {
	// The normalize function is constant time so default value is fine.
	f := new(fieldVal)
	for i := 0; i < b.N; i++ {
		f.Normalize()
	}
}

958	btcec/btcec.go
@@ -1,958 +0,0 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Copyright 2011 ThePiachu. All rights reserved.
|
||||
// Copyright 2013-2014 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package btcec
|
||||
|
||||
// References:
|
||||
// [SECG]: Recommended Elliptic Curve Domain Parameters
|
||||
// http://www.secg.org/sec2-v2.pdf
|
||||
//
|
||||
// [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone)
|
||||
|
||||
// This package operates, internally, on Jacobian coordinates. For a given
|
||||
// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1)
|
||||
// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole
|
||||
// calculation can be performed within the transform (as in ScalarMult and
|
||||
// ScalarBaseMult). But even for Add and Double, it's faster to apply and
|
||||
// reverse the transform than to operate in affine coordinates.
|
||||
|
||||
import (
|
||||
"crypto/elliptic"
|
||||
"math/big"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
// fieldOne is simply the integer 1 in field representation. It is
|
||||
// used to avoid needing to create it multiple times during the internal
|
||||
// arithmetic.
|
||||
fieldOne = new(fieldVal).SetInt(1)
|
||||
)
|
||||
|
||||
// KoblitzCurve supports a koblitz curve implementation that fits the ECC Curve
|
||||
// interface from crypto/elliptic.
|
||||
type KoblitzCurve struct {
|
||||
*elliptic.CurveParams
|
||||
q *big.Int
|
||||
H int // cofactor of the curve.
|
||||
halfOrder *big.Int // half the order N
|
||||
|
||||
// byteSize is simply the bit size / 8 and is provided for convenience
|
||||
// since it is calculated repeatedly.
|
||||
byteSize int
|
||||
|
||||
// bytePoints
|
||||
bytePoints *[32][256][3]fieldVal
|
||||
|
||||
// The next 6 values are used specifically for endomorphism
|
||||
// optimizations in ScalarMult.
|
||||
|
||||
// lambda must fulfill lambda^3 = 1 mod N where N is the order of G.
|
||||
lambda *big.Int
|
||||
|
||||
// beta must fulfill beta^3 = 1 mod P where P is the prime field of the
|
||||
// curve.
|
||||
beta *fieldVal
|
||||
|
||||
// See the EndomorphismVectors in gensecp256k1.go to see how these are
|
||||
// derived.
|
||||
a1 *big.Int
|
||||
b1 *big.Int
|
||||
a2 *big.Int
|
||||
b2 *big.Int
|
||||
}
|
||||
|
||||
// Params returns the parameters for the curve.
|
||||
func (curve *KoblitzCurve) Params() *elliptic.CurveParams {
|
||||
return curve.CurveParams
|
||||
}
|
||||
|
||||
// bigAffineToField takes an affine point (x, y) as big integers and converts
|
||||
// it to an affine point as field values.
|
||||
func (curve *KoblitzCurve) bigAffineToField(x, y *big.Int) (*fieldVal, *fieldVal) {
|
||||
x3, y3 := new(fieldVal), new(fieldVal)
|
||||
x3.SetByteSlice(x.Bytes())
|
||||
y3.SetByteSlice(y.Bytes())
|
||||
|
||||
return x3, y3
|
||||
}
|
||||
|
||||
// fieldJacobianToBigAffine takes a Jacobian point (x, y, z) as field values and
|
||||
// converts it to an affine point as big integers.
|
||||
func (curve *KoblitzCurve) fieldJacobianToBigAffine(x, y, z *fieldVal) (*big.Int, *big.Int) {
|
||||
// Inversions are expensive and both point addition and point doubling
|
||||
// are faster when working with points that have a z value of one. So,
|
||||
// if the point needs to be converted to affine, go ahead and normalize
|
||||
// the point itself at the same time as the calculation is the same.
|
||||
var zInv, tempZ fieldVal
|
||||
zInv.Set(z).Inverse() // zInv = Z^-1
|
||||
tempZ.SquareVal(&zInv) // tempZ = Z^-2
|
||||
x.Mul(&tempZ) // X = X/Z^2 (mag: 1)
|
||||
y.Mul(tempZ.Mul(&zInv)) // Y = Y/Z^3 (mag: 1)
|
||||
z.SetInt(1) // Z = 1 (mag: 1)
|
||||
|
||||
// Normalize the x and y values.
|
||||
x.Normalize()
|
||||
y.Normalize()
|
||||
|
||||
// Convert the field values for the now affine point to big.Ints.
|
||||
x3, y3 := new(big.Int), new(big.Int)
|
||||
x3.SetBytes(x.Bytes()[:])
|
||||
y3.SetBytes(y.Bytes()[:])
|
||||
return x3, y3
|
||||
}
|
||||
|
||||
// IsOnCurve returns boolean if the point (x,y) is on the curve.
|
||||
// Part of the elliptic.Curve interface. This function differs from the
|
||||
// crypto/elliptic algorithm since a = 0 not -3.
|
||||
func (curve *KoblitzCurve) IsOnCurve(x, y *big.Int) bool {
|
||||
// Convert big ints to field values for faster arithmetic.
|
||||
fx, fy := curve.bigAffineToField(x, y)
|
||||
|
||||
// Elliptic curve equation for secp256k1 is: y^2 = x^3 + 7
|
||||
y2 := new(fieldVal).SquareVal(fy).Normalize()
|
||||
result := new(fieldVal).SquareVal(fx).Mul(fx).AddInt(7).Normalize()
|
||||
return y2.Equals(result)
|
||||
}
|
||||
|
||||
// addZ1AndZ2EqualsOne adds two Jacobian points that are already known to have
|
||||
// z values of 1 and stores the result in (x3, y3, z3). That is to say
|
||||
// (x1, y1, 1) + (x2, y2, 1) = (x3, y3, z3). It performs faster addition than
|
||||
// the generic add routine since less arithmetic is needed due to the ability to
|
||||
// avoid the z value multiplications.
|
||||
func (curve *KoblitzCurve) addZ1AndZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3 *fieldVal) {
|
||||
// To compute the point addition efficiently, this implementation splits
|
||||
// the equation into intermediate elements which are used to minimize
|
||||
// the number of field multiplications using the method shown at:
|
||||
// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl
|
||||
//
|
||||
// In particular it performs the calculations using the following:
|
||||
// H = X2-X1, HH = H^2, I = 4*HH, J = H*I, r = 2*(Y2-Y1), V = X1*I
|
||||
// X3 = r^2-J-2*V, Y3 = r*(V-X3)-2*Y1*J, Z3 = 2*H
|
||||
//
|
||||
// This results in a cost of 4 field multiplications, 2 field squarings,
|
||||
// 6 field additions, and 5 integer multiplications.
|
||||
|
||||
// When the x coordinates are the same for two points on the curve, the
|
||||
// y coordinates either must be the same, in which case it is point
|
||||
// doubling, or they are opposite and the result is the point at
|
||||
// infinity per the group law for elliptic curve cryptography.
|
||||
x1.Normalize()
|
||||
y1.Normalize()
|
||||
x2.Normalize()
|
||||
y2.Normalize()
|
||||
if x1.Equals(x2) {
|
||||
if y1.Equals(y2) {
|
||||
// Since x1 == x2 and y1 == y2, point doubling must be
|
||||
// done, otherwise the addition would end up dividing
|
||||
// by zero.
|
||||
curve.doubleJacobian(x1, y1, z1, x3, y3, z3)
|
||||
return
|
||||
}
|
||||
|
||||
// Since x1 == x2 and y1 == -y2, the sum is the point at
|
||||
// infinity per the group law.
|
||||
x3.SetInt(0)
|
||||
y3.SetInt(0)
|
||||
z3.SetInt(0)
|
||||
return
|
||||
}
|
||||
|
||||
// Calculate X3, Y3, and Z3 according to the intermediate elements
|
||||
// breakdown above.
|
||||
var h, i, j, r, v fieldVal
|
||||
var negJ, neg2V, negX3 fieldVal
|
||||
h.Set(x1).Negate(1).Add(x2) // H = X2-X1 (mag: 3)
|
||||
i.SquareVal(&h).MulInt(4) // I = 4*H^2 (mag: 4)
|
||||
j.Mul2(&h, &i) // J = H*I (mag: 1)
|
||||
r.Set(y1).Negate(1).Add(y2).MulInt(2) // r = 2*(Y2-Y1) (mag: 6)
|
||||
v.Mul2(x1, &i) // V = X1*I (mag: 1)
|
||||
negJ.Set(&j).Negate(1) // negJ = -J (mag: 2)
|
||||
neg2V.Set(&v).MulInt(2).Negate(2) // neg2V = -(2*V) (mag: 3)
|
||||
x3.Set(&r).Square().Add(&negJ).Add(&neg2V) // X3 = r^2-J-2*V (mag: 6)
|
||||
negX3.Set(x3).Negate(6) // negX3 = -X3 (mag: 7)
|
||||
j.Mul(y1).MulInt(2).Negate(2) // J = -(2*Y1*J) (mag: 3)
|
||||
y3.Set(&v).Add(&negX3).Mul(&r).Add(&j) // Y3 = r*(V-X3)-2*Y1*J (mag: 4)
|
||||
z3.Set(&h).MulInt(2) // Z3 = 2*H (mag: 6)
|
||||
|
||||
// Normalize the resulting field values to a magnitude of 1 as needed.
|
||||
x3.Normalize()
|
||||
y3.Normalize()
|
||||
z3.Normalize()
|
||||
}
|
||||
|
||||
// addZ1EqualsZ2 adds two Jacobian points that are already known to have the
|
||||
// same z value and stores the result in (x3, y3, z3). That is to say
|
||||
// (x1, y1, z1) + (x2, y2, z1) = (x3, y3, z3). It performs faster addition than
|
||||
// the generic add routine since less arithmetic is needed due to the known
|
||||
// equivalence.
|
||||
func (curve *KoblitzCurve) addZ1EqualsZ2(x1, y1, z1, x2, y2, x3, y3, z3 *fieldVal) {
|
||||
// To compute the point addition efficiently, this implementation splits
|
||||
// the equation into intermediate elements which are used to minimize
|
||||
// the number of field multiplications using a slightly modified version
|
||||
// of the method shown at:
|
||||
// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl
|
||||
//
|
||||
// In particular it performs the calculations using the following:
|
||||
// A = X2-X1, B = A^2, C=Y2-Y1, D = C^2, E = X1*B, F = X2*B
|
||||
// X3 = D-E-F, Y3 = C*(E-X3)-Y1*(F-E), Z3 = Z1*A
|
||||
//
|
||||
// This results in a cost of 5 field multiplications, 2 field squarings,
|
||||
// 9 field additions, and 0 integer multiplications.
|
||||
|
||||
// When the x coordinates are the same for two points on the curve, the
|
||||
// y coordinates either must be the same, in which case it is point
|
||||
// doubling, or they are opposite and the result is the point at
|
||||
// infinity per the group law for elliptic curve cryptography.
|
||||
x1.Normalize()
|
||||
y1.Normalize()
|
||||
x2.Normalize()
|
||||
y2.Normalize()
|
||||
if x1.Equals(x2) {
|
||||
if y1.Equals(y2) {
|
||||
// Since x1 == x2 and y1 == y2, point doubling must be
|
||||
// done, otherwise the addition would end up dividing
|
||||
// by zero.
|
||||
curve.doubleJacobian(x1, y1, z1, x3, y3, z3)
|
||||
return
|
||||
}
|
||||
|
||||
// Since x1 == x2 and y1 == -y2, the sum is the point at
|
||||
// infinity per the group law.
|
||||
x3.SetInt(0)
|
||||
y3.SetInt(0)
|
||||
z3.SetInt(0)
|
||||
return
|
||||
}
|
||||
|
||||
// Calculate X3, Y3, and Z3 according to the intermediate elements
|
||||
// breakdown above.
|
||||
var a, b, c, d, e, f fieldVal
|
||||
var negX1, negY1, negE, negX3 fieldVal
|
||||
negX1.Set(x1).Negate(1) // negX1 = -X1 (mag: 2)
|
||||
negY1.Set(y1).Negate(1) // negY1 = -Y1 (mag: 2)
|
||||
a.Set(&negX1).Add(x2) // A = X2-X1 (mag: 3)
|
||||
b.SquareVal(&a) // B = A^2 (mag: 1)
|
||||
c.Set(&negY1).Add(y2) // C = Y2-Y1 (mag: 3)
|
||||
d.SquareVal(&c) // D = C^2 (mag: 1)
|
||||
e.Mul2(x1, &b) // E = X1*B (mag: 1)
|
||||
negE.Set(&e).Negate(1) // negE = -E (mag: 2)
|
||||
f.Mul2(x2, &b) // F = X2*B (mag: 1)
|
||||
x3.Add2(&e, &f).Negate(3).Add(&d) // X3 = D-E-F (mag: 5)
|
||||
negX3.Set(x3).Negate(5).Normalize() // negX3 = -X3 (mag: 1)
|
||||
y3.Set(y1).Mul(f.Add(&negE)).Negate(3) // Y3 = -(Y1*(F-E)) (mag: 4)
|
||||
y3.Add(e.Add(&negX3).Mul(&c)) // Y3 = C*(E-X3)+Y3 (mag: 5)
|
||||
z3.Mul2(z1, &a) // Z3 = Z1*A (mag: 1)
|
||||
|
||||
// Normalize the resulting field values to a magnitude of 1 as needed.
|
||||
x3.Normalize()
|
||||
y3.Normalize()
|
||||
}
|
||||
|
||||

// addZ2EqualsOne adds two Jacobian points when the second point is already
// known to have a z value of 1 (and the z value for the first point is not 1)
// and stores the result in (x3, y3, z3). That is to say (x1, y1, z1) +
// (x2, y2, 1) = (x3, y3, z3). It performs faster addition than the generic
// add routine since less arithmetic is needed due to the ability to avoid
// multiplications by the second point's z value.
func (curve *KoblitzCurve) addZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3 *fieldVal) {
	// To compute the point addition efficiently, this implementation splits
	// the equation into intermediate elements which are used to minimize
	// the number of field multiplications using the method shown at:
	// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl
	//
	// In particular it performs the calculations using the following:
	// Z1Z1 = Z1^2, U2 = X2*Z1Z1, S2 = Y2*Z1*Z1Z1, H = U2-X1, HH = H^2,
	// I = 4*HH, J = H*I, r = 2*(S2-Y1), V = X1*I
	// X3 = r^2-J-2*V, Y3 = r*(V-X3)-2*Y1*J, Z3 = (Z1+H)^2-Z1Z1-HH
	//
	// This results in a cost of 7 field multiplications, 4 field squarings,
	// 9 field additions, and 4 integer multiplications.

	// When the x coordinates are the same for two points on the curve, the
	// y coordinates either must be the same, in which case it is point
	// doubling, or they are opposite and the result is the point at
	// infinity per the group law for elliptic curve cryptography. Since
	// any number of Jacobian coordinates can represent the same affine
	// point, the x and y values need to be converted to like terms. Due to
	// the assumption made for this function that the second point has a z
	// value of 1 (z2=1), the first point is already "converted".
	var z1z1, u2, s2 fieldVal
	x1.Normalize()
	y1.Normalize()
	z1z1.SquareVal(z1)                        // Z1Z1 = Z1^2 (mag: 1)
	u2.Set(x2).Mul(&z1z1).Normalize()         // U2 = X2*Z1Z1 (mag: 1)
	s2.Set(y2).Mul(&z1z1).Mul(z1).Normalize() // S2 = Y2*Z1*Z1Z1 (mag: 1)
	if x1.Equals(&u2) {
		if y1.Equals(&s2) {
			// Since x1 == x2 and y1 == y2, point doubling must be
			// done, otherwise the addition would end up dividing
			// by zero.
			curve.doubleJacobian(x1, y1, z1, x3, y3, z3)
			return
		}

		// Since x1 == x2 and y1 == -y2, the sum is the point at
		// infinity per the group law.
		x3.SetInt(0)
		y3.SetInt(0)
		z3.SetInt(0)
		return
	}

	// Calculate X3, Y3, and Z3 according to the intermediate elements
	// breakdown above.
	var h, hh, i, j, r, rr, v fieldVal
	var negX1, negY1, negX3 fieldVal
	negX1.Set(x1).Negate(1)                // negX1 = -X1 (mag: 2)
	h.Add2(&u2, &negX1)                    // H = U2-X1 (mag: 3)
	hh.SquareVal(&h)                       // HH = H^2 (mag: 1)
	i.Set(&hh).MulInt(4)                   // I = 4 * HH (mag: 4)
	j.Mul2(&h, &i)                         // J = H*I (mag: 1)
	negY1.Set(y1).Negate(1)                // negY1 = -Y1 (mag: 2)
	r.Set(&s2).Add(&negY1).MulInt(2)       // r = 2*(S2-Y1) (mag: 6)
	rr.SquareVal(&r)                       // rr = r^2 (mag: 1)
	v.Mul2(x1, &i)                         // V = X1*I (mag: 1)
	x3.Set(&v).MulInt(2).Add(&j).Negate(3) // X3 = -(J+2*V) (mag: 4)
	x3.Add(&rr)                            // X3 = r^2+X3 (mag: 5)
	negX3.Set(x3).Negate(5)                // negX3 = -X3 (mag: 6)
	y3.Set(y1).Mul(&j).MulInt(2).Negate(2) // Y3 = -(2*Y1*J) (mag: 3)
	y3.Add(v.Add(&negX3).Mul(&r))          // Y3 = r*(V-X3)+Y3 (mag: 4)
	z3.Add2(z1, &h).Square()               // Z3 = (Z1+H)^2 (mag: 1)
	z3.Add(z1z1.Add(&hh).Negate(2))        // Z3 = Z3-(Z1Z1+HH) (mag: 4)

	// Normalize the resulting field values to a magnitude of 1 as needed.
	x3.Normalize()
	y3.Normalize()
	z3.Normalize()
}

// addGeneric adds two Jacobian points (x1, y1, z1) and (x2, y2, z2) without any
// assumptions about the z values of the two points and stores the result in
// (x3, y3, z3). That is to say (x1, y1, z1) + (x2, y2, z2) = (x3, y3, z3). It
// is the slowest of the add routines due to requiring the most arithmetic.
func (curve *KoblitzCurve) addGeneric(x1, y1, z1, x2, y2, z2, x3, y3, z3 *fieldVal) {
	// To compute the point addition efficiently, this implementation splits
	// the equation into intermediate elements which are used to minimize
	// the number of field multiplications using the method shown at:
	// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
	//
	// In particular it performs the calculations using the following:
	// Z1Z1 = Z1^2, Z2Z2 = Z2^2, U1 = X1*Z2Z2, U2 = X2*Z1Z1, S1 = Y1*Z2*Z2Z2
	// S2 = Y2*Z1*Z1Z1, H = U2-U1, I = (2*H)^2, J = H*I, r = 2*(S2-S1)
	// V = U1*I
	// X3 = r^2-J-2*V, Y3 = r*(V-X3)-2*S1*J, Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2)*H
	//
	// This results in a cost of 11 field multiplications, 5 field squarings,
	// 9 field additions, and 4 integer multiplications.

	// When the x coordinates are the same for two points on the curve, the
	// y coordinates either must be the same, in which case it is point
	// doubling, or they are opposite and the result is the point at
	// infinity. Since any number of Jacobian coordinates can represent the
	// same affine point, the x and y values need to be converted to like
	// terms.
	var z1z1, z2z2, u1, u2, s1, s2 fieldVal
	z1z1.SquareVal(z1)                        // Z1Z1 = Z1^2 (mag: 1)
	z2z2.SquareVal(z2)                        // Z2Z2 = Z2^2 (mag: 1)
	u1.Set(x1).Mul(&z2z2).Normalize()         // U1 = X1*Z2Z2 (mag: 1)
	u2.Set(x2).Mul(&z1z1).Normalize()         // U2 = X2*Z1Z1 (mag: 1)
	s1.Set(y1).Mul(&z2z2).Mul(z2).Normalize() // S1 = Y1*Z2*Z2Z2 (mag: 1)
	s2.Set(y2).Mul(&z1z1).Mul(z1).Normalize() // S2 = Y2*Z1*Z1Z1 (mag: 1)
	if u1.Equals(&u2) {
		if s1.Equals(&s2) {
			// Since x1 == x2 and y1 == y2, point doubling must be
			// done, otherwise the addition would end up dividing
			// by zero.
			curve.doubleJacobian(x1, y1, z1, x3, y3, z3)
			return
		}

		// Since x1 == x2 and y1 == -y2, the sum is the point at
		// infinity per the group law.
		x3.SetInt(0)
		y3.SetInt(0)
		z3.SetInt(0)
		return
	}

	// Calculate X3, Y3, and Z3 according to the intermediate elements
	// breakdown above.
	var h, i, j, r, rr, v fieldVal
	var negU1, negS1, negX3 fieldVal
	negU1.Set(&u1).Negate(1)               // negU1 = -U1 (mag: 2)
	h.Add2(&u2, &negU1)                    // H = U2-U1 (mag: 3)
	i.Set(&h).MulInt(2).Square()           // I = (2*H)^2 (mag: 2)
	j.Mul2(&h, &i)                         // J = H*I (mag: 1)
	negS1.Set(&s1).Negate(1)               // negS1 = -S1 (mag: 2)
	r.Set(&s2).Add(&negS1).MulInt(2)       // r = 2*(S2-S1) (mag: 6)
	rr.SquareVal(&r)                       // rr = r^2 (mag: 1)
	v.Mul2(&u1, &i)                        // V = U1*I (mag: 1)
	x3.Set(&v).MulInt(2).Add(&j).Negate(3) // X3 = -(J+2*V) (mag: 4)
	x3.Add(&rr)                            // X3 = r^2+X3 (mag: 5)
	negX3.Set(x3).Negate(5)                // negX3 = -X3 (mag: 6)
	y3.Mul2(&s1, &j).MulInt(2).Negate(2)   // Y3 = -(2*S1*J) (mag: 3)
	y3.Add(v.Add(&negX3).Mul(&r))          // Y3 = r*(V-X3)+Y3 (mag: 4)
	z3.Add2(z1, z2).Square()               // Z3 = (Z1+Z2)^2 (mag: 1)
	z3.Add(z1z1.Add(&z2z2).Negate(2))      // Z3 = Z3-(Z1Z1+Z2Z2) (mag: 4)
	z3.Mul(&h)                             // Z3 = Z3*H (mag: 1)

	// Normalize the resulting field values to a magnitude of 1 as needed.
	x3.Normalize()
	y3.Normalize()
}

// addJacobian adds the passed Jacobian points (x1, y1, z1) and (x2, y2, z2)
// together and stores the result in (x3, y3, z3).
func (curve *KoblitzCurve) addJacobian(x1, y1, z1, x2, y2, z2, x3, y3, z3 *fieldVal) {
	// A point at infinity is the identity according to the group law for
	// elliptic curve cryptography. Thus, ∞ + P = P and P + ∞ = P.
	if (x1.IsZero() && y1.IsZero()) || z1.IsZero() {
		x3.Set(x2)
		y3.Set(y2)
		z3.Set(z2)
		return
	}
	if (x2.IsZero() && y2.IsZero()) || z2.IsZero() {
		x3.Set(x1)
		y3.Set(y1)
		z3.Set(z1)
		return
	}

	// Faster point addition can be achieved when certain assumptions are
	// met. For example, when both points have the same z value, arithmetic
	// on the z values can be avoided. This section thus checks for these
	// conditions and calls an appropriate add function which is accelerated
	// by using those assumptions.
	z1.Normalize()
	z2.Normalize()
	isZ1One := z1.Equals(fieldOne)
	isZ2One := z2.Equals(fieldOne)
	switch {
	case isZ1One && isZ2One:
		curve.addZ1AndZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3)
		return
	case z1.Equals(z2):
		curve.addZ1EqualsZ2(x1, y1, z1, x2, y2, x3, y3, z3)
		return
	case isZ2One:
		curve.addZ2EqualsOne(x1, y1, z1, x2, y2, x3, y3, z3)
		return
	}

	// None of the above assumptions are true, so fall back to generic
	// point addition.
	curve.addGeneric(x1, y1, z1, x2, y2, z2, x3, y3, z3)
}

// Add returns the sum of (x1,y1) and (x2,y2). Part of the elliptic.Curve
// interface.
func (curve *KoblitzCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
	// A point at infinity is the identity according to the group law for
	// elliptic curve cryptography. Thus, ∞ + P = P and P + ∞ = P.
	if x1.Sign() == 0 && y1.Sign() == 0 {
		return x2, y2
	}
	if x2.Sign() == 0 && y2.Sign() == 0 {
		return x1, y1
	}

	// Convert the affine coordinates from big integers to field values
	// and do the point addition in Jacobian projective space.
	fx1, fy1 := curve.bigAffineToField(x1, y1)
	fx2, fy2 := curve.bigAffineToField(x2, y2)
	fx3, fy3, fz3 := new(fieldVal), new(fieldVal), new(fieldVal)
	fOne := new(fieldVal).SetInt(1)
	curve.addJacobian(fx1, fy1, fOne, fx2, fy2, fOne, fx3, fy3, fz3)

	// Convert the Jacobian coordinate field values back to affine big
	// integers.
	return curve.fieldJacobianToBigAffine(fx3, fy3, fz3)
}
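
// A minimal usage sketch, added for this write-up rather than taken from the
// original file: doubling the generator through the public interface. All of
// the names used here exist on the curve returned by S256.
//
//	curve := S256()
//	sumX, sumY := curve.Add(curve.Gx, curve.Gy, curve.Gx, curve.Gy) // G + G = 2G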

// doubleZ1EqualsOne performs point doubling on the passed Jacobian point
// when the point is already known to have a z value of 1 and stores
// the result in (x3, y3, z3). That is to say (x3, y3, z3) = 2*(x1, y1, 1). It
// performs faster point doubling than the generic routine since less arithmetic
// is needed due to the ability to avoid multiplication by the z value.
func (curve *KoblitzCurve) doubleZ1EqualsOne(x1, y1, x3, y3, z3 *fieldVal) {
	// This function uses the assumption that z1 is 1, thus the point
	// doubling formulas reduce to:
	//
	// X3 = (3*X1^2)^2 - 8*X1*Y1^2
	// Y3 = (3*X1^2)*(4*X1*Y1^2 - X3) - 8*Y1^4
	// Z3 = 2*Y1
	//
	// To compute the above efficiently, this implementation splits the
	// equation into intermediate elements which are used to minimize the
	// number of field multiplications in favor of field squarings which
	// are roughly 35% faster than field multiplications with the current
	// implementation at the time this was written.
	//
	// This uses a slightly modified version of the method shown at:
	// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl
	//
	// In particular it performs the calculations using the following:
	// A = X1^2, B = Y1^2, C = B^2, D = 2*((X1+B)^2-A-C)
	// E = 3*A, F = E^2, X3 = F-2*D, Y3 = E*(D-X3)-8*C
	// Z3 = 2*Y1
	//
	// This results in a cost of 1 field multiplication, 5 field squarings,
	// 6 field additions, and 5 integer multiplications.
	var a, b, c, d, e, f fieldVal
	z3.Set(y1).MulInt(2)                     // Z3 = 2*Y1 (mag: 2)
	a.SquareVal(x1)                          // A = X1^2 (mag: 1)
	b.SquareVal(y1)                          // B = Y1^2 (mag: 1)
	c.SquareVal(&b)                          // C = B^2 (mag: 1)
	b.Add(x1).Square()                       // B = (X1+B)^2 (mag: 1)
	d.Set(&a).Add(&c).Negate(2)              // D = -(A+C) (mag: 3)
	d.Add(&b).MulInt(2)                      // D = 2*(B+D) (mag: 8)
	e.Set(&a).MulInt(3)                      // E = 3*A (mag: 3)
	f.SquareVal(&e)                          // F = E^2 (mag: 1)
	x3.Set(&d).MulInt(2).Negate(16)          // X3 = -(2*D) (mag: 17)
	x3.Add(&f)                               // X3 = F+X3 (mag: 18)
	f.Set(x3).Negate(18).Add(&d).Normalize() // F = D-X3 (mag: 1)
	y3.Set(&c).MulInt(8).Negate(8)           // Y3 = -(8*C) (mag: 9)
	y3.Add(f.Mul(&e))                        // Y3 = E*F+Y3 (mag: 10)

	// Normalize the field values back to a magnitude of 1.
	x3.Normalize()
	y3.Normalize()
	z3.Normalize()
}

// doubleGeneric performs point doubling on the passed Jacobian point without
// any assumptions about the z value and stores the result in (x3, y3, z3).
// That is to say (x3, y3, z3) = 2*(x1, y1, z1). It is the slowest of the point
// doubling routines due to requiring the most arithmetic.
func (curve *KoblitzCurve) doubleGeneric(x1, y1, z1, x3, y3, z3 *fieldVal) {
	// Point doubling formula for Jacobian coordinates for the secp256k1
	// curve:
	// X3 = (3*X1^2)^2 - 8*X1*Y1^2
	// Y3 = (3*X1^2)*(4*X1*Y1^2 - X3) - 8*Y1^4
	// Z3 = 2*Y1*Z1
	//
	// To compute the above efficiently, this implementation splits the
	// equation into intermediate elements which are used to minimize the
	// number of field multiplications in favor of field squarings which
	// are roughly 35% faster than field multiplications with the current
	// implementation at the time this was written.
	//
	// This uses a slightly modified version of the method shown at:
	// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
	//
	// In particular it performs the calculations using the following:
	// A = X1^2, B = Y1^2, C = B^2, D = 2*((X1+B)^2-A-C)
	// E = 3*A, F = E^2, X3 = F-2*D, Y3 = E*(D-X3)-8*C
	// Z3 = 2*Y1*Z1
	//
	// This results in a cost of 1 field multiplication, 5 field squarings,
	// 6 field additions, and 5 integer multiplications.
	var a, b, c, d, e, f fieldVal
	z3.Mul2(y1, z1).MulInt(2)                // Z3 = 2*Y1*Z1 (mag: 2)
	a.SquareVal(x1)                          // A = X1^2 (mag: 1)
	b.SquareVal(y1)                          // B = Y1^2 (mag: 1)
	c.SquareVal(&b)                          // C = B^2 (mag: 1)
	b.Add(x1).Square()                       // B = (X1+B)^2 (mag: 1)
	d.Set(&a).Add(&c).Negate(2)              // D = -(A+C) (mag: 3)
	d.Add(&b).MulInt(2)                      // D = 2*(B+D) (mag: 8)
	e.Set(&a).MulInt(3)                      // E = 3*A (mag: 3)
	f.SquareVal(&e)                          // F = E^2 (mag: 1)
	x3.Set(&d).MulInt(2).Negate(16)          // X3 = -(2*D) (mag: 17)
	x3.Add(&f)                               // X3 = F+X3 (mag: 18)
	f.Set(x3).Negate(18).Add(&d).Normalize() // F = D-X3 (mag: 1)
	y3.Set(&c).MulInt(8).Negate(8)           // Y3 = -(8*C) (mag: 9)
	y3.Add(f.Mul(&e))                        // Y3 = E*F+Y3 (mag: 10)

	// Normalize the field values back to a magnitude of 1.
	x3.Normalize()
	y3.Normalize()
	z3.Normalize()
}

// doubleJacobian doubles the passed Jacobian point (x1, y1, z1) and stores the
// result in (x3, y3, z3).
func (curve *KoblitzCurve) doubleJacobian(x1, y1, z1, x3, y3, z3 *fieldVal) {
	// Doubling a point at infinity is still infinity.
	if y1.IsZero() || z1.IsZero() {
		x3.SetInt(0)
		y3.SetInt(0)
		z3.SetInt(0)
		return
	}

	// Slightly faster point doubling can be achieved when the z value is 1
	// by avoiding the multiplication on the z value. This section calls
	// a point doubling function which is accelerated by using that
	// assumption when possible.
	if z1.Normalize().Equals(fieldOne) {
		curve.doubleZ1EqualsOne(x1, y1, x3, y3, z3)
		return
	}

	// Fall back to generic point doubling which works with arbitrary z
	// values.
	curve.doubleGeneric(x1, y1, z1, x3, y3, z3)
}

// Double returns 2*(x1,y1). Part of the elliptic.Curve interface.
func (curve *KoblitzCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
	if y1.Sign() == 0 {
		return new(big.Int), new(big.Int)
	}

	// Convert the affine coordinates from big integers to field values
	// and do the point doubling in Jacobian projective space.
	fx1, fy1 := curve.bigAffineToField(x1, y1)
	fx3, fy3, fz3 := new(fieldVal), new(fieldVal), new(fieldVal)
	fOne := new(fieldVal).SetInt(1)
	curve.doubleJacobian(fx1, fy1, fOne, fx3, fy3, fz3)

	// Convert the Jacobian coordinate field values back to affine big
	// integers.
	return curve.fieldJacobianToBigAffine(fx3, fy3, fz3)
}
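
// Illustrative sketch (added here, not part of the original file): Double
// agrees with adding a point to itself, so for the generator G:
//
//	curve := S256()
//	dx, dy := curve.Double(curve.Gx, curve.Gy)
//	// (dx, dy) matches curve.Add(curve.Gx, curve.Gy, curve.Gx, curve.Gy)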

// splitK returns a balanced length-two representation of k and the signs of
// its two components. This is algorithm 3.74 from [GECC].
//
// One thing of note about this algorithm is that no matter what c1 and c2 are,
// the final equation of k = k1 + k2 * lambda (mod n) will hold. This is
// provable mathematically due to how a1/b1/a2/b2 are computed.
//
// c1 and c2 are chosen to minimize the max(k1,k2).
func (curve *KoblitzCurve) splitK(k []byte) ([]byte, []byte, int, int) {
	// All math here is done with big.Int, which is slow.
	// At some point, it might be useful to write something similar to
	// fieldVal but for N instead of P as the prime field if this ends up
	// being a bottleneck.
	bigIntK := new(big.Int)
	c1, c2 := new(big.Int), new(big.Int)
	tmp1, tmp2 := new(big.Int), new(big.Int)
	k1, k2 := new(big.Int), new(big.Int)

	bigIntK.SetBytes(k)
	// c1 = round(b2 * k / n) from step 4.
	// Rounding isn't really necessary and costs too much, hence skipped.
	c1.Mul(curve.b2, bigIntK)
	c1.Div(c1, curve.N)
	// c2 = round(b1 * k / n) from step 4 (sign reversed to optimize one step).
	// Rounding isn't really necessary and costs too much, hence skipped.
	c2.Mul(curve.b1, bigIntK)
	c2.Div(c2, curve.N)
	// k1 = k - c1 * a1 - c2 * a2 from step 5 (note c2's sign is reversed).
	tmp1.Mul(c1, curve.a1)
	tmp2.Mul(c2, curve.a2)
	k1.Sub(bigIntK, tmp1)
	k1.Add(k1, tmp2)
	// k2 = -c1 * b1 - c2 * b2 from step 5 (note c2's sign is reversed).
	tmp1.Mul(c1, curve.b1)
	tmp2.Mul(c2, curve.b2)
	k2.Sub(tmp2, tmp1)

	// Note Bytes() throws out the sign of k1 and k2. This matters
	// since k1 and/or k2 can be negative. Hence, we pass that
	// back separately.
	return k1.Bytes(), k2.Bytes(), k1.Sign(), k2.Sign()
}
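
// The decomposition can be sanity-checked by recombining the halves, since
// k1 + k2*lambda ≡ k (mod N) holds for any input. The sketch below is an
// illustration added for this write-up (curve is an assumed *KoblitzCurve
// such as S256(), kBytes an assumed 32-byte scalar), not original code:
//
//	k1Bytes, k2Bytes, s1, s2 := curve.splitK(kBytes)
//	k1 := new(big.Int).SetBytes(k1Bytes)
//	k2 := new(big.Int).SetBytes(k2Bytes)
//	if s1 < 0 {
//		k1.Neg(k1)
//	}
//	if s2 < 0 {
//		k2.Neg(k2)
//	}
//	sum := new(big.Int).Mul(k2, curve.lambda)
//	sum.Add(sum, k1).Mod(sum, curve.N)
//	// sum now equals new(big.Int).SetBytes(kBytes) mod N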

// moduloReduce reduces k from more than 32 bytes to 32 bytes and under. This
// is done by doing a simple modulo curve.N. We can do this since N*G = ∞ and
// thus any other valid point on the elliptic curve has the same order.
func (curve *KoblitzCurve) moduloReduce(k []byte) []byte {
	// Since the order of G is curve.N, we can use a much smaller number
	// by doing modulo curve.N.
	if len(k) > curve.byteSize {
		// Reduce k by performing modulo curve.N.
		tmpK := new(big.Int).SetBytes(k)
		tmpK.Mod(tmpK, curve.N)
		return tmpK.Bytes()
	}

	return k
}
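
// Small illustration added for this write-up (curve is an assumed
// *KoblitzCurve such as S256()): scalars act modulo the group order, so an
// over-long scalar and its reduction select the same point.
//
//	k := new(big.Int).Lsh(big.NewInt(1), 256) // 2^256 is 33 bytes
//	reduced := curve.moduloReduce(k.Bytes())  // at most 32 bytes
//	// reduced holds (2^256 mod N) as big-endian bytes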

// NAF takes a positive integer k and returns the Non-Adjacent Form (NAF) as two
// byte slices. The first is where 1s will be. The second is where -1s will
// be. NAF is convenient in that on average, only 1/3rd of its values are
// non-zero. This is algorithm 3.30 from [GECC].
//
// Essentially, this makes it possible to minimize the number of operations
// since the resulting ints returned will be at least 50% 0s.
func NAF(k []byte) ([]byte, []byte) {
	// The essence of this algorithm is that whenever we have consecutive 1s
	// in the binary, we want to put a -1 in the lowest bit and get a bunch
	// of 0s up to the highest bit of consecutive 1s. This is due to this
	// identity:
	// 2^n + 2^(n-1) + 2^(n-2) + ... + 2^(n-k) = 2^(n+1) - 2^(n-k)
	//
	// The algorithm thus may need to go 1 more bit than the length of the
	// bits we actually have, hence bits being 1 bit longer than was
	// necessary. Since we need to know whether adding will cause a carry,
	// we go from right-to-left in this addition.
	var carry, curIsOne, nextIsOne bool
	// these default to zero
	retPos := make([]byte, len(k)+1)
	retNeg := make([]byte, len(k)+1)
	for i := len(k) - 1; i >= 0; i-- {
		curByte := k[i]
		for j := uint(0); j < 8; j++ {
			curIsOne = curByte&1 == 1
			if j == 7 {
				if i == 0 {
					nextIsOne = false
				} else {
					nextIsOne = k[i-1]&1 == 1
				}
			} else {
				nextIsOne = curByte&2 == 2
			}
			if carry {
				if curIsOne {
					// This bit is 1, so continue to carry
					// and don't need to do anything.
				} else {
					// We've hit a 0 after some number of
					// 1s.
					if nextIsOne {
						// Start carrying again since
						// a new sequence of 1s is
						// starting.
						retNeg[i+1] += 1 << j
					} else {
						// Stop carrying since 1s have
						// stopped.
						carry = false
						retPos[i+1] += 1 << j
					}
				}
			} else if curIsOne {
				if nextIsOne {
					// If this is the start of at least 2
					// consecutive 1s, set the current one
					// to -1 and start carrying.
					retNeg[i+1] += 1 << j
					carry = true
				} else {
					// This is a singleton, not consecutive
					// 1s.
					retPos[i+1] += 1 << j
				}
			}
			curByte >>= 1
		}
	}
	if carry {
		retPos[0] = 1
		return retPos, retNeg
	}
	return retPos[1:], retNeg[1:]
}
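
// A small worked example, hand-computed from the algorithm above and added
// for this write-up: NAF([]byte{0x07}) returns retPos = []byte{0x08} and
// retNeg = []byte{0x01}, i.e. 7 = 8 - 1, so a run of three adjacent 1 bits
// collapses into a single +1 digit and a single -1 digit.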

// ScalarMult returns k*(Bx, By) where k is a big endian integer.
// Part of the elliptic.Curve interface.
func (curve *KoblitzCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
	// Point Q = ∞ (point at infinity).
	qx, qy, qz := new(fieldVal), new(fieldVal), new(fieldVal)

	// Decompose k into k1 and k2 in order to halve the number of EC ops.
	// See Algorithm 3.74 in [GECC].
	k1, k2, signK1, signK2 := curve.splitK(curve.moduloReduce(k))

	// The main equation here to remember is:
	// k * P = k1 * P + k2 * ϕ(P)
	//
	// P1 below is P in the equation, P2 below is ϕ(P) in the equation.
	p1x, p1y := curve.bigAffineToField(Bx, By)
	p1yNeg := new(fieldVal).NegateVal(p1y, 1)
	p1z := new(fieldVal).SetInt(1)

	// NOTE: ϕ(x,y) = (βx,y). The Jacobian z coordinate is 1, so this math
	// goes through.
	p2x := new(fieldVal).Mul2(p1x, curve.beta)
	p2y := new(fieldVal).Set(p1y)
	p2yNeg := new(fieldVal).NegateVal(p2y, 1)
	p2z := new(fieldVal).SetInt(1)

	// Flip the positive and negative values of the points as needed
	// depending on the signs of k1 and k2. As mentioned in the equation
	// above, each of k1 and k2 are multiplied by the respective point.
	// Since -k * P is the same thing as k * -P, and the group law for
	// elliptic curves states that P(x, y) = -P(x, -y), it's faster and
	// simplifies the code to just make the point negative.
	if signK1 == -1 {
		p1y, p1yNeg = p1yNeg, p1y
	}
	if signK2 == -1 {
		p2y, p2yNeg = p2yNeg, p2y
	}

	// NAF versions of k1 and k2 should have a lot more zeros.
	//
	// The Pos version of the bytes contain the +1s and the Neg versions
	// contain the -1s.
	k1PosNAF, k1NegNAF := NAF(k1)
	k2PosNAF, k2NegNAF := NAF(k2)
	k1Len := len(k1PosNAF)
	k2Len := len(k2PosNAF)

	m := k1Len
	if m < k2Len {
		m = k2Len
	}

	// Add left-to-right using the NAF optimization. See algorithm 3.77
	// from [GECC]. This should be faster overall since there will be a lot
	// more instances of 0, hence reducing the number of Jacobian additions
	// at the cost of 1 possible extra doubling.
	var k1BytePos, k1ByteNeg, k2BytePos, k2ByteNeg byte
	for i := 0; i < m; i++ {
		// Since we're going left-to-right, pad the front with 0s.
		if i < m-k1Len {
			k1BytePos = 0
			k1ByteNeg = 0
		} else {
			k1BytePos = k1PosNAF[i-(m-k1Len)]
			k1ByteNeg = k1NegNAF[i-(m-k1Len)]
		}
		if i < m-k2Len {
			k2BytePos = 0
			k2ByteNeg = 0
		} else {
			k2BytePos = k2PosNAF[i-(m-k2Len)]
			k2ByteNeg = k2NegNAF[i-(m-k2Len)]
		}

		for j := 7; j >= 0; j-- {
			// Q = 2 * Q
			curve.doubleJacobian(qx, qy, qz, qx, qy, qz)

			if k1BytePos&0x80 == 0x80 {
				curve.addJacobian(qx, qy, qz, p1x, p1y, p1z,
					qx, qy, qz)
			} else if k1ByteNeg&0x80 == 0x80 {
				curve.addJacobian(qx, qy, qz, p1x, p1yNeg, p1z,
					qx, qy, qz)
			}

			if k2BytePos&0x80 == 0x80 {
				curve.addJacobian(qx, qy, qz, p2x, p2y, p2z,
					qx, qy, qz)
			} else if k2ByteNeg&0x80 == 0x80 {
				curve.addJacobian(qx, qy, qz, p2x, p2yNeg, p2z,
					qx, qy, qz)
			}
			k1BytePos <<= 1
			k1ByteNeg <<= 1
			k2BytePos <<= 1
			k2ByteNeg <<= 1
		}
	}

	// Convert the Jacobian coordinate field values back to affine big.Ints.
	return curve.fieldJacobianToBigAffine(qx, qy, qz)
}
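
// Illustrative usage sketch added for this write-up (k is an assumed
// *big.Int scalar): multiplying an arbitrary point, here the generator, by
// a big-endian scalar.
//
//	curve := S256()
//	x, y := curve.ScalarMult(curve.Gx, curve.Gy, k.Bytes())
//	// For the generator this agrees with curve.ScalarBaseMult(k.Bytes())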

// ScalarBaseMult returns k*G where G is the base point of the group and k is a
// big endian integer.
// Part of the elliptic.Curve interface.
func (curve *KoblitzCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
	newK := curve.moduloReduce(k)
	diff := len(curve.bytePoints) - len(newK)

	// Point Q = ∞ (point at infinity).
	qx, qy, qz := new(fieldVal), new(fieldVal), new(fieldVal)

	// curve.bytePoints has all 256 byte points for each 8-bit window. The
	// strategy is to add up the byte points. This is best understood by
	// expressing k in base-256 which it already sort of is.
	// Each "digit" in the 8-bit window can be looked up using bytePoints
	// and added together.
	for i, byteVal := range newK {
		p := curve.bytePoints[diff+i][byteVal]
		curve.addJacobian(qx, qy, qz, &p[0], &p[1], &p[2], qx, qy, qz)
	}
	return curve.fieldJacobianToBigAffine(qx, qy, qz)
}
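
// A hand-worked illustration of the windowing, added for this write-up: for
// the 2-byte scalar k = 0x0102, the loop above computes k*G as
// (0x01*256)*G + (0x02)*G, where each term is a direct lookup in the
// precomputed bytePoints table rather than a fresh scalar multiplication.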

// QPlus1Div4 returns the (Q+1)/4 constant for the curve, where Q is the prime
// of the underlying field. It is used when calculating square roots via
// exponentiation.
func (curve *KoblitzCurve) QPlus1Div4() *big.Int {
	return curve.q
}
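
// Minimal sketch of the intended use, added for this write-up (ySquared is
// an assumed quadratic residue modulo P). Because P ≡ 3 (mod 4), a modular
// square root is a single exponentiation:
//
//	curve := S256()
//	y := new(big.Int).Exp(ySquared, curve.QPlus1Div4(), curve.P)
//	// y*y mod P equals ySquared whenever a square root exists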

var initonce sync.Once
var secp256k1 KoblitzCurve

func initAll() {
	initS256()
}

// fromHex converts the passed hex string into a big integer pointer and will
// panic if there is an error. This is only provided for the hard-coded
// constants so errors in the source code can be detected. It will only (and
// must only) be called for initialization purposes.
func fromHex(s string) *big.Int {
	r, ok := new(big.Int).SetString(s, 16)
	if !ok {
		panic("invalid hex in source file: " + s)
	}
	return r
}

func initS256() {
	// Curve parameters taken from [SECG] section 2.4.1.
	secp256k1.CurveParams = new(elliptic.CurveParams)
	secp256k1.P = fromHex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F")
	secp256k1.N = fromHex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141")
	secp256k1.B = fromHex("0000000000000000000000000000000000000000000000000000000000000007")
	secp256k1.Gx = fromHex("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798")
	secp256k1.Gy = fromHex("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8")
	secp256k1.BitSize = 256
	secp256k1.q = new(big.Int).Div(new(big.Int).Add(secp256k1.P,
		big.NewInt(1)), big.NewInt(4))
	secp256k1.H = 1
	secp256k1.halfOrder = new(big.Int).Rsh(secp256k1.N, 1)

	// Provided for convenience since this gets computed repeatedly.
	secp256k1.byteSize = secp256k1.BitSize / 8

	// Deserialize and set the pre-computed table used to accelerate scalar
	// base multiplication. This is hard-coded data, so any errors are
	// panics because it means something is wrong in the source code.
	if err := loadS256BytePoints(); err != nil {
		panic(err)
	}

	// Next 6 constants are from Hal Finney's bitcointalk.org post:
	// https://bitcointalk.org/index.php?topic=3238.msg45565#msg45565
	// May he rest in peace.
	//
	// They have also been independently derived from the code in the
	// EndomorphismVectors function in gensecp256k1.go.
	secp256k1.lambda = fromHex("5363AD4CC05C30E0A5261C028812645A122E22EA20816678DF02967C1B23BD72")
	secp256k1.beta = new(fieldVal).SetHex("7AE96A2B657C07106E64479EAC3434E99CF0497512F58995C1396C28719501EE")
	secp256k1.a1 = fromHex("3086D221A7D46BCDE86C90E49284EB15")
	secp256k1.b1 = fromHex("-E4437ED6010E88286F547FA90ABFE4C3")
	secp256k1.a2 = fromHex("114CA50F7A8E2F3F657C1108D9D44CFD8")
	secp256k1.b2 = fromHex("3086D221A7D46BCDE86C90E49284EB15")

	// Alternatively, we can use the parameters below, however, they seem
	// to be about 8% slower.
	// secp256k1.lambda = fromHex("AC9C52B33FA3CF1F5AD9E3FD77ED9BA4A880B9FC8EC739C2E0CFC810B51283CE")
	// secp256k1.beta = new(fieldVal).SetHex("851695D49A83F8EF919BB86153CBCB16630FB68AED0A766A3EC693D68E6AFA40")
	// secp256k1.a1 = fromHex("E4437ED6010E88286F547FA90ABFE4C3")
	// secp256k1.b1 = fromHex("-3086D221A7D46BCDE86C90E49284EB15")
	// secp256k1.a2 = fromHex("3086D221A7D46BCDE86C90E49284EB15")
	// secp256k1.b2 = fromHex("114CA50F7A8E2F3F657C1108D9D44CFD8")
}
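
// Illustrative check added for this write-up: lambda is a cube root of unity
// modulo N (and beta likewise modulo P), which is what makes the
// endomorphism ϕ(x, y) = (β*x, y) = λ*(x, y) usable for splitting scalars:
//
//	cube := new(big.Int).Exp(secp256k1.lambda, big.NewInt(3), secp256k1.N)
//	// cube.Cmp(big.NewInt(1)) == 0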

// S256 returns a Curve which implements secp256k1.
func S256() *KoblitzCurve {
	initonce.Do(initAll)
	return &secp256k1
}
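
// Typical usage sketch added for this write-up (privKeyBytes is an assumed
// 32-byte private scalar): the returned curve implements the standard
// library's elliptic.Curve interface, so it can be used wherever a
// crypto/elliptic curve is expected.
//
//	pubX, pubY := S256().ScalarBaseMult(privKeyBytes)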
@@ -1,889 +0,0 @@

// Copyright 2011 The Go Authors. All rights reserved.
// Copyright 2011 ThePiachu. All rights reserved.
// Copyright 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcec

import (
	"crypto/rand"
	"fmt"
	"math/big"
	"testing"
)

// isJacobianOnS256Curve returns whether or not the point (x, y, z) is on the
// secp256k1 curve.
func isJacobianOnS256Curve(x, y, z *fieldVal) bool {
	// Elliptic curve equation for secp256k1 is: y^2 = x^3 + 7
	// In Jacobian coordinates, Y = y/z^3 and X = x/z^2
	// Thus:
	// (y/z^3)^2 = (x/z^2)^3 + 7
	// y^2/z^6 = x^3/z^6 + 7
	// y^2 = x^3 + 7*z^6
	var y2, z2, x3, result fieldVal
	y2.SquareVal(y).Normalize()
	z2.SquareVal(z)
	x3.SquareVal(x).Mul(x)
	result.SquareVal(&z2).Mul(&z2).MulInt(7).Add(&x3).Normalize()
	return y2.Equals(&result)
}

// TestAddJacobian tests addition of points projected in Jacobian coordinates.
func TestAddJacobian(t *testing.T) {
	tests := []struct {
		x1, y1, z1 string // Coordinates (in hex) of first point to add
		x2, y2, z2 string // Coordinates (in hex) of second point to add
		x3, y3, z3 string // Coordinates (in hex) of expected point
	}{
		// Addition with a point at infinity (left hand side).
		// ∞ + P = P
		{
			"0",
			"0",
			"0",
			"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
			"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
			"1",
			"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
			"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
			"1",
		},
		// Addition with a point at infinity (right hand side).
		// P + ∞ = P
		{
			"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
			"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
			"1",
			"0",
			"0",
			"0",
			"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
			"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
			"1",
		},
		// Addition with z1=z2=1 different x values.
		{
			"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
			"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
			"1",
			"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
			"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
			"1",
			"0cfbc7da1e569b334460788faae0286e68b3af7379d5504efc25e4dba16e46a6",
			"e205f79361bbe0346b037b4010985dbf4f9e1e955e7d0d14aca876bfa79aad87",
			"44a5646b446e3877a648d6d381370d9ef55a83b666ebce9df1b1d7d65b817b2f",
		},
		// Addition with z1=z2=1 same x opposite y.
		// P(x, y, z) + P(x, -y, z) = infinity
		{
			"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
			"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
			"1",
			"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
			"f48e156428cf0276dc092da5856e182288d7569f97934a56fe44be60f0d359fd",
			"1",
			"0",
			"0",
			"0",
		},
		// Addition with z1=z2=1 same point.
		// P(x, y, z) + P(x, y, z) = 2P
		{
			"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
			"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
			"1",
			"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
			"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
			"1",
			"ec9f153b13ee7bd915882859635ea9730bf0dc7611b2c7b0e37ee64f87c50c27",
			"b082b53702c466dcf6e984a35671756c506c67c2fcb8adb408c44dd0755c8f2a",
			"16e3d537ae61fb1247eda4b4f523cfbaee5152c0d0d96b520376833c1e594464",
		},

		// Addition with z1=z2 (!=1) different x values.
		{
			"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
			"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
			"2",
			"5d2fe112c21891d440f65a98473cb626111f8a234d2cd82f22172e369f002147",
			"98e3386a0a622a35c4561ffb32308d8e1c6758e10ebb1b4ebd3d04b4eb0ecbe8",
			"2",
			"cfbc7da1e569b334460788faae0286e68b3af7379d5504efc25e4dba16e46a60",
			"817de4d86ef80d1ac0ded00426176fd3e787a5579f43452b2a1db021e6ac3778",
			"129591ad11b8e1de99235b4e04dc367bd56a0ed99baf3a77c6c75f5a6e05f08d",
		},
		// Addition with z1=z2 (!=1) same x opposite y.
		// P(x, y, z) + P(x, -y, z) = infinity
		{
			"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
			"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
			"2",
			"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
			"a470ab21467813b6e0496d2c2b70c11446bab4fcbc9a52b7f225f30e869aea9f",
			"2",
			"0",
			"0",
			"0",
		},
		// Addition with z1=z2 (!=1) same point.
		// P(x, y, z) + P(x, y, z) = 2P
		{
			"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
			"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
			"2",
			"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
			"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
			"2",
			"9f153b13ee7bd915882859635ea9730bf0dc7611b2c7b0e37ee65073c50fabac",
			"2b53702c466dcf6e984a35671756c506c67c2fcb8adb408c44dd125dc91cb988",
			"6e3d537ae61fb1247eda4b4f523cfbaee5152c0d0d96b520376833c2e5944a11",
		},

		// Addition with z1!=z2 and z2=1 different x values.
		{
			"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
			"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
			"2",
			"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
			"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
			"1",
			"3ef1f68795a6ccd1181e23eab80a1b9a2cebdcde755413bf097936eb5b91b4f3",
			"0bef26c377c068d606f6802130bb7e9f3c3d2abcfa1a295950ed81133561cb04",
			"252b235a2371c3bd3246b69c09b86cf7aad41db3375e74ef8d8ebeb4dc0be11a",
		},
		// Addition with z1!=z2 and z2=1 same x opposite y.
		// P(x, y, z) + P(x, -y, z) = infinity
		{
			"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
			"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
			"2",
			"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
			"f48e156428cf0276dc092da5856e182288d7569f97934a56fe44be60f0d359fd",
			"1",
			"0",
			"0",
			"0",
		},
		// Addition with z1!=z2 and z2=1 same point.
		// P(x, y, z) + P(x, y, z) = 2P
		{
			"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
			"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
			"2",
			"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
			"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
			"1",
			"9f153b13ee7bd915882859635ea9730bf0dc7611b2c7b0e37ee65073c50fabac",
			"2b53702c466dcf6e984a35671756c506c67c2fcb8adb408c44dd125dc91cb988",
			"6e3d537ae61fb1247eda4b4f523cfbaee5152c0d0d96b520376833c2e5944a11",
		},

		// Addition with z1!=z2 and z2!=1 different x values.
		{
			"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
			"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
			"2",
			"91abba6a34b7481d922a4bd6a04899d5a686f6cf6da4e66a0cb427fb25c04bd4",
			"03fede65e30b4e7576a2abefc963ddbf9fdccbf791b77c29beadefe49951f7d1",
			"3",
			"3f07081927fd3f6dadd4476614c89a09eba7f57c1c6c3b01fa2d64eac1eef31e",
			"949166e04ebc7fd95a9d77e5dfd88d1492ecffd189792e3944eb2b765e09e031",
			"eb8cba81bcffa4f44d75427506737e1f045f21e6d6f65543ee0e1d163540c931",
		},
		// Addition with z1!=z2 and z2!=1 same x opposite y.
		// P(x, y, z) + P(x, -y, z) = infinity
		{
			"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
			"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
			"2",
			"dcc3768780c74a0325e2851edad0dc8a566fa61a9e7fc4a34d13dcb509f99bc7",
			"cafc41904dd5428934f7d075129c8ba46eb622d4fc88d72cd1401452664add18",
			"3",
			"0",
			"0",
			"0",
		},
		// Addition with z1!=z2 and z2!=1 same point.
		// P(x, y, z) + P(x, y, z) = 2P
		{
			"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
			"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
			"2",
			"dcc3768780c74a0325e2851edad0dc8a566fa61a9e7fc4a34d13dcb509f99bc7",
			"3503be6fb22abd76cb082f8aed63745b9149dd2b037728d32ebfebac99b51f17",
			"3",
			"9f153b13ee7bd915882859635ea9730bf0dc7611b2c7b0e37ee65073c50fabac",
			"2b53702c466dcf6e984a35671756c506c67c2fcb8adb408c44dd125dc91cb988",
			"6e3d537ae61fb1247eda4b4f523cfbaee5152c0d0d96b520376833c2e5944a11",
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Convert hex to field values.
		x1 := new(fieldVal).SetHex(test.x1)
		y1 := new(fieldVal).SetHex(test.y1)
		z1 := new(fieldVal).SetHex(test.z1)
		x2 := new(fieldVal).SetHex(test.x2)
		y2 := new(fieldVal).SetHex(test.y2)
		z2 := new(fieldVal).SetHex(test.z2)
		x3 := new(fieldVal).SetHex(test.x3)
		y3 := new(fieldVal).SetHex(test.y3)
		z3 := new(fieldVal).SetHex(test.z3)

		// Ensure the test data is using points that are actually on
		// the curve (or the point at infinity).
		if !z1.IsZero() && !isJacobianOnS256Curve(x1, y1, z1) {
			t.Errorf("#%d first point is not on the curve -- "+
				"invalid test data", i)
			continue
		}
		if !z2.IsZero() && !isJacobianOnS256Curve(x2, y2, z2) {
			t.Errorf("#%d second point is not on the curve -- "+
				"invalid test data", i)
			continue
		}
		if !z3.IsZero() && !isJacobianOnS256Curve(x3, y3, z3) {
			t.Errorf("#%d expected point is not on the curve -- "+
				"invalid test data", i)
			continue
		}

		// Add the two points.
		rx, ry, rz := new(fieldVal), new(fieldVal), new(fieldVal)
		S256().addJacobian(x1, y1, z1, x2, y2, z2, rx, ry, rz)

		// Ensure result matches expected.
		if !rx.Equals(x3) || !ry.Equals(y3) || !rz.Equals(z3) {
			t.Errorf("#%d wrong result\ngot: (%v, %v, %v)\n"+
				"want: (%v, %v, %v)", i, rx, ry, rz, x3, y3, z3)
			continue
		}
	}
}

// TestAddAffine tests addition of points in affine coordinates.
func TestAddAffine(t *testing.T) {
	tests := []struct {
		x1, y1 string // Coordinates (in hex) of first point to add
		x2, y2 string // Coordinates (in hex) of second point to add
		x3, y3 string // Coordinates (in hex) of expected point
	}{
		// Addition with a point at infinity (left hand side).
		// ∞ + P = P
		{
			"0",
			"0",
			"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
			"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
			"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
			"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
		},
		// Addition with a point at infinity (right hand side).
		// P + ∞ = P
		{
			"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
			"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
			"0",
			"0",
			"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
			"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
		},

		// Addition with different x values.
		{
			"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
			"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
			"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
			"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
			"fd5b88c21d3143518d522cd2796f3d726793c88b3e05636bc829448e053fed69",
			"21cf4f6a5be5ff6380234c50424a970b1f7e718f5eb58f68198c108d642a137f",
		},
		// Addition with same x opposite y.
		// P(x, y) + P(x, -y) = infinity
		{
			"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
			"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
			"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
			"f48e156428cf0276dc092da5856e182288d7569f97934a56fe44be60f0d359fd",
			"0",
			"0",
		},
		// Addition with same point.
		// P(x, y) + P(x, y) = 2P
		{
			"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
			"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
			"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
			"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
			"59477d88ae64a104dbb8d31ec4ce2d91b2fe50fa628fb6a064e22582196b365b",
			"938dc8c0f13d1e75c987cb1a220501bd614b0d3dd9eb5c639847e1240216e3b6",
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Convert hex to big integers.
		x1, y1 := fromHex(test.x1), fromHex(test.y1)
		x2, y2 := fromHex(test.x2), fromHex(test.y2)
		x3, y3 := fromHex(test.x3), fromHex(test.y3)

		// Ensure the test data is using points that are actually on
		// the curve (or the point at infinity).
		if !(x1.Sign() == 0 && y1.Sign() == 0) && !S256().IsOnCurve(x1, y1) {
			t.Errorf("#%d first point is not on the curve -- "+
				"invalid test data", i)
			continue
		}
		if !(x2.Sign() == 0 && y2.Sign() == 0) && !S256().IsOnCurve(x2, y2) {
			t.Errorf("#%d second point is not on the curve -- "+
				"invalid test data", i)
			continue
		}
		if !(x3.Sign() == 0 && y3.Sign() == 0) && !S256().IsOnCurve(x3, y3) {
			t.Errorf("#%d expected point is not on the curve -- "+
				"invalid test data", i)
			continue
		}

		// Add the two points.
		rx, ry := S256().Add(x1, y1, x2, y2)

		// Ensure result matches expected.
		if rx.Cmp(x3) != 0 || ry.Cmp(y3) != 0 {
			t.Errorf("#%d wrong result\ngot: (%x, %x)\n"+
				"want: (%x, %x)", i, rx, ry, x3, y3)
			continue
		}
	}
}

// TestDoubleJacobian tests doubling of points projected in Jacobian
// coordinates.
func TestDoubleJacobian(t *testing.T) {
	tests := []struct {
		x1, y1, z1 string // Coordinates (in hex) of point to double
		x3, y3, z3 string // Coordinates (in hex) of expected point
	}{
		// Doubling a point at infinity is still infinity.
		{
			"0",
			"0",
			"0",
			"0",
			"0",
			"0",
		},
		// Doubling with z1=1.
		{
			"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
			"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
			"1",
			"ec9f153b13ee7bd915882859635ea9730bf0dc7611b2c7b0e37ee64f87c50c27",
			"b082b53702c466dcf6e984a35671756c506c67c2fcb8adb408c44dd0755c8f2a",
			"16e3d537ae61fb1247eda4b4f523cfbaee5152c0d0d96b520376833c1e594464",
		},
		// Doubling with z1!=1.
		{
			"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
			"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
			"2",
			"9f153b13ee7bd915882859635ea9730bf0dc7611b2c7b0e37ee65073c50fabac",
			"2b53702c466dcf6e984a35671756c506c67c2fcb8adb408c44dd125dc91cb988",
			"6e3d537ae61fb1247eda4b4f523cfbaee5152c0d0d96b520376833c2e5944a11",
		},
		// From btcd issue #709.
		{
			"201e3f75715136d2f93c4f4598f91826f94ca01f4233a5bd35de9708859ca50d",
			"bdf18566445e7562c6ada68aef02d498d7301503de5b18c6aef6e2b1722412e1",
			"0000000000000000000000000000000000000000000000000000000000000001",
			"4a5e0559863ebb4e9ed85f5c4fa76003d05d9a7626616e614a1f738621e3c220",
			"00000000000000000000000000000000000000000000000000000001b1388778",
			"7be30acc88bceac58d5b4d15de05a931ae602a07bcb6318d5dedc563e4482993",
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Convert hex to field values.
		x1 := new(fieldVal).SetHex(test.x1)
		y1 := new(fieldVal).SetHex(test.y1)
		z1 := new(fieldVal).SetHex(test.z1)
		x3 := new(fieldVal).SetHex(test.x3)
		y3 := new(fieldVal).SetHex(test.y3)
		z3 := new(fieldVal).SetHex(test.z3)

		// Ensure the test data is using points that are actually on
		// the curve (or the point at infinity).
		if !z1.IsZero() && !isJacobianOnS256Curve(x1, y1, z1) {
			t.Errorf("#%d first point is not on the curve -- "+
				"invalid test data", i)
			continue
		}
		if !z3.IsZero() && !isJacobianOnS256Curve(x3, y3, z3) {
			t.Errorf("#%d expected point is not on the curve -- "+
				"invalid test data", i)
			continue
		}

		// Double the point.
		rx, ry, rz := new(fieldVal), new(fieldVal), new(fieldVal)
		S256().doubleJacobian(x1, y1, z1, rx, ry, rz)

		// Ensure result matches expected.
		if !rx.Equals(x3) || !ry.Equals(y3) || !rz.Equals(z3) {
			t.Errorf("#%d wrong result\ngot: (%v, %v, %v)\n"+
				"want: (%v, %v, %v)", i, rx, ry, rz, x3, y3, z3)
			continue
		}
	}
}

// TestDoubleAffine tests doubling of points in affine coordinates.
func TestDoubleAffine(t *testing.T) {
	tests := []struct {
		x1, y1 string // Coordinates (in hex) of point to double
		x3, y3 string // Coordinates (in hex) of expected point
	}{
		// Doubling a point at infinity is still infinity.
		// 2*∞ = ∞ (point at infinity)
		{
			"0",
			"0",
			"0",
			"0",
		},

		// Random points.
		{
			"e41387ffd8baaeeb43c2faa44e141b19790e8ac1f7ff43d480dc132230536f86",
			"1b88191d430f559896149c86cbcb703193105e3cf3213c0c3556399836a2b899",
			"88da47a089d333371bd798c548ef7caae76e737c1980b452d367b3cfe3082c19",
			"3b6f659b09a362821dfcfefdbfbc2e59b935ba081b6c249eb147b3c2100b1bc1",
		},
		{
			"b3589b5d984f03ef7c80aeae444f919374799edf18d375cab10489a3009cff0c",
			"c26cf343875b3630e15bccc61202815b5d8f1fd11308934a584a5babe69db36a",
			"e193860172998751e527bb12563855602a227fc1f612523394da53b746bb2fb1",
			"2bfcf13d2f5ab8bb5c611fab5ebbed3dc2f057062b39a335224c22f090c04789",
		},
		{
			"2b31a40fbebe3440d43ac28dba23eee71c62762c3fe3dbd88b4ab82dc6a82340",
			"9ba7deb02f5c010e217607fd49d58db78ec273371ea828b49891ce2fd74959a1",
			"2c8d5ef0d343b1a1a48aa336078eadda8481cb048d9305dc4fdf7ee5f65973a2",
			"bb4914ac729e26d3cd8f8dc8f702f3f4bb7e0e9c5ae43335f6e94c2de6c3dc95",
		},
		{
			"61c64b760b51981fab54716d5078ab7dffc93730b1d1823477e27c51f6904c7a",
			"ef6eb16ea1a36af69d7f66524c75a3a5e84c13be8fbc2e811e0563c5405e49bd",
			"5f0dcdd2595f5ad83318a0f9da481039e36f135005420393e72dfca985b482f4",
			"a01c849b0837065c1cb481b0932c441f49d1cab1b4b9f355c35173d93f110ae0",
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Convert hex to big integers.
		x1, y1 := fromHex(test.x1), fromHex(test.y1)
		x3, y3 := fromHex(test.x3), fromHex(test.y3)

		// Ensure the test data is using points that are actually on
		// the curve (or the point at infinity).
		if !(x1.Sign() == 0 && y1.Sign() == 0) && !S256().IsOnCurve(x1, y1) {
			t.Errorf("#%d first point is not on the curve -- "+
				"invalid test data", i)
			continue
		}
		if !(x3.Sign() == 0 && y3.Sign() == 0) && !S256().IsOnCurve(x3, y3) {
			t.Errorf("#%d expected point is not on the curve -- "+
				"invalid test data", i)
			continue
		}

		// Double the point.
		rx, ry := S256().Double(x1, y1)

		// Ensure result matches expected.
		if rx.Cmp(x3) != 0 || ry.Cmp(y3) != 0 {
			t.Errorf("#%d wrong result\ngot: (%x, %x)\n"+
				"want: (%x, %x)", i, rx, ry, x3, y3)
			continue
		}
	}
}

func TestOnCurve(t *testing.T) {
	s256 := S256()
	if !s256.IsOnCurve(s256.Params().Gx, s256.Params().Gy) {
		t.Errorf("FAIL S256")
	}
}

type baseMultTest struct {
	k    string
	x, y string
}

//TODO: add more test vectors
var s256BaseMultTests = []baseMultTest{
	{
		"AA5E28D6A97A2479A65527F7290311A3624D4CC0FA1578598EE3C2613BF99522",
		"34F9460F0E4F08393D192B3C5133A6BA099AA0AD9FD54EBCCFACDFA239FF49C6",
		"B71EA9BD730FD8923F6D25A7A91E7DD7728A960686CB5A901BB419E0F2CA232",
	},
	{
		"7E2B897B8CEBC6361663AD410835639826D590F393D90A9538881735256DFAE3",
		"D74BF844B0862475103D96A611CF2D898447E288D34B360BC885CB8CE7C00575",
		"131C670D414C4546B88AC3FF664611B1C38CEB1C21D76369D7A7A0969D61D97D",
	},
	{
		"6461E6DF0FE7DFD05329F41BF771B86578143D4DD1F7866FB4CA7E97C5FA945D",
		"E8AECC370AEDD953483719A116711963CE201AC3EB21D3F3257BB48668C6A72F",
		"C25CAF2F0EBA1DDB2F0F3F47866299EF907867B7D27E95B3873BF98397B24EE1",
	},
	{
		"376A3A2CDCD12581EFFF13EE4AD44C4044B8A0524C42422A7E1E181E4DEECCEC",
		"14890E61FCD4B0BD92E5B36C81372CA6FED471EF3AA60A3E415EE4FE987DABA1",
		"297B858D9F752AB42D3BCA67EE0EB6DCD1C2B7B0DBE23397E66ADC272263F982",
	},
	{
		"1B22644A7BE026548810C378D0B2994EEFA6D2B9881803CB02CEFF865287D1B9",
		"F73C65EAD01C5126F28F442D087689BFA08E12763E0CEC1D35B01751FD735ED3",
		"F449A8376906482A84ED01479BD18882B919C140D638307F0C0934BA12590BDE",
	},
}

//TODO: test different curves as well?
func TestBaseMult(t *testing.T) {
	s256 := S256()
	for i, e := range s256BaseMultTests {
		k, ok := new(big.Int).SetString(e.k, 16)
		if !ok {
			t.Errorf("%d: bad value for k: %s", i, e.k)
		}
		x, y := s256.ScalarBaseMult(k.Bytes())
		if fmt.Sprintf("%X", x) != e.x || fmt.Sprintf("%X", y) != e.y {
			t.Errorf("%d: bad output for k=%s: got (%X, %X), want (%s, %s)", i, e.k, x, y, e.x, e.y)
		}
		if testing.Short() && i > 5 {
			break
		}
	}
}

func TestBaseMultVerify(t *testing.T) {
	s256 := S256()
	for bytes := 1; bytes < 40; bytes++ {
		for i := 0; i < 30; i++ {
			data := make([]byte, bytes)
			_, err := rand.Read(data)
			if err != nil {
				t.Errorf("failed to read random data for %d", i)
				continue
			}
			x, y := s256.ScalarBaseMult(data)
			xWant, yWant := s256.ScalarMult(s256.Gx, s256.Gy, data)
			if x.Cmp(xWant) != 0 || y.Cmp(yWant) != 0 {
				t.Errorf("%d: bad output for %X: got (%X, %X), want (%X, %X)", i, data, x, y, xWant, yWant)
			}
			if testing.Short() && i > 2 {
				break
			}
		}
	}
}

func TestScalarMult(t *testing.T) {
	tests := []struct {
		x  string
		y  string
		k  string
		rx string
		ry string
	}{
		// base mult, essentially.
		{
			"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
			"483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8",
			"18e14a7b6a307f426a94f8114701e7c8e774e7f9a47e2c2035db29a206321725",
			"50863ad64a87ae8a2fe83c1af1a8403cb53f53e486d8511dad8a04887e5b2352",
			"2cd470243453a299fa9e77237716103abc11a1df38855ed6f2ee187e9c582ba6",
		},
		// From btcd issue #709.
		{
			"000000000000000000000000000000000000000000000000000000000000002c",
			"420e7a99bba18a9d3952597510fd2b6728cfeafc21a4e73951091d4d8ddbe94e",
			"a2e8ba2e8ba2e8ba2e8ba2e8ba2e8ba219b51835b55cc30ebfe2f6599bc56f58",
			"a2112dcdfbcd10ae1133a358de7b82db68e0a3eb4b492cc8268d1e7118c98788",
			"27fc7463b7bb3c5f98ecf2c84a6272bb1681ed553d92c69f2dfe25a9f9fd3836",
		},
	}

	s256 := S256()
	for i, test := range tests {
		x, _ := new(big.Int).SetString(test.x, 16)
		y, _ := new(big.Int).SetString(test.y, 16)
		k, _ := new(big.Int).SetString(test.k, 16)
		xWant, _ := new(big.Int).SetString(test.rx, 16)
		yWant, _ := new(big.Int).SetString(test.ry, 16)
		xGot, yGot := s256.ScalarMult(x, y, k.Bytes())
		if xGot.Cmp(xWant) != 0 || yGot.Cmp(yWant) != 0 {
			t.Fatalf("%d: bad output: got (%X, %X), want (%X, %X)", i, xGot, yGot, xWant, yWant)
		}
	}
}

func TestScalarMultRand(t *testing.T) {
	// Strategy for this test:
	// Get a random exponent from the generator point at first.
	// This creates a new point which is used in the next iteration.
	// Use another random exponent on the new point.
	// We use BaseMult to verify by multiplying the previous exponent
	// and the new random exponent together (mod N).
	s256 := S256()
	x, y := s256.Gx, s256.Gy
	exponent := big.NewInt(1)
	for i := 0; i < 1024; i++ {
		data := make([]byte, 32)
		_, err := rand.Read(data)
		if err != nil {
			t.Fatalf("failed to read random data at %d", i)
			break
		}
		x, y = s256.ScalarMult(x, y, data)
		exponent.Mul(exponent, new(big.Int).SetBytes(data))
		xWant, yWant := s256.ScalarBaseMult(exponent.Bytes())
		if x.Cmp(xWant) != 0 || y.Cmp(yWant) != 0 {
			t.Fatalf("%d: bad output for %X: got (%X, %X), want (%X, %X)", i, data, x, y, xWant, yWant)
			break
		}
	}
}

func TestSplitK(t *testing.T) {
	tests := []struct {
		k      string
		k1, k2 string
		s1, s2 int
	}{
		{
			"6df2b5d30854069ccdec40ae022f5c948936324a4e9ebed8eb82cfd5a6b6d766",
			"00000000000000000000000000000000b776e53fb55f6b006a270d42d64ec2b1",
			"00000000000000000000000000000000d6cc32c857f1174b604eefc544f0c7f7",
			-1, -1,
		},
		{
			"6ca00a8f10632170accc1b3baf2a118fa5725f41473f8959f34b8f860c47d88d",
			"0000000000000000000000000000000007b21976c1795723c1bfbfa511e95b84",
			"00000000000000000000000000000000d8d2d5f9d20fc64fd2cf9bda09a5bf90",
			1, -1,
		},
		{
			"b2eda8ab31b259032d39cbc2a234af17fcee89c863a8917b2740b67568166289",
			"00000000000000000000000000000000507d930fecda7414fc4a523b95ef3c8c",
			"00000000000000000000000000000000f65ffb179df189675338c6185cb839be",
			-1, -1,
		},
		{
			"f6f00e44f179936f2befc7442721b0633f6bafdf7161c167ffc6f7751980e3a0",
			"0000000000000000000000000000000008d0264f10bcdcd97da3faa38f85308d",
			"0000000000000000000000000000000065fed1506eb6605a899a54e155665f79",
			-1, -1,
		},
		{
			"8679085ab081dc92cdd23091ce3ee998f6b320e419c3475fae6b5b7d3081996e",
			"0000000000000000000000000000000089fbf24fbaa5c3c137b4f1cedc51d975",
			"00000000000000000000000000000000d38aa615bd6754d6f4d51ccdaf529fea",
			-1, -1,
		},
		{
			"6b1247bb7931dfcae5b5603c8b5ae22ce94d670138c51872225beae6bba8cdb3",
			"000000000000000000000000000000008acc2a521b21b17cfb002c83be62f55d",
			"0000000000000000000000000000000035f0eff4d7430950ecb2d94193dedc79",
			-1, -1,
		},
		{
			"a2e8ba2e8ba2e8ba2e8ba2e8ba2e8ba219b51835b55cc30ebfe2f6599bc56f58",
			"0000000000000000000000000000000045c53aa1bb56fcd68c011e2dad6758e4",
			"00000000000000000000000000000000a2e79d200f27f2360fba57619936159b",
			-1, -1,
		},
	}

	s256 := S256()
	for i, test := range tests {
		k, ok := new(big.Int).SetString(test.k, 16)
		if !ok {
			t.Errorf("%d: bad value for k: %s", i, test.k)
		}
		k1, k2, k1Sign, k2Sign := s256.splitK(k.Bytes())
		k1str := fmt.Sprintf("%064x", k1)
		if test.k1 != k1str {
			t.Errorf("%d: bad k1: got %v, want %v", i, k1str, test.k1)
		}
		k2str := fmt.Sprintf("%064x", k2)
		if test.k2 != k2str {
			t.Errorf("%d: bad k2: got %v, want %v", i, k2str, test.k2)
		}
		if test.s1 != k1Sign {
			t.Errorf("%d: bad k1 sign: got %d, want %d", i, k1Sign, test.s1)
		}
		if test.s2 != k2Sign {
			t.Errorf("%d: bad k2 sign: got %d, want %d", i, k2Sign, test.s2)
		}
		k1Int := new(big.Int).SetBytes(k1)
		k1SignInt := new(big.Int).SetInt64(int64(k1Sign))
		k1Int.Mul(k1Int, k1SignInt)
		k2Int := new(big.Int).SetBytes(k2)
		k2SignInt := new(big.Int).SetInt64(int64(k2Sign))
		k2Int.Mul(k2Int, k2SignInt)
		gotK := new(big.Int).Mul(k2Int, s256.lambda)
		gotK.Add(k1Int, gotK)
		gotK.Mod(gotK, s256.N)
		if k.Cmp(gotK) != 0 {
			t.Errorf("%d: bad k: got %X, want %X", i, gotK.Bytes(), k.Bytes())
		}
	}
}
func TestSplitKRand(t *testing.T) {
	s256 := S256()
	for i := 0; i < 1024; i++ {
		bytesK := make([]byte, 32)
		_, err := rand.Read(bytesK)
		if err != nil {
			t.Fatalf("failed to read random data at %d", i)
			break
		}
		k := new(big.Int).SetBytes(bytesK)
		k1, k2, k1Sign, k2Sign := s256.splitK(bytesK)
		k1Int := new(big.Int).SetBytes(k1)
		k1SignInt := new(big.Int).SetInt64(int64(k1Sign))
		k1Int.Mul(k1Int, k1SignInt)
		k2Int := new(big.Int).SetBytes(k2)
		k2SignInt := new(big.Int).SetInt64(int64(k2Sign))
		k2Int.Mul(k2Int, k2SignInt)
		gotK := new(big.Int).Mul(k2Int, s256.lambda)
		gotK.Add(k1Int, gotK)
		gotK.Mod(gotK, s256.N)
		if k.Cmp(gotK) != 0 {
			t.Errorf("%d: bad k: got %X, want %X", i, gotK.Bytes(), k.Bytes())
		}
	}
}

// Test this curve's usage with the ecdsa package.

func testKeyGeneration(t *testing.T, c *KoblitzCurve, tag string) {
	priv, err := NewPrivateKey(c)
	if err != nil {
		t.Errorf("%s: error: %s", tag, err)
		return
	}
	if !c.IsOnCurve(priv.PublicKey.X, priv.PublicKey.Y) {
		t.Errorf("%s: public key invalid: %s", tag, err)
	}
}

func TestKeyGeneration(t *testing.T) {
	testKeyGeneration(t, S256(), "S256")
}

func testSignAndVerify(t *testing.T, c *KoblitzCurve, tag string) {
	priv, _ := NewPrivateKey(c)
	pub := priv.PubKey()

	hashed := []byte("testing")
	sig, err := priv.Sign(hashed)
	if err != nil {
		t.Errorf("%s: error signing: %s", tag, err)
		return
	}

	if !sig.Verify(hashed, pub) {
		t.Errorf("%s: Verify failed", tag)
	}

	hashed[0] ^= 0xff
	if sig.Verify(hashed, pub) {
		t.Errorf("%s: Verify always works!", tag)
	}
}

func TestSignAndVerify(t *testing.T) {
	testSignAndVerify(t, S256(), "S256")
}

func TestNAF(t *testing.T) {
	tests := []string{
		"6df2b5d30854069ccdec40ae022f5c948936324a4e9ebed8eb82cfd5a6b6d766",
		"b776e53fb55f6b006a270d42d64ec2b1",
		"d6cc32c857f1174b604eefc544f0c7f7",
		"45c53aa1bb56fcd68c011e2dad6758e4",
		"a2e79d200f27f2360fba57619936159b",
	}
	negOne := big.NewInt(-1)
	one := big.NewInt(1)
	two := big.NewInt(2)
	for i, test := range tests {
		want, _ := new(big.Int).SetString(test, 16)
		nafPos, nafNeg := NAF(want.Bytes())
		got := big.NewInt(0)
		// Check that the NAF representation comes up with the right number.
		for i := 0; i < len(nafPos); i++ {
			bytePos := nafPos[i]
			byteNeg := nafNeg[i]
			for j := 7; j >= 0; j-- {
				got.Mul(got, two)
				if bytePos&0x80 == 0x80 {
					got.Add(got, one)
				} else if byteNeg&0x80 == 0x80 {
					got.Add(got, negOne)
				}
				bytePos <<= 1
				byteNeg <<= 1
			}
		}
		if got.Cmp(want) != 0 {
			t.Errorf("%d: Failed NAF got %X want %X", i, got, want)
		}
	}
}

func TestNAFRand(t *testing.T) {
	negOne := big.NewInt(-1)
	one := big.NewInt(1)
	two := big.NewInt(2)
	for i := 0; i < 1024; i++ {
		data := make([]byte, 32)
		_, err := rand.Read(data)
		if err != nil {
			t.Fatalf("failed to read random data at %d", i)
			break
		}
		nafPos, nafNeg := NAF(data)
		want := new(big.Int).SetBytes(data)
		got := big.NewInt(0)
		// Check that the NAF representation comes up with the right number.
		for i := 0; i < len(nafPos); i++ {
			bytePos := nafPos[i]
			byteNeg := nafNeg[i]
			for j := 7; j >= 0; j-- {
				got.Mul(got, two)
				if bytePos&0x80 == 0x80 {
					got.Add(got, one)
				} else if byteNeg&0x80 == 0x80 {
					got.Add(got, negOne)
				}
				bytePos <<= 1
				byteNeg <<= 1
			}
		}
		if got.Cmp(want) != 0 {
			t.Errorf("%d: Failed NAF got %X want %X", i, got, want)
		}
	}
}
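For reference, NAF (non-adjacent form) rewrites an integer in signed binary digits from {-1, 0, 1} so that no two adjacent digits are nonzero, which is exactly what the decoding loops above reverse. A small self-contained sketch of the encoding for machine-sized integers (the helper name is illustrative, not part of the package):

package main

import "fmt"

// naf returns the non-adjacent form digits of k, least significant first:
// each digit is -1, 0, or 1, and no two adjacent digits are both nonzero.
func naf(k int) []int {
	var digits []int
	for k != 0 {
		d := 0
		if k%2 != 0 {
			d = 2 - k%4 // +1 or -1, chosen so k-d is divisible by 4
			k -= d
		}
		digits = append(digits, d)
		k /= 2
	}
	return digits
}

func main() {
	fmt.Println(naf(7)) // [-1 0 0 1], i.e. 7 = -1 + 2^3
}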
@@ -1,216 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcec

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"crypto/sha512"
	"errors"
	"io"
)

var (
	// ErrInvalidMAC occurs when the Message Authentication Code (MAC) check
	// fails during decryption. This happens because of either an invalid
	// private key or corrupt ciphertext.
	ErrInvalidMAC = errors.New("invalid mac hash")

	// errInputTooShort occurs when the input ciphertext to the Decrypt
	// function is less than 134 bytes long.
	errInputTooShort = errors.New("ciphertext too short")

	// errUnsupportedCurve occurs when the first two bytes of the encrypted
	// text aren't 0x02CA (= 714 = secp256k1, from OpenSSL).
	errUnsupportedCurve = errors.New("unsupported curve")

	errInvalidXLength = errors.New("invalid X length, must be 32")
	errInvalidYLength = errors.New("invalid Y length, must be 32")
	errInvalidPadding = errors.New("invalid PKCS#7 padding")

	// 0x02CA = 714
	ciphCurveBytes = [2]byte{0x02, 0xCA}
	// 0x20 = 32
	ciphCoordLength = [2]byte{0x00, 0x20}
)

// GenerateSharedSecret generates a shared secret based on a private key and a
// public key using Diffie-Hellman key exchange (ECDH) (RFC 4753).
// RFC 5903 Section 9 states we should only return x.
func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte {
	x, _ := pubkey.Curve.ScalarMult(pubkey.X, pubkey.Y, privkey.D.Bytes())
	return x.Bytes()
}

// Encrypt encrypts data for the target public key using AES-256-CBC. It also
// generates a private key (the pubkey of which is also in the output). The only
// supported curve is secp256k1. The `structure' that it encodes everything into
// is:
//
//	struct {
//		// Initialization Vector used for AES-256-CBC
//		IV [16]byte
//		// Public Key: curve(2) + len_of_pubkeyX(2) + pubkeyX +
//		// len_of_pubkeyY(2) + pubkeyY (curve = 714)
//		PublicKey [70]byte
//		// Cipher text
//		Data []byte
//		// HMAC-SHA-256 Message Authentication Code
//		HMAC [32]byte
//	}
//
// The primary aim is to ensure byte compatibility with Pyelliptic. Also, refer
// to section 5.8.1 of ANSI X9.63 for rationale on this format.
func Encrypt(pubkey *PublicKey, in []byte) ([]byte, error) {
	ephemeral, err := NewPrivateKey(S256())
	if err != nil {
		return nil, err
	}
	ecdhKey := GenerateSharedSecret(ephemeral, pubkey)
	derivedKey := sha512.Sum512(ecdhKey)
	keyE := derivedKey[:32]
	keyM := derivedKey[32:]

	paddedIn := addPKCSPadding(in)
	// IV + Curve params/X/Y + padded plaintext/ciphertext + HMAC-256
	out := make([]byte, aes.BlockSize+70+len(paddedIn)+sha256.Size)
	iv := out[:aes.BlockSize]
	if _, err = io.ReadFull(rand.Reader, iv); err != nil {
		return nil, err
	}
	// start writing public key
	pb := ephemeral.PubKey().SerializeUncompressed()
	offset := aes.BlockSize

	// curve and X length
	copy(out[offset:offset+4], append(ciphCurveBytes[:], ciphCoordLength[:]...))
	offset += 4
	// X
	copy(out[offset:offset+32], pb[1:33])
	offset += 32
	// Y length
	copy(out[offset:offset+2], ciphCoordLength[:])
	offset += 2
	// Y
	copy(out[offset:offset+32], pb[33:])
	offset += 32

	// start encryption
	block, err := aes.NewCipher(keyE)
	if err != nil {
		return nil, err
	}
	mode := cipher.NewCBCEncrypter(block, iv)
	mode.CryptBlocks(out[offset:len(out)-sha256.Size], paddedIn)

	// start HMAC-SHA-256
	hm := hmac.New(sha256.New, keyM)
	hm.Write(out[:len(out)-sha256.Size])          // everything is hashed
	copy(out[len(out)-sha256.Size:], hm.Sum(nil)) // write checksum

	return out, nil
}
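// Editorial note: as a quick cross-check of the layout documented above, the
// fixed (non-payload) portion of every Encrypt output works out to
//
//	aes.BlockSize + 70 + sha256.Size // 16 + 70 + 32 = 118 bytes
//
// so len(out) == 118 + len(addPKCSPadding(in)) for any input. The 134-byte
// minimum that errInputTooShort enforces in Decrypt below is this overhead
// plus one 16-byte AES block.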

// Decrypt decrypts data that was encrypted using the Encrypt function.
func Decrypt(priv *PrivateKey, in []byte) ([]byte, error) {
	// IV + Curve params/X/Y + 1 block + HMAC-256
	if len(in) < aes.BlockSize+70+aes.BlockSize+sha256.Size {
		return nil, errInputTooShort
	}

	// read iv
	iv := in[:aes.BlockSize]
	offset := aes.BlockSize

	// start reading pubkey
	if !bytes.Equal(in[offset:offset+2], ciphCurveBytes[:]) {
		return nil, errUnsupportedCurve
	}
	offset += 2

	if !bytes.Equal(in[offset:offset+2], ciphCoordLength[:]) {
		return nil, errInvalidXLength
	}
	offset += 2

	xBytes := in[offset : offset+32]
	offset += 32

	if !bytes.Equal(in[offset:offset+2], ciphCoordLength[:]) {
		return nil, errInvalidYLength
	}
	offset += 2

	yBytes := in[offset : offset+32]
	offset += 32

	pb := make([]byte, 65)
	pb[0] = byte(0x04) // uncompressed
	copy(pb[1:33], xBytes)
	copy(pb[33:], yBytes)
	// check if (X, Y) lies on the curve and create a Pubkey if it does
	pubkey, err := ParsePubKey(pb, S256())
	if err != nil {
		return nil, err
	}

	// check for cipher text length
	if (len(in)-aes.BlockSize-offset-sha256.Size)%aes.BlockSize != 0 {
		return nil, errInvalidPadding // not padded to 16 bytes
	}

	// read hmac
	messageMAC := in[len(in)-sha256.Size:]

	// generate shared secret
	ecdhKey := GenerateSharedSecret(priv, pubkey)
	derivedKey := sha512.Sum512(ecdhKey)
	keyE := derivedKey[:32]
	keyM := derivedKey[32:]

	// verify mac
	hm := hmac.New(sha256.New, keyM)
	hm.Write(in[:len(in)-sha256.Size]) // everything is hashed
	expectedMAC := hm.Sum(nil)
	if !hmac.Equal(messageMAC, expectedMAC) {
		return nil, ErrInvalidMAC
	}

	// start decryption
	block, err := aes.NewCipher(keyE)
	if err != nil {
		return nil, err
	}
	mode := cipher.NewCBCDecrypter(block, iv)
	// same length as ciphertext
	plaintext := make([]byte, len(in)-offset-sha256.Size)
	mode.CryptBlocks(plaintext, in[offset:len(in)-sha256.Size])

	return removePKCSPadding(plaintext)
}

// Implement PKCS#7 padding with block size of 16 (AES block size).

// addPKCSPadding adds padding to a block of data
func addPKCSPadding(src []byte) []byte {
	padding := aes.BlockSize - len(src)%aes.BlockSize
	padtext := bytes.Repeat([]byte{byte(padding)}, padding)
	return append(src, padtext...)
}

// removePKCSPadding removes padding from data that was added with addPKCSPadding
func removePKCSPadding(src []byte) ([]byte, error) {
	length := len(src)
	padLength := int(src[length-1])
	if padLength > aes.BlockSize || length < aes.BlockSize {
		return nil, errInvalidPadding
	}

	return src[:length-padLength], nil
}
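To make the padding rule concrete, here is a tiny standalone program (independent of the package above) that reproduces addPKCSPadding's arithmetic on a short input:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// PKCS#7 with a 16-byte block: a 5-byte message gets 11 padding bytes,
	// each holding the value 11 (0x0b). A message that is already a
	// multiple of 16 bytes gets a full extra block of 0x10 bytes, so the
	// padding is always unambiguously removable.
	pad := func(src []byte) []byte {
		n := 16 - len(src)%16
		return append(src, bytes.Repeat([]byte{byte(n)}, n)...)
	}
	fmt.Printf("%x\n", pad([]byte("hello")))
	// Output: 68656c6c6f0b0b0b0b0b0b0b0b0b0b0b
}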
@@ -1,174 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcec

import (
	"bytes"
	"encoding/hex"
	"testing"
)

func TestGenerateSharedSecret(t *testing.T) {
	privKey1, err := NewPrivateKey(S256())
	if err != nil {
		t.Errorf("private key generation error: %s", err)
		return
	}
	privKey2, err := NewPrivateKey(S256())
	if err != nil {
		t.Errorf("private key generation error: %s", err)
		return
	}

	secret1 := GenerateSharedSecret(privKey1, privKey2.PubKey())
	secret2 := GenerateSharedSecret(privKey2, privKey1.PubKey())

	if !bytes.Equal(secret1, secret2) {
		t.Errorf("ECDH failed, secrets mismatch - first: %x, second: %x",
			secret1, secret2)
	}
}

// Test 1: Encryption and decryption
func TestCipheringBasic(t *testing.T) {
	privkey, err := NewPrivateKey(S256())
	if err != nil {
		t.Fatal("failed to generate private key")
	}

	in := []byte("Hey there dude. How are you doing? This is a test.")

	out, err := Encrypt(privkey.PubKey(), in)
	if err != nil {
		t.Fatal("failed to encrypt:", err)
	}

	dec, err := Decrypt(privkey, out)
	if err != nil {
		t.Fatal("failed to decrypt:", err)
	}

	if !bytes.Equal(in, dec) {
		t.Error("decrypted data doesn't match original")
	}
}

// Test 2: Byte compatibility with Pyelliptic
func TestCiphering(t *testing.T) {
	pb, _ := hex.DecodeString("fe38240982f313ae5afb3e904fb8215fb11af1200592b" +
		"fca26c96c4738e4bf8f")
	privkey, _ := PrivKeyFromBytes(S256(), pb)

	in := []byte("This is just a test.")
	out, _ := hex.DecodeString("b0d66e5adaa5ed4e2f0ca68e17b8f2fc02ca002009e3" +
		"3487e7fa4ab505cf34d98f131be7bd258391588ca7804acb30251e71a04e0020ecf" +
		"df0f84608f8add82d7353af780fbb28868c713b7813eb4d4e61f7b75d7534dd9856" +
		"9b0ba77cf14348fcff80fee10e11981f1b4be372d93923e9178972f69937ec850ed" +
		"6c3f11ff572ddd5b2bedf9f9c0b327c54da02a28fcdce1f8369ffec")

	dec, err := Decrypt(privkey, out)
	if err != nil {
		t.Fatal("failed to decrypt:", err)
	}

	if !bytes.Equal(in, dec) {
		t.Error("decrypted data doesn't match original")
	}
}

func TestCipheringErrors(t *testing.T) {
	privkey, err := NewPrivateKey(S256())
	if err != nil {
		t.Fatal("failed to generate private key")
	}

	tests1 := []struct {
		ciphertext []byte // input ciphertext
	}{
		{bytes.Repeat([]byte{0x00}, 133)},                   // errInputTooShort
		{bytes.Repeat([]byte{0x00}, 134)},                   // errUnsupportedCurve
		{bytes.Repeat([]byte{0x02, 0xCA}, 134)},             // errInvalidXLength
		{bytes.Repeat([]byte{0x02, 0xCA, 0x00, 0x20}, 134)}, // errInvalidYLength
		{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // IV
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x02, 0xCA, 0x00, 0x20, // curve and X length
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // X
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x20, // Y length
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Y
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ciphertext
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // MAC
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		}}, // invalid pubkey
		{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // IV
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x02, 0xCA, 0x00, 0x20, // curve and X length
			0x11, 0x5C, 0x42, 0xE7, 0x57, 0xB2, 0xEF, 0xB7, // X
			0x67, 0x1C, 0x57, 0x85, 0x30, 0xEC, 0x19, 0x1A,
			0x13, 0x59, 0x38, 0x1E, 0x6A, 0x71, 0x12, 0x7A,
			0x9D, 0x37, 0xC4, 0x86, 0xFD, 0x30, 0xDA, 0xE5,
			0x00, 0x20, // Y length
			0x7E, 0x76, 0xDC, 0x58, 0xF6, 0x93, 0xBD, 0x7E, // Y
			0x70, 0x10, 0x35, 0x8C, 0xE6, 0xB1, 0x65, 0xE4,
			0x83, 0xA2, 0x92, 0x10, 0x10, 0xDB, 0x67, 0xAC,
			0x11, 0xB1, 0xB5, 0x1B, 0x65, 0x19, 0x53, 0xD2,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ciphertext
			// padding not aligned to 16 bytes
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // MAC
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		}}, // errInvalidPadding
		{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // IV
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x02, 0xCA, 0x00, 0x20, // curve and X length
			0x11, 0x5C, 0x42, 0xE7, 0x57, 0xB2, 0xEF, 0xB7, // X
			0x67, 0x1C, 0x57, 0x85, 0x30, 0xEC, 0x19, 0x1A,
			0x13, 0x59, 0x38, 0x1E, 0x6A, 0x71, 0x12, 0x7A,
			0x9D, 0x37, 0xC4, 0x86, 0xFD, 0x30, 0xDA, 0xE5,
			0x00, 0x20, // Y length
			0x7E, 0x76, 0xDC, 0x58, 0xF6, 0x93, 0xBD, 0x7E, // Y
			0x70, 0x10, 0x35, 0x8C, 0xE6, 0xB1, 0x65, 0xE4,
			0x83, 0xA2, 0x92, 0x10, 0x10, 0xDB, 0x67, 0xAC,
			0x11, 0xB1, 0xB5, 0x1B, 0x65, 0x19, 0x53, 0xD2,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ciphertext
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // MAC
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		}}, // ErrInvalidMAC
	}

	for i, test := range tests1 {
		_, err = Decrypt(privkey, test.ciphertext)
		if err == nil {
			t.Errorf("Decrypt #%d did not get error", i)
		}
	}

	// test error from removePKCSPadding
	tests2 := []struct {
		in []byte // input data
	}{
		{bytes.Repeat([]byte{0x11}, 17)},
		{bytes.Repeat([]byte{0x07}, 15)},
	}
	for i, test := range tests2 {
		_, err = removePKCSPadding(test.in)
		if err == nil {
			t.Errorf("removePKCSPadding #%d did not get error", i)
		}
	}
}
21 btcec/doc.go
@@ -1,21 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package btcec implements support for the elliptic curves needed for bitcoin.

Bitcoin uses elliptic curve cryptography over Koblitz curves (specifically
secp256k1) for its cryptographic functions. See
http://www.secg.org/collateral/sec2_final.pdf for details on the standard.

This package provides the data structures and functions implementing the
crypto/elliptic Curve interface in order to permit using these curves with the
standard crypto/ecdsa package provided with Go. Helper functionality is
provided to parse signatures and public keys from standard formats. It was
designed for use with btcd, but should be general enough for other uses of
elliptic curve crypto. It was originally based on some initial work by
ThePiachu, but has significantly diverged since then.
*/
package btcec
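Because KoblitzCurve satisfies the crypto/elliptic Curve interface described above, the curve can be handed straight to the standard library. A minimal sketch (assuming only the import path used elsewhere in this repository):

package main

import (
	"crypto/ecdsa"
	"crypto/rand"
	"fmt"

	"github.com/daglabs/btcd/btcec"
)

func main() {
	// S256() is a valid elliptic.Curve, so the standard library's ECDSA
	// key generation accepts it directly.
	key, err := ecdsa.GenerateKey(btcec.S256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	fmt.Println(key.PublicKey.X)
}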
@@ -1,168 +0,0 @@
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcec_test

import (
	"encoding/hex"
	"fmt"

	"github.com/daglabs/btcd/btcec"
	"github.com/daglabs/btcd/dagconfig/daghash"
)

// This example demonstrates signing a message with a secp256k1 private key that
// is first parsed from raw bytes and serializing the generated signature.
func Example_signMessage() {
	// Decode a hex-encoded private key.
	pkBytes, err := hex.DecodeString("22a47fa09a223f2aa079edf85a7c2d4f87" +
		"20ee63e502ee2869afab7de234b80c")
	if err != nil {
		fmt.Println(err)
		return
	}
	privKey, pubKey := btcec.PrivKeyFromBytes(btcec.S256(), pkBytes)

	// Sign a message using the private key.
	message := "test message"
	messageHash := daghash.DoubleHashB([]byte(message))
	signature, err := privKey.Sign(messageHash)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Serialize and display the signature.
	fmt.Printf("Serialized Signature: %x\n", signature.Serialize())

	// Verify the signature for the message using the public key.
	verified := signature.Verify(messageHash, pubKey)
	fmt.Printf("Signature Verified? %v\n", verified)

	// Output:
	// Serialized Signature: 304402201008e236fa8cd0f25df4482dddbb622e8a8b26ef0ba731719458de3ccd93805b022032f8ebe514ba5f672466eba334639282616bb3c2f0ab09998037513d1f9e3d6d
	// Signature Verified? true
}

// This example demonstrates verifying a secp256k1 signature against a public
// key that is first parsed from raw bytes. The signature is also parsed from
// raw bytes.
func Example_verifySignature() {
	// Decode hex-encoded serialized public key.
	pubKeyBytes, err := hex.DecodeString("02a673638cb9587cb68ea08dbef685c" +
		"6f2d2a751a8b3c6f2a7e9a4999e6e4bfaf5")
	if err != nil {
		fmt.Println(err)
		return
	}
	pubKey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256())
	if err != nil {
		fmt.Println(err)
		return
	}

	// Decode hex-encoded serialized signature.
	sigBytes, err := hex.DecodeString("30450220090ebfb3690a0ff115bb1b38b" +
		"8b323a667b7653454f1bccb06d4bbdca42c2079022100ec95778b51e707" +
		"1cb1205f8bde9af6592fc978b0452dafe599481c46d6b2e479")

	if err != nil {
		fmt.Println(err)
		return
	}
	signature, err := btcec.ParseSignature(sigBytes, btcec.S256())
	if err != nil {
		fmt.Println(err)
		return
	}

	// Verify the signature for the message using the public key.
	message := "test message"
	messageHash := daghash.DoubleHashB([]byte(message))
	verified := signature.Verify(messageHash, pubKey)
	fmt.Println("Signature Verified?", verified)

	// Output:
	// Signature Verified? true
}

// This example demonstrates encrypting a message for a public key that is first
// parsed from raw bytes, then decrypting it using the corresponding private key.
func Example_encryptMessage() {
	// Decode the hex-encoded pubkey of the recipient.
	pubKeyBytes, err := hex.DecodeString("04115c42e757b2efb7671c578530ec191a1" +
		"359381e6a71127a9d37c486fd30dae57e76dc58f693bd7e7010358ce6b165e483a29" +
		"21010db67ac11b1b51b651953d2") // uncompressed pubkey
	if err != nil {
		fmt.Println(err)
		return
	}
	pubKey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256())
	if err != nil {
		fmt.Println(err)
		return
	}

	// Encrypt a message decryptable by the private key corresponding to pubKey
	message := "test message"
	ciphertext, err := btcec.Encrypt(pubKey, []byte(message))
	if err != nil {
		fmt.Println(err)
		return
	}

	// Decode the hex-encoded private key.
	pkBytes, err := hex.DecodeString("a11b0a4e1a132305652ee7a8eb7848f6ad" +
		"5ea381e3ce20a2c086a2e388230811")
	if err != nil {
		fmt.Println(err)
		return
	}
	// note that we already have corresponding pubKey
	privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), pkBytes)

	// Try decrypting and verify if it's the same message.
	plaintext, err := btcec.Decrypt(privKey, ciphertext)
	if err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(string(plaintext))

	// Output:
	// test message
}

// This example demonstrates decrypting a message using a private key that is
// first parsed from raw bytes.
func Example_decryptMessage() {
	// Decode the hex-encoded private key.
	pkBytes, err := hex.DecodeString("a11b0a4e1a132305652ee7a8eb7848f6ad" +
		"5ea381e3ce20a2c086a2e388230811")
	if err != nil {
		fmt.Println(err)
		return
	}

	privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), pkBytes)

	ciphertext, err := hex.DecodeString("35f644fbfb208bc71e57684c3c8b437402ca" +
		"002047a2f1b38aa1a8f1d5121778378414f708fe13ebf7b4a7bb74407288c1958969" +
		"00207cf4ac6057406e40f79961c973309a892732ae7a74ee96cd89823913b8b8d650" +
		"a44166dc61ea1c419d47077b748a9c06b8d57af72deb2819d98a9d503efc59fc8307" +
		"d14174f8b83354fac3ff56075162")

	// Try decrypting the message.
	plaintext, err := btcec.Decrypt(privKey, ciphertext)
	if err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(string(plaintext))

	// Output:
	// test message
}
1223 btcec/field.go (file diff suppressed because it is too large)
@@ -1,822 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2013-2016 Dave Collins
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcec

import (
	"reflect"
	"testing"
)

// TestSetInt ensures that setting a field value to various native integers
// works as expected.
func TestSetInt(t *testing.T) {
	tests := []struct {
		in  uint
		raw [10]uint32
	}{
		{5, [10]uint32{5, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
		// 2^26
		{67108864, [10]uint32{67108864, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
		// 2^26 + 1
		{67108865, [10]uint32{67108865, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
		// 2^32 - 1
		{4294967295, [10]uint32{4294967295, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		f := new(fieldVal).SetInt(test.in)
		if !reflect.DeepEqual(f.n, test.raw) {
			t.Errorf("fieldVal.Set #%d wrong result\ngot: %v\n"+
				"want: %v", i, f.n, test.raw)
			continue
		}
	}
}
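The [10]uint32 tables used throughout these tests encode a field element as ten little-endian words of 26 bits each, which is why 2^26 shows up as word 0 overflowing into bit 0 of word 1. A minimal standalone sketch of how such a word array maps back to an integer (the helper name is illustrative; it assumes a normalized input):

package main

import (
	"fmt"
	"math/big"
)

// fieldToBig reassembles ten 26-bit little-endian words into a big.Int.
func fieldToBig(words [10]uint32) *big.Int {
	v := new(big.Int)
	for i := 9; i >= 0; i-- {
		v.Lsh(v, 26) // make room for the next, less significant word
		v.Or(v, big.NewInt(int64(words[i])))
	}
	return v
}

func main() {
	// 2^26 in normalized form: word 0 is zero, word 1 holds a single bit.
	fmt.Println(fieldToBig([10]uint32{0, 1})) // 67108864
}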

// TestZero ensures that zeroing a field value works as expected.
func TestZero(t *testing.T) {
	f := new(fieldVal).SetInt(2)
	f.Zero()
	for idx, rawInt := range f.n {
		if rawInt != 0 {
			t.Errorf("internal field integer at index #%d is not "+
				"zero - got %d", idx, rawInt)
		}
	}
}

// TestIsZero ensures that checking if a field IsZero works as expected.
func TestIsZero(t *testing.T) {
	f := new(fieldVal)
	if !f.IsZero() {
		t.Errorf("new field value is not zero - got %v (rawints %x)", f,
			f.n)
	}

	f.SetInt(1)
	if f.IsZero() {
		t.Errorf("field claims it's zero when it's not - got %v "+
			"(raw rawints %x)", f, f.n)
	}

	f.Zero()
	if !f.IsZero() {
		t.Errorf("field claims it's not zero when it is - got %v "+
			"(raw rawints %x)", f, f.n)
	}
}

// TestStringer ensures the stringer returns the appropriate hex string.
func TestStringer(t *testing.T) {
	tests := []struct {
		in       string
		expected string
	}{
		{"0", "0000000000000000000000000000000000000000000000000000000000000000"},
		{"1", "0000000000000000000000000000000000000000000000000000000000000001"},
		{"a", "000000000000000000000000000000000000000000000000000000000000000a"},
		{"b", "000000000000000000000000000000000000000000000000000000000000000b"},
		{"c", "000000000000000000000000000000000000000000000000000000000000000c"},
		{"d", "000000000000000000000000000000000000000000000000000000000000000d"},
		{"e", "000000000000000000000000000000000000000000000000000000000000000e"},
		{"f", "000000000000000000000000000000000000000000000000000000000000000f"},
		{"f0", "00000000000000000000000000000000000000000000000000000000000000f0"},
		// 2^26-1
		{
			"3ffffff",
			"0000000000000000000000000000000000000000000000000000000003ffffff",
		},
		// 2^32-1
		{
			"ffffffff",
			"00000000000000000000000000000000000000000000000000000000ffffffff",
		},
		// 2^64-1
		{
			"ffffffffffffffff",
			"000000000000000000000000000000000000000000000000ffffffffffffffff",
		},
		// 2^96-1
		{
			"ffffffffffffffffffffffff",
			"0000000000000000000000000000000000000000ffffffffffffffffffffffff",
		},
		// 2^128-1
		{
			"ffffffffffffffffffffffffffffffff",
			"00000000000000000000000000000000ffffffffffffffffffffffffffffffff",
		},
		// 2^160-1
		{
			"ffffffffffffffffffffffffffffffffffffffff",
			"000000000000000000000000ffffffffffffffffffffffffffffffffffffffff",
		},
		// 2^192-1
		{
			"ffffffffffffffffffffffffffffffffffffffffffffffff",
			"0000000000000000ffffffffffffffffffffffffffffffffffffffffffffffff",
		},
		// 2^224-1
		{
			"ffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
			"00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
		},
		// 2^256-4294968273 (the btcec prime, so should result in 0)
		{
			"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
			"0000000000000000000000000000000000000000000000000000000000000000",
		},
		// 2^256-4294968272 (the secp256k1 prime+1, so should result in 1)
		{
			"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30",
			"0000000000000000000000000000000000000000000000000000000000000001",
		},

		// Invalid hex
		{"g", "0000000000000000000000000000000000000000000000000000000000000000"},
		{"1h", "0000000000000000000000000000000000000000000000000000000000000000"},
		{"i1", "0000000000000000000000000000000000000000000000000000000000000000"},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		f := new(fieldVal).SetHex(test.in)
		result := f.String()
		if result != test.expected {
			t.Errorf("fieldVal.String #%d wrong result\ngot: %v\n"+
				"want: %v", i, result, test.expected)
			continue
		}
	}
}

// TestNormalize ensures that normalizing the internal field words works as
// expected.
func TestNormalize(t *testing.T) {
	tests := []struct {
		raw        [10]uint32 // Intentionally denormalized value
		normalized [10]uint32 // Normalized form of the raw value
	}{
		{
			[10]uint32{0x00000005, 0, 0, 0, 0, 0, 0, 0, 0, 0},
			[10]uint32{0x00000005, 0, 0, 0, 0, 0, 0, 0, 0, 0},
		},
		// 2^26
		{
			[10]uint32{0x04000000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0},
			[10]uint32{0x00000000, 0x1, 0, 0, 0, 0, 0, 0, 0, 0},
		},
		// 2^26 + 1
		{
			[10]uint32{0x04000001, 0x0, 0, 0, 0, 0, 0, 0, 0, 0},
			[10]uint32{0x00000001, 0x1, 0, 0, 0, 0, 0, 0, 0, 0},
		},
		// 2^32 - 1
		{
			[10]uint32{0xffffffff, 0x00, 0, 0, 0, 0, 0, 0, 0, 0},
			[10]uint32{0x03ffffff, 0x3f, 0, 0, 0, 0, 0, 0, 0, 0},
		},
		// 2^32
		{
			[10]uint32{0x04000000, 0x3f, 0, 0, 0, 0, 0, 0, 0, 0},
			[10]uint32{0x00000000, 0x40, 0, 0, 0, 0, 0, 0, 0, 0},
		},
		// 2^32 + 1
		{
			[10]uint32{0x04000001, 0x3f, 0, 0, 0, 0, 0, 0, 0, 0},
			[10]uint32{0x00000001, 0x40, 0, 0, 0, 0, 0, 0, 0, 0},
		},
		// 2^64 - 1
		{
			[10]uint32{0xffffffff, 0xffffffc0, 0xfc0, 0, 0, 0, 0, 0, 0, 0},
			[10]uint32{0x03ffffff, 0x03ffffff, 0xfff, 0, 0, 0, 0, 0, 0, 0},
		},
		// 2^64
		{
			[10]uint32{0x04000000, 0x03ffffff, 0x0fff, 0, 0, 0, 0, 0, 0, 0},
			[10]uint32{0x00000000, 0x00000000, 0x1000, 0, 0, 0, 0, 0, 0, 0},
		},
		// 2^64 + 1
		{
			[10]uint32{0x04000001, 0x03ffffff, 0x0fff, 0, 0, 0, 0, 0, 0, 0},
			[10]uint32{0x00000001, 0x00000000, 0x1000, 0, 0, 0, 0, 0, 0, 0},
		},
		// 2^96 - 1
		{
			[10]uint32{0xffffffff, 0xffffffc0, 0xffffffc0, 0x3ffc0, 0, 0, 0, 0, 0, 0},
			[10]uint32{0x03ffffff, 0x03ffffff, 0x03ffffff, 0x3ffff, 0, 0, 0, 0, 0, 0},
		},
		// 2^96
		{
			[10]uint32{0x04000000, 0x03ffffff, 0x03ffffff, 0x3ffff, 0, 0, 0, 0, 0, 0},
			[10]uint32{0x00000000, 0x00000000, 0x00000000, 0x40000, 0, 0, 0, 0, 0, 0},
		},
		// 2^128 - 1
		{
			[10]uint32{0xffffffff, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffc0, 0, 0, 0, 0, 0},
			[10]uint32{0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0xffffff, 0, 0, 0, 0, 0},
		},
		// 2^128
		{
			[10]uint32{0x04000000, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x0ffffff, 0, 0, 0, 0, 0},
			[10]uint32{0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x1000000, 0, 0, 0, 0, 0},
		},
		// 2^256 - 4294968273 (secp256k1 prime)
		{
			[10]uint32{0xfffffc2f, 0xffffff80, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0x3fffc0},
			[10]uint32{0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000000},
		},
		// Prime larger than P where both first and second words are larger
		// than P's first and second words
		{
			[10]uint32{0xfffffc30, 0xffffff86, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0x3fffc0},
			[10]uint32{0x00000001, 0x00000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000000},
		},
		// Prime larger than P where only the second word is larger
		// than P's second words.
		{
			[10]uint32{0xfffffc2a, 0xffffff87, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0x3fffc0},
			[10]uint32{0x03fffffb, 0x00000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000000},
		},
		// 2^256 - 1
		{
			[10]uint32{0xffffffff, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0x3fffc0},
			[10]uint32{0x000003d0, 0x00000040, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000000},
		},
		// Prime with field representation such that the initial
		// reduction does not result in a carry to bit 256.
		//
		// 2^256 - 4294968273 (secp256k1 prime)
		{
			[10]uint32{0x03fffc2f, 0x03ffffbf, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x003fffff},
			[10]uint32{0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
		},
		// Prime larger than P that reduces to a value which is still
		// larger than P when it has a magnitude of 1 due to its first
		// word and does not result in a carry to bit 256.
		//
		// 2^256 - 4294968272 (secp256k1 prime + 1)
		{
			[10]uint32{0x03fffc30, 0x03ffffbf, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x003fffff},
			[10]uint32{0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
		},
		// Prime larger than P that reduces to a value which is still
		// larger than P when it has a magnitude of 1 due to its second
		// word and does not result in a carry to bit 256.
		//
		// 2^256 - 4227859409 (secp256k1 prime + 0x4000000)
		{
			[10]uint32{0x03fffc2f, 0x03ffffc0, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x003fffff},
			[10]uint32{0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
		},
		// Prime larger than P that reduces to a value which is still
		// larger than P when it has a magnitude of 1 due to a carry to
		// bit 256, but would not be without the carry. These values
		// come from the fact that P is 2^256 - 4294968273 and 977 is
		// the low order word in the internal field representation.
		//
		// 2^256 * 5 - ((4294968273 - (977+1)) * 4)
		{
			[10]uint32{0x03ffffff, 0x03fffeff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x0013fffff},
			[10]uint32{0x00001314, 0x00000040, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000000000},
		},
		// Prime larger than P that reduces to a value which is still
		// larger than P when it has a magnitude of 1 due to both a
		// carry to bit 256 and the first word.
		{
			[10]uint32{0x03fffc30, 0x03ffffbf, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x07ffffff, 0x003fffff},
			[10]uint32{0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001},
		},
		// Prime larger than P that reduces to a value which is still
		// larger than P when it has a magnitude of 1 due to both a
		// carry to bit 256 and the second word.
		{
			[10]uint32{0x03fffc2f, 0x03ffffc0, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x3ffffff, 0x07ffffff, 0x003fffff},
			[10]uint32{0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000000, 0x00000000, 0x00000001},
		},
		// Prime larger than P that reduces to a value which is still
		// larger than P when it has a magnitude of 1 due to a carry to
		// bit 256 and the first and second words.
		{
			[10]uint32{0x03fffc30, 0x03ffffc0, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x07ffffff, 0x003fffff},
			[10]uint32{0x00000001, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001},
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		f := new(fieldVal)
		f.n = test.raw
		f.Normalize()
		if !reflect.DeepEqual(f.n, test.normalized) {
			t.Errorf("fieldVal.Normalize #%d wrong result\n"+
				"got: %x\nwant: %x", i, f.n, test.normalized)
			continue
		}
	}
}

// TestIsOdd ensures that checking if a field value IsOdd works as expected.
func TestIsOdd(t *testing.T) {
	tests := []struct {
		in       string // hex encoded value
		expected bool   // expected oddness
	}{
		{"0", false},
		{"1", true},
		{"2", false},
		// 2^32 - 1
		{"ffffffff", true},
		// 2^64 - 2
		{"fffffffffffffffe", false},
		// secp256k1 prime
		{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", true},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		f := new(fieldVal).SetHex(test.in)
		result := f.IsOdd()
		if result != test.expected {
			t.Errorf("fieldVal.IsOdd #%d wrong result\n"+
				"got: %v\nwant: %v", i, result, test.expected)
			continue
		}
	}
}

// TestEquals ensures that checking two field values for equality via Equals
// works as expected.
func TestEquals(t *testing.T) {
	tests := []struct {
		in1      string // hex encoded value
		in2      string // hex encoded value
		expected bool   // expected equality
	}{
		{"0", "0", true},
		{"0", "1", false},
		{"1", "0", false},
		// 2^32 - 1 == 2^32 - 1?
		{"ffffffff", "ffffffff", true},
		// 2^64 - 1 == 2^64 - 2?
		{"ffffffffffffffff", "fffffffffffffffe", false},
		// 0 == prime (mod prime)?
		{"0", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", true},
		// 1 == prime+1 (mod prime)?
		{"1", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", true},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		f := new(fieldVal).SetHex(test.in1).Normalize()
		f2 := new(fieldVal).SetHex(test.in2).Normalize()
		result := f.Equals(f2)
		if result != test.expected {
			t.Errorf("fieldVal.Equals #%d wrong result\n"+
				"got: %v\nwant: %v", i, result, test.expected)
			continue
		}
	}
}

// TestNegate ensures that negating field values via Negate works as expected.
func TestNegate(t *testing.T) {
	tests := []struct {
		in       string // hex encoded value
		expected string // expected hex encoded value
	}{
		// secp256k1 prime (aka 0)
		{"0", "0"},
		{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", "0"},
		{"0", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"},
		// secp256k1 prime-1
		{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", "1"},
		{"1", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e"},
		// secp256k1 prime-2
		{"2", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d"},
		{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d", "2"},
		// Random sampling
		{
			"b3d9aac9c5e43910b4385b53c7e78c21d4cd5f8e683c633aed04c233efc2e120",
			"4c2655363a1bc6ef4bc7a4ac381873de2b32a07197c39cc512fb3dcb103d1b0f",
		},
		{
			"f8a85984fee5a12a7c8dd08830d83423c937d77c379e4a958e447a25f407733f",
			"757a67b011a5ed583722f77cf27cbdc36c82883c861b56a71bb85d90bf888f0",
		},
		{
			"45ee6142a7fda884211e93352ed6cb2807800e419533be723a9548823ece8312",
			"ba119ebd5802577bdee16ccad12934d7f87ff1be6acc418dc56ab77cc131791d",
		},
		{
			"53c2a668f07e411a2e473e1c3b6dcb495dec1227af27673761d44afe5b43d22b",
			"ac3d59970f81bee5d1b8c1e3c49234b6a213edd850d898c89e2bb500a4bc2a04",
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		f := new(fieldVal).SetHex(test.in).Normalize()
		expected := new(fieldVal).SetHex(test.expected).Normalize()
		result := f.Negate(1).Normalize()
		if !result.Equals(expected) {
			t.Errorf("fieldVal.Negate #%d wrong result\n"+
				"got: %v\nwant: %v", i, result, expected)
			continue
		}
	}
}
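Negation in this field is just subtraction from the prime p for nonzero values, which makes the random-sampling rows above easy to cross-check with big.Int arithmetic; a standalone sketch using the first sample from the table:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// For nonzero x, the field negation is p - x, where p is the
	// secp256k1 prime.
	p, _ := new(big.Int).SetString(
		"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", 16)
	x, _ := new(big.Int).SetString(
		"b3d9aac9c5e43910b4385b53c7e78c21d4cd5f8e683c633aed04c233efc2e120", 16)
	neg := new(big.Int).Sub(p, x)
	fmt.Printf("%x\n", neg)
	// Output matches the table's expected value:
	// 4c2655363a1bc6ef4bc7a4ac381873de2b32a07197c39cc512fb3dcb103d1b0f
}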
|
||||
// TestAddInt ensures that adding an integer to field values via AddInt works as
|
||||
// expected.
|
||||
func TestAddInt(t *testing.T) {
|
||||
tests := []struct {
|
||||
in1 string // hex encoded value
|
||||
in2 uint // unsigned integer to add to the value above
|
||||
expected string // expected hex encoded value
|
||||
}{
|
||||
{"0", 1, "1"},
|
||||
{"1", 0, "1"},
|
||||
{"1", 1, "2"},
|
||||
// secp256k1 prime-1 + 1
|
||||
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", 1, "0"},
|
||||
// secp256k1 prime + 1
|
||||
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", 1, "1"},
|
||||
// Random samples.
|
||||
{
|
||||
"ff95ad9315aff04ab4af0ce673620c7145dc85d03bab5ba4b09ca2c4dec2d6c1",
|
||||
0x10f,
|
||||
"ff95ad9315aff04ab4af0ce673620c7145dc85d03bab5ba4b09ca2c4dec2d7d0",
|
||||
},
|
||||
{
|
||||
"44bdae6b772e7987941f1ba314e6a5b7804a4c12c00961b57d20f41deea9cecf",
|
||||
0x2cf11d41,
|
||||
"44bdae6b772e7987941f1ba314e6a5b7804a4c12c00961b57d20f41e1b9aec10",
|
||||
},
|
||||
{
|
||||
"88c3ecae67b591935fb1f6a9499c35315ffad766adca665c50b55f7105122c9c",
|
||||
0x4829aa2d,
|
||||
"88c3ecae67b591935fb1f6a9499c35315ffad766adca665c50b55f714d3bd6c9",
|
||||
},
|
||||
{
|
||||
"8523e9edf360ca32a95aae4e57fcde5a542b471d08a974d94ea0ee09a015e2a6",
|
||||
0xa21265a5,
|
||||
"8523e9edf360ca32a95aae4e57fcde5a542b471d08a974d94ea0ee0a4228484b",
|
||||
},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
f := new(fieldVal).SetHex(test.in1).Normalize()
|
||||
expected := new(fieldVal).SetHex(test.expected).Normalize()
|
||||
result := f.AddInt(test.in2).Normalize()
|
||||
if !result.Equals(expected) {
|
||||
t.Errorf("fieldVal.AddInt #%d wrong result\n"+
|
||||
"got: %v\nwant: %v", i, result, expected)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestAdd ensures that adding two field values together via Add works as
|
||||
// expected.
|
||||
func TestAdd(t *testing.T) {
|
||||
tests := []struct {
|
||||
in1 string // first hex encoded value
|
||||
in2 string // second hex encoded value to add
|
||||
expected string // expected hex encoded value
|
||||
}{
|
||||
{"0", "1", "1"},
|
||||
{"1", "0", "1"},
|
||||
{"1", "1", "2"},
|
||||
// secp256k1 prime-1 + 1
|
||||
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", "1", "0"},
|
||||
// secp256k1 prime + 1
|
||||
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", "1", "1"},
|
||||
// Random samples.
|
||||
{
|
||||
"2b2012f975404e5065b4292fb8bed0a5d315eacf24c74d8b27e73bcc5430edcc",
|
||||
"2c3cefa4e4753e8aeec6ac4c12d99da4d78accefda3b7885d4c6bab46c86db92",
|
||||
"575d029e59b58cdb547ad57bcb986e4aaaa0b7beff02c610fcadf680c0b7c95e",
|
||||
},
|
||||
{
|
||||
"8131e8722fe59bb189692b96c9f38de92885730f1dd39ab025daffb94c97f79c",
|
||||
"ff5454b765f0aab5f0977dcc629becc84cabeb9def48e79c6aadb2622c490fa9",
|
||||
"80863d2995d646677a00a9632c8f7ab175315ead0d1c824c9088b21c78e10b16",
|
||||
},
|
||||
{
|
||||
"c7c95e93d0892b2b2cdd77e80eb646ea61be7a30ac7e097e9f843af73fad5c22",
|
||||
"3afe6f91a74dfc1c7f15c34907ee981656c37236d946767dd53ccad9190e437c",
|
||||
"02c7ce2577d72747abf33b3116a4df00b881ec6785c47ffc74c105d158bba36f",
|
||||
},
|
||||
{
|
||||
"fd1c26f6a23381e5d785ba889494ec059369b888ad8431cd67d8c934b580dbe1",
|
||||
"a475aa5a31dcca90ef5b53c097d9133d6b7117474b41e7877bb199590fc0489c",
|
||||
"a191d150d4104c76c6e10e492c6dff42fedacfcff8c61954e38a628ec541284e",
|
||||
},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
f := new(fieldVal).SetHex(test.in1).Normalize()
|
||||
f2 := new(fieldVal).SetHex(test.in2).Normalize()
|
||||
expected := new(fieldVal).SetHex(test.expected).Normalize()
|
||||
result := f.Add(f2).Normalize()
|
||||
if !result.Equals(expected) {
|
||||
t.Errorf("fieldVal.Add #%d wrong result\n"+
|
||||
"got: %v\nwant: %v", i, result, expected)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestAdd2 ensures that adding two field values together via Add2 works as
|
||||
// expected.
|
||||
func TestAdd2(t *testing.T) {
|
||||
tests := []struct {
|
||||
in1 string // first hex encoded value
|
||||
in2 string // second hex encoded value to add
|
||||
expected string // expected hex encoded value
|
||||
}{
|
||||
{"0", "1", "1"},
|
||||
{"1", "0", "1"},
|
||||
{"1", "1", "2"},
|
||||
// secp256k1 prime-1 + 1
|
||||
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", "1", "0"},
|
||||
// secp256k1 prime + 1
|
||||
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", "1", "1"},
|
||||
// close but over the secp256k1 prime
|
||||
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000", "f1ffff000", "1ffff3d1"},
|
||||
// Random samples.
|
||||
{
|
||||
"ad82b8d1cc136e23e9fd77fe2c7db1fe5a2ecbfcbde59ab3529758334f862d28",
|
||||
"4d6a4e95d6d61f4f46b528bebe152d408fd741157a28f415639347a84f6f574b",
|
||||
"faed0767a2e98d7330b2a0bcea92df3eea060d12380e8ec8b62a9fdb9ef58473",
|
||||
},
|
||||
{
|
||||
"f3f43a2540054a86e1df98547ec1c0e157b193e5350fb4a3c3ea214b228ac5e7",
|
||||
"25706572592690ea3ddc951a1b48b504a4c83dc253756e1b96d56fdfb3199522",
|
||||
"19649f97992bdb711fbc2d6e9a0a75e5fc79d1a7888522bf5abf912bd5a45eda",
|
||||
},
|
||||
{
|
||||
"6915bb94eef13ff1bb9b2633d997e13b9b1157c713363cc0e891416d6734f5b8",
|
||||
"11f90d6ac6fe1c4e8900b1c85fb575c251ec31b9bc34b35ada0aea1c21eded22",
|
||||
"7b0ec8ffb5ef5c40449bd7fc394d56fdecfd8980cf6af01bc29c2b898922e2da",
|
||||
},
|
||||
{
|
||||
"48b0c9eae622eed9335b747968544eb3e75cb2dc8128388f948aa30f88cabde4",
|
||||
"0989882b52f85f9d524a3a3061a0e01f46d597839d2ba637320f4b9510c8d2d5",
|
||||
"523a5216391b4e7685a5aea9c9f52ed32e324a601e53dec6c699eea4999390b9",
|
||||
},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
f := new(fieldVal).SetHex(test.in1).Normalize()
|
||||
f2 := new(fieldVal).SetHex(test.in2).Normalize()
|
||||
expected := new(fieldVal).SetHex(test.expected).Normalize()
|
||||
result := f.Add2(f, f2).Normalize()
|
||||
if !result.Equals(expected) {
|
||||
t.Errorf("fieldVal.Add2 #%d wrong result\n"+
|
||||
"got: %v\nwant: %v", i, result, expected)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMulInt ensures that adding an integer to field values via MulInt works as
|
||||
// expected.
|
||||
func TestMulInt(t *testing.T) {
|
||||
tests := []struct {
|
||||
in1 string // hex encoded value
|
||||
in2 uint // unsigned integer to multiply with value above
|
||||
expected string // expected hex encoded value
|
||||
}{
|
||||
{"0", 0, "0"},
|
||||
{"1", 0, "0"},
|
||||
{"0", 1, "0"},
|
||||
{"1", 1, "1"},
|
||||
// secp256k1 prime-1 * 2
|
||||
{
|
||||
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
|
||||
2,
|
||||
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d",
|
||||
},
|
||||
// secp256k1 prime * 3
|
||||
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", 3, "0"},
|
||||
// secp256k1 prime-1 * 8
|
||||
{
|
||||
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
|
||||
8,
|
||||
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc27",
|
||||
},
|
||||
// Random samples for first value. The second value is limited
|
||||
// to 8 since that is the maximum int used in the elliptic curve
|
||||
// calculations.
|
||||
{
|
||||
"b75674dc9180d306c692163ac5e089f7cef166af99645c0c23568ab6d967288a",
|
||||
6,
|
||||
"4c06bd2b6904f228a76c8560a3433bced9a8681d985a2848d407404d186b0280",
|
||||
},
|
||||
{
|
||||
"54873298ac2b5ba8591c125ae54931f5ea72040aee07b208d6135476fb5b9c0e",
|
||||
3,
|
||||
"fd9597ca048212f90b543710afdb95e1bf560c20ca17161a8239fd64f212d42a",
|
||||
},
|
||||
{
|
||||
"7c30fbd363a74c17e1198f56b090b59bbb6c8755a74927a6cba7a54843506401",
|
||||
5,
|
||||
"6cf4eb20f2447c77657fccb172d38c0aa91ea4ac446dc641fa463a6b5091fba7",
|
||||
},
|
||||
{
|
||||
"fb4529be3e027a3d1587d8a500b72f2d312e3577340ef5175f96d113be4c2ceb",
|
||||
8,
|
||||
"da294df1f013d1e8ac3ec52805b979698971abb9a077a8bafcb688a4f261820f",
|
||||
},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
f := new(fieldVal).SetHex(test.in1).Normalize()
|
||||
expected := new(fieldVal).SetHex(test.expected).Normalize()
|
||||
result := f.MulInt(test.in2).Normalize()
|
||||
if !result.Equals(expected) {
|
||||
t.Errorf("fieldVal.MulInt #%d wrong result\n"+
|
||||
"got: %v\nwant: %v", i, result, expected)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|

// TestMul ensures that multiplying two field values via Mul works as expected.
func TestMul(t *testing.T) {
    tests := []struct {
        in1      string // first hex encoded value
        in2      string // second hex encoded value to multiply with
        expected string // expected hex encoded value
    }{
        {"0", "0", "0"},
        {"1", "0", "0"},
        {"0", "1", "0"},
        {"1", "1", "1"},
        // slightly over prime
        {
            "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff1ffff",
            "1000",
            "1ffff3d1",
        },
        // secp256k1 prime-1 * 2
        {
            "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
            "2",
            "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d",
        },
        // secp256k1 prime * 3
        {"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", "3", "0"},
        // secp256k1 prime-1 * 8
        {
            "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
            "8",
            "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc27",
        },
        // Random samples.
        {
            "cfb81753d5ef499a98ecc04c62cb7768c2e4f1740032946db1c12e405248137e",
            "58f355ad27b4d75fb7db0442452e732c436c1f7c5a7c4e214fa9cc031426a7d3",
            "1018cd2d7c2535235b71e18db9cd98027386328d2fa6a14b36ec663c4c87282b",
        },
        {
            "26e9d61d1cdf3920e9928e85fa3df3e7556ef9ab1d14ec56d8b4fc8ed37235bf",
            "2dfc4bbe537afee979c644f8c97b31e58be5296d6dbc460091eae630c98511cf",
            "da85f48da2dc371e223a1ae63bd30b7e7ee45ae9b189ac43ff357e9ef8cf107a",
        },
        {
            "5db64ed5afb71646c8b231585d5b2bf7e628590154e0854c4c29920b999ff351",
            "279cfae5eea5d09ade8e6a7409182f9de40981bc31c84c3d3dfe1d933f152e9a",
            "2c78fbae91792dd0b157abe3054920049b1879a7cc9d98cfda927d83be411b37",
        },
        {
            "b66dfc1f96820b07d2bdbd559c19319a3a73c97ceb7b3d662f4fe75ecb6819e6",
            "bf774aba43e3e49eb63a6e18037d1118152568f1a3ac4ec8b89aeb6ff8008ae1",
            "c4f016558ca8e950c21c3f7fc15f640293a979c7b01754ee7f8b3340d4902ebb",
        },
    }

    t.Logf("Running %d tests", len(tests))
    for i, test := range tests {
        f := new(fieldVal).SetHex(test.in1).Normalize()
        f2 := new(fieldVal).SetHex(test.in2).Normalize()
        expected := new(fieldVal).SetHex(test.expected).Normalize()
        result := f.Mul(f2).Normalize()
        if !result.Equals(expected) {
            t.Errorf("fieldVal.Mul #%d wrong result\n"+
                "got: %v\nwant: %v", i, result, expected)
            continue
        }
    }
}
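
A minimal cross-check sketch (not part of the original tests): any Mul vector above can be verified independently with math/big, because fieldVal multiplication is ordinary multiplication reduced modulo the secp256k1 prime. checkMulVector is a hypothetical helper and assumes a math/big import.

func checkMulVector(in1, in2, expected string) bool {
    // P = 2^256 - 2^32 - 977, the secp256k1 field prime.
    p, _ := new(big.Int).SetString(
        "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", 16)
    a, _ := new(big.Int).SetString(in1, 16)
    b, _ := new(big.Int).SetString(in2, 16)
    want, _ := new(big.Int).SetString(expected, 16)

    // (a * b) mod P should match the table's expected value.
    got := new(big.Int).Mul(a, b)
    got.Mod(got, p)
    return got.Cmp(want) == 0
}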

// TestSquare ensures that squaring field values via Square works as expected.
func TestSquare(t *testing.T) {
    tests := []struct {
        in       string // hex encoded value
        expected string // expected hex encoded value
    }{
        // secp256k1 prime (aka 0)
        {"0", "0"},
        {"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", "0"},
        {"0", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"},
        // secp256k1 prime-1
        {"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", "1"},
        // secp256k1 prime-2
        {"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d", "4"},
        // Random sampling
        {
            "b0ba920360ea8436a216128047aab9766d8faf468895eb5090fc8241ec758896",
            "133896b0b69fda8ce9f648b9a3af38f345290c9eea3cbd35bafcadf7c34653d3",
        },
        {
            "c55d0d730b1d0285a1599995938b042a756e6e8857d390165ffab480af61cbd5",
            "cd81758b3f5877cbe7e5b0a10cebfa73bcbf0957ca6453e63ee8954ab7780bee",
        },
        {
            "e89c1f9a70d93651a1ba4bca5b78658f00de65a66014a25544d3365b0ab82324",
            "39ffc7a43e5dbef78fd5d0354fb82c6d34f5a08735e34df29da14665b43aa1f",
        },
        {
            "7dc26186079d22bcbe1614aa20ae627e62d72f9be7ad1e99cac0feb438956f05",
            "bf86bcfc4edb3d81f916853adfda80c07c57745b008b60f560b1912f95bce8ae",
        },
    }

    t.Logf("Running %d tests", len(tests))
    for i, test := range tests {
        f := new(fieldVal).SetHex(test.in).Normalize()
        expected := new(fieldVal).SetHex(test.expected).Normalize()
        result := f.Square().Normalize()
        if !result.Equals(expected) {
            t.Errorf("fieldVal.Square #%d wrong result\n"+
                "got: %v\nwant: %v", i, result, expected)
            continue
        }
    }
}

// TestInverse ensures that finding the multiplicative inverse via Inverse works
// as expected.
func TestInverse(t *testing.T) {
    tests := []struct {
        in       string // hex encoded value
        expected string // expected hex encoded value
    }{
        // secp256k1 prime (aka 0)
        {"0", "0"},
        {"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", "0"},
        {"0", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"},
        // secp256k1 prime-1
        {
            "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
            "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
        },
        // secp256k1 prime-2
        {
            "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d",
            "7fffffffffffffffffffffffffffffffffffffffffffffffffffffff7ffffe17",
        },
        // Random sampling
        {
            "16fb970147a9acc73654d4be233cc48b875ce20a2122d24f073d29bd28805aca",
            "987aeb257b063df0c6d1334051c47092b6d8766c4bf10c463786d93f5bc54354",
        },
        {
            "69d1323ce9f1f7b3bd3c7320b0d6311408e30281e273e39a0d8c7ee1c8257919",
            "49340981fa9b8d3dad72de470b34f547ed9179c3953797d0943af67806f4bb6",
        },
        {
            "e0debf988ae098ecda07d0b57713e97c6d213db19753e8c95aa12a2fc1cc5272",
            "64f58077b68af5b656b413ea366863f7b2819f8d27375d9c4d9804135ca220c2",
        },
        {
            "dcd394f91f74c2ba16aad74a22bb0ed47fe857774b8f2d6c09e28bfb14642878",
            "fb848ec64d0be572a63c38fe83df5e7f3d032f60bf8c969ef67d36bf4ada22a9",
        },
    }

    t.Logf("Running %d tests", len(tests))
    for i, test := range tests {
        f := new(fieldVal).SetHex(test.in).Normalize()
        expected := new(fieldVal).SetHex(test.expected).Normalize()
        result := f.Inverse().Normalize()
        if !result.Equals(expected) {
            t.Errorf("fieldVal.Inverse #%d wrong result\n"+
                "got: %v\nwant: %v", i, result, expected)
            continue
        }
    }
}
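
The Inverse vectors above can likewise be cross-checked with math/big for nonzero inputs, since a field inverse must satisfy a * a^-1 ≡ 1 (mod P). checkInverseVector is a hypothetical helper (a sketch, not part of the original tests) and assumes the same prime P and a math/big import.

func checkInverseVector(in, expected string) bool {
    p, _ := new(big.Int).SetString(
        "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", 16)
    a, _ := new(big.Int).SetString(in, 16)
    want, _ := new(big.Int).SetString(expected, 16)

    // ModInverse returns nil when no inverse exists (e.g. a == 0).
    got := new(big.Int).ModInverse(a, p)
    return got != nil && got.Cmp(want) == 0
}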

@@ -1,63 +0,0 @@
// Copyright 2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

// This file is ignored during the regular build due to the following build tag.
// It is called by go generate and used to automatically generate pre-computed
// tables used to accelerate operations.
// +build ignore

package main

import (
    "bytes"
    "compress/zlib"
    "encoding/base64"
    "fmt"
    "log"
    "os"

    "github.com/daglabs/btcd/btcec"
)

func main() {
    fi, err := os.Create("secp256k1.go")
    if err != nil {
        log.Fatal(err)
    }
    defer fi.Close()

    // Compress the serialized byte points.
    serialized := btcec.S256().SerializedBytePoints()
    var compressed bytes.Buffer
    w := zlib.NewWriter(&compressed)
    if _, err := w.Write(serialized); err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
    w.Close()

    // Encode the compressed byte points with base64.
    encoded := make([]byte, base64.StdEncoding.EncodedLen(compressed.Len()))
    base64.StdEncoding.Encode(encoded, compressed.Bytes())

    fmt.Fprintln(fi, "// Copyright (c) 2015 The btcsuite developers")
    fmt.Fprintln(fi, "// Use of this source code is governed by an ISC")
    fmt.Fprintln(fi, "// license that can be found in the LICENSE file.")
    fmt.Fprintln(fi)
    fmt.Fprintln(fi, "package btcec")
    fmt.Fprintln(fi)
    fmt.Fprintln(fi, "// Auto-generated file (see genprecomps.go)")
    fmt.Fprintln(fi, "// DO NOT EDIT")
    fmt.Fprintln(fi)
    fmt.Fprintf(fi, "var secp256k1BytePoints = %q\n", string(encoded))

    a1, b1, a2, b2 := btcec.S256().EndomorphismVectors()
    fmt.Println("The following values are the computed linearly " +
        "independent vectors needed to make use of the secp256k1 " +
        "endomorphism:")
    fmt.Printf("a1: %x\n", a1)
    fmt.Printf("b1: %x\n", b1)
    fmt.Printf("a2: %x\n", a2)
    fmt.Printf("b2: %x\n", b2)
}

@@ -1,203 +0,0 @@
// Copyright (c) 2014-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

// This file is ignored during the regular build due to the following build tag.
// This build tag is set during go generate.
// +build gensecp256k1

package btcec

// References:
//   [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone)

import (
    "encoding/binary"
    "math/big"
)

// secp256k1BytePoints are dummy points used so the code which generates the
// real values can compile.
var secp256k1BytePoints = ""

// getDoublingPoints returns all the possible G^(2^i) for i in 0..n-1, where n
// is the curve's bit size (256 in the case of secp256k1). The coordinates are
// recorded as Jacobian coordinates.
func (curve *KoblitzCurve) getDoublingPoints() [][3]fieldVal {
    doublingPoints := make([][3]fieldVal, curve.BitSize)

    // Initialize px, py, pz to the Jacobian coordinates for the base point.
    px, py := curve.bigAffineToField(curve.Gx, curve.Gy)
    pz := new(fieldVal).SetInt(1)
    for i := 0; i < curve.BitSize; i++ {
        doublingPoints[i] = [3]fieldVal{*px, *py, *pz}
        // P = 2*P
        curve.doubleJacobian(px, py, pz, px, py, pz)
    }
    return doublingPoints
}

// SerializedBytePoints returns a serialized byte slice which contains all of
// the possible points per 8-bit window. This is used when generating
// secp256k1.go.
func (curve *KoblitzCurve) SerializedBytePoints() []byte {
    doublingPoints := curve.getDoublingPoints()

    // Segregate the bits into byte-sized windows.
    serialized := make([]byte, curve.byteSize*256*3*10*4)
    offset := 0
    for byteNum := 0; byteNum < curve.byteSize; byteNum++ {
        // Grab the 8 bits that make up this byte from doublingPoints.
        startingBit := 8 * (curve.byteSize - byteNum - 1)
        computingPoints := doublingPoints[startingBit : startingBit+8]

        // Compute all points in this window and serialize them.
        for i := 0; i < 256; i++ {
            px, py, pz := new(fieldVal), new(fieldVal), new(fieldVal)
            for j := 0; j < 8; j++ {
                if i>>uint(j)&1 == 1 {
                    curve.addJacobian(px, py, pz, &computingPoints[j][0],
                        &computingPoints[j][1], &computingPoints[j][2], px, py, pz)
                }
            }
            for i := 0; i < 10; i++ {
                binary.LittleEndian.PutUint32(serialized[offset:], px.n[i])
                offset += 4
            }
            for i := 0; i < 10; i++ {
                binary.LittleEndian.PutUint32(serialized[offset:], py.n[i])
                offset += 4
            }
            for i := 0; i < 10; i++ {
                binary.LittleEndian.PutUint32(serialized[offset:], pz.n[i])
                offset += 4
            }
        }
    }

    return serialized
}

// sqrt returns the square root of the provided big integer using Newton's
// method. It's only compiled and used during generation of pre-computed
// values, so speed is not a huge concern.
func sqrt(n *big.Int) *big.Int {
    // Initial guess = 2^(log_2(n)/2)
    guess := big.NewInt(2)
    guess.Exp(guess, big.NewInt(int64(n.BitLen()/2)), nil)

    // Now refine using Newton's method.
    big2 := big.NewInt(2)
    prevGuess := big.NewInt(0)
    for {
        prevGuess.Set(guess)
        guess.Add(guess, new(big.Int).Div(n, guess))
        guess.Div(guess, big2)
        if guess.Cmp(prevGuess) == 0 {
            break
        }
    }
    return guess
}

// EndomorphismVectors runs the first 3 steps of algorithm 3.74 from [GECC] to
// generate the linearly independent vectors needed to generate a balanced
// length-two representation of a multiplier such that k = k1 + k2λ (mod N) and
// returns them. Since the values will always be the same given the fact that N
// and λ are fixed, the final results can be accelerated by storing the
// precomputed values with the curve.
func (curve *KoblitzCurve) EndomorphismVectors() (a1, b1, a2, b2 *big.Int) {
    bigMinus1 := big.NewInt(-1)

    // This section uses an extended Euclidean algorithm to generate a
    // sequence of equations:
    //   s[i] * N + t[i] * λ = r[i]

    nSqrt := sqrt(curve.N)
    u, v := new(big.Int).Set(curve.N), new(big.Int).Set(curve.lambda)
    x1, y1 := big.NewInt(1), big.NewInt(0)
    x2, y2 := big.NewInt(0), big.NewInt(1)
    q, r := new(big.Int), new(big.Int)
    qu, qx1, qy1 := new(big.Int), new(big.Int), new(big.Int)
    s, t := new(big.Int), new(big.Int)
    ri, ti := new(big.Int), new(big.Int)
    a1, b1, a2, b2 = new(big.Int), new(big.Int), new(big.Int), new(big.Int)
    found, oneMore := false, false
    for u.Sign() != 0 {
        // q = v/u
        q.Div(v, u)

        // r = v - q*u
        qu.Mul(q, u)
        r.Sub(v, qu)

        // s = x2 - q*x1
        qx1.Mul(q, x1)
        s.Sub(x2, qx1)

        // t = y2 - q*y1
        qy1.Mul(q, y1)
        t.Sub(y2, qy1)

        // v = u, u = r, x2 = x1, x1 = s, y2 = y1, y1 = t
        v.Set(u)
        u.Set(r)
        x2.Set(x1)
        x1.Set(s)
        y2.Set(y1)
        y1.Set(t)

        // As soon as the remainder is less than the sqrt of n, the
        // values of a1 and b1 are known.
        if !found && r.Cmp(nSqrt) < 0 {
            // When this condition executes ri and ti represent the
            // r[i] and t[i] values such that i is the greatest
            // index for which r >= sqrt(n). Meanwhile, the current
            // r and t values are r[i+1] and t[i+1], respectively.

            // a1 = r[i+1], b1 = -t[i+1]
            a1.Set(r)
            b1.Mul(t, bigMinus1)
            found = true
            oneMore = true

            // Skip to the next iteration so ri and ti are not
            // modified.
            continue

        } else if oneMore {
            // When this condition executes ri and ti still
            // represent the r[i] and t[i] values while the current
            // r and t are r[i+2] and t[i+2], respectively.

            // sum1 = r[i]^2 + t[i]^2
            rSquared := new(big.Int).Mul(ri, ri)
            tSquared := new(big.Int).Mul(ti, ti)
            sum1 := new(big.Int).Add(rSquared, tSquared)

            // sum2 = r[i+2]^2 + t[i+2]^2
            r2Squared := new(big.Int).Mul(r, r)
            t2Squared := new(big.Int).Mul(t, t)
            sum2 := new(big.Int).Add(r2Squared, t2Squared)

            // if (r[i]^2 + t[i]^2) <= (r[i+2]^2 + t[i+2]^2)
            if sum1.Cmp(sum2) <= 0 {
                // a2 = r[i], b2 = -t[i]
                a2.Set(ri)
                b2.Mul(ti, bigMinus1)
            } else {
                // a2 = r[i+2], b2 = -t[i+2]
                a2.Set(r)
                b2.Mul(t, bigMinus1)
            }

            // All done.
            break
        }

        ri.Set(r)
        ti.Set(t)
    }

    return a1, b1, a2, b2
}
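
The vectors returned by EndomorphismVectors feed steps 4-7 of algorithm 3.74 in [GECC], which split a scalar k into balanced halves k1, k2 with k = k1 + k2*λ (mod N). The sketch below is illustrative only, not the repository's implementation: splitK and roundDiv are hypothetical names, it assumes math/big, and real code must still carry the signs of k1 and k2 into the point multiplication.

// splitK sketches the balanced length-two decomposition of k.
func splitK(n, a1, b1, a2, b2, k *big.Int) (k1, k2 *big.Int) {
    // c1 = round(b2*k / n), c2 = round(-b1*k / n).
    c1 := roundDiv(new(big.Int).Mul(b2, k), n)
    c2 := roundDiv(new(big.Int).Neg(new(big.Int).Mul(b1, k)), n)

    // k1 = k - c1*a1 - c2*a2, k2 = -c1*b1 - c2*b2.
    k1 = new(big.Int).Sub(k, new(big.Int).Mul(c1, a1))
    k1.Sub(k1, new(big.Int).Mul(c2, a2))
    k2 = new(big.Int).Neg(new(big.Int).Mul(c1, b1))
    k2.Sub(k2, new(big.Int).Mul(c2, b2))
    return k1, k2
}

// roundDiv returns the nearest integer to num/den for a positive den,
// computed as floor((2*num + den) / (2*den)).
func roundDiv(num, den *big.Int) *big.Int {
    q := new(big.Int).Lsh(num, 1)
    q.Add(q, den)
    return q.Div(q, new(big.Int).Lsh(den, 1))
}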

@@ -1,67 +0,0 @@
// Copyright 2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcec

import (
    "compress/zlib"
    "encoding/base64"
    "encoding/binary"
    "io/ioutil"
    "strings"
)

//go:generate go run -tags gensecp256k1 genprecomps.go

// loadS256BytePoints decompresses and deserializes the pre-computed byte points
// used to accelerate scalar base multiplication for the secp256k1 curve. This
// approach is used since it allows compilation to use significantly less RAM
// and complete much faster than hard-coding the final in-memory data structure.
// At the same time, it is quite fast to generate the in-memory data structure
// at init time with this approach versus computing the table.
func loadS256BytePoints() error {
    // There will be no byte points to load when generating them.
    bp := secp256k1BytePoints
    if len(bp) == 0 {
        return nil
    }

    // Decompress the pre-computed table used to accelerate scalar base
    // multiplication.
    decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(bp))
    r, err := zlib.NewReader(decoder)
    if err != nil {
        return err
    }
    serialized, err := ioutil.ReadAll(r)
    if err != nil {
        return err
    }

    // Deserialize the precomputed byte points and set the curve to them.
    offset := 0
    var bytePoints [32][256][3]fieldVal
    for byteNum := 0; byteNum < 32; byteNum++ {
        // All points in this window.
        for i := 0; i < 256; i++ {
            px := &bytePoints[byteNum][i][0]
            py := &bytePoints[byteNum][i][1]
            pz := &bytePoints[byteNum][i][2]
            for i := 0; i < 10; i++ {
                px.n[i] = binary.LittleEndian.Uint32(serialized[offset:])
                offset += 4
            }
            for i := 0; i < 10; i++ {
                py.n[i] = binary.LittleEndian.Uint32(serialized[offset:])
                offset += 4
            }
            for i := 0; i < 10; i++ {
                pz.n[i] = binary.LittleEndian.Uint32(serialized[offset:])
                offset += 4
            }
        }
    }
    secp256k1.bytePoints = &bytePoints
    return nil
}
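
A quick size sanity check (not in the original file): each Jacobian point above is three fieldVals of ten uint32 limbs, so the decompressed table deserialized by loadS256BytePoints is exactly 32 windows x 256 points x 3 coordinates x 10 limbs x 4 bytes, matching the buffer SerializedBytePoints allocates.

const expectedBytePointsSize = 32 * 256 * 3 * 10 * 4 // 983040 bytes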

@@ -1,73 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcec

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "math/big"
)

// PrivateKey wraps an ecdsa.PrivateKey as a convenience mainly for signing
// things with the private key without having to directly import the ecdsa
// package.
type PrivateKey ecdsa.PrivateKey

// PrivKeyFromBytes returns a private and public key for `curve' based on the
// private key passed as an argument as a byte slice.
func PrivKeyFromBytes(curve elliptic.Curve, pk []byte) (*PrivateKey,
    *PublicKey) {
    x, y := curve.ScalarBaseMult(pk)

    priv := &ecdsa.PrivateKey{
        PublicKey: ecdsa.PublicKey{
            Curve: curve,
            X:     x,
            Y:     y,
        },
        D: new(big.Int).SetBytes(pk),
    }

    return (*PrivateKey)(priv), (*PublicKey)(&priv.PublicKey)
}

// NewPrivateKey is a wrapper for ecdsa.GenerateKey that returns a PrivateKey
// instead of the normal ecdsa.PrivateKey.
func NewPrivateKey(curve elliptic.Curve) (*PrivateKey, error) {
    key, err := ecdsa.GenerateKey(curve, rand.Reader)
    if err != nil {
        return nil, err
    }
    return (*PrivateKey)(key), nil
}

// PubKey returns the PublicKey corresponding to this private key.
func (p *PrivateKey) PubKey() *PublicKey {
    return (*PublicKey)(&p.PublicKey)
}

// ToECDSA returns the private key as a *ecdsa.PrivateKey.
func (p *PrivateKey) ToECDSA() *ecdsa.PrivateKey {
    return (*ecdsa.PrivateKey)(p)
}

// Sign generates an ECDSA signature for the provided hash (which should be the
// result of hashing a larger message) using the private key. The produced
// signature is deterministic (the same message and same key yield the same
// signature) and canonical in accordance with RFC6979 and BIP0062.
func (p *PrivateKey) Sign(hash []byte) (*Signature, error) {
    return signRFC6979(p, hash)
}

// PrivKeyBytesLen defines the length in bytes of a serialized private key.
const PrivKeyBytesLen = 32

// Serialize returns the private key number d as a big-endian binary-encoded
// number, padded to a length of 32 bytes.
func (p *PrivateKey) Serialize() []byte {
    b := make([]byte, 0, PrivKeyBytesLen)
    return paddedAppend(PrivKeyBytesLen, b, p.ToECDSA().D.Bytes())
}
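
A usage sketch (not part of the original file) tying the pieces above together: derive a key pair from raw bytes, sign a digest, verify, and round-trip the key through Serialize. exampleSignRoundTrip is a hypothetical helper; it assumes bytes and errors imports and that keyBytes is exactly PrivKeyBytesLen bytes.

func exampleSignRoundTrip(keyBytes, digest []byte) error {
    priv, pub := PrivKeyFromBytes(S256(), keyBytes)

    sig, err := priv.Sign(digest)
    if err != nil {
        return err
    }
    if !sig.Verify(digest, pub) {
        return errors.New("signature did not verify")
    }
    // Serialize pads to 32 bytes, so a 32-byte input round-trips exactly.
    if !bytes.Equal(priv.Serialize(), keyBytes) {
        return errors.New("serialized key does not round-trip")
    }
    return nil
}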

@@ -1,55 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcec

import (
    "bytes"
    "testing"
)

func TestPrivKeys(t *testing.T) {
    tests := []struct {
        name string
        key  []byte
    }{
        {
            name: "check curve",
            key: []byte{
                0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6,
                0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c,
                0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9,
                0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94,
            },
        },
    }

    for _, test := range tests {
        priv, pub := PrivKeyFromBytes(S256(), test.key)

        _, err := ParsePubKey(pub.SerializeUncompressed(), S256())
        if err != nil {
            t.Errorf("%s privkey: %v", test.name, err)
            continue
        }

        hash := []byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9}
        sig, err := priv.Sign(hash)
        if err != nil {
            t.Errorf("%s could not sign: %v", test.name, err)
            continue
        }

        if !sig.Verify(hash, pub) {
            t.Errorf("%s could not verify: %v", test.name, err)
            continue
        }

        serializedKey := priv.Serialize()
        if !bytes.Equal(serializedKey, test.key) {
            t.Errorf("%s unexpected serialized bytes - got: %x, "+
                "want: %x", test.name, serializedKey, test.key)
        }
    }
}

btcec/pubkey.go
@@ -1,181 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcec

import (
    "crypto/ecdsa"
    "errors"
    "fmt"
    "math/big"
)

// These constants define the lengths of serialized public keys.
const (
    PubKeyBytesLenCompressed   = 33
    PubKeyBytesLenUncompressed = 65
    PubKeyBytesLenHybrid       = 65
)

func isOdd(a *big.Int) bool {
    return a.Bit(0) == 1
}

// decompressPoint decompresses a point on the given curve given the X point and
// the solution to use.
func decompressPoint(curve *KoblitzCurve, x *big.Int, ybit bool) (*big.Int, error) {
    // TODO: This will probably only work for secp256k1 due to
    // optimizations.

    // Y = +-sqrt(x^3 + B)
    x3 := new(big.Int).Mul(x, x)
    x3.Mul(x3, x)
    x3.Add(x3, curve.Params().B)

    // Now calculate sqrt mod p of x^3 + B.
    // This code used to do a full sqrt based on tonelli/shanks,
    // but this was replaced by the algorithms referenced in
    // https://bitcointalk.org/index.php?topic=162805.msg1712294#msg1712294
    y := new(big.Int).Exp(x3, curve.QPlus1Div4(), curve.Params().P)

    if ybit != isOdd(y) {
        y.Sub(curve.Params().P, y)
    }
    if ybit != isOdd(y) {
        return nil, fmt.Errorf("ybit doesn't match oddness")
    }
    return y, nil
}
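
Why the single Exp call in decompressPoint yields a square root (a sketch, not from the original file): the secp256k1 prime P satisfies P ≡ 3 (mod 4), and for such primes a^((P+1)/4) mod P is a square root of any quadratic residue a, so no general Tonelli-Shanks loop is needed. modSqrt34 is a hypothetical illustration using math/big.

func modSqrt34(a, p *big.Int) *big.Int {
    // e = (p + 1) / 4; valid whenever p ≡ 3 (mod 4) and a is a residue.
    e := new(big.Int).Add(p, big.NewInt(1))
    e.Div(e, big.NewInt(4))
    return new(big.Int).Exp(a, e, p)
}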

const (
    pubkeyCompressed   byte = 0x2 // y_bit + x coord
    pubkeyUncompressed byte = 0x4 // x coord + y coord
    pubkeyHybrid       byte = 0x6 // y_bit + x coord + y coord
)

// IsCompressedPubKey returns true if the passed serialized public key has
// been encoded in compressed format, and false otherwise.
func IsCompressedPubKey(pubKey []byte) bool {
    // The public key is only compressed if it is the correct length and
    // the format (first byte) is one of the compressed pubkey values.
    return len(pubKey) == PubKeyBytesLenCompressed &&
        (pubKey[0]&^byte(0x1) == pubkeyCompressed)
}

// ParsePubKey parses a public key for a koblitz curve from a bytestring into a
// ecdsa.Publickey, verifying that it is valid. It supports compressed,
// uncompressed and hybrid signature formats.
func ParsePubKey(pubKeyStr []byte, curve *KoblitzCurve) (key *PublicKey, err error) {
    pubkey := PublicKey{}
    pubkey.Curve = curve

    if len(pubKeyStr) == 0 {
        return nil, errors.New("pubkey string is empty")
    }

    format := pubKeyStr[0]
    ybit := (format & 0x1) == 0x1
    format &= ^byte(0x1)

    switch len(pubKeyStr) {
    case PubKeyBytesLenUncompressed:
        if format != pubkeyUncompressed && format != pubkeyHybrid {
            return nil, fmt.Errorf("invalid magic in pubkey str: "+
                "%d", pubKeyStr[0])
        }

        pubkey.X = new(big.Int).SetBytes(pubKeyStr[1:33])
        pubkey.Y = new(big.Int).SetBytes(pubKeyStr[33:])
        // Hybrid keys have extra information, make use of it.
        if format == pubkeyHybrid && ybit != isOdd(pubkey.Y) {
            return nil, fmt.Errorf("ybit doesn't match oddness")
        }
    case PubKeyBytesLenCompressed:
        // format is 0x2 | solution, <X coordinate>
        // solution determines which solution of the curve we use.
        // y^2 = x^3 + Curve.B
        if format != pubkeyCompressed {
            return nil, fmt.Errorf("invalid magic in compressed "+
                "pubkey string: %d", pubKeyStr[0])
        }
        pubkey.X = new(big.Int).SetBytes(pubKeyStr[1:33])
        pubkey.Y, err = decompressPoint(curve, pubkey.X, ybit)
        if err != nil {
            return nil, err
        }
    default: // wrong!
        return nil, fmt.Errorf("invalid pub key length %d",
            len(pubKeyStr))
    }

    if pubkey.X.Cmp(pubkey.Curve.Params().P) >= 0 {
        return nil, fmt.Errorf("pubkey X parameter is >= to P")
    }
    if pubkey.Y.Cmp(pubkey.Curve.Params().P) >= 0 {
        return nil, fmt.Errorf("pubkey Y parameter is >= to P")
    }
    if !pubkey.Curve.IsOnCurve(pubkey.X, pubkey.Y) {
        return nil, fmt.Errorf("pubkey isn't on secp256k1 curve")
    }
    return &pubkey, nil
}

// PublicKey is an ecdsa.PublicKey with additional functions to
// serialize in uncompressed, compressed, and hybrid formats.
type PublicKey ecdsa.PublicKey

// ToECDSA returns the public key as a *ecdsa.PublicKey.
func (p *PublicKey) ToECDSA() *ecdsa.PublicKey {
    return (*ecdsa.PublicKey)(p)
}

// SerializeUncompressed serializes a public key in a 65-byte uncompressed
// format.
func (p *PublicKey) SerializeUncompressed() []byte {
    b := make([]byte, 0, PubKeyBytesLenUncompressed)
    b = append(b, pubkeyUncompressed)
    b = paddedAppend(32, b, p.X.Bytes())
    return paddedAppend(32, b, p.Y.Bytes())
}

// SerializeCompressed serializes a public key in a 33-byte compressed format.
func (p *PublicKey) SerializeCompressed() []byte {
    b := make([]byte, 0, PubKeyBytesLenCompressed)
    format := pubkeyCompressed
    if isOdd(p.Y) {
        format |= 0x1
    }
    b = append(b, format)
    return paddedAppend(32, b, p.X.Bytes())
}

// SerializeHybrid serializes a public key in a 65-byte hybrid format.
func (p *PublicKey) SerializeHybrid() []byte {
    b := make([]byte, 0, PubKeyBytesLenHybrid)
    format := pubkeyHybrid
    if isOdd(p.Y) {
        format |= 0x1
    }
    b = append(b, format)
    b = paddedAppend(32, b, p.X.Bytes())
    return paddedAppend(32, b, p.Y.Bytes())
}

// IsEqual compares this PublicKey instance to the one passed, returning true if
// both PublicKeys are equivalent. A PublicKey is equivalent to another, if they
// both have the same X and Y coordinate.
func (p *PublicKey) IsEqual(otherPubKey *PublicKey) bool {
    return p.X.Cmp(otherPubKey.X) == 0 &&
        p.Y.Cmp(otherPubKey.Y) == 0
}

// paddedAppend appends the src byte slice to dst, returning the new slice.
// If the length of the source is smaller than the passed size, leading zero
// bytes are appended to the dst slice before appending src.
func paddedAppend(size uint, dst, src []byte) []byte {
    for i := 0; i < int(size)-len(src); i++ {
        dst = append(dst, 0)
    }
    return append(dst, src...)
}

@@ -1,296 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcec

import (
    "bytes"
    "testing"

    "github.com/davecgh/go-spew/spew"
)

type pubKeyTest struct {
    name    string
    key     []byte
    format  byte
    isValid bool
}

var pubKeyTests = []pubKeyTest{
    // pubkey from bitcoin blockchain tx
    // 0437cd7f8525ceed2324359c2d0ba26006d92d85
    {
        name: "uncompressed ok",
        key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
            0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
            0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
            0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
            0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
            0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
            0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
            0xb4, 0x12, 0xa3,
        },
        isValid: true,
        format:  pubkeyUncompressed,
    },
    {
        name: "uncompressed x changed",
        key: []byte{0x04, 0x15, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
            0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
            0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
            0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
            0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
            0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
            0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
            0xb4, 0x12, 0xa3,
        },
        isValid: false,
    },
    {
        name: "uncompressed y changed",
        key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
            0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
            0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
            0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
            0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
            0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
            0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
            0xb4, 0x12, 0xa4,
        },
        isValid: false,
    },
    {
        name: "uncompressed claims compressed",
        key: []byte{0x03, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
            0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
            0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
            0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
            0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
            0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
            0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
            0xb4, 0x12, 0xa3,
        },
        isValid: false,
    },
    {
        name: "uncompressed as hybrid ok",
        key: []byte{0x07, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
            0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
            0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
            0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
            0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
            0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
            0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
            0xb4, 0x12, 0xa3,
        },
        isValid: true,
        format:  pubkeyHybrid,
    },
    {
        name: "uncompressed as hybrid wrong",
        key: []byte{0x06, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
            0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
            0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
            0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
            0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
            0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
            0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
            0xb4, 0x12, 0xa3,
        },
        isValid: false,
    },
    // from tx 0b09c51c51ff762f00fb26217269d2a18e77a4fa87d69b3c363ab4df16543f20
    {
        name: "compressed ok (ybit = 0)",
        key: []byte{0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
            0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
            0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
            0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
        },
        isValid: true,
        format:  pubkeyCompressed,
    },
    // from tx fdeb8e72524e8dab0da507ddbaf5f88fe4a933eb10a66bc4745bb0aa11ea393c
    {
        name: "compressed ok (ybit = 1)",
        key: []byte{0x03, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33,
            0x09, 0xfb, 0x14, 0x3e, 0x0e, 0x8f, 0xe3, 0x96, 0x34,
            0x25, 0x21, 0x88, 0x7e, 0x97, 0x66, 0x90, 0xb6, 0xb4,
            0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e,
        },
        isValid: true,
        format:  pubkeyCompressed,
    },
    {
        name: "compressed claims uncompressed (ybit = 0)",
        key: []byte{0x04, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
            0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
            0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
            0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
        },
        isValid: false,
    },
    {
        name: "compressed claims uncompressed (ybit = 1)",
        key: []byte{0x05, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33,
            0x09, 0xfb, 0x14, 0x3e, 0x0e, 0x8f, 0xe3, 0x96, 0x34,
            0x25, 0x21, 0x88, 0x7e, 0x97, 0x66, 0x90, 0xb6, 0xb4,
            0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e,
        },
        isValid: false,
    },
    {
        name:    "wrong length",
        key:     []byte{0x05},
        isValid: false,
    },
    {
        name: "X == P",
        key: []byte{0x04, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x2F, 0xb2, 0xe0,
            0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
            0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
            0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
            0xb4, 0x12, 0xa3,
        },
        isValid: false,
    },
    {
        name: "X > P",
        key: []byte{0x04, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFD, 0x2F, 0xb2, 0xe0,
            0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
            0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
            0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
            0xb4, 0x12, 0xa3,
        },
        isValid: false,
    },
    {
        name: "Y == P",
        key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
            0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
            0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
            0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF,
            0xFF, 0xFC, 0x2F,
        },
        isValid: false,
    },
    {
        name: "Y > P",
        key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
            0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
            0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
            0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF,
            0xFF, 0xFD, 0x2F,
        },
        isValid: false,
    },
    {
        name: "hybrid",
        key: []byte{0x06, 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb,
            0xac, 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07,
            0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9, 0x59,
            0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98, 0x48, 0x3a,
            0xda, 0x77, 0x26, 0xa3, 0xc4, 0x65, 0x5d, 0xa4, 0xfb,
            0xfc, 0x0e, 0x11, 0x08, 0xa8, 0xfd, 0x17, 0xb4, 0x48,
            0xa6, 0x85, 0x54, 0x19, 0x9c, 0x47, 0xd0, 0x8f, 0xfb,
            0x10, 0xd4, 0xb8,
        },
        format:  pubkeyHybrid,
        isValid: true,
    },
}

func TestPubKeys(t *testing.T) {
    for _, test := range pubKeyTests {
        pk, err := ParsePubKey(test.key, S256())
        if err != nil {
            if test.isValid {
                t.Errorf("%s pubkey failed when shouldn't %v",
                    test.name, err)
            }
            continue
        }
        if !test.isValid {
            t.Errorf("%s counted as valid when it should fail",
                test.name)
            continue
        }
        var pkStr []byte
        switch test.format {
        case pubkeyUncompressed:
            pkStr = (*PublicKey)(pk).SerializeUncompressed()
        case pubkeyCompressed:
            pkStr = (*PublicKey)(pk).SerializeCompressed()
        case pubkeyHybrid:
            pkStr = (*PublicKey)(pk).SerializeHybrid()
        }
        if !bytes.Equal(test.key, pkStr) {
            t.Errorf("%s pubkey: serialized keys do not match.",
                test.name)
            spew.Dump(test.key)
            spew.Dump(pkStr)
        }
    }
}

func TestPublicKeyIsEqual(t *testing.T) {
    pubKey1, err := ParsePubKey(
        []byte{0x03, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33,
            0x09, 0xfb, 0x14, 0x3e, 0x0e, 0x8f, 0xe3, 0x96, 0x34,
            0x25, 0x21, 0x88, 0x7e, 0x97, 0x66, 0x90, 0xb6, 0xb4,
            0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e,
        },
        S256(),
    )
    if err != nil {
        t.Fatalf("failed to parse raw bytes for pubKey1: %v", err)
    }

    pubKey2, err := ParsePubKey(
        []byte{0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
            0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
            0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
            0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
        },
        S256(),
    )
    if err != nil {
        t.Fatalf("failed to parse raw bytes for pubKey2: %v", err)
    }

    if !pubKey1.IsEqual(pubKey1) {
        t.Fatalf("value of IsEqual is incorrect, %v is "+
            "equal to %v", pubKey1, pubKey1)
    }

    if pubKey1.IsEqual(pubKey2) {
        t.Fatalf("value of IsEqual is incorrect, %v is not "+
            "equal to %v", pubKey1, pubKey2)
    }
}

func TestIsCompressed(t *testing.T) {
    for _, test := range pubKeyTests {
        isCompressed := IsCompressedPubKey(test.key)
        wantCompressed := (test.format == pubkeyCompressed)
        if isCompressed != wantCompressed {
            t.Fatalf("%s (%x) pubkey: unexpected compressed result, "+
                "got %v, want %v", test.name, test.key,
                isCompressed, wantCompressed)
        }
    }
}

File diff suppressed because one or more lines are too long
@@ -1,540 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcec

import (
    "bytes"
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/hmac"
    "crypto/sha256"
    "errors"
    "fmt"
    "hash"
    "math/big"
)

// Errors returned by canonicalPadding.
var (
    errNegativeValue          = errors.New("value may be interpreted as negative")
    errExcessivelyPaddedValue = errors.New("value is excessively padded")
)

// Signature is a type representing an ecdsa signature.
type Signature struct {
    R *big.Int
    S *big.Int
}

var (
    // Used in RFC6979 implementation when testing the nonce for correctness.
    one = big.NewInt(1)

    // oneInitializer is used to fill a byte slice with byte 0x01. It is provided
    // here to avoid the need to create it multiple times.
    oneInitializer = []byte{0x01}
)

// Serialize returns the ECDSA signature in the more strict DER format. Note
// that the serialized bytes returned do not include the appended hash type
// used in Bitcoin signature scripts.
//
// encoding/asn1 is broken so we hand roll this output:
//
// 0x30 <length> 0x02 <length r> r 0x02 <length s> s
func (sig *Signature) Serialize() []byte {
    // low 'S' malleability breaker
    sigS := sig.S
    if sigS.Cmp(S256().halfOrder) == 1 {
        sigS = new(big.Int).Sub(S256().N, sigS)
    }
    // Ensure the encoded bytes for the r and s values are canonical and
    // thus suitable for DER encoding.
    rb := canonicalizeInt(sig.R)
    sb := canonicalizeInt(sigS)

    // total length of returned signature is 1 byte for each magic and
    // length (6 total), plus lengths of r and s
    length := 6 + len(rb) + len(sb)
    b := make([]byte, length)

    b[0] = 0x30
    b[1] = byte(length - 2)
    b[2] = 0x02
    b[3] = byte(len(rb))
    offset := copy(b[4:], rb) + 4
    b[offset] = 0x02
    b[offset+1] = byte(len(sb))
    copy(b[offset+2:], sb)
    return b
}
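
A worked example of the DER framing above (illustrative, not from the original file): with canonical one-byte values r = 0x0f and s = 0x2a, length works out to 6+1+1 = 8 and Serialize would emit the following bytes.

var exampleDER = []byte{
    0x30, 0x06, // sequence header, 6 bytes follow
    0x02, 0x01, 0x0f, // integer marker, length 1, r = 0x0f
    0x02, 0x01, 0x2a, // integer marker, length 1, s = 0x2a
}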
||||
|
||||
// Verify calls ecdsa.Verify to verify the signature of hash using the public
|
||||
// key. It returns true if the signature is valid, false otherwise.
|
||||
func (sig *Signature) Verify(hash []byte, pubKey *PublicKey) bool {
|
||||
return ecdsa.Verify(pubKey.ToECDSA(), hash, sig.R, sig.S)
|
||||
}
|
||||
|
||||
// IsEqual compares this Signature instance to the one passed, returning true
|
||||
// if both Signatures are equivalent. A signature is equivalent to another, if
|
||||
// they both have the same scalar value for R and S.
|
||||
func (sig *Signature) IsEqual(otherSig *Signature) bool {
|
||||
return sig.R.Cmp(otherSig.R) == 0 &&
|
||||
sig.S.Cmp(otherSig.S) == 0
|
||||
}
|
||||
|
||||
// minSigLen is the minimum length of a DER encoded signature and is
|
||||
// when both R and S are 1 byte each.
|
||||
// 0x30 + <1-byte> + 0x02 + 0x01 + <byte> + 0x2 + 0x01 + <byte>
|
||||
const minSigLen = 8
|
||||
|
||||
func parseSig(sigStr []byte, curve elliptic.Curve, der bool) (*Signature, error) {
|
||||
// Originally this code used encoding/asn1 in order to parse the
|
||||
// signature, but a number of problems were found with this approach.
|
||||
// Despite the fact that signatures are stored as DER, the difference
|
||||
// between go's idea of a bignum (and that they have sign) doesn't agree
|
||||
// with the openssl one (where they do not). The above is true as of
|
||||
// Go 1.1. In the end it was simpler to rewrite the code to explicitly
|
||||
// understand the format which is this:
|
||||
// 0x30 <length of whole message> <0x02> <length of R> <R> 0x2
|
||||
// <length of S> <S>.
|
||||
|
||||
signature := &Signature{}
|
||||
|
||||
if len(sigStr) < minSigLen {
|
||||
return nil, errors.New("malformed signature: too short")
|
||||
}
|
||||
// 0x30
|
||||
index := 0
|
||||
if sigStr[index] != 0x30 {
|
||||
return nil, errors.New("malformed signature: no header magic")
|
||||
}
|
||||
index++
|
||||
// length of remaining message
|
||||
siglen := sigStr[index]
|
||||
index++
|
||||
|
||||
// siglen should be less than the entire message and greater than
|
||||
// the minimal message size.
|
||||
if int(siglen+2) > len(sigStr) || int(siglen+2) < minSigLen {
|
||||
return nil, errors.New("malformed signature: bad length")
|
||||
}
|
||||
// trim the slice we're working on so we only look at what matters.
|
||||
sigStr = sigStr[:siglen+2]
|
||||
|
||||
// 0x02
|
||||
if sigStr[index] != 0x02 {
|
||||
return nil,
|
||||
errors.New("malformed signature: no 1st int marker")
|
||||
}
|
||||
index++
|
||||
|
||||
// Length of signature R.
|
||||
rLen := int(sigStr[index])
|
||||
// must be positive, must be able to fit in another 0x2, <len> <s>
|
||||
// hence the -3. We assume that the length must be at least one byte.
|
||||
index++
|
||||
if rLen <= 0 || rLen > len(sigStr)-index-3 {
|
||||
return nil, errors.New("malformed signature: bogus R length")
|
||||
}
|
||||
|
||||
// Then R itself.
|
||||
rBytes := sigStr[index : index+rLen]
|
||||
if der {
|
||||
switch err := canonicalPadding(rBytes); err {
|
||||
case errNegativeValue:
|
||||
return nil, errors.New("signature R is negative")
|
||||
case errExcessivelyPaddedValue:
|
||||
return nil, errors.New("signature R is excessively padded")
|
||||
}
|
||||
}
|
||||
signature.R = new(big.Int).SetBytes(rBytes)
|
||||
index += rLen
|
||||
// 0x02. length already checked in previous if.
|
||||
if sigStr[index] != 0x02 {
|
||||
return nil, errors.New("malformed signature: no 2nd int marker")
|
||||
}
|
||||
index++
|
||||
|
||||
// Length of signature S.
|
||||
sLen := int(sigStr[index])
|
||||
index++
|
||||
// S should be the rest of the string.
|
||||
if sLen <= 0 || sLen > len(sigStr)-index {
|
||||
return nil, errors.New("malformed signature: bogus S length")
|
||||
}
|
||||
|
||||
// Then S itself.
|
||||
sBytes := sigStr[index : index+sLen]
|
||||
if der {
|
||||
switch err := canonicalPadding(sBytes); err {
|
||||
case errNegativeValue:
|
||||
return nil, errors.New("signature S is negative")
|
||||
case errExcessivelyPaddedValue:
|
||||
return nil, errors.New("signature S is excessively padded")
|
||||
}
|
||||
}
|
||||
signature.S = new(big.Int).SetBytes(sBytes)
|
||||
index += sLen
|
||||
|
||||
// sanity check length parsing
|
||||
if index != len(sigStr) {
|
||||
return nil, fmt.Errorf("malformed signature: bad final length %d != %d",
|
||||
index, len(sigStr))
|
||||
}
|
||||
|
||||
// Verify also checks this, but we can be more sure that we parsed
|
||||
// correctly if we verify here too.
|
||||
// FWIW the ecdsa spec states that R and S must be | 1, N - 1 |
|
||||
// but crypto/ecdsa only checks for Sign != 0. Mirror that.
|
||||
if signature.R.Sign() != 1 {
|
||||
return nil, errors.New("signature R isn't 1 or more")
|
||||
}
|
||||
if signature.S.Sign() != 1 {
|
||||
return nil, errors.New("signature S isn't 1 or more")
|
||||
}
|
||||
if signature.R.Cmp(curve.Params().N) >= 0 {
|
||||
return nil, errors.New("signature R is >= curve.N")
|
||||
}
|
||||
if signature.S.Cmp(curve.Params().N) >= 0 {
|
||||
return nil, errors.New("signature S is >= curve.N")
|
||||
}
|
||||
|
||||
return signature, nil
|
||||
}
|
||||
|
||||
// ParseSignature parses a signature in BER format for the curve type `curve'
|
||||
// into a Signature type, perfoming some basic sanity checks. If parsing
|
||||
// according to the more strict DER format is needed, use ParseDERSignature.
|
||||
func ParseSignature(sigStr []byte, curve elliptic.Curve) (*Signature, error) {
|
||||
return parseSig(sigStr, curve, false)
|
||||
}
|
||||
|
||||
// ParseDERSignature parses a signature in DER format for the curve type
|
||||
// `curve` into a Signature type. If parsing according to the less strict
|
||||
// BER format is needed, use ParseSignature.
|
||||
func ParseDERSignature(sigStr []byte, curve elliptic.Curve) (*Signature, error) {
|
||||
return parseSig(sigStr, curve, true)
|
||||
}
|
||||
|
||||
// canonicalizeInt returns the bytes for the passed big integer adjusted as
|
||||
// necessary to ensure that a big-endian encoded integer can't possibly be
|
||||
// misinterpreted as a negative number. This can happen when the most
|
||||
// significant bit is set, so it is padded by a leading zero byte in this case.
|
||||
// Also, the returned bytes will have at least a single byte when the passed
|
||||
// value is 0. This is required for DER encoding.
|
||||
func canonicalizeInt(val *big.Int) []byte {
|
||||
b := val.Bytes()
|
||||
if len(b) == 0 {
|
||||
b = []byte{0x00}
|
||||
}
|
||||
if b[0]&0x80 != 0 {
|
||||
paddedBytes := make([]byte, len(b)+1)
|
||||
copy(paddedBytes[1:], b)
|
||||
b = paddedBytes
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// canonicalPadding checks whether a big-endian encoded integer could
|
||||
// possibly be misinterpreted as a negative number (even though OpenSSL
|
||||
// treats all numbers as unsigned), or if there is any unnecessary
|
||||
// leading zero padding.
|
||||
func canonicalPadding(b []byte) error {
|
||||
switch {
|
||||
case b[0]&0x80 == 0x80:
|
||||
return errNegativeValue
|
||||
case len(b) > 1 && b[0] == 0x00 && b[1]&0x80 != 0x80:
|
||||
return errExcessivelyPaddedValue
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// hashToInt converts a hash value to an integer. There is some disagreement
|
||||
// about how this is done. [NSA] suggests that this is done in the obvious
|
||||
// manner, but [SECG] truncates the hash to the bit-length of the curve order
|
||||
// first. We follow [SECG] because that's what OpenSSL does. Additionally,
|
||||
// OpenSSL right shifts excess bits from the number if the hash is too large
|
||||
// and we mirror that too.
|
||||
// This is borrowed from crypto/ecdsa.
|
||||
func hashToInt(hash []byte, c elliptic.Curve) *big.Int {
|
||||
orderBits := c.Params().N.BitLen()
|
||||
orderBytes := (orderBits + 7) / 8
|
||||
if len(hash) > orderBytes {
|
||||
hash = hash[:orderBytes]
|
||||
}
|
||||
|
||||
ret := new(big.Int).SetBytes(hash)
|
||||
excess := len(hash)*8 - orderBits
|
||||
if excess > 0 {
|
||||
ret.Rsh(ret, uint(excess))
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// recoverKeyFromSignature recovers a public key from the signature "sig" on the
|
||||
// given message hash "msg". Based on the algorithm found in section 5.1.5 of
|
||||
// SEC 1 Ver 2.0, page 47-48 (53 and 54 in the pdf). This performs the details
|
||||
// in the inner loop in Step 1. The counter provided is actually the j parameter
|
||||
// of the loop * 2 - on the first iteration of j we do the R case, else the -R
|
||||
// case in step 1.6. This counter is used in the bitcoin compressed signature
|
||||
// format and thus we match bitcoind's behaviour here.
|
||||
func recoverKeyFromSignature(curve *KoblitzCurve, sig *Signature, msg []byte,
|
||||
iter int, doChecks bool) (*PublicKey, error) {
|
||||
// 1.1 x = (n * i) + r
|
||||
Rx := new(big.Int).Mul(curve.Params().N,
|
||||
new(big.Int).SetInt64(int64(iter/2)))
|
||||
Rx.Add(Rx, sig.R)
|
||||
if Rx.Cmp(curve.Params().P) != -1 {
|
||||
return nil, errors.New("calculated Rx is larger than curve P")
|
||||
}
|
||||
|
||||
// convert 02<Rx> to point R. (step 1.2 and 1.3). If we are on an odd
|
||||
// iteration then 1.6 will be done with -R, so we calculate the other
|
||||
// term when uncompressing the point.
|
||||
Ry, err := decompressPoint(curve, Rx, iter%2 == 1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 1.4 Check n*R is point at infinity
|
||||
if doChecks {
|
||||
nRx, nRy := curve.ScalarMult(Rx, Ry, curve.Params().N.Bytes())
|
||||
if nRx.Sign() != 0 || nRy.Sign() != 0 {
|
||||
return nil, errors.New("n*R does not equal the point at infinity")
|
||||
}
|
||||
}
|
||||
|
||||
// 1.5 calculate e from message using the same algorithm as ecdsa
|
||||
// signature calculation.
|
||||
e := hashToInt(msg, curve)
|
||||
|
||||
// Step 1.6.1:
|
||||
// We calculate the two terms sR and eG separately multiplied by the
|
||||
// inverse of r (from the signature). We then add them to calculate
|
||||
// Q = r^-1(sR-eG)
|
||||
invr := new(big.Int).ModInverse(sig.R, curve.Params().N)
|
||||
|
||||
// first term.
|
||||
invrS := new(big.Int).Mul(invr, sig.S)
|
||||
invrS.Mod(invrS, curve.Params().N)
|
||||
sRx, sRy := curve.ScalarMult(Rx, Ry, invrS.Bytes())
|
||||
|
||||
// second term.
|
||||
e.Neg(e)
|
||||
e.Mod(e, curve.Params().N)
|
||||
e.Mul(e, invr)
|
||||
e.Mod(e, curve.Params().N)
|
||||
minuseGx, minuseGy := curve.ScalarBaseMult(e.Bytes())
|
||||
|
||||
// TODO: this would be faster if we did a mult and add in one
|
||||
// step to prevent the jacobian conversion back and forth.
|
||||
Qx, Qy := curve.Add(sRx, sRy, minuseGx, minuseGy)
|
||||
|
||||
return &PublicKey{
|
||||
Curve: curve,
|
||||
X: Qx,
|
||||
Y: Qy,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SignCompact produces a compact signature of the data in hash with the given
|
||||
// private key on the given koblitz curve. The isCompressed parameter should
|
||||
// be used to detail if the given signature should reference a compressed
|
||||
// public key or not. If successful the bytes of the compact signature will be
|
||||
// returned in the format:
|
||||
// <(byte of 27+public key solution)+4 if compressed >< padded bytes for signature R><padded bytes for signature S>
|
||||
// where the R and S parameters are padde up to the bitlengh of the curve.
|
||||
func SignCompact(curve *KoblitzCurve, key *PrivateKey,
|
||||
hash []byte, isCompressedKey bool) ([]byte, error) {
|
||||
sig, err := key.Sign(hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// bitcoind checks the bit length of R and S here. The ecdsa signature
|
||||
// algorithm returns R and S mod N therefore they will be the bitsize of
|
||||
// the curve, and thus correctly sized.
|
||||
for i := 0; i < (curve.H+1)*2; i++ {
|
||||
pk, err := recoverKeyFromSignature(curve, sig, hash, i, true)
|
||||
if err == nil && pk.X.Cmp(key.X) == 0 && pk.Y.Cmp(key.Y) == 0 {
|
||||
result := make([]byte, 1, 2*curve.byteSize+1)
|
||||
result[0] = 27 + byte(i)
|
||||
if isCompressedKey {
|
||||
result[0] += 4
|
||||
}
|
||||
// Not sure this needs rounding but safer to do so.
|
||||
curvelen := (curve.BitSize + 7) / 8
|
||||
|
||||
// Pad R and S to curvelen if needed.
|
||||
bytelen := (sig.R.BitLen() + 7) / 8
|
||||
if bytelen < curvelen {
|
||||
result = append(result,
|
||||
make([]byte, curvelen-bytelen)...)
|
||||
}
|
||||
result = append(result, sig.R.Bytes()...)
|
||||
|
||||
bytelen = (sig.S.BitLen() + 7) / 8
|
||||
if bytelen < curvelen {
|
||||
result = append(result,
|
||||
make([]byte, curvelen-bytelen)...)
|
||||
}
|
||||
result = append(result, sig.S.Bytes()...)
|
||||
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("no valid solution for pubkey found")
|
||||
}
|
||||
|
||||
// RecoverCompact verifies the compact signature "signature" of "hash" for the
|
||||
// Koblitz curve in "curve". If the signature matches then the recovered public
|
||||
// key will be returned as well as a boolen if the original key was compressed
|
||||
// or not, else an error will be returned.
|
||||
func RecoverCompact(curve *KoblitzCurve, signature,
|
||||
hash []byte) (*PublicKey, bool, error) {
|
||||
bitlen := (curve.BitSize + 7) / 8
|
||||
if len(signature) != 1+bitlen*2 {
|
||||
return nil, false, errors.New("invalid compact signature size")
|
||||
}
|
||||
|
||||
iteration := int((signature[0] - 27) & ^byte(4))
|
||||
|
||||
// format is <header byte><bitlen R><bitlen S>
|
||||
sig := &Signature{
|
||||
R: new(big.Int).SetBytes(signature[1 : bitlen+1]),
|
||||
S: new(big.Int).SetBytes(signature[bitlen+1:]),
|
||||
}
|
||||
// The iteration used here was encoded
|
||||
key, err := recoverKeyFromSignature(curve, sig, hash, iteration, false)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
return key, ((signature[0] - 27) & 4) == 4, nil
|
||||
}
|
||||
|
||||
// signRFC6979 generates a deterministic ECDSA signature according to RFC 6979 and BIP 62.
|
||||
func signRFC6979(privateKey *PrivateKey, hash []byte) (*Signature, error) {
|
||||
|
||||
privkey := privateKey.ToECDSA()
|
||||
N := S256().N
|
||||
halfOrder := S256().halfOrder
|
||||
k := nonceRFC6979(privkey.D, hash)
|
||||
inv := new(big.Int).ModInverse(k, N)
|
||||
r, _ := privkey.Curve.ScalarBaseMult(k.Bytes())
|
||||
r.Mod(r, N)
|
||||
|
||||
if r.Sign() == 0 {
|
||||
return nil, errors.New("calculated R is zero")
|
||||
}
|
||||
|
||||
e := hashToInt(hash, privkey.Curve)
|
||||
s := new(big.Int).Mul(privkey.D, r)
|
||||
s.Add(s, e)
|
||||
s.Mul(s, inv)
|
||||
s.Mod(s, N)
|
||||
|
||||
if s.Cmp(halfOrder) == 1 {
|
||||
s.Sub(N, s)
|
||||
}
|
||||
if s.Sign() == 0 {
|
||||
return nil, errors.New("calculated S is zero")
|
||||
}
|
||||
return &Signature{R: r, S: s}, nil
|
||||
}
|
||||

// nonceRFC6979 generates an ECDSA nonce (`k`) deterministically according to
// RFC 6979. It takes a 32-byte hash as an input and returns a 32-byte nonce to
// be used in the ECDSA algorithm.
func nonceRFC6979(privkey *big.Int, hash []byte) *big.Int {
	curve := S256()
	q := curve.Params().N
	x := privkey
	alg := sha256.New

	qlen := q.BitLen()
	holen := alg().Size()
	rolen := (qlen + 7) >> 3
	bx := append(int2octets(x, rolen), bits2octets(hash, curve, rolen)...)

	// Step B
	v := bytes.Repeat(oneInitializer, holen)

	// Step C (Go zeroes all allocated memory)
	k := make([]byte, holen)

	// Step D
	k = mac(alg, k, append(append(v, 0x00), bx...))

	// Step E
	v = mac(alg, k, v)

	// Step F
	k = mac(alg, k, append(append(v, 0x01), bx...))

	// Step G
	v = mac(alg, k, v)

	// Step H
	for {
		// Step H1
		var t []byte

		// Step H2
		for len(t)*8 < qlen {
			v = mac(alg, k, v)
			t = append(t, v...)
		}

		// Step H3
		secret := hashToInt(t, curve)
		if secret.Cmp(one) >= 0 && secret.Cmp(q) < 0 {
			return secret
		}
		k = mac(alg, k, append(v, 0x00))
		v = mac(alg, k, v)
	}
}

// mac returns an HMAC of the given key and message.
func mac(alg func() hash.Hash, k, m []byte) []byte {
	h := hmac.New(alg, k)
	h.Write(m)
	return h.Sum(nil)
}

// int2octets converts a big integer into the fixed-length octet string defined
// in https://tools.ietf.org/html/rfc6979#section-2.3.3
func int2octets(v *big.Int, rolen int) []byte {
	out := v.Bytes()

	// left pad with zeros if it's too short
	if len(out) < rolen {
		out2 := make([]byte, rolen)
		copy(out2[rolen-len(out):], out)
		return out2
	}

	// drop most significant bytes if it's too long
	if len(out) > rolen {
		out2 := make([]byte, rolen)
		copy(out2, out[len(out)-rolen:])
		return out2
	}

	return out
}
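
The pad-or-truncate behavior is easiest to see on small inputs. Below is a
standalone copy of the helper (the real one is unexported) with two worked
calls; the values are illustrative only.

```go
package main

import (
	"fmt"
	"math/big"
)

// int2octets mirrors the unexported helper above.
func int2octets(v *big.Int, rolen int) []byte {
	out := v.Bytes()
	if len(out) < rolen {
		out2 := make([]byte, rolen)
		copy(out2[rolen-len(out):], out)
		return out2
	}
	if len(out) > rolen {
		out2 := make([]byte, rolen)
		copy(out2, out[len(out)-rolen:])
		return out2
	}
	return out
}

func main() {
	fmt.Printf("%x\n", int2octets(big.NewInt(0x1234), 4))     // 00001234 (left-padded)
	fmt.Printf("%x\n", int2octets(big.NewInt(0x11223344), 2)) // 3344 (truncated)
}
```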

// bits2octets reduces a hash modulo the curve order and converts it to an
// octet string per https://tools.ietf.org/html/rfc6979#section-2.3.4
func bits2octets(in []byte, curve elliptic.Curve, rolen int) []byte {
	z1 := hashToInt(in, curve)
	z2 := new(big.Int).Sub(z1, curve.Params().N)
	if z2.Sign() < 0 {
		return int2octets(z1, rolen)
	}
	return int2octets(z2, rolen)
}
@@ -1,639 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcec

import (
	"bytes"
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"math/big"
	"testing"
)

type signatureTest struct {
	name    string
	sig     []byte
	der     bool
	isValid bool
}

// decodeHex decodes the passed hex string and returns the resulting bytes. It
// panics if an error occurs. This is only used in the tests as a helper since
// the only way it can fail is if there is an error in the test source code.
func decodeHex(hexStr string) []byte {
	b, err := hex.DecodeString(hexStr)
	if err != nil {
		panic("invalid hex string in test source: err " + err.Error() +
			", hex: " + hexStr)
	}

	return b
}

var signatureTests = []signatureTest{
	// signatures from bitcoin blockchain tx
	// 0437cd7f8525ceed2324359c2d0ba26006d92d85
	{
		name: "valid signature.",
		sig: []byte{0x30, 0x44, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
		},
		der:     true,
		isValid: true,
	},
	{
		name:    "empty.",
		sig:     []byte{},
		isValid: false,
	},
	{
		name: "bad magic.",
		sig: []byte{0x31, 0x44, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
		},
		der:     true,
		isValid: false,
	},
	{
		name: "bad 1st int marker magic.",
		sig: []byte{0x30, 0x44, 0x03, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
		},
		der:     true,
		isValid: false,
	},
	{
		name: "bad 2nd int marker.",
		sig: []byte{0x30, 0x44, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x03, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
		},
		der:     true,
		isValid: false,
	},
	{
		name: "short len",
		sig: []byte{0x30, 0x43, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
		},
		der:     true,
		isValid: false,
	},
	{
		name:    "invalid message length",
		sig:     []byte{0x30, 0x00, 0x02, 0x01, 0x00, 0x02, 0x01, 0x00},
		der:     false,
		isValid: false,
	},
	{
		name:    "too short signature",
		sig:     []byte{0x30, 0x00, 0x02, 0x01, 0x00, 0x02, 0x01},
		der:     false,
		isValid: false,
	},
	{
		name: "long len",
		sig: []byte{0x30, 0x45, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
		},
		der:     true,
		isValid: false,
	},
	{
		name: "long X",
		sig: []byte{0x30, 0x44, 0x02, 0x42, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
		},
		der:     true,
		isValid: false,
	},
	{
		name: "long Y",
		sig: []byte{0x30, 0x44, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x21, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
		},
		der:     true,
		isValid: false,
	},
	{
		name: "short Y",
		sig: []byte{0x30, 0x44, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x19, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
		},
		der:     true,
		isValid: false,
	},
	{
		name: "trailing crap.",
		sig: []byte{0x30, 0x44, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09, 0x01,
		},
		der: true,

		// This test is now passing (used to be failing) because there
		// are signatures in the blockchain that have trailing zero
		// bytes before the hashtype. So ParseSignature was fixed to
		// permit buffers with trailing nonsense after the actual
		// signature.
		isValid: true,
	},
	{
		name: "X == N ",
		sig: []byte{0x30, 0x44, 0x02, 0x20, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFE, 0xBA, 0xAE, 0xDC, 0xE6, 0xAF, 0x48,
			0xA0, 0x3B, 0xBF, 0xD2, 0x5E, 0x8C, 0xD0, 0x36, 0x41,
			0x41, 0x02, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
		},
		der:     true,
		isValid: false,
	},
	{
		name: "X > N",
		sig: []byte{0x30, 0x44, 0x02, 0x20, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFE, 0xBA, 0xAE, 0xDC, 0xE6, 0xAF, 0x48,
			0xA0, 0x3B, 0xBF, 0xD2, 0x5E, 0x8C, 0xD0, 0x36, 0x41,
			0x42, 0x02, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
		},
		der:     false,
		isValid: false,
	},
	{
		name: "Y == N",
		sig: []byte{0x30, 0x44, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x20, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFE, 0xBA, 0xAE, 0xDC, 0xE6, 0xAF, 0x48, 0xA0, 0x3B,
			0xBF, 0xD2, 0x5E, 0x8C, 0xD0, 0x36, 0x41, 0x41,
		},
		der:     true,
		isValid: false,
	},
	{
		name: "Y > N",
		sig: []byte{0x30, 0x44, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x20, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFE, 0xBA, 0xAE, 0xDC, 0xE6, 0xAF, 0x48, 0xA0, 0x3B,
			0xBF, 0xD2, 0x5E, 0x8C, 0xD0, 0x36, 0x41, 0x42,
		},
		der:     false,
		isValid: false,
	},
	{
		name: "0 len X.",
		sig: []byte{0x30, 0x24, 0x02, 0x00, 0x02, 0x20, 0x18, 0x15,
			0x22, 0xec, 0x8e, 0xca, 0x07, 0xde, 0x48, 0x60, 0xa4,
			0xac, 0xdd, 0x12, 0x90, 0x9d, 0x83, 0x1c, 0xc5, 0x6c,
			0xbb, 0xac, 0x46, 0x22, 0x08, 0x22, 0x21, 0xa8, 0x76,
			0x8d, 0x1d, 0x09,
		},
		der:     true,
		isValid: false,
	},
	{
		name: "0 len Y.",
		sig: []byte{0x30, 0x24, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x00,
		},
		der:     true,
		isValid: false,
	},
	{
		name: "extra R padding.",
		sig: []byte{0x30, 0x45, 0x02, 0x21, 0x00, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
		},
		der:     true,
		isValid: false,
	},
	{
		name: "extra S padding.",
		sig: []byte{0x30, 0x45, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x21, 0x00, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca,
			0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90,
			0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22,
			0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
		},
		der:     true,
		isValid: false,
	},
	// Standard checks (in BER format, without checking for 'canonical' DER
	// signatures) don't test for negative numbers here because there isn't
	// a way that is the same between openssl and Go to mark a number as
	// negative. The Go ASN.1 parser marks numbers as negative when openssl
	// does not (as far as I can tell, it doesn't handle negative numbers
	// at all). When not parsing DER signatures, which is what bitcoind
	// does when accepting transactions into its mempool, we otherwise
	// only check for the coordinates being zero.
	{
		name: "X == 0",
		sig: []byte{0x30, 0x25, 0x02, 0x01, 0x00, 0x02, 0x20, 0x18,
			0x15, 0x22, 0xec, 0x8e, 0xca, 0x07, 0xde, 0x48, 0x60,
			0xa4, 0xac, 0xdd, 0x12, 0x90, 0x9d, 0x83, 0x1c, 0xc5,
			0x6c, 0xbb, 0xac, 0x46, 0x22, 0x08, 0x22, 0x21, 0xa8,
			0x76, 0x8d, 0x1d, 0x09,
		},
		der:     false,
		isValid: false,
	},
	{
		name: "Y == 0.",
		sig: []byte{0x30, 0x25, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
			0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1,
			0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6,
			0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd,
			0x41, 0x02, 0x01, 0x00,
		},
		der:     false,
		isValid: false,
	},
}

func TestSignatures(t *testing.T) {
	for _, test := range signatureTests {
		var err error
		if test.der {
			_, err = ParseDERSignature(test.sig, S256())
		} else {
			_, err = ParseSignature(test.sig, S256())
		}
		if err != nil {
			if test.isValid {
				t.Errorf("%s signature failed when shouldn't %v",
					test.name, err)
			} /* else {
				t.Errorf("%s got error %v", test.name, err)
			} */
			continue
		}
		if !test.isValid {
			t.Errorf("%s counted as valid when it should fail",
				test.name)
		}
	}
}

// TestSignatureSerialize ensures that serializing signatures works as expected.
func TestSignatureSerialize(t *testing.T) {
	tests := []struct {
		name     string
		ecsig    *Signature
		expected []byte
	}{
		// signature from bitcoin blockchain tx
		// 0437cd7f8525ceed2324359c2d0ba26006d92d85
		{
			"valid 1 - r and s most significant bits are zero",
			&Signature{
				R: fromHex("4e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd41"),
				S: fromHex("181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d09"),
			},
			[]byte{
				0x30, 0x44, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69,
				0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3,
				0xa1, 0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32,
				0xe9, 0xd6, 0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab,
				0x5f, 0xb8, 0xcd, 0x41, 0x02, 0x20, 0x18, 0x15,
				0x22, 0xec, 0x8e, 0xca, 0x07, 0xde, 0x48, 0x60,
				0xa4, 0xac, 0xdd, 0x12, 0x90, 0x9d, 0x83, 0x1c,
				0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22, 0x08, 0x22,
				0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09,
			},
		},
		// signature from bitcoin blockchain tx
		// cb00f8a0573b18faa8c4f467b049f5d202bf1101d9ef2633bc611be70376a4b4
		{
			"valid 2 - r most significant bit is one",
			&Signature{
				R: fromHex("0082235e21a2300022738dabb8e1bbd9d19cfb1e7ab8c30a23b0afbb8d178abcf3"),
				S: fromHex("24bf68e256c534ddfaf966bf908deb944305596f7bdcc38d69acad7f9c868724"),
			},
			[]byte{
				0x30, 0x45, 0x02, 0x21, 0x00, 0x82, 0x23, 0x5e,
				0x21, 0xa2, 0x30, 0x00, 0x22, 0x73, 0x8d, 0xab,
				0xb8, 0xe1, 0xbb, 0xd9, 0xd1, 0x9c, 0xfb, 0x1e,
				0x7a, 0xb8, 0xc3, 0x0a, 0x23, 0xb0, 0xaf, 0xbb,
				0x8d, 0x17, 0x8a, 0xbc, 0xf3, 0x02, 0x20, 0x24,
				0xbf, 0x68, 0xe2, 0x56, 0xc5, 0x34, 0xdd, 0xfa,
				0xf9, 0x66, 0xbf, 0x90, 0x8d, 0xeb, 0x94, 0x43,
				0x05, 0x59, 0x6f, 0x7b, 0xdc, 0xc3, 0x8d, 0x69,
				0xac, 0xad, 0x7f, 0x9c, 0x86, 0x87, 0x24,
			},
		},
		// signature from bitcoin blockchain tx
		// fda204502a3345e08afd6af27377c052e77f1fefeaeb31bdd45f1e1237ca5470
		{
			"valid 3 - s most significant bit is one",
			&Signature{
				R: fromHex("1cadddc2838598fee7dc35a12b340c6bde8b389f7bfd19a1252a17c4b5ed2d71"),
				S: new(big.Int).Add(fromHex("00c1a251bbecb14b058a8bd77f65de87e51c47e95904f4c0e9d52eddc21c1415ac"), S256().N),
			},
			[]byte{
				0x30, 0x45, 0x02, 0x20, 0x1c, 0xad, 0xdd, 0xc2,
				0x83, 0x85, 0x98, 0xfe, 0xe7, 0xdc, 0x35, 0xa1,
				0x2b, 0x34, 0x0c, 0x6b, 0xde, 0x8b, 0x38, 0x9f,
				0x7b, 0xfd, 0x19, 0xa1, 0x25, 0x2a, 0x17, 0xc4,
				0xb5, 0xed, 0x2d, 0x71, 0x02, 0x21, 0x00, 0xc1,
				0xa2, 0x51, 0xbb, 0xec, 0xb1, 0x4b, 0x05, 0x8a,
				0x8b, 0xd7, 0x7f, 0x65, 0xde, 0x87, 0xe5, 0x1c,
				0x47, 0xe9, 0x59, 0x04, 0xf4, 0xc0, 0xe9, 0xd5,
				0x2e, 0xdd, 0xc2, 0x1c, 0x14, 0x15, 0xac,
			},
		},
		{
			"valid 4 - s is bigger than half order",
			&Signature{
				R: fromHex("a196ed0e7ebcbe7b63fe1d8eecbdbde03a67ceba4fc8f6482bdcb9606a911404"),
				S: fromHex("971729c7fa944b465b35250c6570a2f31acbb14b13d1565fab7330dcb2b3dfb1"),
			},
			[]byte{
				0x30, 0x45, 0x02, 0x21, 0x00, 0xa1, 0x96, 0xed,
				0x0e, 0x7e, 0xbc, 0xbe, 0x7b, 0x63, 0xfe, 0x1d,
				0x8e, 0xec, 0xbd, 0xbd, 0xe0, 0x3a, 0x67, 0xce,
				0xba, 0x4f, 0xc8, 0xf6, 0x48, 0x2b, 0xdc, 0xb9,
				0x60, 0x6a, 0x91, 0x14, 0x04, 0x02, 0x20, 0x68,
				0xe8, 0xd6, 0x38, 0x05, 0x6b, 0xb4, 0xb9, 0xa4,
				0xca, 0xda, 0xf3, 0x9a, 0x8f, 0x5d, 0x0b, 0x9f,
				0xe3, 0x2b, 0x9b, 0x9b, 0x77, 0x49, 0xdc, 0x14,
				0x5f, 0x2d, 0xb0, 0x1d, 0x82, 0x61, 0x90,
			},
		},
		{
			"zero signature",
			&Signature{
				R: big.NewInt(0),
				S: big.NewInt(0),
			},
			[]byte{0x30, 0x06, 0x02, 0x01, 0x00, 0x02, 0x01, 0x00},
		},
	}

	for i, test := range tests {
		result := test.ecsig.Serialize()
		if !bytes.Equal(result, test.expected) {
			t.Errorf("Serialize #%d (%s) unexpected result:\n"+
				"got: %x\nwant: %x", i, test.name, result,
				test.expected)
		}
	}
}
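
Note the "valid 4" case above: the provided S is above the half order, yet the
expected DER bytes encode N - S, because Serialize emits the canonical low-S
form. A small sketch of the same effect, with the import path assumed as in
the earlier examples:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/daglabs/btcd/btcec"
)

func main() {
	n := btcec.S256().N

	// S = N - 1 is above the half order, so Serialize encodes the
	// equivalent low value N - S = 1 instead.
	sig := &btcec.Signature{
		R: big.NewInt(1),
		S: new(big.Int).Sub(n, big.NewInt(1)),
	}
	fmt.Printf("%x\n", sig.Serialize()) // 3006020101020101: both R and S encode as 1
}
```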

func testSignCompact(t *testing.T, tag string, curve *KoblitzCurve,
	data []byte, isCompressed bool) {
	tmp, _ := NewPrivateKey(curve)
	priv := (*PrivateKey)(tmp)

	hashed := []byte("testing")
	sig, err := SignCompact(curve, priv, hashed, isCompressed)
	if err != nil {
		t.Errorf("%s: error signing: %s", tag, err)
		return
	}

	pk, wasCompressed, err := RecoverCompact(curve, sig, hashed)
	if err != nil {
		t.Errorf("%s: error recovering: %s", tag, err)
		return
	}
	if pk.X.Cmp(priv.X) != 0 || pk.Y.Cmp(priv.Y) != 0 {
		t.Errorf("%s: recovered pubkey doesn't match original "+
			"(%v,%v) vs (%v,%v) ", tag, pk.X, pk.Y, priv.X, priv.Y)
		return
	}
	if wasCompressed != isCompressed {
		t.Errorf("%s: recovered pubkey doesn't match compressed state "+
			"(%v vs %v)", tag, isCompressed, wasCompressed)
		return
	}

	// If we change the compressed bit we should get the same key back,
	// but the compressed flag should be reversed.
	if isCompressed {
		sig[0] -= 4
	} else {
		sig[0] += 4
	}

	pk, wasCompressed, err = RecoverCompact(curve, sig, hashed)
	if err != nil {
		t.Errorf("%s: error recovering (2): %s", tag, err)
		return
	}
	if pk.X.Cmp(priv.X) != 0 || pk.Y.Cmp(priv.Y) != 0 {
		t.Errorf("%s: recovered pubkey (2) doesn't match original "+
			"(%v,%v) vs (%v,%v) ", tag, pk.X, pk.Y, priv.X, priv.Y)
		return
	}
	if wasCompressed == isCompressed {
		t.Errorf("%s: recovered pubkey doesn't match reversed "+
			"compressed state (%v vs %v)", tag, isCompressed,
			wasCompressed)
		return
	}
}

func TestSignCompact(t *testing.T) {
	for i := 0; i < 256; i++ {
		name := fmt.Sprintf("test %d", i)
		data := make([]byte, 32)
		_, err := rand.Read(data)
		if err != nil {
			t.Errorf("failed to read random data for %s", name)
			continue
		}
		compressed := i%2 != 0
		testSignCompact(t, name, S256(), data, compressed)
	}
}

func TestRFC6979(t *testing.T) {
	// Test vectors matching Trezor and CoreBitcoin implementations.
	// - https://github.com/trezor/trezor-crypto/blob/9fea8f8ab377dc514e40c6fd1f7c89a74c1d8dc6/tests.c#L432-L453
	// - https://github.com/oleganza/CoreBitcoin/blob/e93dd71207861b5bf044415db5fa72405e7d8fbc/CoreBitcoin/BTCKey%2BTests.m#L23-L49
	tests := []struct {
		key       string
		msg       string
		nonce     string
		signature string
	}{
		{
			"cca9fbcc1b41e5a95d369eaa6ddcff73b61a4efaa279cfc6567e8daa39cbaf50",
			"sample",
			"2df40ca70e639d89528a6b670d9d48d9165fdc0febc0974056bdce192b8e16a3",
			"3045022100af340daf02cc15c8d5d08d7735dfe6b98a474ed373bdb5fbecf7571be52b384202205009fb27f37034a9b24b707b7c6b79ca23ddef9e25f7282e8a797efe53a8f124",
		},
		{
			// This signature hits the case when S is higher than halforder.
			// If S is not canonicalized (lowered by halforder), this test will fail.
			"0000000000000000000000000000000000000000000000000000000000000001",
			"Satoshi Nakamoto",
			"8f8a276c19f4149656b280621e358cce24f5f52542772691ee69063b74f15d15",
			"3045022100934b1ea10a4b3c1757e2b0c017d0b6143ce3c9a7e6a4a49860d7a6ab210ee3d802202442ce9d2b916064108014783e923ec36b49743e2ffa1c4496f01a512aafd9e5",
		},
		{
			"fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140",
			"Satoshi Nakamoto",
			"33a19b60e25fb6f4435af53a3d42d493644827367e6453928554f43e49aa6f90",
			"3045022100fd567d121db66e382991534ada77a6bd3106f0a1098c231e47993447cd6af2d002206b39cd0eb1bc8603e159ef5c20a5c8ad685a45b06ce9bebed3f153d10d93bed5",
		},
		{
			"f8b8af8ce3c7cca5e300d33939540c10d45ce001b8f252bfbc57ba0342904181",
			"Alan Turing",
			"525a82b70e67874398067543fd84c83d30c175fdc45fdeee082fe13b1d7cfdf1",
			"304402207063ae83e7f62bbb171798131b4a0564b956930092b33b07b395615d9ec7e15c022058dfcc1e00a35e1572f366ffe34ba0fc47db1e7189759b9fb233c5b05ab388ea",
		},
		{
			"0000000000000000000000000000000000000000000000000000000000000001",
			"All those moments will be lost in time, like tears in rain. Time to die...",
			"38aa22d72376b4dbc472e06c3ba403ee0a394da63fc58d88686c611aba98d6b3",
			"30450221008600dbd41e348fe5c9465ab92d23e3db8b98b873beecd930736488696438cb6b0220547fe64427496db33bf66019dacbf0039c04199abb0122918601db38a72cfc21",
		},
		{
			"e91671c46231f833a6406ccbea0e3e392c76c167bac1cb013f6f1013980455c2",
			"There is a computer disease that anybody who works with computers knows about. It's a very serious disease and it interferes completely with the work. The trouble with computers is that you 'play' with them!",
			"1f4b84c23a86a221d233f2521be018d9318639d5b8bbd6374a8a59232d16ad3d",
			"3045022100b552edd27580141f3b2a5463048cb7cd3e047b97c9f98076c32dbdf85a68718b0220279fa72dd19bfae05577e06c7c0c1900c371fcd5893f7e1d56a37d30174671f6",
		},
	}

	for i, test := range tests {
		privKey, _ := PrivKeyFromBytes(S256(), decodeHex(test.key))
		hash := sha256.Sum256([]byte(test.msg))

		// Ensure deterministically generated nonce is the expected value.
		gotNonce := nonceRFC6979(privKey.D, hash[:]).Bytes()
		wantNonce := decodeHex(test.nonce)
		if !bytes.Equal(gotNonce, wantNonce) {
			t.Errorf("NonceRFC6979 #%d (%s): Nonce is incorrect: "+
				"%x (expected %x)", i, test.msg, gotNonce,
				wantNonce)
			continue
		}

		// Ensure deterministically generated signature is the expected value.
		gotSig, err := privKey.Sign(hash[:])
		if err != nil {
			t.Errorf("Sign #%d (%s): unexpected error: %v", i,
				test.msg, err)
			continue
		}
		gotSigBytes := gotSig.Serialize()
		wantSigBytes := decodeHex(test.signature)
		if !bytes.Equal(gotSigBytes, wantSigBytes) {
			t.Errorf("Sign #%d (%s): mismatched signature: %x "+
				"(expected %x)", i, test.msg, gotSigBytes,
				wantSigBytes)
			continue
		}
	}
}

func TestSignatureIsEqual(t *testing.T) {
	sig1 := &Signature{
		R: fromHex("0082235e21a2300022738dabb8e1bbd9d19cfb1e7ab8c30a23b0afbb8d178abcf3"),
		S: fromHex("24bf68e256c534ddfaf966bf908deb944305596f7bdcc38d69acad7f9c868724"),
	}
	sig2 := &Signature{
		R: fromHex("4e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd41"),
		S: fromHex("181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d09"),
	}

	if !sig1.IsEqual(sig1) {
		t.Fatalf("value of IsEqual is incorrect, %v is "+
			"equal to %v", sig1, sig1)
	}

	if sig1.IsEqual(sig2) {
		t.Fatalf("value of IsEqual is incorrect, %v is not "+
			"equal to %v", sig1, sig2)
	}
}
@@ -1,70 +0,0 @@
btcjson
=======

[Build Status](https://travis-ci.org/btcsuite/btcd)
[ISC License](http://copyfree.org)
[GoDoc](http://godoc.org/github.com/daglabs/btcd/btcjson)

Package btcjson implements concrete types for marshalling to and from the
bitcoin JSON-RPC API. A comprehensive suite of tests is provided to ensure
proper functionality.

Although this package was primarily written for the btcsuite, it has
intentionally been designed so it can be used as a standalone package for any
projects needing to marshal to and from bitcoin JSON-RPC requests and responses.

Note that although it's possible to use this package directly to implement an
RPC client, it is not recommended since it is only intended as an infrastructure
package. Instead, RPC clients should use the
[btcrpcclient](https://github.com/btcsuite/btcrpcclient) package which provides
a full blown RPC client with many features such as automatic connection
management, websocket support, automatic notification re-registration on
reconnect, and conversion from the raw underlying RPC types (strings, floats,
ints, etc) to higher-level types with many nice and useful properties.

## Installation and Updating

```bash
$ go get -u github.com/daglabs/btcd/btcjson
```

## Examples

* [Marshal Command](http://godoc.org/github.com/daglabs/btcd/btcjson#example-MarshalCmd)
  Demonstrates how to create and marshal a command into a JSON-RPC request.

* [Unmarshal Command](http://godoc.org/github.com/daglabs/btcd/btcjson#example-UnmarshalCmd)
  Demonstrates how to unmarshal a JSON-RPC request and then unmarshal the
  concrete request into a concrete command.

* [Marshal Response](http://godoc.org/github.com/daglabs/btcd/btcjson#example-MarshalResponse)
  Demonstrates how to marshal a JSON-RPC response.

* [Unmarshal Response](http://godoc.org/github.com/daglabs/btcd/btcjson#example-package--UnmarshalResponse)
  Demonstrates how to unmarshal a JSON-RPC response and then unmarshal the
  result field in the response to a concrete type.
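
For a feel of the request format these examples produce, here is a minimal
sketch that builds a command and marshals it; the expected output below is
taken from this package's own test suite:

```go
package main

import (
	"fmt"

	"github.com/daglabs/btcd/btcjson"
)

func main() {
	// Create a getBestBlock command and marshal it into a JSON-RPC
	// request with id 1.
	cmd, err := btcjson.NewCmd("getBestBlock")
	if err != nil {
		panic(err)
	}
	marshalled, err := btcjson.MarshalCmd(1, cmd)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(marshalled))
	// {"jsonrpc":"1.0","method":"getBestBlock","params":[],"id":1}
}
```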

## GPG Verification Key

All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:

- Download the public key from the Conformal website at
  https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt

- Import the public key into your GPG keyring:
  ```bash
  gpg --import GIT-GPG-KEY-conformal.txt
  ```

- Verify the release tag with the following command where `TAG_NAME` is a
  placeholder for the specific tag:
  ```bash
  git tag -v TAG_NAME
  ```

## License

Package btcjson is licensed under the [copyfree](http://copyfree.org) ISC
License.
@@ -1,152 +0,0 @@
// Copyright (c) 2014-2016 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

// NOTE: This file is intended to house the RPC commands that are supported by
// a dag server with btcd extensions.

package btcjson

// NodeSubCmd defines the type used in the `node` JSON-RPC command for the
// sub command field.
type NodeSubCmd string

const (
	// NConnect indicates the specified host should be connected to.
	NConnect NodeSubCmd = "connect"

	// NRemove indicates the specified peer should be removed as a
	// persistent peer.
	NRemove NodeSubCmd = "remove"

	// NDisconnect indicates the specified peer should be disconnected.
	NDisconnect NodeSubCmd = "disconnect"
)

// NodeCmd defines the node JSON-RPC command.
type NodeCmd struct {
	SubCmd        NodeSubCmd `jsonrpcusage:"\"connect|remove|disconnect\""`
	Target        string
	ConnectSubCmd *string `jsonrpcusage:"\"perm|temp\""`
}

// NewNodeCmd returns a new instance which can be used to issue a `node`
// JSON-RPC command.
//
// The parameters which are pointers indicate they are optional. Passing nil
// for optional parameters will use the default value.
func NewNodeCmd(subCmd NodeSubCmd, target string, connectSubCmd *string) *NodeCmd {
	return &NodeCmd{
		SubCmd:        subCmd,
		Target:        target,
		ConnectSubCmd: connectSubCmd,
	}
}
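
A hypothetical caller would pair NewNodeCmd with the package's MarshalCmd and
String helpers; the wire format below matches the expectations in the tests
further down:

```go
package main

import (
	"fmt"

	"github.com/daglabs/btcd/btcjson"
)

func main() {
	// Connect to a peer permanently via the `node` command.
	cmd := btcjson.NewNodeCmd(btcjson.NConnect, "1.1.1.1", btcjson.String("perm"))
	marshalled, err := btcjson.MarshalCmd(1, cmd)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(marshalled))
	// {"jsonrpc":"1.0","method":"node","params":["connect","1.1.1.1","perm"],"id":1}
}
```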

// DebugLevelCmd defines the debugLevel JSON-RPC command. This command is not a
// standard Bitcoin command. It is an extension for btcd.
type DebugLevelCmd struct {
	LevelSpec string
}

// NewDebugLevelCmd returns a new DebugLevelCmd which can be used to issue a
// debugLevel JSON-RPC command. This command is not a standard Bitcoin command.
// It is an extension for btcd.
func NewDebugLevelCmd(levelSpec string) *DebugLevelCmd {
	return &DebugLevelCmd{
		LevelSpec: levelSpec,
	}
}

// GenerateCmd defines the generate JSON-RPC command.
type GenerateCmd struct {
	NumBlocks uint32
}

// NewGenerateCmd returns a new instance which can be used to issue a generate
// JSON-RPC command.
func NewGenerateCmd(numBlocks uint32) *GenerateCmd {
	return &GenerateCmd{
		NumBlocks: numBlocks,
	}
}

// GetBestBlockCmd defines the getBestBlock JSON-RPC command.
type GetBestBlockCmd struct{}

// NewGetBestBlockCmd returns a new instance which can be used to issue a
// getBestBlock JSON-RPC command.
func NewGetBestBlockCmd() *GetBestBlockCmd {
	return &GetBestBlockCmd{}
}

// GetCurrentNetCmd defines the getCurrentNet JSON-RPC command.
type GetCurrentNetCmd struct{}

// NewGetCurrentNetCmd returns a new instance which can be used to issue a
// getCurrentNet JSON-RPC command.
func NewGetCurrentNetCmd() *GetCurrentNetCmd {
	return &GetCurrentNetCmd{}
}

// GetTopHeadersCmd defines the getTopHeaders JSON-RPC command.
type GetTopHeadersCmd struct {
	StartHash *string `json:"startHash"`
}

// NewGetTopHeadersCmd returns a new instance which can be used to issue a
// getTopHeaders JSON-RPC command.
func NewGetTopHeadersCmd(startHash *string) *GetTopHeadersCmd {
	return &GetTopHeadersCmd{
		StartHash: startHash,
	}
}

// GetHeadersCmd defines the getHeaders JSON-RPC command.
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrd/dcrjson.
type GetHeadersCmd struct {
	BlockLocators []string `json:"blockLocators"`
	HashStop      string   `json:"hashStop"`
}

// NewGetHeadersCmd returns a new instance which can be used to issue a
// getHeaders JSON-RPC command.
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrd/dcrjson.
func NewGetHeadersCmd(blockLocators []string, hashStop string) *GetHeadersCmd {
	return &GetHeadersCmd{
		BlockLocators: blockLocators,
		HashStop:      hashStop,
	}
}

// VersionCmd defines the version JSON-RPC command.
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrd/dcrjson.
type VersionCmd struct{}

// NewVersionCmd returns a new instance which can be used to issue a JSON-RPC
// version command.
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrd/dcrjson.
func NewVersionCmd() *VersionCmd { return new(VersionCmd) }

func init() {
	// No special flags for commands in this file.
	flags := UsageFlag(0)

	MustRegisterCmd("debugLevel", (*DebugLevelCmd)(nil), flags)
	MustRegisterCmd("node", (*NodeCmd)(nil), flags)
	MustRegisterCmd("generate", (*GenerateCmd)(nil), flags)
	MustRegisterCmd("getBestBlock", (*GetBestBlockCmd)(nil), flags)
	MustRegisterCmd("getCurrentNet", (*GetCurrentNetCmd)(nil), flags)
	MustRegisterCmd("getHeaders", (*GetHeadersCmd)(nil), flags)
	MustRegisterCmd("getTopHeaders", (*GetTopHeadersCmd)(nil), flags)
	MustRegisterCmd("version", (*VersionCmd)(nil), flags)
}
@@ -1,285 +0,0 @@
// Copyright (c) 2014-2016 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcjson_test

import (
	"bytes"
	"encoding/json"
	"fmt"
	"reflect"
	"testing"

	"github.com/daglabs/btcd/btcjson"
)

// TestBtcdExtCmds tests that all of the btcd extended commands marshal and
// unmarshal into valid results. This includes handling of optional fields
// being omitted in the marshalled command, while optional fields with defaults
// have the default assigned on unmarshalled commands.
func TestBtcdExtCmds(t *testing.T) {
	t.Parallel()

	testID := int(1)
	tests := []struct {
		name         string
		newCmd       func() (interface{}, error)
		staticCmd    func() interface{}
		marshalled   string
		unmarshalled interface{}
	}{
		{
			name: "debugLevel",
			newCmd: func() (interface{}, error) {
				return btcjson.NewCmd("debugLevel", "trace")
			},
			staticCmd: func() interface{} {
				return btcjson.NewDebugLevelCmd("trace")
			},
			marshalled: `{"jsonrpc":"1.0","method":"debugLevel","params":["trace"],"id":1}`,
			unmarshalled: &btcjson.DebugLevelCmd{
				LevelSpec: "trace",
			},
		},
		{
			name: "node",
			newCmd: func() (interface{}, error) {
				return btcjson.NewCmd("node", btcjson.NRemove, "1.1.1.1")
			},
			staticCmd: func() interface{} {
				return btcjson.NewNodeCmd("remove", "1.1.1.1", nil)
			},
			marshalled: `{"jsonrpc":"1.0","method":"node","params":["remove","1.1.1.1"],"id":1}`,
			unmarshalled: &btcjson.NodeCmd{
				SubCmd: btcjson.NRemove,
				Target: "1.1.1.1",
			},
		},
		{
			name: "node",
			newCmd: func() (interface{}, error) {
				return btcjson.NewCmd("node", btcjson.NDisconnect, "1.1.1.1")
			},
			staticCmd: func() interface{} {
				return btcjson.NewNodeCmd("disconnect", "1.1.1.1", nil)
			},
			marshalled: `{"jsonrpc":"1.0","method":"node","params":["disconnect","1.1.1.1"],"id":1}`,
			unmarshalled: &btcjson.NodeCmd{
				SubCmd: btcjson.NDisconnect,
				Target: "1.1.1.1",
			},
		},
		{
			name: "node",
			newCmd: func() (interface{}, error) {
				return btcjson.NewCmd("node", btcjson.NConnect, "1.1.1.1", "perm")
			},
			staticCmd: func() interface{} {
				return btcjson.NewNodeCmd("connect", "1.1.1.1", btcjson.String("perm"))
			},
			marshalled: `{"jsonrpc":"1.0","method":"node","params":["connect","1.1.1.1","perm"],"id":1}`,
			unmarshalled: &btcjson.NodeCmd{
				SubCmd:        btcjson.NConnect,
				Target:        "1.1.1.1",
				ConnectSubCmd: btcjson.String("perm"),
			},
		},
		{
			name: "node",
			newCmd: func() (interface{}, error) {
				return btcjson.NewCmd("node", btcjson.NConnect, "1.1.1.1", "temp")
			},
			staticCmd: func() interface{} {
				return btcjson.NewNodeCmd("connect", "1.1.1.1", btcjson.String("temp"))
			},
			marshalled: `{"jsonrpc":"1.0","method":"node","params":["connect","1.1.1.1","temp"],"id":1}`,
			unmarshalled: &btcjson.NodeCmd{
				SubCmd:        btcjson.NConnect,
				Target:        "1.1.1.1",
				ConnectSubCmd: btcjson.String("temp"),
			},
		},
		{
			name: "generate",
			newCmd: func() (interface{}, error) {
				return btcjson.NewCmd("generate", 1)
			},
			staticCmd: func() interface{} {
				return btcjson.NewGenerateCmd(1)
			},
			marshalled: `{"jsonrpc":"1.0","method":"generate","params":[1],"id":1}`,
			unmarshalled: &btcjson.GenerateCmd{
				NumBlocks: 1,
			},
		},
		{
			name: "getBestBlock",
			newCmd: func() (interface{}, error) {
				return btcjson.NewCmd("getBestBlock")
			},
			staticCmd: func() interface{} {
				return btcjson.NewGetBestBlockCmd()
			},
			marshalled:   `{"jsonrpc":"1.0","method":"getBestBlock","params":[],"id":1}`,
			unmarshalled: &btcjson.GetBestBlockCmd{},
		},
		{
			name: "getCurrentNet",
			newCmd: func() (interface{}, error) {
				return btcjson.NewCmd("getCurrentNet")
			},
			staticCmd: func() interface{} {
				return btcjson.NewGetCurrentNetCmd()
			},
			marshalled:   `{"jsonrpc":"1.0","method":"getCurrentNet","params":[],"id":1}`,
			unmarshalled: &btcjson.GetCurrentNetCmd{},
		},
		{
			name: "getHeaders",
			newCmd: func() (interface{}, error) {
				return btcjson.NewCmd("getHeaders", []string{}, "")
			},
			staticCmd: func() interface{} {
				return btcjson.NewGetHeadersCmd(
					[]string{},
					"",
				)
			},
			marshalled: `{"jsonrpc":"1.0","method":"getHeaders","params":[[],""],"id":1}`,
			unmarshalled: &btcjson.GetHeadersCmd{
				BlockLocators: []string{},
				HashStop:      "",
			},
		},
		{
			name: "getHeaders - with arguments",
			newCmd: func() (interface{}, error) {
				return btcjson.NewCmd("getHeaders", []string{"000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16", "0000000000000000026f4b7f56eef057b32167eb5ad9ff62006f1807b7336d10"}, "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7")
			},
			staticCmd: func() interface{} {
				return btcjson.NewGetHeadersCmd(
					[]string{
						"000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16",
						"0000000000000000026f4b7f56eef057b32167eb5ad9ff62006f1807b7336d10",
					},
					"000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7",
				)
			},
			marshalled: `{"jsonrpc":"1.0","method":"getHeaders","params":[["000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16","0000000000000000026f4b7f56eef057b32167eb5ad9ff62006f1807b7336d10"],"000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"],"id":1}`,
			unmarshalled: &btcjson.GetHeadersCmd{
				BlockLocators: []string{
					"000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16",
					"0000000000000000026f4b7f56eef057b32167eb5ad9ff62006f1807b7336d10",
				},
				HashStop: "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7",
			},
		},
		{
			name: "getTopHeaders",
			newCmd: func() (interface{}, error) {
				return btcjson.NewCmd("getTopHeaders")
			},
			staticCmd: func() interface{} {
				return btcjson.NewGetTopHeadersCmd(
					nil,
				)
			},
			marshalled:   `{"jsonrpc":"1.0","method":"getTopHeaders","params":[],"id":1}`,
			unmarshalled: &btcjson.GetTopHeadersCmd{},
		},
		{
			name: "getTopHeaders - with start hash",
			newCmd: func() (interface{}, error) {
				return btcjson.NewCmd("getTopHeaders", "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7")
			},
			staticCmd: func() interface{} {
				return btcjson.NewGetTopHeadersCmd(
					btcjson.String("000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"),
				)
			},
			marshalled: `{"jsonrpc":"1.0","method":"getTopHeaders","params":["000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"],"id":1}`,
			unmarshalled: &btcjson.GetTopHeadersCmd{
				StartHash: btcjson.String("000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"),
			},
		},
		{
			name: "version",
			newCmd: func() (interface{}, error) {
				return btcjson.NewCmd("version")
			},
			staticCmd: func() interface{} {
				return btcjson.NewVersionCmd()
			},
			marshalled:   `{"jsonrpc":"1.0","method":"version","params":[],"id":1}`,
			unmarshalled: &btcjson.VersionCmd{},
		},
	}

	t.Logf("Running %d tests", len(tests))
	for i, test := range tests {
		// Marshal the command as created by the new static command
		// creation function.
		marshalled, err := btcjson.MarshalCmd(testID, test.staticCmd())
		if err != nil {
			t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i,
				test.name, err)
			continue
		}

		if !bytes.Equal(marshalled, []byte(test.marshalled)) {
			t.Errorf("Test #%d (%s) unexpected marshalled data - "+
				"got %s, want %s", i, test.name, marshalled,
				test.marshalled)
			continue
		}

		// Ensure the command is created without error via the generic
		// new command creation function.
		cmd, err := test.newCmd()
		if err != nil {
			t.Errorf("Test #%d (%s) unexpected NewCmd error: %v ",
				i, test.name, err)
		}

		// Marshal the command as created by the generic new command
		// creation function.
		marshalled, err = btcjson.MarshalCmd(testID, cmd)
		if err != nil {
			t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i,
				test.name, err)
			continue
		}

		if !bytes.Equal(marshalled, []byte(test.marshalled)) {
			t.Errorf("Test #%d (%s) unexpected marshalled data - "+
				"got %s, want %s", i, test.name, marshalled,
				test.marshalled)
			continue
		}

		var request btcjson.Request
		if err := json.Unmarshal(marshalled, &request); err != nil {
			t.Errorf("Test #%d (%s) unexpected error while "+
				"unmarshalling JSON-RPC request: %v", i,
				test.name, err)
			continue
		}

		cmd, err = btcjson.UnmarshalCmd(&request)
		if err != nil {
			t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i,
				test.name, err)
			continue
		}

		if !reflect.DeepEqual(cmd, test.unmarshalled) {
			t.Errorf("Test #%d (%s) unexpected unmarshalled command "+
				"- got %s, want %s", i, test.name,
				fmt.Sprintf("(%T) %+[1]v", cmd),
				fmt.Sprintf("(%T) %+[1]v\n", test.unmarshalled))
			continue
		}
	}
}
@@ -1,20 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Copyright (c) 2015-2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package btcjson

// VersionResult models objects included in the version response. In the actual
// result, these objects are keyed by the program or API name.
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrd/dcrjson.
type VersionResult struct {
	VersionString string `json:"versionString"`
	Major         uint32 `json:"major"`
	Minor         uint32 `json:"minor"`
	Patch         uint32 `json:"patch"`
	Prerelease    string `json:"prerelease"`
	BuildMetadata string `json:"buildMetadata"`
}
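
As the comment notes, the actual version result keys each VersionResult by
program or API name. A minimal decoding sketch follows; the key and values
here are illustrative, not taken from a live node:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// VersionResult mirrors the btcjson type above so the example is
// self-contained.
type VersionResult struct {
	VersionString string `json:"versionString"`
	Major         uint32 `json:"major"`
	Minor         uint32 `json:"minor"`
	Patch         uint32 `json:"patch"`
	Prerelease    string `json:"prerelease"`
	BuildMetadata string `json:"buildMetadata"`
}

func main() {
	// Hypothetical response result keyed by API name.
	raw := `{"btcdjsonrpcapi":{"versionString":"1.0.0","major":1,"minor":0,"patch":0}}`

	result := map[string]VersionResult{}
	if err := json.Unmarshal([]byte(raw), &result); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", result["btcdjsonrpcapi"])
}
```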