Mirror of https://github.com/kaspanet/kaspad.git, synced 2026-02-22 19:45:36 +00:00

Compare commits: v0.0.2-dev...v0.0.1 (218 commits)
| Author | SHA1 | Date |
|---|---|---|
| | cba346d753 | |
| | 0f34cfb1a2 | |
| | ea846a3284 | |
| | 63bfac9740 | |
| | 7284815c21 | |
| | 80307d108b | |
| | 722437afe9 | |
| | 684cf4b5fa | |
| | c95a7b13a6 | |
| | 1ce7f21026 | |
| | 7d7df10493 | |
| | 8179862e0b | |
| | 6828f623b4 | |
| | 2c88a5b2fe | |
| | a7f08598f3 | |
| | 83bad65d3a | |
| | 1f35378a4d | |
| | 39eab7a6d5 | |
| | 9dd025d4da | |
| | bb75ea5020 | |
| | 8dbd4a2bed | |
| | 24305cda68 | |
| | 770dfd147d | |
| | a9ff9b0e70 | |
| | 3cc6f2d648 | |
| | a8f0d7b05b | |
| | 13f06ca293 | |
| | c88fa1492e | |
| | 40657a83f5 | |
| | 44dd58b461 | |
| | 47891b17ab | |
| | f7fbfbf5c4 | |
| | 0e278ca22b | |
| | c66fb294c8 | |
| | 88b7e7ca03 | |
| | a9b659a36f | |
| | 90fc6ba3e7 | |
| | 8ea97aa3fd | |
| | 7c9f5a65d8 | |
| | e2d3c4c821 | |
| | 92578e2853 | |
| | 3018c18616 | |
| | 3ac9fa83c1 | |
| | c5b0398dac | |
| | 76f23d8a9b | |
| | 089cee0e1d | |
| | 982340456d | |
| | 13cf1f7715 | |
| | d99af7424c | |
| | 40ad9c5d2b | |
| | 9dfc3091b4 | |
| | e6a4ed04f3 | |
| | e3aa8d65dc | |
| | ece0fb83e8 | |
| | 683830d574 | |
| | c5108a4abd | |
| | 40342eb45a | |
| | adf4b4380e | |
| | 7371120481 | |
| | 1064b5009d | |
| | 850876e6a7 | |
| | d4083cbdbe | |
| | 47c5eddf38 | |
| | f6a6508eff | |
| | a036618b44 | |
| | 2429b623fc | |
| | f4850b9e7a | |
| | e81ac5f19e | |
| | 31ccedf136 | |
| | 502b510ccd | |
| | 369031f963 | |
| | a789680db1 | |
| | 90bda69931 | |
| | 9647cb3e08 | |
| | 79c9060909 | |
| | 20206789e0 | |
| | 1ddae35277 | |
| | 75a8c6459a | |
| | 7fc2430ab1 | |
| | cf9af0fb5d | |
| | db6d6293c7 | |
| | ae25ec2e6b | |
| | 7521545682 | |
| | 169e96e851 | |
| | 893b8a88c8 | |
| | c60711ab15 | |
| | 1b00e01030 | |
| | f0c80905eb | |
| | b07a118431 | |
| | 0ae06cd277 | |
| | ed9165f533 | |
| | c73113a12e | |
| | 480b2ca07c | |
| | c72b914050 | |
| | 5cf7f01d3f | |
| | 552a5917c2 | |
| | 5c14719f14 | |
| | d2353a189a | |
| | 4fcd705ae3 | |
| | 744c17b4c8 | |
| | e2eca24b33 | |
| | 36d5ac189f | |
| | 1a569c7bd7 | |
| | 6bb53eaae3 | |
| | 747a9bb944 | |
| | d2daf334a5 | |
| | 70737e4e94 | |
| | 5f49115cac | |
| | 534cb2bf5b | |
| | 187c525667 | |
| | 6032727965 | |
| | bb3f23b6dc | |
| | e5485ac5e6 | |
| | 594a209f83 | |
| | 9981ce7adb | |
| | 49ac97c7db | |
| | bfdf7a2cf2 | |
| | 54b681460d | |
| | 2147d16c1f | |
| | 7c1cb47bd0 | |
| | 6acfa18d7c | |
| | f0a675162c | |
| | 7a4deb6f18 | |
| | 96842353de | |
| | 5ce8875ce0 | |
| | 812819e92f | |
| | 5cb536643e | |
| | 4c6b8969d3 | |
| | 8ccc63752c | |
| | 1088b69616 | |
| | 541119dda2 | |
| | 7400eabc6d | |
| | c3c429494f | |
| | 6d20202354 | |
| | d6297a3192 | |
| | e2f8d4e0aa | |
| | 589763e8ec | |
| | c14c64d534 | |
| | f7f44995d6 | |
| | 263737b3fb | |
| | 0c5f3d72bd | |
| | ffd886498a | |
| | 76f5619de7 | |
| | 35703e7956 | |
| | 29231d8d14 | |
| | 396842ae40 | |
| | 072c753323 | |
| | 6250342b86 | |
| | e4b2d869d4 | |
| | ccca580a4b | |
| | 84970a8378 | |
| | 901bde1fd4 | |
| | 33a4183bfa | |
| | 0bc6e5bc92 | |
| | 8323e468da | |
| | 7912fe4c35 | |
| | 266e471941 | |
| | 4e6edd4ffd | |
| | 7069d173c6 | |
| | aa51b5f071 | |
| | da7c9c7dfb | |
| | ec10346e79 | |
| | 2481871c10 | |
| | ac1fd11a42 | |
| | b1d3ca0206 | |
| | 5c5491e1e4 | |
| | 8dedca693e | |
| | ca0619bbcf | |
| | d7a2ab52a1 | |
| | 3b72aafbc6 | |
| | dfd12cdaac | |
| | 08d94c7a47 | |
| | b7b41f1a94 | |
| | 42109ec4d5 | |
| | 39ccc4b225 | |
| | 8acc738b27 | |
| | 945b3f8fbf | |
| | a73f218402 | |
| | eded4c2285 | |
| | 33036278ac | |
| | 6163d3b4ec | |
| | 22046bebc5 | |
| | c67d4507b6 | |
| | ea5e18ea11 | |
| | 1cc479dbf8 | |
| | b4e7b59e7b | |
| | 8592ae9641 | |
| | 1362fc45e0 | |
| | b34894e4da | |
| | 30f5ebd6d1 | |
| | 4292bcac72 | |
| | 8683258e4a | |
| | e9ec8cd39c | |
| | 068a8d117d | |
| | 83a012de12 | |
| | f36ae25baf | |
| | 298cda0617 | |
| | b9e3fff5d1 | |
| | ed76e2c962 | |
| | 77fae7b522 | |
| | cd71e80eb3 | |
| | 3f7c73f331 | |
| | 4845a7f16c | |
| | 77fb901706 | |
| | d3e70810af | |
| | daa4481282 | |
| | a3735da12a | |
| | 311c96122e | |
| | b612426ead | |
| | e99af346bf | |
| | e22bc9af8f | |
| | 89ca293dc1 | |
| | 194ceace6f | |
| | a79c6cecdb | |
| | c5827febf7 | |
| | 7353a49469 | |
| | 1a2166cddf | |
| | 9276494820 | |
.gitignore (vendored): 1 line changed

```diff
@@ -38,6 +38,7 @@ _testmain.go
 .vscode
 debug
 debug.test
+__debug_bin
 
 # CI
 version.txt
```
CHANGES: 4 lines changed

```diff
@@ -513,7 +513,7 @@ Changes in 0.8.0-beta (Sun May 25 2014)
 - Reduce max bytes allowed for a standard nulldata transaction to 40 for
   compatibility with the reference client
 - Introduce a new btcnet package which houses all of the network params
-  for each network (mainnet, testnet3, regtest) to ultimately enable
+  for each network (mainnet, testnet, regtest) to ultimately enable
   easier addition and tweaking of networks without needing to change
   several packages
 - Fix several script discrepancies found by reference client test data
@@ -530,7 +530,7 @@ Changes in 0.8.0-beta (Sun May 25 2014)
 - Provide options to control block template creation settings
 - Support the getwork RPC
 - Allow address identifiers to apply to more than one network since both
-  testnet3 and the regression test network unfortunately use the same
+  testnet and the regression test network unfortunately use the same
   identifier
 - RPC changes:
   - Set the content type for HTTP POST RPC connections to application/json
```
Gopkg.lock (generated): file deleted (105 lines)

```toml
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  name = "bou.ke/monkey"
  packages = ["."]
  revision = "bdf6dea004c6fd1cdf4b25da8ad45a606c09409a"
  version = "v1.0.1"

[[projects]]
  name = "github.com/aead/siphash"
  packages = ["."]
  revision = "83563a290f60225eb120d724600b9690c3fb536f"
  version = "v1.0.1"

[[projects]]
  branch = "master"
  name = "github.com/btcsuite/btclog"
  packages = ["."]
  revision = "84c8d2346e9fc8c7b947e243b9c24e6df9fd206a"

[[projects]]
  branch = "master"
  name = "github.com/btcsuite/go-socks"
  packages = ["socks"]
  revision = "4720035b7bfd2a9bb130b1c184f8bbe41b6f0d0f"

[[projects]]
  name = "github.com/btcsuite/goleveldb"
  packages = ["leveldb","leveldb/cache","leveldb/comparer","leveldb/errors","leveldb/filter","leveldb/iterator","leveldb/journal","leveldb/memdb","leveldb/opt","leveldb/storage","leveldb/table","leveldb/util"]
  revision = "3fd0373267b6461dbefe91cef614278064d05465"
  version = "v1.0.0"

[[projects]]
  name = "github.com/btcsuite/snappy-go"
  packages = ["."]
  revision = "b3db38edf0a9a11a115eb6b022d8c946024a9ac0"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  name = "github.com/btcsuite/websocket"
  packages = ["."]
  revision = "31079b6807923eb23992c421b114992b95131b55"

[[projects]]
  name = "github.com/btcsuite/winsvc"
  packages = ["eventlog","mgr","registry","svc","winapi"]
  revision = "f8fb11f83f7e860e3769a08e6811d1b399a43722"
  version = "v1.0.0"

[[projects]]
  name = "github.com/davecgh/go-spew"
  packages = ["spew"]
  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
  version = "v1.1.1"

[[projects]]
  name = "github.com/jessevdk/go-flags"
  packages = ["."]
  revision = "c6ca198ec95c841fdb89fc0de7496fed11ab854e"
  version = "v1.4.0"

[[projects]]
  name = "github.com/jrick/logrotate"
  packages = ["rotator"]
  revision = "a93b200c26cbae3bb09dd0dc2c7c7fe1468a034a"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  name = "github.com/kkdai/bstream"
  packages = ["."]
  revision = "b3251f7901ec4dd4ec66b3210e8f4bd5c0f1c5a3"

[[projects]]
  name = "github.com/miekg/dns"
  packages = ["."]
  revision = "cc8cd02140663157ce797c6650488d6c8563f31f"
  version = "v1.1.6"

[[projects]]
  branch = "master"
  name = "golang.org/x/crypto"
  packages = ["ed25519","ed25519/internal/edwards25519","ripemd160"]
  revision = "c2843e01d9a2bc60bb26ad24e09734fdc2d9ec58"

[[projects]]
  branch = "master"
  name = "golang.org/x/net"
  packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6"]
  revision = "d8887717615a059821345a5c23649351b52a1c0b"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = ["unix"]
  revision = "fead79001313d15903fb4605b4a1b781532cd93e"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "00392a00928f96fc94e2c8c65ce3a98cc6f5e2f93dda64d3c4502f2f38026e96"
  solver-name = "gps-cdcl"
  solver-version = 1
```
Gopkg.toml: file deleted (78 lines)

```toml
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"


[[constraint]]
  name = "bou.ke/monkey"
  version = "1.0.1"

[[constraint]]
  name = "github.com/aead/siphash"
  version = "1.0.1"

[[constraint]]
  branch = "master"
  name = "github.com/btcsuite/btclog"

[[constraint]]
  branch = "master"
  name = "github.com/btcsuite/go-socks"

[[constraint]]
  name = "github.com/btcsuite/goleveldb"
  version = "1.0.0"

[[constraint]]
  branch = "master"
  name = "github.com/btcsuite/websocket"

[[constraint]]
  name = "github.com/btcsuite/winsvc"
  version = "1.0.0"

[[constraint]]
  name = "github.com/davecgh/go-spew"
  version = "1.1.1"

[[constraint]]
  name = "github.com/jessevdk/go-flags"
  version = "1.4.0"

[[constraint]]
  name = "github.com/jrick/logrotate"
  version = "1.0.0"

[[constraint]]
  branch = "master"
  name = "github.com/kkdai/bstream"

[[constraint]]
  name = "github.com/miekg/dns"
  version = "1.1.6"

[[constraint]]
  branch = "master"
  name = "golang.org/x/crypto"

[prune]
  go-tests = true
  unused-packages = true
```
```diff
@@ -10,7 +10,7 @@ import (
 	"encoding/base32"
 	"encoding/binary"
 	"encoding/json"
-	"fmt"
+	"github.com/pkg/errors"
 	"io"
 	"math/rand"
 	"net"
@@ -24,7 +24,7 @@ import (
 
 	"github.com/daglabs/btcd/util/subnetworkid"
 
-	"github.com/daglabs/btcd/dagconfig/daghash"
+	"github.com/daglabs/btcd/util/daghash"
 	"github.com/daglabs/btcd/wire"
 )
 
@@ -44,6 +44,7 @@ type AddrManager struct {
 	addrNewFullNodes   newBucket
 	addrTried          map[subnetworkid.SubnetworkID]*triedBucket
 	addrTriedFullNodes triedBucket
+	addrTrying         map[*KnownAddress]bool
 	started            int32
 	shutdown           int32
 	wg                 sync.WaitGroup
@@ -160,6 +161,10 @@ const (
 	// will consider evicting an address.
 	minBadDays = 7
 
+	// getAddrMin is the least addresses that we will send in response
+	// to a getAddr. If we have less than this amount, we send everything.
+	getAddrMin = 50
+
 	// getAddrMax is the most addresses that we will send in response
 	// to a getAddr (in practise the most addresses we will return from a
 	// call to AddressCache()).
@@ -560,7 +565,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
 	}
 	r, err := os.Open(filePath)
 	if err != nil {
-		return fmt.Errorf("%s error opening file: %s", filePath, err)
+		return errors.Errorf("%s error opening file: %s", filePath, err)
 	}
 	defer r.Close()
 
@@ -568,11 +573,11 @@ func (a *AddrManager) deserializePeers(filePath string) error {
 	dec := json.NewDecoder(r)
 	err = dec.Decode(&sam)
 	if err != nil {
-		return fmt.Errorf("error reading %s: %s", filePath, err)
+		return errors.Errorf("error reading %s: %s", filePath, err)
 	}
 
 	if sam.Version != serialisationVersion {
-		return fmt.Errorf("unknown version %d in serialized "+
+		return errors.Errorf("unknown version %d in serialized "+
 			"addrmanager", sam.Version)
 	}
 	copy(a.key[:], sam.Key[:])
@@ -581,18 +586,18 @@ func (a *AddrManager) deserializePeers(filePath string) error {
 		ka := new(KnownAddress)
 		ka.na, err = a.DeserializeNetAddress(v.Addr)
 		if err != nil {
-			return fmt.Errorf("failed to deserialize netaddress "+
+			return errors.Errorf("failed to deserialize netaddress "+
 				"%s: %s", v.Addr, err)
 		}
 		ka.srcAddr, err = a.DeserializeNetAddress(v.Src)
 		if err != nil {
-			return fmt.Errorf("failed to deserialize netaddress "+
+			return errors.Errorf("failed to deserialize netaddress "+
 				"%s: %s", v.Src, err)
 		}
 		if v.SubnetworkID != "" {
 			ka.subnetworkID, err = subnetworkid.NewFromStr(v.SubnetworkID)
 			if err != nil {
-				return fmt.Errorf("failed to deserialize subnetwork id "+
+				return errors.Errorf("failed to deserialize subnetwork id "+
 					"%s: %s", v.SubnetworkID, err)
 			}
 		}
@@ -611,7 +616,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
 		for _, val := range subnetworkNewBucket {
 			ka, ok := a.addrIndex[val]
 			if !ok {
-				return fmt.Errorf("newbucket contains %s but "+
+				return errors.Errorf("newbucket contains %s but "+
 					"none in address list", val)
 			}
 
@@ -628,7 +633,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
 	for _, val := range newBucket {
 		ka, ok := a.addrIndex[val]
 		if !ok {
-			return fmt.Errorf("full nodes newbucket contains %s but "+
+			return errors.Errorf("full nodes newbucket contains %s but "+
 				"none in address list", val)
 		}
 
@@ -649,7 +654,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
 		for _, val := range subnetworkTriedBucket {
 			ka, ok := a.addrIndex[val]
 			if !ok {
-				return fmt.Errorf("Tried bucket contains %s but "+
+				return errors.Errorf("Tried bucket contains %s but "+
 					"none in address list", val)
 			}
 
@@ -664,7 +669,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
 	for _, val := range triedBucket {
 		ka, ok := a.addrIndex[val]
 		if !ok {
-			return fmt.Errorf("Full nodes tried bucket contains %s but "+
+			return errors.Errorf("Full nodes tried bucket contains %s but "+
 				"none in address list", val)
 		}
 
@@ -677,12 +682,12 @@ func (a *AddrManager) deserializePeers(filePath string) error {
 	// Sanity checking.
 	for k, v := range a.addrIndex {
 		if v.refs == 0 && !v.tried {
-			return fmt.Errorf("address %s after serialisation "+
+			return errors.Errorf("address %s after serialisation "+
 				"with no references", k)
 		}
 
 		if v.refs > 0 && v.tried {
-			return fmt.Errorf("address %s after serialisation "+
+			return errors.Errorf("address %s after serialisation "+
 				"which is both new and tried!", k)
 		}
 	}
@@ -719,7 +724,7 @@ func (a *AddrManager) Start() {
 
 	// Start the address ticker to save addresses periodically.
 	a.wg.Add(1)
-	go a.addressHandler()
+	spawn(a.addressHandler)
 }
 
 // Stop gracefully shuts down the address manager by stopping the main handler.
@@ -769,11 +774,11 @@ func (a *AddrManager) AddAddressByIP(addrIP string, subnetworkID *subnetworkid.S
 	// Put it in wire.Netaddress
 	ip := net.ParseIP(addr)
 	if ip == nil {
-		return fmt.Errorf("invalid ip address %s", addr)
+		return errors.Errorf("invalid ip address %s", addr)
 	}
 	port, err := strconv.ParseUint(portStr, 10, 0)
 	if err != nil {
-		return fmt.Errorf("invalid port %s: %s", portStr, err)
+		return errors.Errorf("invalid port %s: %s", portStr, err)
 	}
 	na := wire.NewNetAddressIPPort(ip, uint16(port), 0)
 	a.AddAddress(na, na, subnetworkID) // XXX use correct src address
@@ -844,6 +849,12 @@ func (a *AddrManager) AddressCache(includeAllSubnetworks bool, subnetworkID *sub
 	if numAddresses > getAddrMax {
 		numAddresses = getAddrMax
 	}
+	if len(allAddr) < getAddrMin {
+		numAddresses = len(allAddr)
+	}
+	if len(allAddr) > getAddrMin && numAddresses < getAddrMin {
+		numAddresses = getAddrMin
+	}
 
 	// Fisher-Yates shuffle the array. We only need to do the first
 	// `numAddresses' since we are throwing the rest.
@@ -879,6 +890,8 @@ func (a *AddrManager) reset() {
 	}
 	a.nNewFullNodes = 0
 	a.nTriedFullNodes = 0
+
+	a.addrTrying = make(map[*KnownAddress]bool)
 }
 
 // HostToNetAddress returns a netaddress given a host address. If the address
@@ -904,7 +917,7 @@ func (a *AddrManager) HostToNetAddress(host string, port uint16, services wire.S
 			return nil, err
 		}
 		if len(ips) == 0 {
-			return nil, fmt.Errorf("no addresses found for %s", host)
+			return nil, errors.Errorf("no addresses found for %s", host)
 		}
 		ip = ips[0]
 	}
@@ -942,15 +955,25 @@ func (a *AddrManager) GetAddress() *KnownAddress {
 	a.mtx.Lock()
 	defer a.mtx.Unlock()
 
+	var knownAddress *KnownAddress
 	if a.localSubnetworkID == nil {
-		return a.getAddress(&a.addrTriedFullNodes, a.nTriedFullNodes,
+		knownAddress = a.getAddress(&a.addrTriedFullNodes, a.nTriedFullNodes,
 			&a.addrNewFullNodes, a.nNewFullNodes)
+	} else {
+		subnetworkID := *a.localSubnetworkID
+		knownAddress = a.getAddress(a.addrTried[subnetworkID], a.nTried[subnetworkID],
+			a.addrNew[subnetworkID], a.nNew[subnetworkID])
 	}
 
-	subnetworkID := *a.localSubnetworkID
+	if knownAddress != nil {
+		if a.addrTrying[knownAddress] {
+			return nil
+		}
+		a.addrTrying[knownAddress] = true
+	}
 
-	return a.getAddress(a.addrTried[subnetworkID], a.nTried[subnetworkID],
-		a.addrNew[subnetworkID], a.nNew[subnetworkID])
+	return knownAddress
 }
 
 // see GetAddress for details
@@ -1033,6 +1056,8 @@ func (a *AddrManager) Attempt(addr *wire.NetAddress) {
 	// set last tried time to now
 	ka.attempts++
 	ka.lastattempt = time.Now()
+
+	delete(a.addrTrying, ka)
 }
 
 // Connected Marks the given address as currently connected and working at the
@@ -1224,7 +1249,7 @@ func (a *AddrManager) Good(addr *wire.NetAddress, subnetworkID *subnetworkid.Sub
 // with the given priority.
 func (a *AddrManager) AddLocalAddress(na *wire.NetAddress, priority AddressPriority) error {
 	if !IsRoutable(na) {
-		return fmt.Errorf("address %s is not routable", na.IP)
+		return errors.Errorf("address %s is not routable", na.IP)
 	}
 
 	a.lamtx.Lock()
```
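Most of the hunks above replace fmt.Errorf with errors.Errorf from github.com/pkg/errors (note the matching import swap in the first hunk). The two calls format identically, but pkg/errors also records a stack trace at the point the error is created, which %+v can print later. The other functional changes are the new getAddrMin floor on AddressCache responses and the addrTrying map, which lets GetAddress hand out each candidate address only once until Attempt clears it. A minimal, self-contained sketch of the error-wrapping difference (the file name and message here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func openPeersFile(path string) error {
	// Formats like fmt.Errorf, but also captures the call stack here.
	return errors.Errorf("%s error opening file: %s", path, "permission denied")
}

func main() {
	err := openPeersFile("peers.json")
	fmt.Printf("%v\n", err)  // message only
	fmt.Printf("%+v\n", err) // message plus the recorded stack trace
}
```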
```diff
@@ -5,8 +5,11 @@
 package addrmgr
 
 import (
-	"errors"
+	"bou.ke/monkey"
 	"fmt"
+	"github.com/daglabs/btcd/config"
+	"github.com/daglabs/btcd/dagconfig"
+	"github.com/pkg/errors"
 	"net"
 	"reflect"
 	"testing"
@@ -113,7 +116,17 @@ func TestStartStop(t *testing.T) {
 }
 
 func TestAddAddressByIP(t *testing.T) {
-	fmtErr := fmt.Errorf("")
+	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
+		return &config.Config{
+			Flags: &config.Flags{
+				NetworkFlags: config.NetworkFlags{
+					ActiveNetParams: &dagconfig.SimNetParams},
+			},
+		}
+	})
+	defer activeConfigPatch.Unpatch()
+
+	fmtErr := errors.Errorf("")
 	addrErr := &net.AddrError{}
 	var tests = []struct {
 		addrIP string
@@ -157,6 +170,16 @@ func TestAddAddressByIP(t *testing.T) {
 }
 
 func TestAddLocalAddress(t *testing.T) {
+	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
+		return &config.Config{
+			Flags: &config.Flags{
+				NetworkFlags: config.NetworkFlags{
+					ActiveNetParams: &dagconfig.SimNetParams},
+			},
+		}
+	})
+	defer activeConfigPatch.Unpatch()
+
 	var tests = []struct {
 		address  wire.NetAddress
 		priority AddressPriority
@@ -210,6 +233,16 @@ func TestAddLocalAddress(t *testing.T) {
 }
 
 func TestAttempt(t *testing.T) {
+	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
+		return &config.Config{
+			Flags: &config.Flags{
+				NetworkFlags: config.NetworkFlags{
+					ActiveNetParams: &dagconfig.SimNetParams},
+			},
+		}
+	})
+	defer activeConfigPatch.Unpatch()
+
 	n := New("testattempt", lookupFunc, nil)
 
 	// Add a new address and get it
@@ -232,6 +265,16 @@ func TestAttempt(t *testing.T) {
 }
 
 func TestConnected(t *testing.T) {
+	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
+		return &config.Config{
+			Flags: &config.Flags{
+				NetworkFlags: config.NetworkFlags{
+					ActiveNetParams: &dagconfig.SimNetParams},
+			},
+		}
+	})
+	defer activeConfigPatch.Unpatch()
+
 	n := New("testconnected", lookupFunc, nil)
 
 	// Add a new address and get it
@@ -252,6 +295,16 @@ func TestConnected(t *testing.T) {
 }
 
 func TestNeedMoreAddresses(t *testing.T) {
+	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
+		return &config.Config{
+			Flags: &config.Flags{
+				NetworkFlags: config.NetworkFlags{
+					ActiveNetParams: &dagconfig.SimNetParams},
+			},
+		}
+	})
+	defer activeConfigPatch.Unpatch()
+
 	n := New("testneedmoreaddresses", lookupFunc, nil)
 	addrsToAdd := 1500
 	b := n.NeedMoreAddresses()
@@ -284,6 +337,16 @@ func TestNeedMoreAddresses(t *testing.T) {
 }
 
 func TestGood(t *testing.T) {
+	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
+		return &config.Config{
+			Flags: &config.Flags{
+				NetworkFlags: config.NetworkFlags{
+					ActiveNetParams: &dagconfig.SimNetParams},
+			},
+		}
+	})
+	defer activeConfigPatch.Unpatch()
+
 	n := New("testgood", lookupFunc, nil)
 	addrsToAdd := 64 * 64
 	addrs := make([]*wire.NetAddress, addrsToAdd)
@@ -331,6 +394,16 @@ func TestGood(t *testing.T) {
 }
 
 func TestGoodChangeSubnetworkID(t *testing.T) {
+	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
+		return &config.Config{
+			Flags: &config.Flags{
+				NetworkFlags: config.NetworkFlags{
+					ActiveNetParams: &dagconfig.SimNetParams},
+			},
+		}
+	})
+	defer activeConfigPatch.Unpatch()
+
 	n := New("test_good_change_subnetwork_id", lookupFunc, nil)
 	addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
 	addrKey := NetAddressKey(addr)
@@ -400,6 +473,16 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
 }
 
 func TestGetAddress(t *testing.T) {
+	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
+		return &config.Config{
+			Flags: &config.Flags{
+				NetworkFlags: config.NetworkFlags{
+					ActiveNetParams: &dagconfig.SimNetParams},
+			},
+		}
+	})
+	defer activeConfigPatch.Unpatch()
+
 	localSubnetworkID := &subnetworkid.SubnetworkID{0xff}
 	n := New("testgetaddress", lookupFunc, localSubnetworkID)
 
@@ -417,6 +500,7 @@ func TestGetAddress(t *testing.T) {
 	if ka == nil {
 		t.Fatalf("Did not get an address where there is one in the pool")
 	}
+	n.Attempt(ka.NetAddress())
 
 	// Checks that we don't get it if we find that it has other subnetwork ID than expected.
 	actualSubnetworkID := &subnetworkid.SubnetworkID{0xfe}
@@ -449,6 +533,7 @@ func TestGetAddress(t *testing.T) {
 	if !ka.SubnetworkID().IsEqual(localSubnetworkID) {
 		t.Errorf("Wrong Subnetwork ID: got %v, want %v", *ka.SubnetworkID(), localSubnetworkID)
 	}
+	n.Attempt(ka.NetAddress())
 
 	// Mark this as a good address and get it
 	n.Good(ka.NetAddress(), localSubnetworkID)
@@ -470,6 +555,16 @@ func TestGetAddress(t *testing.T) {
 }
 
 func TestGetBestLocalAddress(t *testing.T) {
+	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
+		return &config.Config{
+			Flags: &config.Flags{
+				NetworkFlags: config.NetworkFlags{
+					ActiveNetParams: &dagconfig.SimNetParams},
+			},
+		}
+	})
+	defer activeConfigPatch.Unpatch()
+
 	localAddrs := []wire.NetAddress{
 		{IP: net.ParseIP("192.168.0.100")},
 		{IP: net.ParseIP("::1")},
```
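Each test now begins by stubbing config.ActiveConfig through bou.ke/monkey, which patches the target function at runtime so every test sees a fixed simnet configuration without plumbing a config parameter through the package. A standalone sketch of the same pattern (the patched function here is hypothetical); note that monkey patching is platform-dependent and usually requires disabling inlining, e.g. go test -gcflags=all=-l:

```go
package main

import (
	"fmt"

	"bou.ke/monkey"
)

func currentNetwork() string { return "mainnet" }

func main() {
	// Replace currentNetwork for as long as the guard is active.
	guard := monkey.Patch(currentNetwork, func() string { return "simnet" })
	defer guard.Unpatch()

	fmt.Println(currentNetwork()) // prints "simnet" while patched
}
```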
```diff
@@ -5,15 +5,9 @@
 package addrmgr
 
 import (
-	"github.com/btcsuite/btclog"
 	"github.com/daglabs/btcd/logger"
+	"github.com/daglabs/btcd/util/panics"
 )
 
-// log is a logger that is initialized with no output filters. This
-// means the package will not perform any logging by default until the caller
-// requests it.
-var log btclog.Logger
-
-func init() {
-	log, _ = logger.Get(logger.SubsystemTags.ADXR)
-}
+var log, _ = logger.Get(logger.SubsystemTags.ADXR)
+var spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
```
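The new spawn variable is what AddrManager.Start uses above in place of a bare go statement: panics.GoroutineWrapperFunc evidently produces a goroutine launcher that logs a panic through the subsystem logger before the process dies, so crashes in background goroutines are not lost. A simplified, self-contained sketch of such a wrapper (an illustration, not the repository's implementation):

```go
package main

import (
	"log"
	"runtime/debug"
	"sync"
)

// goroutineWrapper returns a spawn function that runs f on a new
// goroutine and logs any panic, with its stack, before re-panicking.
func goroutineWrapper(logf func(format string, args ...interface{})) func(f func()) {
	return func(f func()) {
		go func() {
			defer func() {
				if r := recover(); r != nil {
					logf("goroutine panic: %v\n%s", r, debug.Stack())
					panic(r)
				}
			}()
			f()
		}()
	}
}

func main() {
	spawn := goroutineWrapper(log.Printf)
	var wg sync.WaitGroup
	wg.Add(1)
	spawn(func() {
		defer wg.Done()
		log.Println("background work ran")
	})
	wg.Wait()
}
```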
```diff
@@ -6,9 +6,10 @@ package addrmgr
 
 import (
 	"fmt"
-	"github.com/daglabs/btcd/config"
 	"net"
 
+	"github.com/daglabs/btcd/config"
+
 	"github.com/daglabs/btcd/wire"
 )
 
@@ -224,8 +225,8 @@ func IsValid(na *wire.NetAddress) bool {
 // the public internet. This is true as long as the address is valid and is not
 // in any reserved ranges.
 func IsRoutable(na *wire.NetAddress) bool {
-	if config.ActiveNetParams().AcceptUnroutable {
-		return true
+	if config.ActiveConfig().NetParams().AcceptUnroutable {
+		return !IsLocal(na)
 	}
 
 	return IsValid(na) && !(IsRFC1918(na) || IsRFC2544(na) ||
```
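The IsRoutable hunk changes behavior as well as the config accessor: on networks configured to accept unroutable addresses, the old code accepted everything, while the new code still rejects the node's own local addresses. A self-contained boolean sketch of the before/after decision (isLocal and isValidAndPublic stand in for the package's IsLocal and RFC-range checks):

```go
package main

import "fmt"

func isRoutable(acceptUnroutable, isLocal, isValidAndPublic bool) bool {
	if acceptUnroutable {
		return !isLocal // previously: return true
	}
	return isValidAndPublic
}

func main() {
	fmt.Println(isRoutable(true, true, false))  // false: a local address is no longer "routable"
	fmt.Println(isRoutable(true, false, false)) // true: everything else still passes
}
```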
```diff
@@ -5,6 +5,9 @@
 package addrmgr_test
 
 import (
+	"bou.ke/monkey"
+	"github.com/daglabs/btcd/config"
+	"github.com/daglabs/btcd/dagconfig"
 	"net"
 	"testing"
 
@@ -15,6 +18,16 @@ import (
 // TestIPTypes ensures the various functions which determine the type of an IP
 // address based on RFCs work as intended.
 func TestIPTypes(t *testing.T) {
+	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
+		return &config.Config{
+			Flags: &config.Flags{
+				NetworkFlags: config.NetworkFlags{
+					ActiveNetParams: &dagconfig.SimNetParams},
+			},
+		}
+	})
+	defer activeConfigPatch.Unpatch()
+
 	type ipTest struct {
 		in      wire.NetAddress
 		rfc1918 bool
@@ -145,6 +158,16 @@ func TestIPTypes(t *testing.T) {
 // TestGroupKey tests the GroupKey function to ensure it properly groups various
 // IP addresses.
 func TestGroupKey(t *testing.T) {
+	activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
+		return &config.Config{
+			Flags: &config.Flags{
+				NetworkFlags: config.NetworkFlags{
+					ActiveNetParams: &dagconfig.SimNetParams},
+			},
+		}
+	})
+	defer activeConfigPatch.Unpatch()
+
 	tests := []struct {
 		name string
 		ip   string
```
apiserver/apimodels/request_types.go (new file, 6 lines)

```go
package apimodels

// RawTransaction represents a raw transaction posted to the API server
type RawTransaction struct {
	RawTransaction string `json:"rawTransaction"`
}
```
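Given the json tag above, the request body that PostTransaction (further down) expects is a single-field object. A self-contained sketch that mirrors the struct (the hex payload is a placeholder):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of apimodels.RawTransaction, reproduced so the sketch compiles on its own.
type RawTransaction struct {
	RawTransaction string `json:"rawTransaction"`
}

func main() {
	body, _ := json.Marshal(RawTransaction{RawTransaction: "0100ab..."})
	fmt.Println(string(body)) // {"rawTransaction":"0100ab..."}
}
```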
apiserver/apimodels/response_types.go (new file, 64 lines)

```go
package apimodels

// TransactionResponse is a json representation of a transaction
type TransactionResponse struct {
	TransactionHash         string                       `json:"transactionHash"`
	TransactionID           string                       `json:"transactionId"`
	AcceptingBlockHash      string                       `json:"acceptingBlockHash,omitempty"`
	AcceptingBlockBlueScore uint64                       `json:"acceptingBlockBlueScore,omitempty"`
	SubnetworkID            string                       `json:"subnetworkId"`
	LockTime                uint64                       `json:"lockTime"`
	Gas                     uint64                       `json:"gas,omitempty"`
	PayloadHash             string                       `json:"payloadHash,omitempty"`
	Payload                 string                       `json:"payload,omitempty"`
	Inputs                  []*TransactionInputResponse  `json:"inputs"`
	Outputs                 []*TransactionOutputResponse `json:"outputs"`
	Mass                    uint64                       `json:"mass"`
}

// TransactionOutputResponse is a json representation of a transaction output
type TransactionOutputResponse struct {
	TransactionID           string  `json:"transactionId,omitempty"`
	Value                   uint64  `json:"value"`
	ScriptPubKey            string  `json:"scriptPubKey"`
	Address                 string  `json:"address,omitempty"`
	AcceptingBlockHash      *string `json:"acceptingBlockHash,omitempty"`
	AcceptingBlockBlueScore uint64  `json:"acceptingBlockBlueScore,omitempty"`
	Index                   uint32  `json:"index"`
	IsCoinbase              *bool   `json:"isCoinbase,omitempty"`
	IsSpendable             *bool   `json:"isSpendable,omitempty"`
	Confirmations           *uint64 `json:"confirmations,omitempty"`
}

// TransactionInputResponse is a json representation of a transaction input
type TransactionInputResponse struct {
	TransactionID                  string `json:"transactionId,omitempty"`
	PreviousTransactionID          string `json:"previousTransactionId"`
	PreviousTransactionOutputIndex uint32 `json:"previousTransactionOutputIndex"`
	SignatureScript                string `json:"signatureScript"`
	Sequence                       uint64 `json:"sequence"`
	Address                        string `json:"address"`
}

// BlockResponse is a json representation of a block
type BlockResponse struct {
	BlockHash            string  `json:"blockHash"`
	Version              int32   `json:"version"`
	HashMerkleRoot       string  `json:"hashMerkleRoot"`
	AcceptedIDMerkleRoot string  `json:"acceptedIDMerkleRoot"`
	UTXOCommitment       string  `json:"utxoCommitment"`
	Timestamp            uint64  `json:"timestamp"`
	Bits                 uint32  `json:"bits"`
	Nonce                uint64  `json:"nonce"`
	AcceptingBlockHash   *string `json:"acceptingBlockHash"`
	BlueScore            uint64  `json:"blueScore"`
	IsChainBlock         bool    `json:"isChainBlock"`
	Mass                 uint64  `json:"mass"`
}

// FeeEstimateResponse is a json representation of a fee estimate
type FeeEstimateResponse struct {
	HighPriority   float64 `json:"highPriority"`
	NormalPriority float64 `json:"normalPriority"`
	LowPriority    float64 `json:"lowPriority"`
}
```
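Several fields above are pointers (*string, *bool, *uint64) rather than plain values. Combined with omitempty, a pointer distinguishes a value that is genuinely absent (nil, dropped from the JSON) from one that is present but zero or false. A self-contained sketch with a trimmed-down copy of TransactionOutputResponse:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed mirror of apimodels.TransactionOutputResponse for illustration.
type output struct {
	Value         uint64  `json:"value"`
	IsSpendable   *bool   `json:"isSpendable,omitempty"`
	Confirmations *uint64 `json:"confirmations,omitempty"`
}

func main() {
	spendable := false
	confirmations := uint64(0)

	// omitempty on a pointer checks only nil-ness, so even false/zero
	// values survive when the pointer is set.
	with, _ := json.Marshal(output{Value: 5000, IsSpendable: &spendable, Confirmations: &confirmations})
	without, _ := json.Marshal(output{Value: 5000})

	fmt.Println(string(with))    // {"value":5000,"isSpendable":false,"confirmations":0}
	fmt.Println(string(without)) // {"value":5000}
}
```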
apiserver/config/config.go (new file, 98 lines)

```go
package config

import (
	"github.com/daglabs/btcd/apiserver/logger"
	"github.com/daglabs/btcd/config"
	"github.com/daglabs/btcd/util"
	"github.com/jessevdk/go-flags"
	"github.com/pkg/errors"
	"path/filepath"
)

const (
	defaultLogFilename    = "apiserver.log"
	defaultErrLogFilename = "apiserver_err.log"
)

var (
	// Default configuration options
	defaultLogDir     = util.AppDataDir("apiserver", false)
	defaultDBAddress  = "localhost:3306"
	defaultHTTPListen = "0.0.0.0:8080"
	activeConfig      *Config
)

// ActiveConfig returns the active configuration struct
func ActiveConfig() *Config {
	return activeConfig
}

// Config defines the configuration options for the API server.
type Config struct {
	LogDir            string `long:"logdir" description:"Directory to log output."`
	RPCUser           string `short:"u" long:"rpcuser" description:"RPC username"`
	RPCPassword       string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
	RPCServer         string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
	RPCCert           string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
	DisableTLS        bool   `long:"notls" description:"Disable TLS"`
	DBAddress         string `long:"dbaddress" description:"Database address"`
	DBUser            string `long:"dbuser" description:"Database user" required:"true"`
	DBPassword        string `long:"dbpass" description:"Database password" required:"true"`
	DBName            string `long:"dbname" description:"Database name" required:"true"`
	HTTPListen        string `long:"listen" description:"HTTP address to listen on (default: 0.0.0.0:8080)"`
	Migrate           bool   `long:"migrate" description:"Migrate the database to the latest version. The server will not start when using this flag."`
	MQTTBrokerAddress string `long:"mqttaddress" description:"MQTT broker address" required:"false"`
	MQTTUser          string `long:"mqttuser" description:"MQTT server user" required:"false"`
	MQTTPassword      string `long:"mqttpass" description:"MQTT server password" required:"false"`
	config.NetworkFlags
}

// Parse parses the CLI arguments and returns a config struct.
func Parse() (*Config, error) {
	activeConfig = &Config{
		LogDir:     defaultLogDir,
		DBAddress:  defaultDBAddress,
		HTTPListen: defaultHTTPListen,
	}
	parser := flags.NewParser(activeConfig, flags.PrintErrors|flags.HelpFlag)
	_, err := parser.Parse()
	if err != nil {
		return nil, err
	}

	if !activeConfig.Migrate {
		if activeConfig.RPCUser == "" {
			return nil, errors.New("--rpcuser is required if --migrate flag is not used")
		}
		if activeConfig.RPCPassword == "" {
			return nil, errors.New("--rpcpass is required if --migrate flag is not used")
		}
		if activeConfig.RPCServer == "" {
			return nil, errors.New("--rpcserver is required if --migrate flag is not used")
		}
	}

	if activeConfig.RPCCert == "" && !activeConfig.DisableTLS {
		return nil, errors.New("--rpccert is required if --notls is not used")
	}

	if activeConfig.RPCCert != "" && activeConfig.DisableTLS {
		return nil, errors.New("--rpccert should be omitted if --notls is used")
	}

	if (activeConfig.MQTTBrokerAddress != "" || activeConfig.MQTTUser != "" || activeConfig.MQTTPassword != "") &&
		(activeConfig.MQTTBrokerAddress == "" || activeConfig.MQTTUser == "" || activeConfig.MQTTPassword == "") {
		return nil, errors.New("--mqttaddress, --mqttuser, and --mqttpass must be passed all together")
	}

	err = activeConfig.ResolveNetwork(parser)
	if err != nil {
		return nil, err
	}

	logFile := filepath.Join(activeConfig.LogDir, defaultLogFilename)
	errLogFile := filepath.Join(activeConfig.LogDir, defaultErrLogFilename)
	logger.InitLog(logFile, errLogFile)

	return activeConfig, nil
}
```
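Given the flag tags and validation rules above, an invocation might look like the following. Every value here is a placeholder, and the network flag assumes the usual btcd-style selectors contributed by config.NetworkFlags:

```sh
# First run: migrate the schema (with --migrate the server exits instead of serving).
./apiserver --migrate --dbuser=apiserver --dbpass=secret --dbname=apiserver --testnet

# Second run: serve. RPC credentials are mandatory in this mode, and either
# --rpccert or --notls must be chosen.
./apiserver --rpcuser=user --rpcpass=pass --rpcserver=localhost:18334 --notls \
    --dbuser=apiserver --dbpass=secret --dbname=apiserver --testnet
```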
apiserver/controllers/block.go (new file, 82 lines)

```go
package controllers

import (
	"encoding/hex"
	"net/http"

	"github.com/daglabs/btcd/apiserver/apimodels"
	"github.com/daglabs/btcd/apiserver/dbmodels"
	"github.com/daglabs/btcd/httpserverutils"
	"github.com/pkg/errors"

	"github.com/daglabs/btcd/apiserver/database"
	"github.com/daglabs/btcd/util/daghash"
)

const (
	// OrderAscending is a parameter that can be used
	// in a get list handler to get a list ordered
	// in an ascending order.
	OrderAscending = "asc"

	// OrderDescending is a parameter that can be used
	// in a get list handler to get a list ordered
	// in a descending order.
	OrderDescending = "desc"
)

const maxGetBlocksLimit = 100

// GetBlockByHashHandler returns a block by a given hash.
func GetBlockByHashHandler(blockHash string) (interface{}, error) {
	if bytes, err := hex.DecodeString(blockHash); err != nil || len(bytes) != daghash.HashSize {
		return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
			errors.Errorf("The given block hash is not a hex-encoded %d-byte hash.", daghash.HashSize))
	}

	db, err := database.DB()
	if err != nil {
		return nil, err
	}

	block := &dbmodels.Block{}
	dbResult := db.Where(&dbmodels.Block{BlockHash: blockHash}).Preload("AcceptingBlock").First(block)
	dbErrors := dbResult.GetErrors()
	if httpserverutils.IsDBRecordNotFoundError(dbErrors) {
		return nil, httpserverutils.NewHandlerError(http.StatusNotFound, errors.New("No block with the given block hash was found"))
	}
	if httpserverutils.HasDBError(dbErrors) {
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transactions from the database:",
			dbResult.GetErrors())
	}
	return convertBlockModelToBlockResponse(block), nil
}

// GetBlocksHandler searches for all blocks
func GetBlocksHandler(order string, skip uint64, limit uint64) (interface{}, error) {
	if limit > maxGetBlocksLimit {
		return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Errorf("The maximum allowed value for the limit is %d", maxGetBlocksLimit))
	}
	blocks := []*dbmodels.Block{}
	db, err := database.DB()
	if err != nil {
		return nil, err
	}
	query := db.
		Limit(limit).
		Offset(skip).
		Preload("AcceptingBlock")
	if order == OrderAscending {
		query = query.Order("`id` ASC")
	} else if order == OrderDescending {
		query = query.Order("`id` DESC")
	} else {
		return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Errorf("'%s' is not a valid order", order))
	}
	query.Find(&blocks)
	blockResponses := make([]*apimodels.BlockResponse, len(blocks))
	for i, block := range blocks {
		blockResponses[i] = convertBlockModelToBlockResponse(block)
	}
	return blockResponses, nil
}
```
apiserver/controllers/common.go (new file, 63 lines)

```go
package controllers

import (
	"encoding/hex"
	"github.com/daglabs/btcd/apiserver/apimodels"
	"github.com/daglabs/btcd/apiserver/dbmodels"
	"github.com/daglabs/btcd/btcjson"
)

func convertTxDBModelToTxResponse(tx *dbmodels.Transaction) *apimodels.TransactionResponse {
	txRes := &apimodels.TransactionResponse{
		TransactionHash:         tx.TransactionHash,
		TransactionID:           tx.TransactionID,
		AcceptingBlockHash:      tx.AcceptingBlock.BlockHash,
		AcceptingBlockBlueScore: tx.AcceptingBlock.BlueScore,
		SubnetworkID:            tx.Subnetwork.SubnetworkID,
		LockTime:                tx.LockTime,
		Gas:                     tx.Gas,
		PayloadHash:             tx.PayloadHash,
		Payload:                 hex.EncodeToString(tx.Payload),
		Inputs:                  make([]*apimodels.TransactionInputResponse, len(tx.TransactionInputs)),
		Outputs:                 make([]*apimodels.TransactionOutputResponse, len(tx.TransactionOutputs)),
		Mass:                    tx.Mass,
	}
	for i, txOut := range tx.TransactionOutputs {
		txRes.Outputs[i] = &apimodels.TransactionOutputResponse{
			Value:        txOut.Value,
			ScriptPubKey: hex.EncodeToString(txOut.ScriptPubKey),
			Address:      txOut.Address.Address,
			Index:        txOut.Index,
		}
	}
	for i, txIn := range tx.TransactionInputs {
		txRes.Inputs[i] = &apimodels.TransactionInputResponse{
			PreviousTransactionID:          txIn.PreviousTransactionOutput.Transaction.TransactionID,
			PreviousTransactionOutputIndex: txIn.PreviousTransactionOutput.Index,
			SignatureScript:                hex.EncodeToString(txIn.SignatureScript),
			Sequence:                       txIn.Sequence,
			Address:                        txIn.PreviousTransactionOutput.Address.Address,
		}
	}
	return txRes
}

func convertBlockModelToBlockResponse(block *dbmodels.Block) *apimodels.BlockResponse {
	blockRes := &apimodels.BlockResponse{
		BlockHash:            block.BlockHash,
		Version:              block.Version,
		HashMerkleRoot:       block.HashMerkleRoot,
		AcceptedIDMerkleRoot: block.AcceptedIDMerkleRoot,
		UTXOCommitment:       block.UTXOCommitment,
		Timestamp:            uint64(block.Timestamp.Unix()),
		Bits:                 block.Bits,
		Nonce:                block.Nonce,
		BlueScore:            block.BlueScore,
		IsChainBlock:         block.IsChainBlock,
		Mass:                 block.Mass,
	}
	if block.AcceptingBlock != nil {
		blockRes.AcceptingBlockHash = btcjson.String(block.AcceptingBlock.BlockHash)
	}
	return blockRes
}
```
apiserver/controllers/feeestimate.go (new file, 16 lines)

```go
package controllers

import (
	"github.com/daglabs/btcd/apiserver/apimodels"
	"github.com/daglabs/btcd/httpserverutils"
)

// GetFeeEstimatesHandler returns the fee estimates for different priorities
// for accepting a transaction in the DAG.
func GetFeeEstimatesHandler() (interface{}, *httpserverutils.HandlerError) {
	return &apimodels.FeeEstimateResponse{
		HighPriority:   3,
		NormalPriority: 2,
		LowPriority:    1,
	}, nil
}
```
apiserver/controllers/transaction.go (new file, 276 lines)

```go
package controllers

import (
	"bytes"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/daglabs/btcd/apiserver/apimodels"
	"github.com/daglabs/btcd/apiserver/config"
	"github.com/daglabs/btcd/apiserver/dbmodels"
	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/httpserverutils"
	"github.com/daglabs/btcd/util/subnetworkid"
	"github.com/pkg/errors"

	"github.com/daglabs/btcd/apiserver/database"
	"github.com/daglabs/btcd/apiserver/jsonrpc"
	"github.com/daglabs/btcd/btcjson"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/wire"
	"github.com/jinzhu/gorm"
)

const maxGetTransactionsLimit = 1000

// GetTransactionByIDHandler returns a transaction by a given transaction ID.
func GetTransactionByIDHandler(txID string) (interface{}, error) {
	if bytes, err := hex.DecodeString(txID); err != nil || len(bytes) != daghash.TxIDSize {
		return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
			errors.Errorf("The given txid is not a hex-encoded %d-byte hash.", daghash.TxIDSize))
	}

	db, err := database.DB()
	if err != nil {
		return nil, err
	}

	tx := &dbmodels.Transaction{}
	query := db.Where(&dbmodels.Transaction{TransactionID: txID})
	dbResult := addTxPreloadedFields(query).First(&tx)
	dbErrors := dbResult.GetErrors()
	if httpserverutils.IsDBRecordNotFoundError(dbErrors) {
		return nil, httpserverutils.NewHandlerError(http.StatusNotFound, errors.New("No transaction with the given txid was found"))
	}
	if httpserverutils.HasDBError(dbErrors) {
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transaction from the database:", dbErrors)
	}
	return convertTxDBModelToTxResponse(tx), nil
}

// GetTransactionByHashHandler returns a transaction by a given transaction hash.
func GetTransactionByHashHandler(txHash string) (interface{}, error) {
	if bytes, err := hex.DecodeString(txHash); err != nil || len(bytes) != daghash.HashSize {
		return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
			errors.Errorf("The given txhash is not a hex-encoded %d-byte hash.", daghash.HashSize))
	}

	db, err := database.DB()
	if err != nil {
		return nil, err
	}

	tx := &dbmodels.Transaction{}
	query := db.Where(&dbmodels.Transaction{TransactionHash: txHash})
	dbResult := addTxPreloadedFields(query).First(&tx)
	dbErrors := dbResult.GetErrors()
	if httpserverutils.IsDBRecordNotFoundError(dbErrors) {
		return nil, httpserverutils.NewHandlerError(http.StatusNotFound, errors.Errorf("No transaction with the given txhash was found."))
	}
	if httpserverutils.HasDBError(dbErrors) {
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transaction from the database:", dbErrors)
	}
	return convertTxDBModelToTxResponse(tx), nil
}

// GetTransactionsByAddressHandler searches for all transactions
// where the given address is either an input or an output.
func GetTransactionsByAddressHandler(address string, skip uint64, limit uint64) (interface{}, error) {
	if limit > maxGetTransactionsLimit {
		return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
			errors.Errorf("The maximum allowed value for the limit is %d", maxGetTransactionsLimit))
	}

	db, err := database.DB()
	if err != nil {
		return nil, err
	}

	txs := []*dbmodels.Transaction{}
	query := db.
		Joins("LEFT JOIN `transaction_outputs` ON `transaction_outputs`.`transaction_id` = `transactions`.`id`").
		Joins("LEFT JOIN `addresses` AS `out_addresses` ON `out_addresses`.`id` = `transaction_outputs`.`address_id`").
		Joins("LEFT JOIN `transaction_inputs` ON `transaction_inputs`.`transaction_id` = `transactions`.`id`").
		Joins("LEFT JOIN `transaction_outputs` AS `inputs_outs` ON `inputs_outs`.`id` = `transaction_inputs`.`previous_transaction_output_id`").
		Joins("LEFT JOIN `addresses` AS `in_addresses` ON `in_addresses`.`id` = `inputs_outs`.`address_id`").
		Where("`out_addresses`.`address` = ?", address).
		Or("`in_addresses`.`address` = ?", address).
		Limit(limit).
		Offset(skip).
		Order("`transactions`.`id` ASC")
	dbResult := addTxPreloadedFields(query).Find(&txs)
	dbErrors := dbResult.GetErrors()
	if httpserverutils.HasDBError(dbErrors) {
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transactions from the database:", dbErrors)
	}
	txResponses := make([]*apimodels.TransactionResponse, len(txs))
	for i, tx := range txs {
		txResponses[i] = convertTxDBModelToTxResponse(tx)
	}
	return txResponses, nil
}

func fetchSelectedTip() (*dbmodels.Block, error) {
	db, err := database.DB()
	if err != nil {
		return nil, err
	}
	block := &dbmodels.Block{}
	dbResult := db.Order("blue_score DESC").
		Where(&dbmodels.Block{IsChainBlock: true}).
		First(block)
	dbErrors := dbResult.GetErrors()
	if httpserverutils.HasDBError(dbErrors) {
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transactions from the database:", dbErrors)
	}
	return block, nil
}

func areTxsInBlock(blockID uint64, txIDs []uint64) (map[uint64]bool, error) {
	db, err := database.DB()
	if err != nil {
		return nil, err
	}
	transactionBlocks := []*dbmodels.TransactionBlock{}
	dbErrors := db.
		Where(&dbmodels.TransactionBlock{BlockID: blockID}).
		Where("transaction_id in (?)", txIDs).
		Find(&transactionBlocks).GetErrors()

	if len(dbErrors) > 0 {
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading UTXOs from the database:", dbErrors)
	}

	isInBlock := make(map[uint64]bool)
	for _, transactionBlock := range transactionBlocks {
		isInBlock[transactionBlock.TransactionID] = true
	}
	return isInBlock, nil
}

// GetUTXOsByAddressHandler searches for all UTXOs that belong to a certain address.
func GetUTXOsByAddressHandler(address string) (interface{}, error) {
	db, err := database.DB()
	if err != nil {
		return nil, err
	}

	var transactionOutputs []*dbmodels.TransactionOutput
	dbErrors := db.
		Joins("LEFT JOIN `addresses` ON `addresses`.`id` = `transaction_outputs`.`address_id`").
		Where("`addresses`.`address` = ? AND `transaction_outputs`.`is_spent` = 0", address).
		Preload("Transaction.AcceptingBlock").
		Preload("Transaction.Subnetwork").
		Find(&transactionOutputs).GetErrors()
	if len(dbErrors) > 0 {
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading UTXOs from the database:", dbErrors)
	}

	nonAcceptedTxIds := make([]uint64, len(transactionOutputs))
	for i, txOut := range transactionOutputs {
		if txOut.Transaction.AcceptingBlock == nil {
			nonAcceptedTxIds[i] = txOut.TransactionID
		}
	}

	var selectedTip *dbmodels.Block
	var isTxInSelectedTip map[uint64]bool
	if len(nonAcceptedTxIds) != 0 {
		selectedTip, err = fetchSelectedTip()
		if err != nil {
			return nil, err
		}

		isTxInSelectedTip, err = areTxsInBlock(selectedTip.ID, nonAcceptedTxIds)
		if err != nil {
			return nil, err
		}
	}

	activeNetParams := config.ActiveConfig().NetParams()

	UTXOsResponses := make([]*apimodels.TransactionOutputResponse, len(transactionOutputs))
	for i, transactionOutput := range transactionOutputs {
		subnetworkID := &subnetworkid.SubnetworkID{}
		err := subnetworkid.Decode(subnetworkID, transactionOutput.Transaction.Subnetwork.SubnetworkID)
		if err != nil {
			return nil, errors.Wrap(err, fmt.Sprintf("Couldn't decode subnetwork id %s", transactionOutput.Transaction.Subnetwork.SubnetworkID))
		}
		var acceptingBlockHash *string
		var confirmations uint64
		acceptingBlockBlueScore := blockdag.UnacceptedBlueScore
		if isTxInSelectedTip[transactionOutput.ID] {
			confirmations = 1
		} else if transactionOutput.Transaction.AcceptingBlock != nil {
			acceptingBlockHash = btcjson.String(transactionOutput.Transaction.AcceptingBlock.BlockHash)
			acceptingBlockBlueScore = transactionOutput.Transaction.AcceptingBlock.BlueScore
			confirmations = selectedTip.BlueScore - acceptingBlockBlueScore + 2
		}
		isCoinbase := subnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)
		UTXOsResponses[i] = &apimodels.TransactionOutputResponse{
			TransactionID:           transactionOutput.Transaction.TransactionID,
			Value:                   transactionOutput.Value,
			ScriptPubKey:            hex.EncodeToString(transactionOutput.ScriptPubKey),
			AcceptingBlockHash:      acceptingBlockHash,
			AcceptingBlockBlueScore: acceptingBlockBlueScore,
			Index:                   transactionOutput.Index,
			IsCoinbase:              btcjson.Bool(isCoinbase),
			Confirmations:           btcjson.Uint64(confirmations),
			IsSpendable:             btcjson.Bool(!isCoinbase || confirmations >= activeNetParams.BlockCoinbaseMaturity),
		}
	}
	return UTXOsResponses, nil
}

func addTxPreloadedFields(query *gorm.DB) *gorm.DB {
	return query.Preload("AcceptingBlock").
		Preload("Subnetwork").
		Preload("TransactionOutputs").
		Preload("TransactionOutputs.Address").
		Preload("TransactionInputs.PreviousTransactionOutput.Transaction").
		Preload("TransactionInputs.PreviousTransactionOutput.Address")
}

// PostTransaction forwards a raw transaction to the JSON-RPC API server
func PostTransaction(requestBody []byte) error {
	client, err := jsonrpc.GetClient()
	if err != nil {
		return err
	}

	rawTx := &apimodels.RawTransaction{}
	err = json.Unmarshal(requestBody, rawTx)
	if err != nil {
		return httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
			errors.Wrap(err, "Error unmarshalling request body"),
			"The request body is not json-formatted")
	}

	txBytes, err := hex.DecodeString(rawTx.RawTransaction)
	if err != nil {
		return httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
			errors.Wrap(err, "Error decoding hex raw transaction"),
			"The raw transaction is not a hex-encoded transaction")
	}

	txReader := bytes.NewReader(txBytes)
	tx := &wire.MsgTx{}
	err = tx.BtcDecode(txReader, 0)
	if err != nil {
		return httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
			errors.Wrap(err, "Error decoding raw transaction"),
			"Error decoding raw transaction")
	}

	_, err = client.SendRawTransaction(tx, true)
	if err != nil {
		if rpcErr, ok := err.(*btcjson.RPCError); ok && rpcErr.Code == btcjson.ErrRPCVerify {
			return httpserverutils.NewHandlerError(http.StatusInternalServerError, err)
		}
		return err
	}

	return nil
}
```
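The confirmation arithmetic in GetUTXOsByAddressHandler is worth unpacking: an output whose transaction is only in the selected tip gets 1 confirmation; an accepted output gets selectedTip.BlueScore - acceptingBlockBlueScore + 2, which appears to count both the accepting block and the selected tip; and a coinbase output is reported spendable only once its confirmations reach the network's BlockCoinbaseMaturity. A worked example with placeholder numbers:

```go
package main

import "fmt"

func main() {
	selectedTipBlueScore := uint64(1000)
	acceptingBlockBlueScore := uint64(990)

	// Accepted output: 1000 - 990 + 2 = 12 confirmations.
	confirmations := selectedTipBlueScore - acceptingBlockBlueScore + 2
	fmt.Println(confirmations) // 12

	// Coinbase outputs stay unspendable until maturity; 100 is a
	// placeholder for the active network's BlockCoinbaseMaturity.
	blockCoinbaseMaturity := uint64(100)
	fmt.Println(confirmations >= blockCoinbaseMaturity) // false: still immature
}
```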
142
apiserver/database/database.go
Normal file
142
apiserver/database/database.go
Normal file
@@ -0,0 +1,142 @@
package database

import (
	nativeerrors "errors"
	"fmt"
	"github.com/pkg/errors"
	"os"

	"github.com/daglabs/btcd/apiserver/config"
	"github.com/golang-migrate/migrate/v4/source"
	"github.com/jinzhu/gorm"

	"github.com/golang-migrate/migrate/v4"
)

// db is the API server database.
var db *gorm.DB

// DB returns a reference to the database connection
func DB() (*gorm.DB, error) {
	if db == nil {
		return nil, errors.New("Database is not connected")
	}
	return db, nil
}

type gormLogger struct{}

func (l gormLogger) Print(v ...interface{}) {
	str := fmt.Sprint(v...)
	log.Errorf(str)
}

// Connect connects to the database specified in
// the config variable.
func Connect() error {
	connectionString := buildConnectionString()
	migrator, driver, err := openMigrator(connectionString)
	if err != nil {
		return err
	}
	isCurrent, version, err := isCurrent(migrator, driver)
	if err != nil {
		return errors.Errorf("Error checking whether the database is current: %s", err)
	}
	if !isCurrent {
		return errors.Errorf("Database is not current (version %d). Please migrate"+
			" the database by running the server with --migrate flag and then run it again.", version)
	}

	db, err = gorm.Open("mysql", connectionString)
	if err != nil {
		return err
	}

	db.SetLogger(gormLogger{})
	return nil
}

// Close closes the connection to the database
func Close() error {
	if db == nil {
		return nil
	}
	err := db.Close()
	db = nil
	return err
}

func buildConnectionString() string {
	cfg := config.ActiveConfig()
	return fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True",
		cfg.DBUser, cfg.DBPassword, cfg.DBAddress, cfg.DBName)
}

// isCurrent resolves whether the database is on the latest
// version of the schema.
func isCurrent(migrator *migrate.Migrate, driver source.Driver) (bool, uint, error) {
	// Get the current version
	version, isDirty, err := migrator.Version()
	if nativeerrors.Is(err, migrate.ErrNilVersion) {
		return false, 0, nil
	}
	if err != nil {
		return false, 0, errors.WithStack(err)
	}
	if isDirty {
		return false, 0, errors.Errorf("Database is dirty")
	}

	// The database is current if Next returns ErrNotExist
	_, err = driver.Next(version)
	if pathErr, ok := err.(*os.PathError); ok {
		if pathErr.Err == os.ErrNotExist {
			return true, version, nil
		}
	}
	return false, version, err
}

func openMigrator(connectionString string) (*migrate.Migrate, source.Driver, error) {
	driver, err := source.Open("file://migrations")
	if err != nil {
		return nil, nil, err
	}
	migrator, err := migrate.NewWithSourceInstance(
		"migrations", driver, "mysql://"+connectionString)
	if err != nil {
		return nil, nil, err
	}
	return migrator, driver, nil
}

// Migrate migrates the database to the latest version.
func Migrate() error {
	connectionString := buildConnectionString()
	migrator, driver, err := openMigrator(connectionString)
	if err != nil {
		return err
	}
	isCurrent, version, err := isCurrent(migrator, driver)
	if err != nil {
		return errors.Errorf("Error checking whether the database is current: %s", err)
	}
	if isCurrent {
		log.Infof("Database is already up-to-date (version %d)", version)
		return nil
	}
	err = migrator.Up()
	if err != nil {
		return err
	}
	version, isDirty, err := migrator.Version()
	if err != nil {
		return err
	}
	if isDirty {
		return errors.Errorf("error migrating database: database is dirty")
	}
	log.Infof("Migrated database to the latest version (version %d)", version)
	return nil
}
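The intended startup sequence follows from Connect and Migrate above: migrate first, then connect. A minimal sketch, assuming the configuration has already been parsed so config.ActiveConfig() is populated:

package main

import "github.com/daglabs/btcd/apiserver/database"

func main() {
	// On a fresh deployment, apply the schema first (what --migrate does)...
	if err := database.Migrate(); err != nil {
		panic(err)
	}
	// ...then connect; Connect refuses to run against an outdated schema.
	if err := database.Connect(); err != nil {
		panic(err)
	}
	defer database.Close()
}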
9
apiserver/database/log.go
Normal file
@@ -0,0 +1,9 @@
package database

import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"

var (
	log   = logger.BackendLog.Logger("DTBS")
	spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)
111
apiserver/dbmodels/models.go
Normal file
@@ -0,0 +1,111 @@
package dbmodels

import (
	"time"
)

// Block is the gorm model for the 'blocks' table
type Block struct {
	ID                   uint64 `gorm:"primary_key"`
	BlockHash            string
	AcceptingBlockID     *uint64
	AcceptingBlock       *Block
	Version              int32
	HashMerkleRoot       string
	AcceptedIDMerkleRoot string
	UTXOCommitment       string
	Timestamp            time.Time
	Bits                 uint32
	Nonce                uint64
	BlueScore            uint64
	IsChainBlock         bool
	Mass                 uint64
	ParentBlocks         []Block `gorm:"many2many:parent_blocks;"`
}

// ParentBlock is the gorm model for the 'parent_blocks' table
type ParentBlock struct {
	BlockID       uint64
	Block         Block
	ParentBlockID uint64
	ParentBlock   Block
}

// RawBlock is the gorm model for the 'raw_blocks' table
type RawBlock struct {
	BlockID   uint64
	Block     Block
	BlockData []byte
}

// Subnetwork is the gorm model for the 'subnetworks' table
type Subnetwork struct {
	ID           uint64 `gorm:"primary_key"`
	SubnetworkID string
	GasLimit     *uint64
}

// Transaction is the gorm model for the 'transactions' table
type Transaction struct {
	ID                 uint64 `gorm:"primary_key"`
	AcceptingBlockID   *uint64
	AcceptingBlock     *Block
	TransactionHash    string
	TransactionID      string
	LockTime           uint64
	SubnetworkID       uint64
	Subnetwork         Subnetwork
	Gas                uint64
	PayloadHash        string
	Payload            []byte
	Mass               uint64
	Blocks             []Block `gorm:"many2many:transactions_to_blocks;"`
	TransactionOutputs []TransactionOutput
	TransactionInputs  []TransactionInput
}

// TransactionBlock is the gorm model for the 'transactions_to_blocks' table
type TransactionBlock struct {
	TransactionID uint64
	Transaction   Transaction
	BlockID       uint64
	Block         Block
	Index         uint32
}

// TableName returns the table name associated with the
// TransactionBlock gorm model
func (TransactionBlock) TableName() string {
	return "transactions_to_blocks"
}

// TransactionOutput is the gorm model for the 'transaction_outputs' table
type TransactionOutput struct {
	ID            uint64 `gorm:"primary_key"`
	TransactionID uint64
	Transaction   Transaction
	Index         uint32
	Value         uint64
	ScriptPubKey  []byte
	IsSpent       bool
	AddressID     uint64
	Address       Address
}

// TransactionInput is the gorm model for the 'transaction_inputs' table
type TransactionInput struct {
	ID                          uint64 `gorm:"primary_key"`
	TransactionID               uint64
	Transaction                 Transaction
	PreviousTransactionOutputID uint64
	PreviousTransactionOutput   TransactionOutput
	Index                       uint32
	SignatureScript             []byte
	Sequence                    uint64
}

// Address is the gorm model for the 'addresses' table
type Address struct {
	ID      uint64 `gorm:"primary_key"`
	Address string
}
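As a rough sketch of how these models are meant to be queried through the jinzhu/gorm API used elsewhere in this commit. FetchBlockByHash is a hypothetical helper, not part of the repo:

package dbexample

import (
	"github.com/daglabs/btcd/apiserver/database"
	"github.com/daglabs/btcd/apiserver/dbmodels"
)

// FetchBlockByHash loads a single block row by its hash, together with
// its parent blocks via the parent_blocks many-to-many association.
func FetchBlockByHash(blockHash string) (*dbmodels.Block, error) {
	db, err := database.DB()
	if err != nil {
		return nil, err
	}
	block := &dbmodels.Block{}
	dbResult := db.Where("block_hash = ?", blockHash).
		Preload("ParentBlocks").
		First(block)
	if dbResult.Error != nil {
		return nil, dbResult.Error
	}
	return block, nil
}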
28
apiserver/docker/Dockerfile
Normal file
@@ -0,0 +1,28 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build

RUN mkdir -p /go/src/github.com/daglabs/btcd

WORKDIR /go/src/github.com/daglabs/btcd

RUN apk add --no-cache curl git

COPY go.mod .
COPY go.sum .

RUN go mod download

COPY . .

RUN cd apiserver && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o apiserver .

# -- multistage docker build: stage #2: runtime image
FROM alpine
WORKDIR /app

RUN apk add --no-cache tini

COPY --from=build /go/src/github.com/daglabs/btcd/apiserver/ /app/

ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/app/apiserver"]
125
apiserver/jsonrpc/client.go
Normal file
@@ -0,0 +1,125 @@
package jsonrpc

import (
	"github.com/pkg/errors"
	"io/ioutil"
	"time"

	"github.com/daglabs/btcd/apiserver/config"
	"github.com/daglabs/btcd/util/daghash"

	"github.com/daglabs/btcd/rpcclient"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/wire"
)

// Client represents a connection to the JSON-RPC API of a full node
type Client struct {
	*rpcclient.Client
	OnBlockAdded   chan *BlockAddedMsg
	OnChainChanged chan *ChainChangedMsg
}

var client *Client

// GetClient returns an instance of the JSON-RPC client, in case we have an active connection
func GetClient() (*Client, error) {
	if client == nil {
		return nil, errors.New("JSON-RPC is not connected")
	}

	return client, nil
}

// BlockAddedMsg defines the message received in onBlockAdded
type BlockAddedMsg struct {
	ChainHeight uint64
	Header      *wire.BlockHeader
}

// ChainChangedMsg defines the message received in onChainChanged
type ChainChangedMsg struct {
	RemovedChainBlockHashes []*daghash.Hash
	AddedChainBlocks        []*rpcclient.ChainBlock
}

// Close closes the connection to the JSON-RPC API server
func Close() {
	if client == nil {
		return
	}

	client.Disconnect()
	client = nil
}

// Connect initiates a connection to the JSON-RPC API server
func Connect() error {
	cfg := config.ActiveConfig()
	var cert []byte
	if !cfg.DisableTLS {
		var err error
		cert, err = ioutil.ReadFile(cfg.RPCCert)
		if err != nil {
			return errors.Errorf("Error reading certificates file: %s", err)
		}
	}

	connCfg := &rpcclient.ConnConfig{
		Host:           cfg.RPCServer,
		Endpoint:       "ws",
		User:           cfg.RPCUser,
		Pass:           cfg.RPCPassword,
		DisableTLS:     cfg.DisableTLS,
		RequestTimeout: time.Second * 5,
	}

	if !cfg.DisableTLS {
		connCfg.Certificates = cert
	}

	var err error
	client, err = newClient(connCfg)
	if err != nil {
		return errors.Errorf("Error connecting to address %s: %s", cfg.RPCServer, err)
	}

	return nil
}

func newClient(connCfg *rpcclient.ConnConfig) (*Client, error) {
	client = &Client{
		OnBlockAdded:   make(chan *BlockAddedMsg),
		OnChainChanged: make(chan *ChainChangedMsg),
	}
	notificationHandlers := &rpcclient.NotificationHandlers{
		OnFilteredBlockAdded: func(height uint64, header *wire.BlockHeader,
			txs []*util.Tx) {
			client.OnBlockAdded <- &BlockAddedMsg{
				ChainHeight: height,
				Header:      header,
			}
		},
		OnChainChanged: func(removedChainBlockHashes []*daghash.Hash,
			addedChainBlocks []*rpcclient.ChainBlock) {
			client.OnChainChanged <- &ChainChangedMsg{
				RemovedChainBlockHashes: removedChainBlockHashes,
				AddedChainBlocks:        addedChainBlocks,
			}
		},
	}
	var err error
	client.Client, err = rpcclient.New(connCfg, notificationHandlers)
	if err != nil {
		return nil, errors.Errorf("Error connecting to address %s: %s", connCfg.Host, err)
	}

	if err = client.NotifyBlocks(); err != nil {
		return nil, errors.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err)
	}
	if err = client.NotifyChainChanges(); err != nil {
		return nil, errors.Errorf("Error while registering client %s for chain changes notifications: %s", client.Host(), err)
	}

	return client, nil
}
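Both notification channels are unbuffered, so a consumer has to drain them or the RPC callbacks will block. A minimal consumer sketch, assuming Connect has already succeeded:

package jsonrpcexample

import (
	"fmt"

	"github.com/daglabs/btcd/apiserver/jsonrpc"
)

// ListenForNotifications drains both notification channels forever.
func ListenForNotifications() error {
	client, err := jsonrpc.GetClient()
	if err != nil {
		return err
	}
	for {
		select {
		case msg := <-client.OnBlockAdded:
			fmt.Printf("block added at chain height %d\n", msg.ChainHeight)
		case msg := <-client.OnChainChanged:
			fmt.Printf("chain changed: %d chain blocks added, %d removed\n",
				len(msg.AddedChainBlocks), len(msg.RemovedChainBlockHashes))
		}
	}
}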
11
apiserver/log.go
Normal file
@@ -0,0 +1,11 @@
package main

import (
	"github.com/daglabs/btcd/logger"
	"github.com/daglabs/btcd/util/panics"
)

var (
	log   = logger.BackendLog.Logger("APIS")
	spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)
24
apiserver/logger/logger.go
Normal file
@@ -0,0 +1,24 @@
package logger

import (
	"fmt"
	"github.com/daglabs/btcd/logs"
	"os"
)

// BackendLog is the logging backend used to create all subsystem loggers.
var BackendLog = logs.NewBackend()

// InitLog attaches log file and error log file to the backend log.
func InitLog(logFile, errLogFile string) {
	err := BackendLog.AddLogFile(logFile, logs.LevelTrace)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logs.LevelTrace, err)
		os.Exit(1)
	}
	err = BackendLog.AddLogFile(errLogFile, logs.LevelWarn)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logs.LevelWarn, err)
		os.Exit(1)
	}
}
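A minimal usage sketch; the file names and the "XMPL" subsystem tag here are hypothetical, since the real paths normally come from the parsed configuration:

package main

import "github.com/daglabs/btcd/apiserver/logger"

func main() {
	// Attach a trace-level log file and a warn-level error log file.
	logger.InitLog("apiserver.log", "apiserver_err.log")
	log := logger.BackendLog.Logger("XMPL")
	log.Infof("logging initialized")
}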
81
apiserver/main.go
Normal file
@@ -0,0 +1,81 @@
package main

import (
	"fmt"
	"github.com/daglabs/btcd/apiserver/mqtt"
	"github.com/pkg/errors"
	"os"

	"github.com/daglabs/btcd/apiserver/config"
	"github.com/daglabs/btcd/apiserver/database"
	"github.com/daglabs/btcd/apiserver/jsonrpc"
	"github.com/daglabs/btcd/apiserver/server"
	"github.com/daglabs/btcd/logger"
	"github.com/daglabs/btcd/signal"
	"github.com/daglabs/btcd/util/panics"
	_ "github.com/golang-migrate/migrate/v4/database/mysql"
	_ "github.com/golang-migrate/migrate/v4/source/file"
	_ "github.com/jinzhu/gorm/dialects/mysql"
)

func main() {
	defer panics.HandlePanic(log, logger.BackendLog, nil)

	cfg, err := config.Parse()
	if err != nil {
		errString := fmt.Sprintf("Error parsing command-line arguments: %s", err)
		_, fErr := fmt.Fprint(os.Stderr, errString)
		if fErr != nil {
			panic(errString)
		}
		return
	}

	if cfg.Migrate {
		err := database.Migrate()
		if err != nil {
			panic(errors.Errorf("Error migrating database: %s", err))
		}
		return
	}

	err = database.Connect()
	if err != nil {
		panic(errors.Errorf("Error connecting to database: %s", err))
	}
	defer func() {
		err := database.Close()
		if err != nil {
			panic(errors.Errorf("Error closing the database: %s", err))
		}
	}()

	err = mqtt.Connect()
	if err != nil {
		panic(errors.Errorf("Error connecting to MQTT: %s", err))
	}
	defer mqtt.Close()

	err = jsonrpc.Connect()
	if err != nil {
		panic(errors.Errorf("Error connecting to servers: %s", err))
	}
	defer jsonrpc.Close()

	shutdownServer := server.Start(config.ActiveConfig().HTTPListen)
	defer shutdownServer()

	doneChan := make(chan struct{}, 1)
	spawn(func() {
		err := startSync(doneChan)
		if err != nil {
			panic(err)
		}
	})

	interrupt := signal.InterruptListener()
	<-interrupt

	// Gracefully stop syncing
	doneChan <- struct{}{}
}
1
apiserver/migrations/000001_create_blocks_table.down.sql
Normal file
@@ -0,0 +1 @@
DROP TABLE `blocks`;
23
apiserver/migrations/000001_create_blocks_table.up.sql
Normal file
@@ -0,0 +1,23 @@
CREATE TABLE `blocks`
(
    `id`                      BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `block_hash`              CHAR(64)        NOT NULL,
    `accepting_block_id`      BIGINT UNSIGNED NULL,
    `version`                 INT             NOT NULL,
    `hash_merkle_root`        CHAR(64)        NOT NULL,
    `accepted_id_merkle_root` CHAR(64)        NOT NULL,
    `utxo_commitment`         CHAR(64)        NOT NULL,
    `timestamp`               DATETIME        NOT NULL,
    `bits`                    INT UNSIGNED    NOT NULL,
    `nonce`                   BIGINT UNSIGNED NOT NULL,
    `blue_score`              BIGINT UNSIGNED NOT NULL,
    `is_chain_block`          TINYINT         NOT NULL,
    `mass`                    BIGINT          NOT NULL,
    PRIMARY KEY (`id`),
    UNIQUE INDEX `idx_blocks_block_hash` (`block_hash`),
    INDEX `idx_blocks_timestamp` (`timestamp`),
    INDEX `idx_blocks_is_chain_block` (`is_chain_block`),
    CONSTRAINT `fk_blocks_accepting_block_id`
        FOREIGN KEY (`accepting_block_id`)
        REFERENCES `blocks` (`id`)
);
@@ -0,0 +1 @@
DROP TABLE `parent_blocks`;
@@ -0,0 +1,12 @@
CREATE TABLE `parent_blocks`
(
    `block_id`        BIGINT UNSIGNED NOT NULL,
    `parent_block_id` BIGINT UNSIGNED NOT NULL,
    PRIMARY KEY (`block_id`, `parent_block_id`),
    CONSTRAINT `fk_parent_blocks_block_id`
        FOREIGN KEY (`block_id`)
        REFERENCES `blocks` (`id`),
    CONSTRAINT `fk_parent_blocks_parent_block_id`
        FOREIGN KEY (`parent_block_id`)
        REFERENCES `blocks` (`id`)
);
@@ -0,0 +1 @@
DROP TABLE `raw_blocks`;
@@ -0,0 +1,9 @@
CREATE TABLE `raw_blocks`
(
    `block_id`   BIGINT UNSIGNED NOT NULL,
    `block_data` BLOB            NOT NULL,
    PRIMARY KEY (`block_id`),
    CONSTRAINT `fk_raw_blocks_block_id`
        FOREIGN KEY (`block_id`)
        REFERENCES `blocks` (`id`)
);
@@ -0,0 +1 @@
DROP TABLE `subnetworks`;
@@ -0,0 +1,8 @@
CREATE TABLE `subnetworks`
(
    `id`            BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `subnetwork_id` CHAR(64)        NOT NULL,
    `gas_limit`     BIGINT UNSIGNED NULL,
    PRIMARY KEY (`id`),
    UNIQUE INDEX `idx_subnetworks_subnetwork_id` (`subnetwork_id`)
);
@@ -0,0 +1 @@
DROP TABLE `transactions`;
19
apiserver/migrations/000005_create_transactions_table.up.sql
Normal file
@@ -0,0 +1,19 @@
CREATE TABLE `transactions`
(
    `id`                 BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `accepting_block_id` BIGINT UNSIGNED NULL,
    `transaction_hash`   CHAR(64)        NOT NULL,
    `transaction_id`     CHAR(64)        NOT NULL,
    `lock_time`          BIGINT UNSIGNED NOT NULL,
    `subnetwork_id`      BIGINT UNSIGNED NOT NULL,
    `gas`                BIGINT UNSIGNED NOT NULL,
    `payload_hash`       CHAR(64)        NOT NULL,
    `payload`            BLOB            NOT NULL,
    `mass`               BIGINT          NOT NULL,
    PRIMARY KEY (`id`),
    UNIQUE INDEX `idx_transactions_transaction_hash` (`transaction_hash`),
    INDEX `idx_transactions_transaction_id` (`transaction_id`),
    CONSTRAINT `fk_transactions_accepting_block_id`
        FOREIGN KEY (`accepting_block_id`)
        REFERENCES `blocks` (`id`)
);
@@ -0,0 +1 @@
DROP TABLE `transactions_to_blocks`;
@@ -0,0 +1,14 @@
CREATE TABLE `transactions_to_blocks`
(
    `transaction_id` BIGINT UNSIGNED NOT NULL,
    `block_id`       BIGINT UNSIGNED NOT NULL,
    `index`          INT UNSIGNED    NOT NULL,
    PRIMARY KEY (`transaction_id`, `block_id`),
    INDEX `idx_transactions_to_blocks_index` (`index`),
    CONSTRAINT `fk_transactions_to_blocks_block_id`
        FOREIGN KEY (`block_id`)
        REFERENCES `blocks` (`id`),
    CONSTRAINT `fk_transactions_to_blocks_transaction_id`
        FOREIGN KEY (`transaction_id`)
        REFERENCES `transactions` (`id`)
);
@@ -0,0 +1 @@
DROP TABLE `addresses`;
@@ -0,0 +1,7 @@
CREATE TABLE `addresses`
(
    `id`      BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `address` CHAR(50)        NOT NULL,
    PRIMARY KEY (`id`),
    UNIQUE INDEX `idx_addresses_address` (`address`)
);
@@ -0,0 +1 @@
DROP TABLE `transaction_outputs`;
@@ -0,0 +1,18 @@
CREATE TABLE `transaction_outputs`
(
    `id`             BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `transaction_id` BIGINT UNSIGNED NOT NULL,
    `index`          INT UNSIGNED    NOT NULL,
    `value`          BIGINT UNSIGNED NOT NULL,
    `script_pub_key` BLOB            NOT NULL,
    `is_spent`       TINYINT         NOT NULL,
    `address_id`     BIGINT UNSIGNED NOT NULL,
    PRIMARY KEY (`id`),
    INDEX `idx_transaction_outputs_transaction_id` (`transaction_id`),
    CONSTRAINT `fk_transaction_outputs_transaction_id`
        FOREIGN KEY (`transaction_id`)
        REFERENCES `transactions` (`id`),
    CONSTRAINT `fk_transaction_outputs_address_id`
        FOREIGN KEY (`address_id`)
        REFERENCES `addresses` (`id`)
);
@@ -0,0 +1 @@
DROP TABLE `transaction_inputs`;
@@ -0,0 +1,18 @@
CREATE TABLE `transaction_inputs`
(
    `id`                             BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `transaction_id`                 BIGINT UNSIGNED NULL,
    `previous_transaction_output_id` BIGINT UNSIGNED NOT NULL,
    `index`                          INT UNSIGNED    NOT NULL,
    `signature_script`               BLOB            NOT NULL,
    `sequence`                       BIGINT UNSIGNED NOT NULL,
    PRIMARY KEY (`id`),
    INDEX `idx_transaction_inputs_transaction_id` (`transaction_id`),
    INDEX `idx_transaction_inputs_previous_transaction_output_id` (`previous_transaction_output_id`),
    CONSTRAINT `fk_transaction_inputs_transaction_id`
        FOREIGN KEY (`transaction_id`)
        REFERENCES `transactions` (`id`),
    CONSTRAINT `fk_transaction_inputs_previous_transaction_output_id`
        FOREIGN KEY (`previous_transaction_output_id`)
        REFERENCES `transaction_outputs` (`id`)
);
9
apiserver/mqtt/log.go
Normal file
@@ -0,0 +1,9 @@
package mqtt

import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"

var (
	log   = logger.BackendLog.Logger("MQTT")
	spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)
50
apiserver/mqtt/mqtt.go
Normal file
@@ -0,0 +1,50 @@
package mqtt

import (
	"errors"
	"github.com/daglabs/btcd/apiserver/config"
	mqtt "github.com/eclipse/paho.mqtt.golang"
)

// client is an instance of the MQTT client, in case we have an active connection
var client mqtt.Client

// GetClient returns an instance of the MQTT client, in case we have an active connection
func GetClient() (mqtt.Client, error) {
	if client == nil {
		return nil, errors.New("MQTT is not connected")
	}
	return client, nil
}

// Connect initiates a connection to the MQTT server, if defined
func Connect() error {
	cfg := config.ActiveConfig()
	if cfg.MQTTBrokerAddress == "" {
		// MQTT broker not defined -- nothing to do
		return nil
	}

	options := mqtt.NewClientOptions()
	options.AddBroker(cfg.MQTTBrokerAddress)
	options.SetUsername(cfg.MQTTUser)
	options.SetPassword(cfg.MQTTPassword)
	options.SetAutoReconnect(true)

	newClient := mqtt.NewClient(options)
	if token := newClient.Connect(); token.Wait() && token.Error() != nil {
		return token.Error()
	}
	client = newClient

	return nil
}

// Close closes the connection to the MQTT server, if previously connected
func Close() {
	if client == nil {
		return
	}
	client.Disconnect(250)
	client = nil
}
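A publishing sketch built on the paho client returned by GetClient; the topic name and QoS level here are illustrative assumptions, not values defined by this commit:

package mqttexample

import "github.com/daglabs/btcd/apiserver/mqtt"

// PublishExample publishes a payload on a hypothetical topic.
func PublishExample(payload []byte) error {
	client, err := mqtt.GetClient()
	if err != nil {
		return err // MQTT was not configured or not connected
	}
	// QoS 2, not retained; "transactions/example" is an illustrative topic.
	token := client.Publish("transactions/example", 2, false, payload)
	token.Wait()
	return token.Error()
}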
9
apiserver/server/log.go
Normal file
@@ -0,0 +1,9 @@
package server

import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"

var (
	log   = logger.BackendLog.Logger("REST")
	spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)
172
apiserver/server/routes.go
Normal file
@@ -0,0 +1,172 @@
package server

import (
	"fmt"
	"github.com/daglabs/btcd/httpserverutils"
	"github.com/pkg/errors"
	"net/http"
	"strconv"

	"github.com/daglabs/btcd/apiserver/controllers"
	"github.com/gorilla/mux"
)

const (
	routeParamTxID      = "txID"
	routeParamTxHash    = "txHash"
	routeParamAddress   = "address"
	routeParamBlockHash = "blockHash"
)

const (
	queryParamSkip  = "skip"
	queryParamLimit = "limit"
	queryParamOrder = "order"
)

const (
	defaultGetTransactionsLimit = 100
	defaultGetBlocksLimit       = 25
	defaultGetBlocksOrder       = controllers.OrderAscending
)

func mainHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, _ map[string]string, _ []byte) (interface{}, error) {
	return struct {
		Message string `json:"message"`
	}{
		Message: "API server is running",
	}, nil
}

func addRoutes(router *mux.Router) {
	router.HandleFunc("/", httpserverutils.MakeHandler(mainHandler))

	router.HandleFunc(
		fmt.Sprintf("/transaction/id/{%s}", routeParamTxID),
		httpserverutils.MakeHandler(getTransactionByIDHandler)).
		Methods("GET")

	router.HandleFunc(
		fmt.Sprintf("/transaction/hash/{%s}", routeParamTxHash),
		httpserverutils.MakeHandler(getTransactionByHashHandler)).
		Methods("GET")

	router.HandleFunc(
		fmt.Sprintf("/transactions/address/{%s}", routeParamAddress),
		httpserverutils.MakeHandler(getTransactionsByAddressHandler)).
		Methods("GET")

	router.HandleFunc(
		fmt.Sprintf("/utxos/address/{%s}", routeParamAddress),
		httpserverutils.MakeHandler(getUTXOsByAddressHandler)).
		Methods("GET")

	router.HandleFunc(
		fmt.Sprintf("/block/{%s}", routeParamBlockHash),
		httpserverutils.MakeHandler(getBlockByHashHandler)).
		Methods("GET")

	router.HandleFunc(
		"/blocks",
		httpserverutils.MakeHandler(getBlocksHandler)).
		Methods("GET")

	router.HandleFunc(
		"/fee-estimates",
		httpserverutils.MakeHandler(getFeeEstimatesHandler)).
		Methods("GET")

	router.HandleFunc(
		"/transaction",
		httpserverutils.MakeHandler(postTransactionHandler)).
		Methods("POST")
}

func convertQueryParamToInt(queryParams map[string]string, param string, defaultValue int) (int, error) {
	if _, ok := queryParams[param]; ok {
		intValue, err := strconv.Atoi(queryParams[param])
		if err != nil {
			return 0, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Wrap(err, fmt.Sprintf("Couldn't parse the '%s' query parameter", param)))
		}
		return intValue, nil
	}
	return defaultValue, nil
}

func getTransactionByIDHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, error) {

	return controllers.GetTransactionByIDHandler(routeParams[routeParamTxID])
}

func getTransactionByHashHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, error) {

	return controllers.GetTransactionByHashHandler(routeParams[routeParamTxHash])
}

func getTransactionsByAddressHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, queryParams map[string]string,
	_ []byte) (interface{}, error) {

	skip, err := convertQueryParamToInt(queryParams, queryParamSkip, 0)
	if err != nil {
		return nil, err
	}
	limit, err := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetTransactionsLimit)
	if err != nil {
		return nil, err
	}
	return controllers.GetTransactionsByAddressHandler(routeParams[routeParamAddress], uint64(skip), uint64(limit))
}

func getUTXOsByAddressHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, error) {

	return controllers.GetUTXOsByAddressHandler(routeParams[routeParamAddress])
}

func getBlockByHashHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, error) {

	return controllers.GetBlockByHashHandler(routeParams[routeParamBlockHash])
}

func getFeeEstimatesHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, _ map[string]string,
	_ []byte) (interface{}, error) {

	return controllers.GetFeeEstimatesHandler()
}

func getBlocksHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, queryParams map[string]string,
	_ []byte) (interface{}, error) {

	skip, err := convertQueryParamToInt(queryParams, queryParamSkip, 0)
	if err != nil {
		return nil, err
	}
	limit, err := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetBlocksLimit)
	if err != nil {
		return nil, err
	}
	order := defaultGetBlocksOrder
	if orderParamValue, ok := queryParams[queryParamOrder]; ok {
		if orderParamValue != controllers.OrderAscending && orderParamValue != controllers.OrderDescending {
			return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Errorf("'%s' is not a valid value for the '%s' query parameter", orderParamValue, queryParamOrder))
		}
		order = orderParamValue
	}
	return controllers.GetBlocksHandler(order, uint64(skip), uint64(limit))
}

func postTransactionHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, _ map[string]string,
	requestBody []byte) (interface{}, error) {
	return nil, controllers.PostTransaction(requestBody)
}
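For illustration, fetching a page of blocks from a running server; the base URL and the literal "asc" value for the order parameter are assumptions (the string behind controllers.OrderAscending is not shown in this diff):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Hypothetical base URL; the listen address comes from HTTPListen in the config.
	resp, err := http.Get("http://localhost:8080/blocks?skip=0&limit=25&order=asc")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}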
40
apiserver/server/server.go
Normal file
@@ -0,0 +1,40 @@
package server

import (
	"context"
	"github.com/daglabs/btcd/httpserverutils"
	"net/http"
	"time"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)

const gracefulShutdownTimeout = 30 * time.Second

// Start starts the HTTP REST server and returns a
// function to gracefully shut it down.
func Start(listenAddr string) func() {
	router := mux.NewRouter()
	router.Use(httpserverutils.AddRequestMetadataMiddleware)
	router.Use(httpserverutils.RecoveryMiddleware)
	router.Use(httpserverutils.LoggingMiddleware)
	router.Use(httpserverutils.SetJSONMiddleware)
	addRoutes(router)
	httpServer := &http.Server{
		Addr:    listenAddr,
		Handler: handlers.CORS()(router),
	}
	spawn(func() {
		log.Errorf("%s", httpServer.ListenAndServe())
	})

	return func() {
		ctx, cancel := context.WithTimeout(context.Background(), gracefulShutdownTimeout)
		defer cancel()
		err := httpServer.Shutdown(ctx)
		if err != nil {
			log.Errorf("Error shutting down HTTP server: %s", err)
		}
	}
}
1033
apiserver/sync.go
Normal file
File diff suppressed because it is too large
@@ -6,11 +6,18 @@ package blockdag

import (
"fmt"

"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
)

func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error {
blockHeader := &block.MsgBlock().Header
newNode := newBlockNode(blockHeader, newSet(), dag.dagParams.K)
newNode.status = statusInvalidAncestor
dag.index.AddNode(newNode)
return dag.index.flushToDB()
}

// maybeAcceptBlock potentially accepts a block into the block DAG. It
// performs several validation checks which depend on its position within
// the block DAG before adding it. The block is expected to have already
@@ -21,27 +28,29 @@ import (
//
// This function MUST be called with the dagLock held (for writes).
func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error {
// The height of this block is one more than the referenced previous
// block.
parents, err := lookupParentNodes(block, dag)
if err != nil {
if rErr, ok := err.(RuleError); ok && rErr.ErrorCode == ErrInvalidAncestorBlock {
err := dag.addNodeToIndexWithInvalidAncestor(block)
if err != nil {
return err
}
}
return err
}

bluestParent := parents.bluest()
blockHeight := int32(0)
if !block.IsGenesis() {
blockHeight = parents.maxHeight() + 1
}
block.SetHeight(blockHeight)

// The block must pass all of the validation rules which depend on the
// position of the block within the block DAG.
err = dag.checkBlockContext(block, parents, bluestParent, flags)
err = dag.checkBlockContext(block, parents, flags)
if err != nil {
return err
}

// Create a new block node for the block and add it to the node index.
newNode := newBlockNode(&block.MsgBlock().Header, parents, dag.dagParams.K)
newNode.status = statusDataStored
dag.index.AddNode(newNode)

// Insert the block into the database if it's not already there. Even
// though it is possible the block will ultimately fail to connect, it
// has already passed all proof-of-work and validity tests which means
@@ -52,26 +61,30 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
// such as making blocks that never become part of the DAG or
// blocks that fail to connect available for further analysis.
err = dag.db.Update(func(dbTx database.Tx) error {
return dbStoreBlock(dbTx, block)
err := dbStoreBlock(dbTx, block)
if err != nil {
return err
}
return dag.index.flushToDBWithTx(dbTx)
})
if err != nil {
return err
}

// Create a new block node for the block and add it to the node index.
blockHeader := &block.MsgBlock().Header
newNode := newBlockNode(blockHeader, parents, dag.dagParams.K)
newNode.status = statusDataStored

dag.index.AddNode(newNode)
err = dag.index.flushToDB()
if err != nil {
return err
// Make sure that all the block's transactions are finalized
fastAdd := flags&BFFastAdd == BFFastAdd
bluestParent := parents.bluest()
if !fastAdd {
if err := dag.validateAllTxsFinalized(block, newNode, bluestParent); err != nil {
return err
}
}

block.SetChainHeight(newNode.chainHeight)

// Connect the passed block to the DAG. This also handles validation of the
// transaction scripts.
err = dag.addBlock(newNode, parents, block, flags)
chainUpdates, err := dag.addBlock(newNode, parents, block, flags)
if err != nil {
return err
}
@@ -80,7 +93,16 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
// DAG. The caller would typically want to react by relaying the
// inventory to other peers.
dag.dagLock.Unlock()
dag.sendNotification(NTBlockAdded, block)
dag.sendNotification(NTBlockAdded, &BlockAddedNotificationData{
Block: block,
WasUnorphaned: flags&BFWasUnorphaned != 0,
})
if len(chainUpdates.addedChainBlockHashes) > 0 {
dag.sendNotification(NTChainChanged, &ChainChangedNotificationData{
RemovedChainBlockHashes: chainUpdates.removedChainBlockHashes,
AddedChainBlockHashes: chainUpdates.addedChainBlockHashes,
})
}
dag.dagLock.Lock()

return nil

@@ -1,7 +1,8 @@
package blockdag

import (
"errors"
"github.com/pkg/errors"
"path/filepath"
"strings"
"testing"

@@ -21,11 +22,11 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
}
defer teardownFunc()

dag.TestSetBlockRewardMaturity(1)
dag.TestSetCoinbaseMaturity(0)

// Test rejecting the block if its parents are missing
orphanBlockFile := "blk_3B.dat"
loadedBlocks, err := loadBlocks(orphanBlockFile)
loadedBlocks, err := LoadBlocks(filepath.Join("testdata/", orphanBlockFile))
if err != nil {
t.Fatalf("TestMaybeAcceptBlockErrors: "+
"Error loading file '%s': %s\n", orphanBlockFile, err)
@@ -48,7 +49,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {

// Test rejecting the block if its parents are invalid
blocksFile := "blk_0_to_4.dat"
blocks, err := loadBlocks(blocksFile)
blocks, err := LoadBlocks(filepath.Join("testdata/", blocksFile))
if err != nil {
t.Fatalf("TestMaybeAcceptBlockErrors: "+
"Error loading file '%s': %s\n", blocksFile, err)
@@ -56,10 +57,13 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {

// Add a valid block and mark it as invalid
block1 := blocks[1]
isOrphan, err := dag.ProcessBlock(block1, BFNone)
isOrphan, delay, err := dag.ProcessBlock(block1, BFNone)
if err != nil {
t.Fatalf("TestMaybeAcceptBlockErrors: Valid block unexpectedly returned an error: %s", err)
}
if delay != 0 {
t.Fatalf("TestMaybeAcceptBlockErrors: block 1 is too far in the future")
}
if isOrphan {
t.Fatalf("TestMaybeAcceptBlockErrors: incorrectly returned block 1 is an orphan")
}

@@ -3,7 +3,7 @@ package blockdag
import (
"container/heap"

"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/daghash"
)

// baseHeap is an implementation for heap.Interface that sorts blocks by their height
@@ -28,61 +28,61 @@ func (h *baseHeap) Pop() interface{} {
type upHeap struct{ baseHeap }

func (h upHeap) Less(i, j int) bool {
if h.baseHeap[i].height == h.baseHeap[j].height {
if h.baseHeap[i].blueScore == h.baseHeap[j].blueScore {
return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) < 0
}

return h.baseHeap[i].height < h.baseHeap[j].height
return h.baseHeap[i].blueScore < h.baseHeap[j].blueScore
}

// downHeap extends baseHeap to include Less operation that traverses from top to bottom
type downHeap struct{ baseHeap }

func (h downHeap) Less(i, j int) bool {
if h.baseHeap[i].height == h.baseHeap[j].height {
if h.baseHeap[i].blueScore == h.baseHeap[j].blueScore {
return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) > 0
}

return h.baseHeap[i].height > h.baseHeap[j].height
return h.baseHeap[i].blueScore > h.baseHeap[j].blueScore
}

// BlockHeap represents a mutable heap of Blocks, sorted by their height
type BlockHeap struct {
// blockHeap represents a mutable heap of Blocks, sorted by their height
type blockHeap struct {
impl heap.Interface
}

// NewDownHeap initializes and returns a new BlockHeap
func NewDownHeap() BlockHeap {
h := BlockHeap{impl: &downHeap{}}
// newDownHeap initializes and returns a new blockHeap
func newDownHeap() blockHeap {
h := blockHeap{impl: &downHeap{}}
heap.Init(h.impl)
return h
}

// NewUpHeap initializes and returns a new BlockHeap
func NewUpHeap() BlockHeap {
h := BlockHeap{impl: &upHeap{}}
// newUpHeap initializes and returns a new blockHeap
func newUpHeap() blockHeap {
h := blockHeap{impl: &upHeap{}}
heap.Init(h.impl)
return h
}

// pop removes the block with lowest height from this heap and returns it
func (bh BlockHeap) pop() *blockNode {
func (bh blockHeap) pop() *blockNode {
return heap.Pop(bh.impl).(*blockNode)
}

// Push pushes the block onto the heap
func (bh BlockHeap) Push(block *blockNode) {
func (bh blockHeap) Push(block *blockNode) {
heap.Push(bh.impl, block)
}

// pushSet pushes a blockset to the heap.
func (bh BlockHeap) pushSet(bs blockSet) {
func (bh blockHeap) pushSet(bs blockSet) {
for _, block := range bs {
heap.Push(bh.impl, block)
}
}

// Len returns the length of this heap
func (bh BlockHeap) Len() int {
func (bh blockHeap) Len() int {
return bh.impl.Len()
}

@@ -4,7 +4,7 @@ import (
"testing"

"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/daghash"
)

// TestBlockHeap tests pushing, popping, and determining the length of the heap.
@@ -81,7 +81,7 @@ func TestBlockHeap(t *testing.T) {
}

for _, test := range tests {
dHeap := NewDownHeap()
dHeap := newDownHeap()
for _, block := range test.toPush {
dHeap.Push(block)
}
@@ -99,7 +99,7 @@ func TestBlockHeap(t *testing.T) {
"Expected: %v, got: %v", test.name, test.expectedPopDown, poppedBlock)
}

uHeap := NewUpHeap()
uHeap := newUpHeap()
for _, block := range test.toPush {
uHeap.Push(block)
}

136
blockdag/blockidhash.go
Normal file
@@ -0,0 +1,136 @@
package blockdag

import (
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/pkg/errors"
)

var (
	// idByHashIndexBucketName is the name of the db bucket used to house
	// the block hash -> block id index.
	idByHashIndexBucketName = []byte("idbyhashidx")

	// hashByIDIndexBucketName is the name of the db bucket used to house
	// the block id -> block hash index.
	hashByIDIndexBucketName = []byte("hashbyididx")

	currentBlockIDKey = []byte("currentblockid")
)

// -----------------------------------------------------------------------------
// This is a mapping between block hashes and unique IDs. The ID
// is simply a sequentially incremented uint64 that is used instead of the block
// hash for the indexers. This is useful because it is only 8 bytes versus the
// 32-byte hashes and thus saves a ton of space when a block is referenced in an index.
// It consists of three buckets: the first bucket maps the hash of each
// block to the unique ID and the second maps that ID back to the block hash.
// The third bucket contains the last received block ID, and is used
// when starting the node to check that the enabled indexes are up to date
// with the latest received block, and if not, to initiate the recovery process.
//
// The serialized format for keys and values in the block hash to ID bucket is:
//   <hash> = <ID>
//
//   Field   Type           Size
//   hash    daghash.Hash   32 bytes
//   ID      uint64         8 bytes
//   -----
//   Total: 40 bytes
//
// The serialized format for keys and values in the ID to block hash bucket is:
//   <ID> = <hash>
//
//   Field   Type           Size
//   ID      uint64         8 bytes
//   hash    daghash.Hash   32 bytes
//   -----
//   Total: 40 bytes
//
// -----------------------------------------------------------------------------

const blockIDSize = 8 // 8 bytes for block ID

// DBFetchBlockIDByHash uses an existing database transaction to retrieve the
// block id for the provided hash from the index.
func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error) {
	hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
	serializedID := hashIndex.Get(hash[:])
	if serializedID == nil {
		return 0, errors.Errorf("no entry in the block ID index for block with hash %s", hash)
	}

	return DeserializeBlockID(serializedID), nil
}

// DBFetchBlockHashBySerializedID uses an existing database transaction to
// retrieve the hash for the provided serialized block id from the index.
func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
	idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
	hashBytes := idIndex.Get(serializedID)
	if hashBytes == nil {
		return nil, errors.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
	}

	var hash daghash.Hash
	copy(hash[:], hashBytes)
	return &hash, nil
}

// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
// the index entries for the hash to id and id to hash mappings for the provided
// values.
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, serializedID []byte) error {
	// Add the block hash to ID mapping to the index.
	meta := dbTx.Metadata()
	hashIndex := meta.Bucket(idByHashIndexBucketName)
	if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
		return err
	}

	// Add the block ID to hash mapping to the index.
	idIndex := meta.Bucket(hashByIDIndexBucketName)
	return idIndex.Put(serializedID[:], hash[:])
}

// DBFetchCurrentBlockID returns the last known block ID.
func DBFetchCurrentBlockID(dbTx database.Tx) uint64 {
	serializedID := dbTx.Metadata().Get(currentBlockIDKey)
	if serializedID == nil {
		return 0
	}
	return DeserializeBlockID(serializedID)
}

// DeserializeBlockID returns a deserialized block id
func DeserializeBlockID(serializedID []byte) uint64 {
	return byteOrder.Uint64(serializedID)
}

// SerializeBlockID returns a serialized block id
func SerializeBlockID(blockID uint64) []byte {
	serializedBlockID := make([]byte, blockIDSize)
	byteOrder.PutUint64(serializedBlockID, blockID)
	return serializedBlockID
}

// DBFetchBlockHashByID uses an existing database transaction to retrieve the
// hash for the provided block id from the index.
func DBFetchBlockHashByID(dbTx database.Tx, id uint64) (*daghash.Hash, error) {
	return DBFetchBlockHashBySerializedID(dbTx, SerializeBlockID(id))
}

func createBlockID(dbTx database.Tx, blockHash *daghash.Hash) (uint64, error) {
	currentBlockID := DBFetchCurrentBlockID(dbTx)
	newBlockID := currentBlockID + 1
	serializedNewBlockID := SerializeBlockID(newBlockID)
	err := dbTx.Metadata().Put(currentBlockIDKey, serializedNewBlockID)
	if err != nil {
		return 0, err
	}
	err = dbPutBlockIDIndexEntry(dbTx, blockHash, serializedNewBlockID)
	if err != nil {
		return 0, err
	}
	return newBlockID, nil
}
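A quick round trip through the exported helpers shows the fixed 8-byte encoding; this sketch only relies on functions defined in this file:

package main

import (
	"fmt"

	"github.com/daglabs/btcd/blockdag"
)

func main() {
	// Round-trip a block ID through its 8-byte serialized form.
	serialized := blockdag.SerializeBlockID(42)
	fmt.Printf("serialized length: %d bytes\n", len(serialized)) // always 8
	fmt.Println("round trip:", blockdag.DeserializeBlockID(serialized)) // 42
}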
@@ -8,8 +8,8 @@ import (
"sync"

"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util/daghash"
)

// blockIndex provides facilities for keeping track of an in-memory index of the
@@ -117,27 +117,28 @@ func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
// flushToDB writes all dirty block nodes to the database. If all writes
// succeed, this clears the dirty set.
func (bi *blockIndex) flushToDB() error {
return bi.db.Update(func(dbTx database.Tx) error {
return bi.flushToDBWithTx(dbTx)
})
}

// flushToDBWithTx writes all dirty block nodes to the database. If all
// writes succeed, this clears the dirty set.
func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
bi.Lock()
defer bi.Unlock()
if len(bi.dirty) == 0 {
bi.Unlock()
return nil
}

err := bi.db.Update(func(dbTx database.Tx) error {
for node := range bi.dirty {
err := dbStoreBlockNode(dbTx, node)
if err != nil {
return err
}
for node := range bi.dirty {
err := dbStoreBlockNode(dbTx, node)
if err != nil {
return err
}
return nil
})
}

// If write was successful, clear the dirty set.
if err == nil {
bi.dirty = make(map[*blockNode]struct{})
}

bi.Unlock()
return err
bi.dirty = make(map[*blockNode]struct{})
return nil
}

@@ -1,7 +1,7 @@
package blockdag

import (
"errors"
"github.com/pkg/errors"
"strings"
"testing"
"time"
@@ -13,7 +13,7 @@ import (

func TestAncestorErrors(t *testing.T) {
node := newTestNode(newSet(), int32(0x10000000), 0, time.Unix(0, 0), dagconfig.MainNetParams.K)
node.height = 2
node.chainHeight = 2
ancestor := node.SelectedAncestor(3)
if ancestor != nil {
t.Errorf("TestAncestorErrors: Ancestor() unexpectedly returned a node. Expected: <nil>")

143
blockdag/blocklocator.go
Normal file
@@ -0,0 +1,143 @@
package blockdag

import (
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/daghash"
)

// BlockLocator is used to help locate a specific block. The algorithm for
// building the block locator is to add block hashes in reverse order on the
// block's selected parent chain until the desired stop block is reached.
// In order to keep the list of locator hashes to a reasonable number of entries,
// the step between each entry is doubled each loop iteration to exponentially
// decrease the number of hashes as a function of the distance from the block
// being located.
//
// For example, assume a selected parent chain with IDs as depicted below, and the
// stop block is genesis:
//	genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
//
// The block locator for block 18 would be the hashes of blocks:
//	[17 16 14 10 2 genesis]
type BlockLocator []*daghash.Hash

// BlockLocatorFromHashes returns a block locator from start and stop hash.
// See BlockLocator for details on the algorithm used to create a block locator.
//
// In addition to the general algorithm referenced above, this function will
// return the block locator for the selected tip if the passed hash is not currently
// known.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockLocatorFromHashes(startHash, stopHash *daghash.Hash) BlockLocator {
	dag.dagLock.RLock()
	defer dag.dagLock.RUnlock()
	startNode := dag.index.LookupNode(startHash)
	var stopNode *blockNode
	if !stopHash.IsEqual(&daghash.ZeroHash) {
		stopNode = dag.index.LookupNode(stopHash)
	}
	return dag.blockLocator(startNode, stopNode)
}

// LatestBlockLocator returns a block locator for the current tips of the DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) LatestBlockLocator() BlockLocator {
	dag.dagLock.RLock()
	defer dag.dagLock.RUnlock()
	return dag.blockLocator(nil, nil)
}

// blockLocator returns a block locator for the passed start and stop nodes.
// The default value for the start node is the selected tip, and the default
// value of the stop node is the genesis block.
//
// See the BlockLocator type comments for more details.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) blockLocator(startNode, stopNode *blockNode) BlockLocator {
	// Use the selected tip if requested.
	if startNode == nil {
		startNode = dag.virtual.selectedParent
	}

	if stopNode == nil {
		stopNode = dag.genesis
	}

	// We use the selected parent of the start node, so the
	// block locator won't contain the start node.
	startNode = startNode.selectedParent

	// If the start node or the stop node are not in the
	// virtual's selected parent chain, we replace them with their
	// closest selected parent that is part of the virtual's
	// selected parent chain.
	for !dag.IsInSelectedParentChain(stopNode.hash) {
		stopNode = stopNode.selectedParent
	}

	for !dag.IsInSelectedParentChain(startNode.hash) {
		startNode = startNode.selectedParent
	}

	// Calculate the max number of entries that will ultimately be in the
	// block locator. See the description of the algorithm for how these
	// numbers are derived.

	// startNode.hash + stopNode.hash.
	// Then floor(log2(startNode.chainHeight-stopNode.chainHeight)) entries for the skip portion.
	maxEntries := 2 + util.FastLog2Floor(startNode.chainHeight-stopNode.chainHeight)
	locator := make(BlockLocator, 0, maxEntries)

	step := uint64(1)
	for node := startNode; node != nil; {
		locator = append(locator, node.hash)

		// Nothing more to add once the stop node has been added.
		if node.chainHeight == stopNode.chainHeight {
			break
		}

		// Calculate the chainHeight of the previous node to include, ensuring the
		// final node is stopNode. Guard against uint64 underflow when the
		// remaining distance is smaller than step.
		nextChainHeight := stopNode.chainHeight
		if node.chainHeight >= stopNode.chainHeight+step {
			nextChainHeight = node.chainHeight - step
		}

		// Walk backwards through the nodes to the correct ancestor.
		node = node.SelectedAncestor(nextChainHeight)

		// Double the distance between included hashes.
		step *= 2
	}

	return locator
}

// FindNextLocatorBoundaries returns the lowest unknown block locator hash
// and the highest known block locator hash. This is used to create the
// next block locator to find the highest shared known chain block with the
// sync peer.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (startHash, stopHash *daghash.Hash) {
	// Find the most recent locator block hash in the DAG. In the case none of
	// the hashes in the locator are in the DAG, fall back to the genesis block.
	stopNode := dag.genesis
	nextBlockLocatorIndex := int64(len(locator) - 1)
	for i, hash := range locator {
		node := dag.index.LookupNode(hash)
		if node != nil {
			stopNode = node
			nextBlockLocatorIndex = int64(i) - 1
			break
		}
	}
	if nextBlockLocatorIndex < 0 {
		return nil, stopNode.hash
	}
	return locator[nextBlockLocatorIndex], stopNode.hash
}
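To make the doubling step concrete, this standalone sketch mirrors the walk in blockLocator for a straight chain and prints which chain heights a locator starting at height 17 (the selected parent of block 18) down to genesis would include. It re-implements the arithmetic purely for illustration and is not part of the repo:

package main

import "fmt"

// locatorHeights mirrors the doubling-step walk from blockLocator for a
// straight chain, returning the heights that would be included.
func locatorHeights(start, stop uint64) []uint64 {
	heights := []uint64{}
	step := uint64(1)
	for h := start; ; {
		heights = append(heights, h)
		if h == stop {
			break
		}
		// Same underflow-safe step as blockLocator above.
		if h < stop+step {
			h = stop
		} else {
			h -= step
		}
		step *= 2
	}
	return heights
}

func main() {
	fmt.Println(locatorHeights(17, 0)) // [17 16 14 10 2 0]
}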
@@ -6,12 +6,9 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/daglabs/btcd/dagconfig/daghash"
|
||||
"github.com/daglabs/btcd/util"
|
||||
"github.com/daglabs/btcd/util/daghash"
|
||||
"github.com/daglabs/btcd/wire"
|
||||
)

@@ -78,67 +75,57 @@ type blockNode struct {
	// blueScore is the count of all the blue blocks in this block's past
	blueScore uint64

	// diff is the UTXO representation of the block.
	// A block's UTXO is reconstituted by applying diffWith on every block in the chain of diffChildren
	// from the virtual block down to the block. See diffChild
	diff *UTXODiff

	// diffChild is the child that diff will be built from. See diff
	diffChild *blockNode

	// hash is the double SHA-256 of the block.
	hash *daghash.Hash

	// workSum is the total amount of work in the DAG up to and including
	// this node.
	workSum *big.Int

	// height is the position in the block DAG.
	height int32
	height uint64

	// chainHeight is the number of hops you need to go down the selected parent chain in order to get to the genesis block.
	chainHeight uint32
	chainHeight uint64

	// Some fields from block headers to aid in best chain selection and
	// reconstructing headers from memory. These must be treated as
	// immutable and are intentionally ordered to avoid padding on 64-bit
	// platforms.
	version        int32
	bits           uint32
	nonce          uint64
	timestamp      int64
	hashMerkleRoot *daghash.Hash
	idMerkleRoot   *daghash.Hash
	version              int32
	bits                 uint32
	nonce                uint64
	timestamp            int64
	hashMerkleRoot       *daghash.Hash
	acceptedIDMerkleRoot *daghash.Hash
	utxoCommitment       *daghash.Hash

	// status is a bitfield representing the validation state of the block. The
	// status field, unlike the other fields, may be written to and so should
	// only be accessed using the concurrent-safe NodeStatus method on
	// blockIndex once the node has been added to the global index.
	status blockStatus

	// isFinalized determines whether the node is below the finality point.
	isFinalized bool
}
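
The diff/diffChild comments above describe how per-block UTXO sets are reconstituted rather than stored in full. A toy model of that idea, with all names hypothetical rather than the actual kaspad types:

// toyNode keeps only a delta against its diffChild; the node's full set
// is rebuilt by applying deltas from the virtual block downward.
type toyNode struct {
	diff      map[string]int // +1 marks an output added, -1 one removed
	diffChild *toyNode       // nil means this node is the virtual block
}

// fullSet rebuilds a node's UTXO set from the virtual's base set by
// applying every diff on the diffChild chain, virtual end first.
func fullSet(node *toyNode, base map[string]bool) map[string]bool {
	var chain []*toyNode
	for n := node; n != nil; n = n.diffChild {
		chain = append(chain, n)
	}
	set := map[string]bool{}
	for out := range base {
		set[out] = true
	}
	for i := len(chain) - 1; i >= 0; i-- {
		for out, sign := range chain[i].diff {
			if sign > 0 {
				set[out] = true
			} else {
				delete(set, out)
			}
		}
	}
	return set
}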

// initBlockNode initializes a block node from the given header and parent nodes,
// calculating the height and workSum from the respective fields on the first parent.
// initBlockNode initializes a block node from the given header and parent nodes.
// This function is NOT safe for concurrent access. It must only be called when
// initially creating a node.
func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) {
	*node = blockNode{
		parents:   parents,
		children:  make(blockSet),
		workSum:   big.NewInt(0),
		timestamp: time.Now().Unix(),
	}

	// blockHeader is nil only for the virtual block
	if blockHeader != nil {
		node.hash = blockHeader.BlockHash()
		node.workSum = util.CalcWork(blockHeader.Bits)
		node.version = blockHeader.Version
		node.bits = blockHeader.Bits
		node.nonce = blockHeader.Nonce
		node.timestamp = blockHeader.Timestamp.Unix()
		node.hashMerkleRoot = blockHeader.HashMerkleRoot
		node.idMerkleRoot = blockHeader.IDMerkleRoot
		node.acceptedIDMerkleRoot = blockHeader.AcceptedIDMerkleRoot
		node.utxoCommitment = blockHeader.UTXOCommitment
	} else {
		node.hash = &daghash.ZeroHash
	}
@@ -147,15 +134,17 @@ func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents block
		node.blues, node.selectedParent, node.blueScore = phantom(node, phantomK)
		node.height = calculateNodeHeight(node)
		node.chainHeight = calculateChainHeight(node)
		node.workSum = node.workSum.Add(node.selectedParent.workSum, node.workSum)
	}
}

func calculateNodeHeight(node *blockNode) int32 {
func calculateNodeHeight(node *blockNode) uint64 {
	if node.isGenesis() {
		return 0
	}
	return node.parents.maxHeight() + 1
}

func calculateChainHeight(node *blockNode) uint32 {
func calculateChainHeight(node *blockNode) uint64 {
	if node.isGenesis() {
		return 0
	}
@@ -163,8 +152,7 @@ func calculateChainHeight(node *blockNode) uint32 {
}

// newBlockNode returns a new block node for the given block header and parent
// nodes, calculating the height and workSum from the respective fields on the
// parent. This function is NOT safe for concurrent access.
// nodes. This function is NOT safe for concurrent access.
func newBlockNode(blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) *blockNode {
	var node blockNode
	initBlockNode(&node, blockHeader, parents, phantomK)
@@ -184,67 +172,54 @@ func (node *blockNode) updateParentsChildren() {
func (node *blockNode) Header() *wire.BlockHeader {
	// No lock is needed because all accessed fields are immutable.
	return &wire.BlockHeader{
		Version:        node.version,
		ParentHashes:   node.ParentHashes(),
		HashMerkleRoot: node.hashMerkleRoot,
		IDMerkleRoot:   node.idMerkleRoot,
		Timestamp:      time.Unix(node.timestamp, 0),
		Bits:           node.bits,
		Nonce:          node.nonce,
		Version:              node.version,
		ParentHashes:         node.ParentHashes(),
		HashMerkleRoot:       node.hashMerkleRoot,
		AcceptedIDMerkleRoot: node.acceptedIDMerkleRoot,
		UTXOCommitment:       node.utxoCommitment,
		Timestamp:            time.Unix(node.timestamp, 0),
		Bits:                 node.bits,
		Nonce:                node.nonce,
	}
}

// SelectedAncestor returns the ancestor block node at the provided height by following
// the selected chain backwards from this node. The returned block will be nil when a
// height is requested that is after the height of the passed node or is less than zero.
// SelectedAncestor returns the ancestor block node at the provided chain-height by following
// the selected-parents chain backwards from this node. The returned block will be nil when a
// height is requested that is after the height of the passed node.
//
// This function is safe for concurrent access.
func (node *blockNode) SelectedAncestor(height int32) *blockNode {
	if height < 0 || height > node.height {
func (node *blockNode) SelectedAncestor(chainHeight uint64) *blockNode {
	if chainHeight < 0 || chainHeight > node.chainHeight {
		return nil
	}

	n := node
	for ; n != nil && n.height != height; n = n.selectedParent {
	for ; n != nil && n.chainHeight != chainHeight; n = n.selectedParent {
		// Intentionally left blank
	}

	return n
}

// RelativeAncestor returns the ancestor block node a relative 'distance' blocks
// before this node. This is equivalent to calling Ancestor with the node's
// height minus provided distance.
// RelativeAncestor returns the ancestor block node a relative 'distance' of
// chain-blocks before this node. This is equivalent to calling Ancestor with
// the node's chain-height minus provided distance.
//
// This function is safe for concurrent access.
func (node *blockNode) RelativeAncestor(distance int32) *blockNode {
	return node.SelectedAncestor(node.height - distance)
func (node *blockNode) RelativeAncestor(distance uint64) *blockNode {
	return node.SelectedAncestor(node.chainHeight - distance)
}

// PastMedianTime returns the median time of the previous few blocks
// CalcPastMedianTime returns the median time of the previous few blocks
// prior to, and including, the block node.
//
// This function is safe for concurrent access.
func (node *blockNode) PastMedianTime() time.Time {
	// Create a slice of the previous few block timestamps used to calculate
	// the median per the number defined by the constant medianTimeBlocks.
	// If there aren't enough blocks yet - pad remaining with genesis block's timestamp.
	timestamps := make([]int64, medianTimeBlocks)
	iterNode := node
	for i := 0; i < medianTimeBlocks; i++ {
		timestamps[i] = iterNode.timestamp

		if !iterNode.isGenesis() {
			iterNode = iterNode.selectedParent
		}
func (node *blockNode) PastMedianTime(dag *BlockDAG) time.Time {
	window := blueBlockWindow(node, 2*dag.TimestampDeviationTolerance-1)
	medianTimestamp, err := window.medianTimestamp()
	if err != nil {
		panic(fmt.Sprintf("blueBlockWindow: %s", err))
	}

	sort.Sort(timeSorter(timestamps))

	// Note: This works when medianTimeBlockCount is an odd number.
	// If it is to be changed to an even number - must take average of two middle values
	// Since medianTimeBlockCount is a constant, we can skip the odd/even check
	medianTimestamp := timestamps[medianTimeBlocks/2]
	return time.Unix(medianTimestamp, 0)
}
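
A quick worked check of the median above: the window size 2*dag.TimestampDeviationTolerance-1 is always odd, so the sorted middle element is an exact median. With a tolerance of 6, for example, the window holds 11 timestamps, and after sorting, the element at index 11/2 = 5 is returned.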

@@ -257,11 +232,11 @@ func (node *blockNode) isGenesis() bool {
	return len(node.parents) == 0
}

func (node *blockNode) finalityScore() uint64 {
	return node.blueScore / FinalityInterval
func (node *blockNode) finalityScore(dag *BlockDAG) uint64 {
	return node.blueScore / uint64(dag.dagParams.FinalityInterval)
}

// String returns a string that contains the block hash and height.
// String returns a string that contains the block hash.
func (node blockNode) String() string {
	return fmt.Sprintf("%s (%d)", node.hash, node.height)
	return node.hash.String()
}

@@ -26,7 +26,7 @@ func TestChainHeight(t *testing.T) {

	tests := []struct {
		node                *blockNode
		expectedChainHeight uint32
		expectedChainHeight uint64
	}{
		{
			node: node0,

@@ -3,7 +3,7 @@ package blockdag
import (
	"strings"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/util/daghash"
)

// blockSet implements a basic unsorted set of blocks
@@ -24,8 +24,8 @@ func setFromSlice(blocks ...*blockNode) blockSet {
}

// maxHeight returns the height of the highest block in the block set
func (bs blockSet) maxHeight() int32 {
	var maxHeight int32
func (bs blockSet) maxHeight() uint64 {
	var maxHeight uint64
	for _, node := range bs {
		if maxHeight < node.height {
			maxHeight = node.height
@@ -34,19 +34,6 @@ func (bs blockSet) maxHeight() int32 {
	return maxHeight
}

func (bs blockSet) highest() *blockNode {
	var highest *blockNode
	for _, node := range bs {
		if highest == nil ||
			highest.height < node.height ||
			(highest.height == node.height && daghash.Less(node.hash, highest.hash)) {

			highest = node
		}
	}
	return highest
}

// add adds a block to this BlockSet
func (bs blockSet) add(block *blockNode) {
	bs[*block.hash] = block

@@ -4,7 +4,7 @@ import (
	"reflect"
	"testing"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/util/daghash"
)

func TestHashes(t *testing.T) {
@@ -35,47 +35,6 @@ func TestHashes(t *testing.T) {
		t.Errorf("TestHashes: hashes order is %s but expected %s", hashes, expected)
	}
}
func TestBlockSetHighest(t *testing.T) {
	node1 := &blockNode{hash: &daghash.Hash{10}, height: 1}
	node2a := &blockNode{hash: &daghash.Hash{20}, height: 2}
	node2b := &blockNode{hash: &daghash.Hash{21}, height: 2}
	node3 := &blockNode{hash: &daghash.Hash{30}, height: 3}

	tests := []struct {
		name            string
		set             blockSet
		expectedHighest *blockNode
	}{
		{
			name:            "empty set",
			set:             setFromSlice(),
			expectedHighest: nil,
		},
		{
			name:            "set with one member",
			set:             setFromSlice(node1),
			expectedHighest: node1,
		},
		{
			name:            "same-height highest members in set",
			set:             setFromSlice(node2b, node1, node2a),
			expectedHighest: node2a,
		},
		{
			name:            "typical set",
			set:             setFromSlice(node2b, node3, node1, node2a),
			expectedHighest: node3,
		},
	}

	for _, test := range tests {
		highest := test.set.highest()
		if highest != test.expectedHighest {
			t.Errorf("blockSet.highest: unexpected value in test '%s'. "+
				"Expected: %v, got: %v", test.name, test.expectedHighest, highest)
		}
	}
}

func TestBlockSetSubtract(t *testing.T) {
	node1 := &blockNode{hash: &daghash.Hash{10}}
75	blockdag/blockwindow.go	Normal file
@@ -0,0 +1,75 @@
package blockdag

import (
	"github.com/daglabs/btcd/util"
	"github.com/pkg/errors"
	"math"
	"math/big"
	"sort"
)

type blockWindow []*blockNode

// blueBlockWindow returns a blockWindow of the given size that contains the
// blues in the past of startingNode, sorted by phantom order.
// If the number of blues in the past of startingNode is less than windowSize,
// the window will be padded by genesis blocks to achieve a size of windowSize.
func blueBlockWindow(startingNode *blockNode, windowSize uint64) blockWindow {
	window := make(blockWindow, 0, windowSize)
	currentNode := startingNode
	for uint64(len(window)) < windowSize && currentNode.selectedParent != nil {
		if currentNode.selectedParent != nil {
			for _, blue := range currentNode.blues {
				window = append(window, blue)
				if uint64(len(window)) == windowSize {
					break
				}
			}
			currentNode = currentNode.selectedParent
		}
	}

	if uint64(len(window)) < windowSize {
		genesis := currentNode
		for uint64(len(window)) < windowSize {
			window = append(window, genesis)
		}
	}

	return window
}

func (window blockWindow) minMaxTimestamps() (min, max int64) {
	min = math.MaxInt64
	max = 0
	for _, node := range window {
		if node.timestamp < min {
			min = node.timestamp
		}
		if node.timestamp > max {
			max = node.timestamp
		}
	}
	return
}

func (window blockWindow) averageTarget() *big.Int {
	averageTarget := big.NewInt(0)
	for _, node := range window {
		target := util.CompactToBig(node.bits)
		averageTarget.Add(averageTarget, target)
	}
	return averageTarget.Div(averageTarget, big.NewInt(int64(len(window))))
}

func (window blockWindow) medianTimestamp() (int64, error) {
	if len(window) == 0 {
		return 0, errors.New("Cannot calculate median timestamp for an empty block window")
	}
	timestamps := make([]int64, len(window))
	for i, node := range window {
		timestamps[i] = node.timestamp
	}
	sort.Sort(timeSorter(timestamps))
	return timestamps[len(timestamps)/2], nil
}
138	blockdag/blockwindow_test.go	Normal file
@@ -0,0 +1,138 @@
package blockdag

import (
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/pkg/errors"
	"reflect"
	"testing"
	"time"
)

func TestBlueBlockWindow(t *testing.T) {
	params := dagconfig.SimNetParams
	params.K = 1
	dag := newTestDAG(&params)

	windowSize := uint64(10)
	genesisNode := dag.genesis
	blockTime := genesisNode.Header().Timestamp
	blockByIDMap := make(map[string]*blockNode)
	idByBlockMap := make(map[*blockNode]string)
	blockByIDMap["A"] = genesisNode
	idByBlockMap[genesisNode] = "A"
	blockVersion := int32(0x10000000)

	blocksData := []*struct {
		parents                          []string
		id                               string // id is a virtual entity that is used only for tests so we can define relations between blocks without knowing their hash
		expectedWindowWithGenesisPadding []string
	}{
		{
			parents:                          []string{"A"},
			id:                               "B",
			expectedWindowWithGenesisPadding: []string{"A", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
		},
		{
			parents:                          []string{"B"},
			id:                               "C",
			expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
		},
		{
			parents:                          []string{"B"},
			id:                               "D",
			expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
		},
		{
			parents:                          []string{"C", "D"},
			id:                               "E",
			expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
		},
		{
			parents:                          []string{"C", "D"},
			id:                               "F",
			expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
		},
		{
			parents:                          []string{"A"},
			id:                               "G",
			expectedWindowWithGenesisPadding: []string{"A", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
		},
		{
			parents:                          []string{"G"},
			id:                               "H",
			expectedWindowWithGenesisPadding: []string{"G", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
		},
		{
			parents:                          []string{"H", "F"},
			id:                               "I",
			expectedWindowWithGenesisPadding: []string{"F", "D", "C", "B", "A", "A", "A", "A", "A", "A"},
		},
		{
			parents:                          []string{"I"},
			id:                               "J",
			expectedWindowWithGenesisPadding: []string{"I", "F", "D", "C", "B", "A", "A", "A", "A", "A"},
		},
		{
			parents:                          []string{"J"},
			id:                               "K",
			expectedWindowWithGenesisPadding: []string{"J", "I", "F", "D", "C", "B", "A", "A", "A", "A"},
		},
		{
			parents:                          []string{"K"},
			id:                               "L",
			expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "D", "C", "B", "A", "A", "A"},
		},
		{
			parents:                          []string{"L"},
			id:                               "M",
			expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "D", "C", "B", "A", "A"},
		},
		{
			parents:                          []string{"M"},
			id:                               "N",
			expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "D", "C", "B", "A"},
		},
		{
			parents:                          []string{"N"},
			id:                               "O",
			expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "B"},
		},
	}

	for _, blockData := range blocksData {
		blockTime = blockTime.Add(time.Second)
		parents := blockSet{}
		for _, parentID := range blockData.parents {
			parent := blockByIDMap[parentID]
			parents.add(parent)
		}
		node := newTestNode(parents, blockVersion, 0, blockTime, dag.dagParams.K)
		node.hash = &daghash.Hash{} // It helps to predict hash order
		for i, char := range blockData.id {
			node.hash[i] = byte(char)
		}

		dag.index.AddNode(node)
		node.updateParentsChildren()

		blockByIDMap[blockData.id] = node
		idByBlockMap[node] = blockData.id

		window := blueBlockWindow(node, windowSize)
		if err := checkWindowIDs(window, blockData.expectedWindowWithGenesisPadding, idByBlockMap); err != nil {
			t.Errorf("Unexpected values for window for block %s: %s", blockData.id, err)
		}
	}
}

func checkWindowIDs(window []*blockNode, expectedIDs []string, idByBlockMap map[*blockNode]string) error {
	ids := make([]string, len(window))
	for i, node := range window {
		ids[i] = idByBlockMap[node]
	}
	if !reflect.DeepEqual(ids, expectedIDs) {
		return errors.Errorf("window expected to have blocks %s but got %s", expectedIDs, ids)
	}
	return nil
}

@@ -6,12 +6,11 @@ package blockdag

import (
	"fmt"
	"time"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/pkg/errors"
)

// CheckpointConfirmations is the number of blocks before the end of the current
@@ -64,16 +63,16 @@ func (dag *BlockDAG) LatestCheckpoint() *dagconfig.Checkpoint {
	return &dag.checkpoints[len(dag.checkpoints)-1]
}

// verifyCheckpoint returns whether the passed block height and hash combination
// verifyCheckpoint returns whether the passed block chain height and hash combination
// match the checkpoint data. It also returns true if there is no checkpoint
// data for the passed block height.
func (dag *BlockDAG) verifyCheckpoint(height int32, hash *daghash.Hash) bool {
// data for the passed block chain height.
func (dag *BlockDAG) verifyCheckpoint(chainHeight uint64, hash *daghash.Hash) bool {
	if !dag.HasCheckpoints() {
		return true
	}

	// Nothing to check if there is no checkpoint data for the block height.
	checkpoint, exists := dag.checkpointsByHeight[height]
	// Nothing to check if there is no checkpoint data for the block chainHeight.
	checkpoint, exists := dag.checkpointsByChainHeight[chainHeight]
	if !exists {
		return true
	}
@@ -82,7 +81,7 @@ func (dag *BlockDAG) verifyCheckpoint(height int32, hash *daghash.Hash) bool {
		return false
	}

	log.Infof("Verified checkpoint at height %d/block %s", checkpoint.Height,
	log.Infof("Verified checkpoint at chainHeight %d/block %s", checkpoint.ChainHeight,
		checkpoint.Hash)
	return true
}
@@ -136,10 +135,10 @@ func (dag *BlockDAG) findPreviousCheckpoint() (*blockNode, error) {
		return dag.checkpointNode, nil
	}

	// When there is a next checkpoint and the height of the current best
	// chain does not exceed it, the current checkpoint lockin is still
	// the latest known checkpoint.
	if dag.selectedTip().height < dag.nextCheckpoint.Height {
	// When there is a next checkpoint and the chain height of the current
	// selected tip of the DAG does not exceed it, the current checkpoint
	// lockin is still the latest known checkpoint.
	if dag.selectedTip().chainHeight < dag.nextCheckpoint.ChainHeight {
		return dag.checkpointNode, nil
	}

@@ -181,7 +180,7 @@ func (dag *BlockDAG) findPreviousCheckpoint() (*blockNode, error) {
func isNonstandardTransaction(tx *util.Tx) bool {
	// Check all of the output public key scripts for non-standard scripts.
	for _, txOut := range tx.MsgTx().TxOut {
		scriptClass := txscript.GetScriptClass(txOut.PkScript)
		scriptClass := txscript.GetScriptClass(txOut.ScriptPubKey)
		if scriptClass == txscript.NonStandardTy {
			return true
		}
@@ -216,19 +215,19 @@ func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
		return false, nil
	}

	// Ensure the height of the passed block and the entry for the block in
	// the main chain match. This should always be the case unless the
	// Ensure the chain height of the passed block and the entry for the block
	// in the DAG match. This should always be the case unless the
	// caller provided an invalid block.
	if node.height != block.Height() {
		return false, fmt.Errorf("passed block height of %d does not "+
			"match the main chain height of %d", block.Height(),
			node.height)
	if node.chainHeight != block.ChainHeight() {
		return false, errors.Errorf("passed block chain height of %d does not "+
			"match its height in the DAG: %d", block.ChainHeight(),
			node.chainHeight)
	}

	// A checkpoint must be at least CheckpointConfirmations blocks
	// before the end of the main chain.
	dagHeight := dag.selectedTip().height
	if node.height > (dagHeight - CheckpointConfirmations) {
	dagChainHeight := dag.selectedTip().chainHeight
	if node.chainHeight > (dagChainHeight - CheckpointConfirmations) {
		return false, nil
	}

@@ -237,8 +236,7 @@ func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
	// This should always succeed since the check above already made sure it
	// is CheckpointConfirmations back, but be safe in case the constant
	// changes.
	nextNode := node.diffChild
	if nextNode == nil {
	if len(node.children) == 0 {
		return false, nil
	}

@@ -247,16 +245,6 @@ func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
		return false, nil
	}

	// A checkpoint must have timestamps for the block and the blocks on
	// either side of it in order (due to the median time allowance this is
	// not always the case).
	prevTime := time.Unix(node.selectedParent.timestamp, 0)
	curTime := block.MsgBlock().Header.Timestamp
	nextTime := time.Unix(nextNode.timestamp, 0)
	if prevTime.After(curTime) || nextTime.Before(curTime) {
		return false, nil
	}

	// A checkpoint must have transactions that only contain standard
	// scripts.
	for _, tx := range block.Transactions() {
278	blockdag/coinbase.go	Normal file
@@ -0,0 +1,278 @@
package blockdag

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"github.com/daglabs/btcd/util/subnetworkid"
	"github.com/pkg/errors"
	"io"
	"math"

	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/util/txsort"
	"github.com/daglabs/btcd/wire"
)

// compactFeeData is a specialized data type to store a compact list of fees
// inside a block.
// Every transaction gets a single uint64 value, stored as a plain binary list.
// The transactions are ordered the same way they are ordered inside the block, making it easy
// to traverse every transaction in a block and extract its fee.
//
// compactFeeFactory is used to create such a list.
// compactFeeIterator is used to iterate over such a list.

type compactFeeData []byte

func (cfd compactFeeData) Len() int {
	return len(cfd) / 8
}

type compactFeeFactory struct {
	buffer *bytes.Buffer
	writer *bufio.Writer
}

func newCompactFeeFactory() *compactFeeFactory {
	buffer := bytes.NewBuffer([]byte{})
	return &compactFeeFactory{
		buffer: buffer,
		writer: bufio.NewWriter(buffer),
	}
}

func (cfw *compactFeeFactory) add(txFee uint64) error {
	return binary.Write(cfw.writer, binary.LittleEndian, txFee)
}

func (cfw *compactFeeFactory) data() (compactFeeData, error) {
	err := cfw.writer.Flush()

	return compactFeeData(cfw.buffer.Bytes()), err
}

type compactFeeIterator struct {
	reader io.Reader
}

func (cfd compactFeeData) iterator() *compactFeeIterator {
	return &compactFeeIterator{
		reader: bufio.NewReader(bytes.NewBuffer(cfd)),
	}
}

func (cfr *compactFeeIterator) next() (uint64, error) {
	var txFee uint64

	err := binary.Read(cfr.reader, binary.LittleEndian, &txFee)

	return txFee, err
}
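
A hedged round-trip sketch of the helpers above, written as it would sit inside this package (the factory and iterator are unexported); the fee values are made up:

func exampleFeeRoundTrip() ([]uint64, error) {
	factory := newCompactFeeFactory()
	for _, fee := range []uint64{1000, 250, 0} {
		if err := factory.add(fee); err != nil {
			return nil, err
		}
	}
	feeData, err := factory.data()
	if err != nil {
		return nil, err
	}
	// Three transactions -> 24 bytes: one little-endian uint64 each,
	// in the same order the transactions appear inside the block.
	fees := make([]uint64, 0, feeData.Len())
	iterator := feeData.iterator()
	for i := 0; i < feeData.Len(); i++ {
		fee, err := iterator.next()
		if err != nil {
			return nil, err
		}
		fees = append(fees, fee)
	}
	return fees, nil // [1000 250 0]
}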

// The following functions relate to storing and retrieving fee data from the database
var feeBucket = []byte("fees")

// getBluesFeeData returns the compactFeeData for each of the node's blues,
// used to calculate the fees this blockNode needs to pay
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
	bluesFeeData := make(map[daghash.Hash]compactFeeData)

	err := dag.db.View(func(dbTx database.Tx) error {
		for _, blueBlock := range node.blues {
			feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
			if err != nil {
				return errors.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
			}

			bluesFeeData[*blueBlock.hash] = feeData
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	return bluesFeeData, nil
}

func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
	feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
	if err != nil {
		return errors.Errorf("Error creating or retrieving fee bucket: %s", err)
	}

	return feeBucket.Put(blockHash.CloneBytes(), feeData)
}

func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
	feeBucket := dbTx.Metadata().Bucket(feeBucket)
	if feeBucket == nil {
		return nil, errors.New("Fee bucket does not exist")
	}

	feeData := feeBucket.Get(blockHash.CloneBytes())
	if feeData == nil {
		return nil, errors.Errorf("No fee data found for block %s", blockHash)
	}

	return feeData, nil
}

// The following functions deal with building and validating the coinbase transaction

func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
	if node.isGenesis() {
		return nil
	}
	blockCoinbaseTx := block.CoinbaseTransaction().MsgTx()
	scriptPubKey, extraData, err := DeserializeCoinbasePayload(blockCoinbaseTx)
	if err != nil {
		return err
	}
	expectedCoinbaseTransaction, err := node.expectedCoinbaseTransaction(dag, txsAcceptanceData, scriptPubKey, extraData)
	if err != nil {
		return err
	}

	if !expectedCoinbaseTransaction.Hash().IsEqual(block.CoinbaseTransaction().Hash()) {
		return ruleError(ErrBadCoinbaseTransaction, "Coinbase transaction is not built as expected")
	}

	return nil
}

// expectedCoinbaseTransaction returns the coinbase transaction for the current block
func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData, scriptPubKey []byte, extraData []byte) (*util.Tx, error) {
	bluesFeeData, err := node.getBluesFeeData(dag)
	if err != nil {
		return nil, err
	}

	txIns := []*wire.TxIn{}
	txOuts := []*wire.TxOut{}

	for _, blue := range node.blues {
		txIn, txOut, err := coinbaseInputAndOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
		if err != nil {
			return nil, err
		}
		txIns = append(txIns, txIn)
		if txOut != nil {
			txOuts = append(txOuts, txOut)
		}
	}
	payload, err := SerializeCoinbasePayload(scriptPubKey, extraData)
	if err != nil {
		return nil, err
	}
	coinbaseTx := wire.NewSubnetworkMsgTx(wire.TxVersion, txIns, txOuts, subnetworkid.SubnetworkIDCoinbase, 0, payload)
	sortedCoinbaseTx := txsort.Sort(coinbaseTx)
	return util.NewTx(sortedCoinbaseTx), nil
}

// SerializeCoinbasePayload builds the coinbase payload based on the provided scriptPubKey and extra data.
func SerializeCoinbasePayload(scriptPubKey []byte, extraData []byte) ([]byte, error) {
	w := &bytes.Buffer{}
	err := wire.WriteVarInt(w, uint64(len(scriptPubKey)))
	if err != nil {
		return nil, err
	}
	_, err = w.Write(scriptPubKey)
	if err != nil {
		return nil, err
	}
	_, err = w.Write(extraData)
	if err != nil {
		return nil, err
	}
	return w.Bytes(), nil
}

// DeserializeCoinbasePayload deserializes the coinbase payload into its components (scriptPubKey and extra data).
func DeserializeCoinbasePayload(tx *wire.MsgTx) (scriptPubKey []byte, extraData []byte, err error) {
	r := bytes.NewReader(tx.Payload)
	scriptPubKeyLen, err := wire.ReadVarInt(r)
	if err != nil {
		return nil, nil, err
	}
	scriptPubKey = make([]byte, scriptPubKeyLen)
	_, err = r.Read(scriptPubKey)
	if err != nil {
		return nil, nil, err
	}
	extraData = make([]byte, r.Len())
	if r.Len() != 0 {
		_, err = r.Read(extraData)
		if err != nil {
			return nil, nil, err
		}
	}
	return scriptPubKey, extraData, nil
}
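
A hedged round-trip sketch using the two exported helpers above; the script bytes and extra data are made up, and the payload layout is a varint script length, the script, then every remaining byte as extra data:

func exampleCoinbasePayloadRoundTrip() error {
	payload, err := SerializeCoinbasePayload([]byte{0x51, 0x52, 0x53}, []byte{0xAA})
	if err != nil {
		return err
	}
	// payload is now {0x03, 0x51, 0x52, 0x53, 0xAA}: a one-byte varint
	// length, the 3-byte script, then the extra data.
	tx := wire.NewSubnetworkMsgTx(wire.TxVersion, nil, nil, subnetworkid.SubnetworkIDCoinbase, 0, payload)
	scriptPubKey, extraData, err := DeserializeCoinbasePayload(tx)
	if err != nil {
		return err
	}
	_ = scriptPubKey // {0x51, 0x52, 0x53}
	_ = extraData    // {0xAA}
	return nil
}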

// coinbaseInputAndOutputForBlueBlock calculates the input and output that should go into the coinbase transaction of blueBlock
// If blueBlock gets no fee - returns only txIn and nil for txOut
func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
	txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
	*wire.TxIn, *wire.TxOut, error) {

	blockTxsAcceptanceData, ok := txsAcceptanceData.FindAcceptanceData(blueBlock.hash)
	if !ok {
		return nil, nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
	}
	blockFeeData, ok := feeData[*blueBlock.hash]
	if !ok {
		return nil, nil, errors.Errorf("No feeData for block %s", blueBlock.hash)
	}

	if len(blockTxsAcceptanceData.TxAcceptanceData) != blockFeeData.Len() {
		return nil, nil, errors.Errorf(
			"length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
			len(blockTxsAcceptanceData.TxAcceptanceData), blockFeeData.Len(), blueBlock.hash)
	}

	txIn := &wire.TxIn{
		SignatureScript: []byte{},
		PreviousOutpoint: wire.Outpoint{
			TxID:  daghash.TxID(*blueBlock.hash),
			Index: math.MaxUint32,
		},
		Sequence: wire.MaxTxInSequenceNum,
	}

	totalFees := uint64(0)
	feeIterator := blockFeeData.iterator()

	for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
		fee, err := feeIterator.next()
		if err != nil {
			return nil, nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
		}
		if txAcceptanceData.IsAccepted {
			totalFees += fee
		}
	}

	totalReward := CalcBlockSubsidy(blueBlock.height, dag.dagParams) + totalFees

	if totalReward == 0 {
		return txIn, nil, nil
	}

	// the ScriptPubKey for the coinbase is parsed from the coinbase payload
	scriptPubKey, _, err := DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx())
	if err != nil {
		return nil, nil, err
	}

	txOut := &wire.TxOut{
		Value:        totalReward,
		ScriptPubKey: scriptPubKey,
	}

	return txIn, txOut, nil
}
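
As a worked example of the arithmetic above (with made-up numbers): if a blue block's acceptance data lists three transactions with fees 1000, 250, and 0, of which only the first two were accepted, totalFees is 1250, and the blue block's txOut pays CalcBlockSubsidy(blueBlock.height, dag.dagParams) + 1250 to the scriptPubKey parsed from that block's own coinbase payload. If subsidy and fees are both zero, only the txIn is returned.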

@@ -7,78 +7,31 @@ package blockdag
import (
	"compress/bzip2"
	"encoding/binary"
	"fmt"
	"github.com/pkg/errors"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/dagconfig/daghash"
	_ "github.com/daglabs/btcd/database/ffldb"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/wire"
)

// loadBlocks reads files containing bitcoin block data (gzipped but otherwise
// in the format bitcoind writes) from disk and returns them as an array of
// util.Block. This is largely borrowed from the test code in btcdb.
func loadBlocks(filename string) (blocks []*util.Block, err error) {
	filename = filepath.Join("testdata/", filename)

	var network = wire.MainNet
	var dr io.Reader
	var fi io.ReadCloser

	fi, err = os.Open(filename)
	if err != nil {
		return
func loadBlocksWithLog(t *testing.T, filename string) ([]*util.Block, error) {
	blocks, err := LoadBlocks(filename)
	if err == nil {
		t.Logf("Loaded %d blocks from file %s", len(blocks), filename)
		for i, b := range blocks {
			t.Logf("Block #%d: %s", i, b.Hash())
		}
	}

	if strings.HasSuffix(filename, ".bz2") {
		dr = bzip2.NewReader(fi)
	} else {
		dr = fi
	}
	defer fi.Close()

	var block *util.Block

	err = nil
	for height := 0; err == nil; height++ {
		var rintbuf uint32
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err == io.EOF {
			// hit end of file at expected offset: no warning
			height--
			err = nil
			break
		}
		if err != nil {
			break
		}
		if rintbuf != uint32(network) {
			break
		}
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		blocklen := rintbuf

		rbytes := make([]byte, blocklen)

		// read block
		dr.Read(rbytes)

		block, err = util.NewBlockFromBytes(rbytes)
		if err != nil {
			return
		}
		block.SetHeight(int32(height))
		blocks = append(blocks, block)
	}

	return
	return blocks, err
}

// loadUTXOSet returns a utxo view loaded from a file.
@@ -143,16 +96,16 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
		if err != nil {
			return nil, err
		}
		utxoSet.utxoCollection[wire.OutPoint{TxID: txID, Index: index}] = entry
		utxoSet.utxoCollection[wire.Outpoint{TxID: txID, Index: index}] = entry
	}

	return utxoSet, nil
}

// TestSetBlockRewardMaturity makes the ability to set the block reward maturity
// TestSetCoinbaseMaturity makes the ability to set the coinbase maturity
// available when running tests.
func (dag *BlockDAG) TestSetBlockRewardMaturity(maturity uint16) {
	dag.dagParams.BlockRewardMaturity = maturity
func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
	dag.dagParams.BlockCoinbaseMaturity = maturity
}

// newTestDAG returns a DAG that is usable for synthetic tests. It is
@@ -166,20 +119,19 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {
	index := newBlockIndex(nil, params)
	index.AddNode(node)

	targetTimespan := int64(params.TargetTimespan / time.Second)
	targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
	adjustmentFactor := params.RetargetAdjustmentFactor
	return &BlockDAG{
		dagParams:           params,
		timeSource:          NewMedianTime(),
		minRetargetTimespan: targetTimespan / adjustmentFactor,
		maxRetargetTimespan: targetTimespan * adjustmentFactor,
		blocksPerRetarget:   int32(targetTimespan / targetTimePerBlock),
		index:               index,
		virtual:             newVirtualBlock(setFromSlice(node), params.K),
		genesis:             index.LookupNode(params.GenesisHash),
		warningCaches:       newThresholdCaches(vbNumBits),
		deploymentCaches:    newThresholdCaches(dagconfig.DefinedDeployments),
		dagParams:                      params,
		timeSource:                     NewMedianTime(),
		targetTimePerBlock:             targetTimePerBlock,
		difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
		TimestampDeviationTolerance:    params.TimestampDeviationTolerance,
		powMaxBits:                     util.BigToCompact(params.PowMax),
		index:                          index,
		virtual:                        newVirtualBlock(setFromSlice(node), params.K),
		genesis:                        index.LookupNode(params.GenesisHash),
		warningCaches:                  newThresholdCaches(vbNumBits),
		deploymentCaches:               newThresholdCaches(dagconfig.DefinedDeployments),
	}
}

@@ -188,12 +140,13 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {
func newTestNode(parents blockSet, blockVersion int32, bits uint32, timestamp time.Time, phantomK uint32) *blockNode {
	// Make up a header and create a block node from it.
	header := &wire.BlockHeader{
		Version:        blockVersion,
		ParentHashes:   parents.hashes(),
		Bits:           bits,
		Timestamp:      timestamp,
		HashMerkleRoot: &daghash.ZeroHash,
		IDMerkleRoot:   &daghash.ZeroHash,
		Version:              blockVersion,
		ParentHashes:         parents.hashes(),
		Bits:                 bits,
		Timestamp:            timestamp,
		HashMerkleRoot:       &daghash.ZeroHash,
		AcceptedIDMerkleRoot: &daghash.ZeroHash,
		UTXOCommitment:       &daghash.ZeroHash,
	}
	return newBlockNode(header, parents, phantomK)
}
@@ -232,7 +185,7 @@ func checkRuleError(gotErr, wantErr error) error {
	// Ensure the error code is of the expected type and the error
	// code matches the value specified in the test instance.
	if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) {
		return fmt.Errorf("wrong error - got %T (%[1]v), want %T",
		return errors.Errorf("wrong error - got %T (%[1]v), want %T",
			gotErr, wantErr)
	}
	if gotErr == nil {
@@ -242,7 +195,7 @@ func checkRuleError(gotErr, wantErr error) error {
	// Ensure the want error type is a script error.
	werr, ok := wantErr.(RuleError)
	if !ok {
		return fmt.Errorf("unexpected test error type %T", wantErr)
		return errors.Errorf("unexpected test error type %T", wantErr)
	}

	// Ensure the error codes match. It's safe to use a raw type assert
@@ -250,7 +203,7 @@ func checkRuleError(gotErr, wantErr error) error {
	// the want error is a script error.
	gotErrorCode := gotErr.(RuleError).ErrorCode
	if gotErrorCode != werr.ErrorCode {
		return fmt.Errorf("mismatched error code - got %v (%v), want %v",
		return errors.Errorf("mismatched error code - got %v (%v), want %v",
			gotErrorCode, gotErr, werr.ErrorCode)
	}


@@ -241,27 +241,27 @@ func isPubKey(script []byte) (bool, []byte) {

// compressedScriptSize returns the number of bytes the passed script would take
// when encoded with the domain specific compression algorithm described above.
func compressedScriptSize(pkScript []byte) int {
func compressedScriptSize(scriptPubKey []byte) int {
	// Pay-to-pubkey-hash script.
	if valid, _ := isPubKeyHash(pkScript); valid {
	if valid, _ := isPubKeyHash(scriptPubKey); valid {
		return 21
	}

	// Pay-to-script-hash script.
	if valid, _ := isScriptHash(pkScript); valid {
	if valid, _ := isScriptHash(scriptPubKey); valid {
		return 21
	}

	// Pay-to-pubkey (compressed or uncompressed) script.
	if valid, _ := isPubKey(pkScript); valid {
	if valid, _ := isPubKey(scriptPubKey); valid {
		return 33
	}

	// When none of the above special cases apply, encode the script as is
	// preceded by the sum of its size and the number of special cases
	// encoded as a variable length quantity.
	return serializeSizeVLQ(uint64(len(pkScript)+numSpecialScripts)) +
		len(pkScript)
	return serializeSizeVLQ(uint64(len(scriptPubKey)+numSpecialScripts)) +
		len(scriptPubKey)
}
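
Worked sizes under the scheme above: a 25-byte pay-to-pubkey-hash script compresses to 21 bytes (one type byte plus the 20-byte hash), a 23-byte pay-to-script-hash script also to 21, and a pay-to-pubkey script to 33. A script of length n matching no special case costs serializeSizeVLQ(n+numSpecialScripts) + n bytes, which is why the "requires 2 size bytes" case in the tests further below kicks in once n+numSpecialScripts crosses the one-byte VLQ limit.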
|
||||
|
||||
// decodeCompressedScriptSize treats the passed serialized bytes as a compressed
|
||||
@@ -296,23 +296,23 @@ func decodeCompressedScriptSize(serialized []byte) int {
|
||||
// target byte slice. The target byte slice must be at least large enough to
|
||||
// handle the number of bytes returned by the compressedScriptSize function or
|
||||
// it will panic.
|
||||
func putCompressedScript(target, pkScript []byte) int {
|
||||
func putCompressedScript(target, scriptPubKey []byte) int {
|
||||
// Pay-to-pubkey-hash script.
|
||||
if valid, hash := isPubKeyHash(pkScript); valid {
|
||||
if valid, hash := isPubKeyHash(scriptPubKey); valid {
|
||||
target[0] = cstPayToPubKeyHash
|
||||
copy(target[1:21], hash)
|
||||
return 21
|
||||
}
|
||||
|
||||
// Pay-to-script-hash script.
|
||||
if valid, hash := isScriptHash(pkScript); valid {
|
||||
if valid, hash := isScriptHash(scriptPubKey); valid {
|
||||
target[0] = cstPayToScriptHash
|
||||
copy(target[1:21], hash)
|
||||
return 21
|
||||
}
|
||||
|
||||
// Pay-to-pubkey (compressed or uncompressed) script.
|
||||
if valid, serializedPubKey := isPubKey(pkScript); valid {
|
||||
if valid, serializedPubKey := isPubKey(scriptPubKey); valid {
|
||||
pubKeyFormat := serializedPubKey[0]
|
||||
switch pubKeyFormat {
|
||||
case 0x02, 0x03:
|
||||
@@ -331,10 +331,10 @@ func putCompressedScript(target, pkScript []byte) int {
|
||||
// When none of the above special cases apply, encode the unmodified
|
||||
// script preceded by the sum of its size and the number of special
|
||||
// cases encoded as a variable length quantity.
|
||||
encodedSize := uint64(len(pkScript) + numSpecialScripts)
|
||||
encodedSize := uint64(len(scriptPubKey) + numSpecialScripts)
|
||||
vlqSizeLen := putVLQ(target, encodedSize)
|
||||
copy(target[vlqSizeLen:], pkScript)
|
||||
return vlqSizeLen + len(pkScript)
|
||||
copy(target[vlqSizeLen:], scriptPubKey)
|
||||
return vlqSizeLen + len(scriptPubKey)
|
||||
}
|
||||
|
||||
// decompressScript returns the original script obtained by decompressing the
|
||||
@@ -344,50 +344,50 @@ func putCompressedScript(target, pkScript []byte) int {
|
||||
// NOTE: The script parameter must already have been proven to be long enough
|
||||
// to contain the number of bytes returned by decodeCompressedScriptSize or it
|
||||
// will panic. This is acceptable since it is only an internal function.
|
||||
func decompressScript(compressedPkScript []byte) []byte {
|
||||
func decompressScript(compressedScriptPubKey []byte) []byte {
|
||||
// In practice this function will not be called with a zero-length or
|
||||
// nil script since the nil script encoding includes the length, however
|
||||
// the code below assumes the length exists, so just return nil now if
|
||||
// the function ever ends up being called with a nil script in the
|
||||
// future.
|
||||
if len(compressedPkScript) == 0 {
|
||||
if len(compressedScriptPubKey) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode the script size and examine it for the special cases.
|
||||
encodedScriptSize, bytesRead := deserializeVLQ(compressedPkScript)
|
||||
encodedScriptSize, bytesRead := deserializeVLQ(compressedScriptPubKey)
|
||||
switch encodedScriptSize {
|
||||
// Pay-to-pubkey-hash script. The resulting script is:
|
||||
// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
|
||||
case cstPayToPubKeyHash:
|
||||
pkScript := make([]byte, 25)
|
||||
pkScript[0] = txscript.OpDup
|
||||
pkScript[1] = txscript.OpHash160
|
||||
pkScript[2] = txscript.OpData20
|
||||
copy(pkScript[3:], compressedPkScript[bytesRead:bytesRead+20])
|
||||
pkScript[23] = txscript.OpEqualVerify
|
||||
pkScript[24] = txscript.OpCheckSig
|
||||
return pkScript
|
||||
scriptPubKey := make([]byte, 25)
|
||||
scriptPubKey[0] = txscript.OpDup
|
||||
scriptPubKey[1] = txscript.OpHash160
|
||||
scriptPubKey[2] = txscript.OpData20
|
||||
copy(scriptPubKey[3:], compressedScriptPubKey[bytesRead:bytesRead+20])
|
||||
scriptPubKey[23] = txscript.OpEqualVerify
|
||||
scriptPubKey[24] = txscript.OpCheckSig
|
||||
return scriptPubKey
|
||||
|
||||
// Pay-to-script-hash script. The resulting script is:
|
||||
// <OP_HASH160><20 byte script hash><OP_EQUAL>
|
||||
case cstPayToScriptHash:
|
||||
pkScript := make([]byte, 23)
|
||||
pkScript[0] = txscript.OpHash160
|
||||
pkScript[1] = txscript.OpData20
|
||||
copy(pkScript[2:], compressedPkScript[bytesRead:bytesRead+20])
|
||||
pkScript[22] = txscript.OpEqual
|
||||
return pkScript
|
||||
scriptPubKey := make([]byte, 23)
|
||||
scriptPubKey[0] = txscript.OpHash160
|
||||
scriptPubKey[1] = txscript.OpData20
|
||||
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+20])
|
||||
scriptPubKey[22] = txscript.OpEqual
|
||||
return scriptPubKey
|
||||
|
||||
// Pay-to-compressed-pubkey script. The resulting script is:
|
||||
// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
|
||||
case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
|
||||
pkScript := make([]byte, 35)
|
||||
pkScript[0] = txscript.OpData33
|
||||
pkScript[1] = byte(encodedScriptSize)
|
||||
copy(pkScript[2:], compressedPkScript[bytesRead:bytesRead+32])
|
||||
pkScript[34] = txscript.OpCheckSig
|
||||
return pkScript
|
||||
scriptPubKey := make([]byte, 35)
|
||||
scriptPubKey[0] = txscript.OpData33
|
||||
scriptPubKey[1] = byte(encodedScriptSize)
|
||||
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+32])
|
||||
scriptPubKey[34] = txscript.OpCheckSig
|
||||
return scriptPubKey
|
||||
|
||||
// Pay-to-uncompressed-pubkey script. The resulting script is:
|
||||
// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
|
||||
@@ -398,26 +398,26 @@ func decompressScript(compressedPkScript []byte) []byte {
|
||||
// encoding ensures it is valid before compressing to this type.
|
||||
compressedKey := make([]byte, 33)
|
||||
compressedKey[0] = byte(encodedScriptSize - 2)
|
||||
copy(compressedKey[1:], compressedPkScript[1:])
|
||||
copy(compressedKey[1:], compressedScriptPubKey[1:])
|
||||
key, err := btcec.ParsePubKey(compressedKey, btcec.S256())
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
pkScript := make([]byte, 67)
|
||||
pkScript[0] = txscript.OpData65
|
||||
copy(pkScript[1:], key.SerializeUncompressed())
|
||||
pkScript[66] = txscript.OpCheckSig
|
||||
return pkScript
|
||||
scriptPubKey := make([]byte, 67)
|
||||
scriptPubKey[0] = txscript.OpData65
|
||||
copy(scriptPubKey[1:], key.SerializeUncompressed())
|
||||
scriptPubKey[66] = txscript.OpCheckSig
|
||||
return scriptPubKey
|
||||
}
|
||||
|
||||
// When none of the special cases apply, the script was encoded using
|
||||
// the general format, so reduce the script size by the number of
|
||||
// special cases and return the unmodified script.
|
||||
scriptSize := int(encodedScriptSize - numSpecialScripts)
|
||||
pkScript := make([]byte, scriptSize)
|
||||
copy(pkScript, compressedPkScript[bytesRead:bytesRead+scriptSize])
|
||||
return pkScript
|
||||
scriptPubKey := make([]byte, scriptSize)
|
||||
copy(scriptPubKey, compressedScriptPubKey[bytesRead:bytesRead+scriptSize])
|
||||
return scriptPubKey
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
@@ -543,9 +543,9 @@ func decompressTxOutAmount(amount uint64) uint64 {
|
||||
|
||||
// compressedTxOutSize returns the number of bytes the passed transaction output
|
||||
// fields would take when encoded with the format described above.
|
||||
func compressedTxOutSize(amount uint64, pkScript []byte) int {
|
||||
func compressedTxOutSize(amount uint64, scriptPubKey []byte) int {
|
||||
return serializeSizeVLQ(compressTxOutAmount(amount)) +
|
||||
compressedScriptSize(pkScript)
|
||||
compressedScriptSize(scriptPubKey)
|
||||
}
|
||||
|
||||
// putCompressedTxOut compresses the passed amount and script according to their
|
||||
@@ -553,9 +553,9 @@ func compressedTxOutSize(amount uint64, pkScript []byte) int {
|
||||
// passed target byte slice with the format described above. The target byte
|
||||
// slice must be at least large enough to handle the number of bytes returned by
|
||||
// the compressedTxOutSize function or it will panic.
|
||||
func putCompressedTxOut(target []byte, amount uint64, pkScript []byte) int {
|
||||
func putCompressedTxOut(target []byte, amount uint64, scriptPubKey []byte) int {
|
||||
offset := putVLQ(target, compressTxOutAmount(amount))
|
||||
offset += putCompressedScript(target[offset:], pkScript)
|
||||
offset += putCompressedScript(target[offset:], scriptPubKey)
|
||||
return offset
|
||||
}
|
||||
|
||||
|
||||
@@ -162,11 +162,6 @@ func TestScriptCompression(t *testing.T) {
|
||||
uncompressed: hexToBytes("3302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
|
||||
compressed: hexToBytes("293302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
|
||||
},
|
||||
{
|
||||
name: "null data",
|
||||
uncompressed: hexToBytes("6a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
|
||||
compressed: hexToBytes("286a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
|
||||
},
|
||||
{
|
||||
name: "requires 2 size bytes - data push 200 bytes",
|
||||
uncompressed: append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...),
|
||||
@@ -265,7 +260,7 @@ func TestAmountCompression(t *testing.T) {
|
||||
compressed uint64
|
||||
}{
|
||||
{
|
||||
name: "0 BTC (sometimes used in nulldata)",
|
||||
name: "0 BTC",
|
||||
uncompressed: 0,
|
||||
compressed: 0,
|
||||
},
|
||||
@@ -338,35 +333,29 @@ func TestCompressedTxOut(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
amount uint64
|
||||
pkScript []byte
|
||||
compressed []byte
|
||||
name string
|
||||
amount uint64
|
||||
scriptPubKey []byte
|
||||
compressed []byte
|
||||
}{
|
||||
{
|
||||
name: "nulldata with 0 BTC",
|
||||
amount: 0,
|
||||
pkScript: hexToBytes("6a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
|
||||
compressed: hexToBytes("00286a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
|
||||
name: "pay-to-pubkey-hash dust",
|
||||
amount: 546,
|
||||
scriptPubKey: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
|
||||
compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey-hash dust",
|
||||
amount: 546,
|
||||
pkScript: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
|
||||
compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
|
||||
},
|
||||
{
|
||||
name: "pay-to-pubkey uncompressed 1 BTC",
|
||||
amount: 100000000,
|
||||
pkScript: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
|
||||
compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
|
||||
name: "pay-to-pubkey uncompressed 1 BTC",
|
||||
amount: 100000000,
|
||||
scriptPubKey: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
|
||||
compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the function to calculate the serialized size without
|
||||
// actually serializing the txout is calculated properly.
|
||||
gotSize := compressedTxOutSize(test.amount, test.pkScript)
|
||||
gotSize := compressedTxOutSize(test.amount, test.scriptPubKey)
|
||||
if gotSize != len(test.compressed) {
|
||||
t.Errorf("compressedTxOutSize (%s): did not get "+
|
||||
"expected size - got %d, want %d", test.name,
|
||||
@@ -377,7 +366,7 @@ func TestCompressedTxOut(t *testing.T) {
|
||||
// Ensure the txout compresses to the expected value.
|
||||
gotCompressed := make([]byte, gotSize)
|
||||
gotBytesWritten := putCompressedTxOut(gotCompressed,
|
||||
test.amount, test.pkScript)
|
||||
test.amount, test.scriptPubKey)
|
||||
if !bytes.Equal(gotCompressed, test.compressed) {
|
||||
t.Errorf("compressTxOut (%s): did not get expected "+
|
||||
"bytes - got %x, want %x", test.name,
|
||||
@@ -407,10 +396,10 @@ func TestCompressedTxOut(t *testing.T) {
|
||||
test.name, gotAmount, test.amount)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(gotScript, test.pkScript) {
|
||||
if !bytes.Equal(gotScript, test.scriptPubKey) {
|
||||
t.Errorf("decodeCompressedTxOut (%s): did not get "+
|
||||
"expected script - got %x, want %x",
|
||||
test.name, gotScript, test.pkScript)
|
||||
test.name, gotScript, test.scriptPubKey)
|
||||
continue
|
||||
}
|
||||
if gotBytesRead != len(test.compressed) {
|
||||
|
||||
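To read the compressed vectors above by hand: a compressed txout is a VLQ-encoded compressed amount followed by a one-byte script code and the script payload. The decoders below are a minimal sketch reconstructed from the btcd-style compression rules (the names are illustrative, not the repository's code). Applied to the pay-to-pubkey-hash dust vector, the leading bytes a5 2f decode to 4911, which decompresses to the 546 amount, and the following 00 script code marks a pay-to-pubkey-hash script whose 20-byte hash comes right after it.

package main

import "fmt"

// deserializeVLQ decodes the MSB-first base-128 VLQ used above. Every byte
// except the last has its high bit set, and a +1 offset is applied per
// continuation byte so each value has exactly one encoding.
func deserializeVLQ(serialized []byte) (uint64, int) {
	var n uint64
	for i, val := range serialized {
		n = (n << 7) | uint64(val&0x7f)
		if val&0x80 == 0 {
			return n, i + 1
		}
		n++
	}
	return 0, 0 // truncated input; real code treats this as an error
}

// decompressTxOutAmount reverses the digit-based amount compression:
// trailing decimal zeros are stored as an exponent and the last non-zero
// digit is packed separately.
func decompressTxOutAmount(x uint64) uint64 {
	if x == 0 {
		return 0
	}
	x--
	exponent := x % 10
	x /= 10
	var n uint64
	if exponent < 9 {
		lastDigit := x%9 + 1
		x /= 9
		n = x*10 + lastDigit
	} else {
		n = x + 1
	}
	for ; exponent > 0; exponent-- {
		n *= 10
	}
	return n
}

func main() {
	compressedAmount, _ := deserializeVLQ([]byte{0xa5, 0x2f}) // 4911
	fmt.Println(decompressTxOutAmount(compressedAmount))      // prints 546
}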
blockdag/dag.go (1432 lines changed): file diff suppressed because it is too large.
blockdag/dag_test.go (1084 lines changed): file diff suppressed because it is too large.
@@ -9,11 +9,14 @@ import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"github.com/pkg/errors"
	"io"
	"sync"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util"
	"github.com/daglabs/btcd/util/binaryserializer"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/util/subnetworkid"
	"github.com/daglabs/btcd/wire"
)
@@ -34,14 +37,6 @@ var (
	// block headers and contextual information.
	blockIndexBucketName = []byte("blockheaderidx")

	// hashIndexBucketName is the name of the db bucket used to house the
	// block hash -> block height index.
	hashIndexBucketName = []byte("hashidx")

	// heightIndexBucketName is the name of the db bucket used to house
	// the block height -> block hash index.
	heightIndexBucketName = []byte("heightidx")

	// dagStateKeyName is the name of the db key used to store the DAG
	// tip hashes.
	dagStateKeyName = []byte("dagstate")
@@ -54,6 +49,10 @@ var (
	// unspent transaction output set.
	utxoSetBucketName = []byte("utxoset")

	// utxoDiffsBucketName is the name of the db bucket used to house the
	// diffs and diff children of blocks.
	utxoDiffsBucketName = []byte("utxodiffs")

	// subnetworksBucketName is the name of the db bucket used to store the
	// subnetwork registry.
	subnetworksBucketName = []byte("subnetworks")
@@ -137,7 +136,7 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
//   compressed script   []byte   variable
//
// The serialized header code format is:
//   bit 0 - containing transaction is a block reward
//   bit 0 - containing transaction is a coinbase
//   bits 1-x - height of the block that contains the unspent txout
//
// Example 1:
@@ -205,7 +204,7 @@ var outpointKeyPool = sync.Pool{
// returned to the free list by using the recycleOutpointKey function when the
// caller is done with it _unless_ the slice will need to live for longer than
// the caller can calculate such as when used to write to the database.
func outpointKey(outpoint wire.OutPoint) *[]byte {
func outpointKey(outpoint wire.Outpoint) *[]byte {
	// A VLQ employs an MSB encoding, so it is useful not only to reduce
	// the amount of storage space, but also because iterating UTXOs with
	// byte-wise key comparisons produces them in order.
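The ordering property claimed in that comment comes from the encoding itself: the most significant 7-bit group is written first, and a +1 offset per continuation byte keeps the encoding length monotonic in the value, so shorter encodings always sort below longer ones. A sketch of the matching encoder, assumed to mirror putVLQ in compress.go (reconstructed for illustration, not copied from the repository):

// putVLQ writes n into target as an MSB-first VLQ and returns the number
// of bytes written.
func putVLQ(target []byte, n uint64) int {
	offset := 0
	for ; ; offset++ {
		// The high bit marks "more bytes follow"; it is left clear on
		// the byte written first, which becomes the final byte after
		// the reversal below.
		highBitMask := byte(0x80)
		if offset == 0 {
			highBitMask = 0x00
		}
		target[offset] = byte(n&0x7f) | highBitMask
		if n <= 0x7f {
			break
		}
		// The -1 mirrors the decoder's +1 offset per continuation byte.
		n = (n >> 7) - 1
	}
	// Reverse so the most significant group comes first, which is what
	// makes byte-wise key comparisons agree with numeric order.
	for i, j := 0, offset; i < j; i, j = i+1, j-1 {
		target[i], target[j] = target[j], target[i]
	}
	return offset + 1
}

For example, putVLQ on 4911 (the compressed form of the 546 dust amount in the tests above) emits the two bytes a5 2f.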
@@ -223,101 +222,14 @@ func recycleOutpointKey(key *[]byte) {
	outpointKeyPool.Put(key)
}

// utxoEntryHeaderCode returns the calculated header code to be used when
// serializing the provided utxo entry.
func utxoEntryHeaderCode(entry *UTXOEntry) uint64 {
	// As described in the serialization format comments, the header code
	// encodes the height shifted over one bit and the block reward flag in the
	// lowest bit.
	headerCode := uint64(entry.BlockHeight()) << 1
	if entry.IsBlockReward() {
		headerCode |= 0x01
	}

	return headerCode
}
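Decoding reverses this in one line; deserializeUTXOEntry below does it inline. As a hypothetical helper for illustration (decodeUTXOHeaderCode is not a function in the repository):

// decodeUTXOHeaderCode splits a header code into its two parts: bit 0 is
// the block-reward flag and the remaining bits are the block height.
func decodeUTXOHeaderCode(code uint64) (blockHeight int32, isBlockReward bool) {
	return int32(code >> 1), code&0x01 != 0
}

For instance, a block-reward output at height 1 encodes as (1<<1)|1 = 3, which is exactly the leading 0x03 byte of the first serialized vector in the dagio tests further below.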
// serializeUTXOEntry returns the entry serialized to a format that is suitable
// for long-term storage. The format is described in detail above.
func serializeUTXOEntry(entry *UTXOEntry) ([]byte, error) {
	// Encode the header code.
	headerCode := utxoEntryHeaderCode(entry)

	// Calculate the size needed to serialize the entry.
	size := serializeSizeVLQ(headerCode) +
		compressedTxOutSize(uint64(entry.Amount()), entry.PkScript())

	// Serialize the header code followed by the compressed unspent
	// transaction output.
	serialized := make([]byte, size)
	offset := putVLQ(serialized, headerCode)
	offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
		entry.PkScript())

	return serialized, nil
}

// deserializeOutPoint decodes an outPoint from the passed serialized byte
// slice into a new wire.OutPoint using a format that is suitable for long-
// term storage. This format is described in detail above.
func deserializeOutPoint(serialized []byte) (*wire.OutPoint, error) {
	if len(serialized) <= daghash.HashSize {
		return nil, errDeserialize("unexpected end of data")
	}

	txID := daghash.TxID{}
	txID.SetBytes(serialized[:daghash.HashSize])
	index, _ := deserializeVLQ(serialized[daghash.HashSize:])
	return wire.NewOutPoint(&txID, uint32(index)), nil
}

// deserializeUTXOEntry decodes a UTXO entry from the passed serialized byte
// slice into a new UTXOEntry using a format that is suitable for long-term
// storage. The format is described in detail above.
func deserializeUTXOEntry(serialized []byte) (*UTXOEntry, error) {
	// Deserialize the header code.
	code, offset := deserializeVLQ(serialized)
	if offset >= len(serialized) {
		return nil, errDeserialize("unexpected end of data after header")
	}

	// Decode the header code.
	//
	// Bit 0 indicates whether the containing transaction is a block reward.
	// Bits 1-x encode the height of the containing transaction.
	isBlockReward := code&0x01 != 0
	blockHeight := int32(code >> 1)

	// Decode the compressed unspent transaction output.
	amount, pkScript, _, err := decodeCompressedTxOut(serialized[offset:])
	if err != nil {
		return nil, errDeserialize(fmt.Sprintf("unable to decode "+
			"UTXO: %s", err))
	}

	entry := &UTXOEntry{
		amount: amount,
		pkScript: pkScript,
		blockHeight: blockHeight,
		packedFlags: 0,
	}
	if isBlockReward {
		entry.packedFlags |= tfBlockReward
	}

	return entry, nil
}

// dbPutUTXODiff uses an existing database transaction to update the UTXO set
// in the database based on the provided UTXO view contents and state. In
// particular, only the entries that have been marked as modified are written
// to the database.
func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
	utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
	for outPoint := range diff.toRemove {
		key := outpointKey(outPoint)
	for outpoint := range diff.toRemove {
		key := outpointKey(outpoint)
		err := utxoBucket.Delete(*key)
		recycleOutpointKey(key)
		if err != nil {
@@ -325,15 +237,12 @@ func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
		}
	}

	for outPoint, entry := range diff.toAdd {
	for outpoint, entry := range diff.toAdd {
		// Serialize and store the UTXO entry.
		serialized, err := serializeUTXOEntry(entry)
		if err != nil {
			return err
		}
		serialized := serializeUTXOEntry(entry)

		key := outpointKey(outPoint)
		err = utxoBucket.Put(*key, serialized)
		key := outpointKey(outpoint)
		err := utxoBucket.Put(*key, serialized)
		// NOTE: The key is intentionally not recycled here since the
		// database interface contract prohibits modifications. It will
		// be garbage collected normally when the database is done with
@@ -346,58 +255,6 @@ func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
	return nil
}
// -----------------------------------------------------------------------------
// The block index consists of two buckets with an entry for every block in the
// main chain. One bucket is for the hash to height mapping and the other is
// for the height to hash mapping.
//
// The serialized format for values in the hash to height bucket is:
//   <height>
//
//   Field      Type     Size
//   height     uint32   4 bytes
//
// The serialized format for values in the height to hash bucket is:
//   <hash>
//
//   Field      Type             Size
//   hash       daghash.Hash     daghash.HashSize
// -----------------------------------------------------------------------------

// dbPutBlockIndex uses an existing database transaction to update or add the
// block index entries for the hash to height and height to hash mappings for
// the provided values.
func dbPutBlockIndex(dbTx database.Tx, hash *daghash.Hash, height int32) error {
	// Serialize the height for use in the index entries.
	var serializedHeight [4]byte
	byteOrder.PutUint32(serializedHeight[:], uint32(height))

	// Add the block hash to height mapping to the index.
	meta := dbTx.Metadata()
	hashIndex := meta.Bucket(hashIndexBucketName)
	if err := hashIndex.Put(hash[:], serializedHeight[:]); err != nil {
		return err
	}

	// Add the block height to hash mapping to the index.
	heightIndex := meta.Bucket(heightIndexBucketName)
	return heightIndex.Put(serializedHeight[:], hash[:])
}

// dbFetchHeightByHash uses an existing database transaction to retrieve the
// height for the provided hash from the index.
func dbFetchHeightByHash(dbTx database.Tx, hash *daghash.Hash) (int32, error) {
	meta := dbTx.Metadata()
	hashIndex := meta.Bucket(hashIndexBucketName)
	serializedHeight := hashIndex.Get(hash[:])
	if serializedHeight == nil {
		str := fmt.Sprintf("block %s is not in the main chain", hash)
		return 0, errNotInDAG(str)
	}

	return int32(byteOrder.Uint32(serializedHeight)), nil
}

type dagState struct {
	TipHashes         []*daghash.Hash
	LastFinalityPoint *daghash.Hash
@@ -452,28 +309,18 @@ func (dag *BlockDAG) createDAGState() error {
		return err
	}

	// Create the bucket that houses the chain block hash to height
	// index.
	_, err = meta.CreateBucket(hashIndexBucketName)
	if err != nil {
		return err
	}

	// Create the bucket that houses the chain block height to hash
	// index.
	_, err = meta.CreateBucket(heightIndexBucketName)
	if err != nil {
		return err
	}

	// Create the bucket that houses the utxo set and store its
	// version. Note that the genesis block coinbase transaction is
	// intentionally not inserted here since it is not spendable by
	// consensus rules.
	// Create the buckets that house the utxo set, the utxo diffs, and their
	// version.
	_, err = meta.CreateBucket(utxoSetBucketName)
	if err != nil {
		return err
	}

	_, err = meta.CreateBucket(utxoDiffsBucketName)
	if err != nil {
		return err
	}

	err = dbPutVersion(dbTx, utxoSetVersionKeyName,
		latestUTXOSetBucketVersion)
	if err != nil {
@@ -489,6 +336,56 @@ func (dag *BlockDAG) createDAGState() error {
	if err := dbPutLocalSubnetworkID(dbTx, dag.subnetworkID); err != nil {
		return err
	}

	if _, err := meta.CreateBucketIfNotExists(idByHashIndexBucketName); err != nil {
		return err
	}
	if _, err := meta.CreateBucketIfNotExists(hashByIDIndexBucketName); err != nil {
		return err
	}
	return nil
	})

	if err != nil {
		return err
	}
	return nil
}

func (dag *BlockDAG) removeDAGState() error {
	err := dag.db.Update(func(dbTx database.Tx) error {
		meta := dbTx.Metadata()

		err := meta.DeleteBucket(blockIndexBucketName)
		if err != nil {
			return err
		}

		err = meta.DeleteBucket(utxoSetBucketName)
		if err != nil {
			return err
		}

		err = meta.DeleteBucket(utxoDiffsBucketName)
		if err != nil {
			return err
		}

		err = dbTx.Metadata().Delete(utxoSetVersionKeyName)
		if err != nil {
			return err
		}

		err = meta.DeleteBucket(subnetworksBucketName)
		if err != nil {
			return err
		}

		err = dbTx.Metadata().Delete(localSubnetworkKeyName)
		if err != nil {
			return err
		}

		return nil
	})
@@ -522,7 +419,7 @@ func (dag *BlockDAG) initDAGState() error {
		localSubnetworkID.SetBytes(localSubnetworkIDBytes)
	}
	if !localSubnetworkID.IsEqual(dag.subnetworkID) {
		return fmt.Errorf("Cannot start btcd with subnetwork ID %s because"+
		return errors.Errorf("Cannot start btcd with subnetwork ID %s because"+
			" its database is already built with subnetwork ID %s. If you"+
			" want to switch to a new database, please reset the"+
			" database by starting btcd with --reset-db flag", dag.subnetworkID, localSubnetworkID)
@@ -562,55 +459,42 @@ func (dag *BlockDAG) initDAGState() error {

		blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)

		// Determine how many blocks will be loaded into the index so we can
		// allocate the right amount.
		var blockCount int32
		cursor := blockIndexBucket.Cursor()
		for ok := cursor.First(); ok; ok = cursor.Next() {
			blockCount++
		}
		blockNodes := make([]blockNode, blockCount)

		var i int32
		var lastNode *blockNode
		cursor = blockIndexBucket.Cursor()
		var unprocessedBlockNodes []*blockNode
		cursor := blockIndexBucket.Cursor()
		for ok := cursor.First(); ok; ok = cursor.Next() {
			header, status, err := deserializeBlockRow(cursor.Value())
			node, err := dag.deserializeBlockNode(cursor.Value())
			if err != nil {
				return err
			}

			parents := newSet()
			// Check to see if this node had been stored in the block DB
			// but not yet accepted. If so, add it to a slice to be processed later.
			if node.status == statusDataStored {
				unprocessedBlockNodes = append(unprocessedBlockNodes, node)
				continue
			}

			if lastNode == nil {
				blockHash := header.BlockHash()
				if !blockHash.IsEqual(dag.dagParams.GenesisHash) {
				if !node.hash.IsEqual(dag.dagParams.GenesisHash) {
					return AssertError(fmt.Sprintf("initDAGState: Expected "+
						"first entry in block index to be genesis block, "+
						"found %s", blockHash))
						"found %s", node.hash))
				}
			} else {
				for _, hash := range header.ParentHashes {
					parent := dag.index.LookupNode(hash)
					if parent == nil {
						return AssertError(fmt.Sprintf("initDAGState: Could "+
							"not find parent %s for block %s", hash, header.BlockHash()))
					}
					parents.add(parent)
				}
				if len(parents) == 0 {
				if len(node.parents) == 0 {
					return AssertError(fmt.Sprintf("initDAGState: Could "+
						"not find any parent for block %s", header.BlockHash()))
						"not find any parent for block %s", node.hash))
				}
			}

			// Initialize the block node for the block, connect it,
			// Add the node to its parents' children, connect it,
			// and add it to the block index.
			node := &blockNodes[i]
			initBlockNode(node, header, parents, dag.dagParams.K)
			node.status = status
			node.updateParentsChildren()
			dag.index.addNode(node)

			if blockStatus(status).KnownValid() {
			if node.status.KnownValid() {
				dag.blockCount++
			}

@@ -636,15 +520,15 @@ func (dag *BlockDAG) initDAGState() error {

		fullUTXOCollection := make(utxoCollection, utxoEntryCount)
		for ok := cursor.First(); ok; ok = cursor.Next() {
			// Deserialize the outPoint
			outPoint, err := deserializeOutPoint(cursor.Key())
			// Deserialize the outpoint
			outpoint, err := deserializeOutpoint(cursor.Key())
			if err != nil {
				// Ensure any deserialization errors are returned as database
				// corruption errors.
				if isDeserializeErr(err) {
					return database.Error{
						ErrorCode: database.ErrCorruption,
						Description: fmt.Sprintf("corrupt outPoint: %s", err),
						Description: fmt.Sprintf("corrupt outpoint: %s", err),
					}
				}

@@ -666,11 +550,14 @@ func (dag *BlockDAG) initDAGState() error {
				return err
			}

			fullUTXOCollection[*outPoint] = entry
			fullUTXOCollection[*outpoint] = entry
		}

		// Apply the loaded utxoCollection to the virtual block.
		dag.virtual.utxoSet.utxoCollection = fullUTXOCollection
		dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
		if err != nil {
			return AssertError(fmt.Sprintf("Error loading UTXOSet: %s", err))
		}

		// Apply the stored tips to the virtual block.
		tips := newSet()
@@ -686,33 +573,126 @@ func (dag *BlockDAG) initDAGState() error {

		// Set the last finality point
		dag.lastFinalityPoint = dag.index.LookupNode(state.LastFinalityPoint)
		dag.finalizeNodesBelowFinalityPoint(false)

		// Go over any unprocessed blockNodes and process them now.
		for _, node := range unprocessedBlockNodes {
			// Check to see if the block exists in the block DB. If it
			// doesn't, the database has certainly been corrupted.
			blockExists, err := dbTx.HasBlock(node.hash)
			if err != nil {
				return AssertError(fmt.Sprintf("initDAGState: HasBlock "+
					"for block %s failed: %s", node.hash, err))
			}
			if !blockExists {
				return AssertError(fmt.Sprintf("initDAGState: block %s "+
					"exists in block index but not in block db", node.hash))
			}

			// Attempt to accept the block.
			block, err := dbFetchBlockByNode(dbTx, node)
			isOrphan, delay, err := dag.ProcessBlock(block, BFWasStored)
			if err != nil {
				log.Warnf("Block %s, which was not previously processed, "+
					"failed to be accepted to the DAG: %s", node.hash, err)
				continue
			}

			// If the block is an orphan or is delayed then it couldn't have
			// possibly been written to the block index in the first place.
			if isOrphan {
				return AssertError(fmt.Sprintf("Block %s, which was not "+
					"previously processed, turned out to be an orphan, which is "+
					"impossible.", node.hash))
			}
			if delay != 0 {
				return AssertError(fmt.Sprintf("Block %s, which was not "+
					"previously processed, turned out to be delayed, which is "+
					"impossible.", node.hash))
			}
		}

		return nil
	})
}

// deserializeBlockRow parses a value in the block index bucket into a block
// header and block status bitfield.
func deserializeBlockRow(blockRow []byte) (*wire.BlockHeader, blockStatus, error) {
// deserializeBlockNode parses a value in the block index bucket and returns a block node.
func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
	buffer := bytes.NewReader(blockRow)

	var header wire.BlockHeader
	err := header.Deserialize(buffer)
	if err != nil {
		return nil, statusNone, err
		return nil, err
	}

	node := &blockNode{
		hash: header.BlockHash(),
		version: header.Version,
		bits: header.Bits,
		nonce: header.Nonce,
		timestamp: header.Timestamp.Unix(),
		hashMerkleRoot: header.HashMerkleRoot,
		acceptedIDMerkleRoot: header.AcceptedIDMerkleRoot,
		utxoCommitment: header.UTXOCommitment,
	}

	node.children = newSet()
	node.parents = newSet()

	for _, hash := range header.ParentHashes {
		parent := dag.index.LookupNode(hash)
		if parent == nil {
			return nil, AssertError(fmt.Sprintf("deserializeBlockNode: Could "+
				"not find parent %s for block %s", hash, header.BlockHash()))
		}
		node.parents.add(parent)
	}

	statusByte, err := buffer.ReadByte()
	if err != nil {
		return nil, statusNone, err
		return nil, err
	}
	node.status = blockStatus(statusByte)

	selectedParentHash := &daghash.Hash{}
	if _, err := io.ReadFull(buffer, selectedParentHash[:]); err != nil {
		return nil, err
	}

	return &header, blockStatus(statusByte), nil
	// Because genesis doesn't have a selected parent, it's serialized as the zero hash
	if !selectedParentHash.IsEqual(&daghash.ZeroHash) {
		node.selectedParent = dag.index.LookupNode(selectedParentHash)
	}

	node.blueScore, err = binaryserializer.Uint64(buffer, byteOrder)
	if err != nil {
		return nil, err
	}

	bluesCount, err := wire.ReadVarInt(buffer)
	if err != nil {
		return nil, err
	}

	node.blues = make([]*blockNode, bluesCount)
	for i := uint64(0); i < bluesCount; i++ {
		hash := &daghash.Hash{}
		if _, err := io.ReadFull(buffer, hash[:]); err != nil {
			return nil, err
		}
		node.blues[i] = dag.index.LookupNode(hash)
	}

	node.height = calculateNodeHeight(node)
	node.chainHeight = calculateChainHeight(node)

	return node, nil
}
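dbStoreBlockNode further below writes these fields in exactly this order, so the two functions together pin down the on-disk record. As an editor's annotation (the layout is inferred from the read calls above, not a documented format):

// Block index value layout, as implied by deserializeBlockNode:
//
//	[block header]          wire.BlockHeader serialization
//	[status]                1 byte (blockStatus)
//	[selected parent hash]  32 bytes; the zero hash for genesis
//	[blue score]            8 bytes (byteOrder)
//	[blues count]           wire varint
//	[blue hashes]           32 bytes each, blues-count times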
// dbFetchBlockByNode uses an existing database transaction to retrieve the
// raw block for the provided node, deserialize it, and return a util.Block
// with the height set.
// of it.
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error) {
	// Load the raw block bytes from the database.
	blockBytes, err := dbTx.FetchBlock(node.hash)
@@ -720,17 +700,15 @@ func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error)
		return nil, err
	}

	// Create the encapsulated block and set the height appropriately.
	// Create the encapsulated block.
	block, err := util.NewBlockFromBytes(blockBytes)
	if err != nil {
		return nil, err
	}
	block.SetHeight(node.height)

	return block, nil
}

// dbStoreBlockNode stores the block header and validation status to the block
// dbStoreBlockNode stores the block node data into the block
// index bucket. This overwrites the current entry if there exists one.
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
	// Serialize block data to be stored.
@@ -740,15 +718,44 @@ func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
	if err != nil {
		return err
	}

	err = w.WriteByte(byte(node.status))
	if err != nil {
		return err
	}

	// Because genesis doesn't have a selected parent, it's serialized as the zero hash
	selectedParentHash := &daghash.ZeroHash
	if node.selectedParent != nil {
		selectedParentHash = node.selectedParent.hash
	}
	_, err = w.Write(selectedParentHash[:])
	if err != nil {
		return err
	}

	err = binaryserializer.PutUint64(w, byteOrder, node.blueScore)
	if err != nil {
		return err
	}

	err = wire.WriteVarInt(w, uint64(len(node.blues)))
	if err != nil {
		return err
	}

	for _, blue := range node.blues {
		_, err = w.Write(blue.hash[:])
		if err != nil {
			return err
		}
	}

	value := w.Bytes()

	// Write block header data to block index bucket.
	blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
	key := blockIndexKey(node.hash, uint32(node.height))
	key := BlockIndexKey(node.hash, node.blueScore)
	return blockIndexBucket.Put(key, value)
}
@@ -765,23 +772,26 @@ func dbStoreBlock(dbTx database.Tx, block *util.Block) error {
	return dbTx.StoreBlock(block)
}

// blockIndexKey generates the binary key for an entry in the block index
// bucket. The key is composed of the block height encoded as a big-endian
// 32-bit unsigned int followed by the 32 byte block hash.
func blockIndexKey(blockHash *daghash.Hash, blockHeight uint32) []byte {
	indexKey := make([]byte, daghash.HashSize+4)
	binary.BigEndian.PutUint32(indexKey[0:4], blockHeight)
	copy(indexKey[4:daghash.HashSize+4], blockHash[:])
// BlockIndexKey generates the binary key for an entry in the block index
// bucket. The key is composed of the block blue score encoded as a big-endian
// 64-bit unsigned int followed by the 32 byte block hash.
// The blue score component is important for iteration order.
func BlockIndexKey(blockHash *daghash.Hash, blueScore uint64) []byte {
	indexKey := make([]byte, daghash.HashSize+8)
	binary.BigEndian.PutUint64(indexKey[0:8], blueScore)
	copy(indexKey[8:daghash.HashSize+8], blockHash[:])
	return indexKey
}
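Because the blue score prefix is big-endian, byte-wise comparison of two keys agrees with numeric comparison of their blue scores, so a database cursor walks the block index from lowest to highest blue score and the hash suffix only breaks ties. A quick illustration, where hashA and hashB stand for any two block hashes:

keyA := BlockIndexKey(hashA, 5)
keyB := BlockIndexKey(hashB, 12)
// Always negative: 5 < 12 is decided within the first 8 bytes,
// before the hash bytes are ever compared.
fmt.Println(bytes.Compare(keyA, keyB))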
// BlockByHash returns the block from the main chain with the given hash with
// the appropriate chain height set.
func blockHashFromBlockIndexKey(BlockIndexKey []byte) (*daghash.Hash, error) {
	return daghash.NewHash(BlockIndexKey[8 : daghash.HashSize+8])
}

// BlockByHash returns the block from the DAG with the given hash.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
	// Lookup the block hash in block index and ensure it is in the best
	// chain.
	// Lookup the block hash in block index and ensure it is in the DAG
	node := dag.index.LookupNode(hash)
	if node == nil {
		str := fmt.Sprintf("block %s is not in the main chain", hash)
@@ -797,3 +807,49 @@ func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
	})
	return block, err
}

// BlockHashesFrom returns a slice of block hashes starting from startHash,
// ordered by blueScore. If startHash is nil then the genesis block is used.
//
// This method MUST be called with the DAG lock held
func (dag *BlockDAG) BlockHashesFrom(startHash *daghash.Hash, limit int) ([]*daghash.Hash, error) {
	blockHashes := make([]*daghash.Hash, 0, limit)
	if startHash == nil {
		startHash = dag.genesis.hash

		// If we're starting from the beginning we should include the
		// genesis hash in the result
		blockHashes = append(blockHashes, dag.genesis.hash)
	}
	if !dag.BlockExists(startHash) {
		return nil, errors.Errorf("block %s not found", startHash)
	}
	blueScore, err := dag.BlueScoreByBlockHash(startHash)
	if err != nil {
		return nil, err
	}

	err = dag.index.db.View(func(dbTx database.Tx) error {
		blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
		startKey := BlockIndexKey(startHash, blueScore)

		cursor := blockIndexBucket.Cursor()
		cursor.Seek(startKey)
		for ok := cursor.Next(); ok; ok = cursor.Next() {
			key := cursor.Key()
			blockHash, err := blockHashFromBlockIndexKey(key)
			if err != nil {
				return err
			}
			blockHashes = append(blockHashes, blockHash)
			if len(blockHashes) == limit {
				break
			}
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return blockHashes, nil
}
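Note that iteration begins with cursor.Next() after the Seek, so startHash itself is never re-emitted, which is also why the genesis hash has to be appended explicitly when startHash is nil. That makes the last returned hash directly usable as the next page's cursor. A hypothetical pagination sketch (handleHashes is a stand-in for any caller-side consumer, and the DAG lock must be held, per the comment above):

var cursor *daghash.Hash // nil means "start from genesis"
for {
	hashes, err := dag.BlockHashesFrom(cursor, 1000)
	if err != nil {
		return err
	}
	if len(hashes) == 0 {
		break // nothing past the cursor; done
	}
	handleHashes(hashes)
	cursor = hashes[len(hashes)-1] // resume after the last hash seen
}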
@@ -6,12 +6,12 @@ package blockdag

import (
	"bytes"
	"errors"
	"github.com/pkg/errors"
	"reflect"
	"testing"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/util/daghash"
)

// TestErrNotInDAG ensures the functions related to errNotInDAG work
@@ -49,24 +49,24 @@ func TestUtxoSerialization(t *testing.T) {
		// From tx in main blockchain:
		// b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
		{
			name: "height 1, coinbase",
			name: "blue score 1, coinbase",
			entry: &UTXOEntry{
				amount: 5000000000,
				pkScript: hexToBytes("410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
				blockHeight: 1,
				packedFlags: tfBlockReward,
				amount: 5000000000,
				scriptPubKey: hexToBytes("410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
				blockBlueScore: 1,
				packedFlags: tfCoinbase,
			},
			serialized: hexToBytes("03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"),
		},
		// From tx in main blockchain:
		// 8131ffb0a2c945ecaf9b9063e59558784f9c3a74741ce6ae2a18d0571dac15bb:1
		{
			name: "height 100001, not coinbase",
			name: "blue score 100001, not coinbase",
			entry: &UTXOEntry{
				amount: 1000000,
				pkScript: hexToBytes("76a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
				blockHeight: 100001,
				packedFlags: 0,
				amount: 1000000,
				scriptPubKey: hexToBytes("76a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
				blockBlueScore: 100001,
				packedFlags: 0,
			},
			serialized: hexToBytes("8b99420700ee8bd501094a7d5ca318da2506de35e1cb025ddc"),
		},
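The serialized vectors can be verified by hand against the format in dagio.go; this decomposition of the second vector is an editor's annotation, using the VLQ and amount rules sketched earlier:

// 8b 99 42    VLQ header code = 200002 = 100001<<1 | 0
//             (block blue score 100001, coinbase bit clear)
// 07          compressed amount = 7, which decompresses to 1000000
// 00 ee8b...  script code 0x00 = pay-to-pubkey-hash, followed by the
//             20-byte pubkey hash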
@@ -74,12 +74,7 @@ func TestUtxoSerialization(t *testing.T) {

	for i, test := range tests {
		// Ensure the utxo entry serializes to the expected value.
		gotBytes, err := serializeUTXOEntry(test.entry)
		if err != nil {
			t.Errorf("serializeUTXOEntry #%d (%s) unexpected "+
				"error: %v", i, test.name, err)
			continue
		}
		gotBytes := serializeUTXOEntry(test.entry)
		if !bytes.Equal(gotBytes, test.serialized) {
			t.Errorf("serializeUTXOEntry #%d (%s): mismatched "+
				"bytes - got %x, want %x", i, test.name,
@@ -104,22 +99,22 @@ func TestUtxoSerialization(t *testing.T) {
			continue
		}

		if !bytes.Equal(utxoEntry.PkScript(), test.entry.PkScript()) {
		if !bytes.Equal(utxoEntry.ScriptPubKey(), test.entry.ScriptPubKey()) {
			t.Errorf("deserializeUTXOEntry #%d (%s) mismatched "+
				"scripts: got %x, want %x", i, test.name,
				utxoEntry.PkScript(), test.entry.PkScript())
				utxoEntry.ScriptPubKey(), test.entry.ScriptPubKey())
			continue
		}
		if utxoEntry.BlockHeight() != test.entry.BlockHeight() {
		if utxoEntry.BlockBlueScore() != test.entry.BlockBlueScore() {
			t.Errorf("deserializeUTXOEntry #%d (%s) mismatched "+
				"block height: got %d, want %d", i, test.name,
				utxoEntry.BlockHeight(), test.entry.BlockHeight())
				"block blue score: got %d, want %d", i, test.name,
				utxoEntry.BlockBlueScore(), test.entry.BlockBlueScore())
			continue
		}
		if utxoEntry.IsBlockReward() != test.entry.IsBlockReward() {
		if utxoEntry.IsCoinbase() != test.entry.IsCoinbase() {
			t.Errorf("deserializeUTXOEntry #%d (%s) mismatched "+
				"coinbase flag: got %v, want %v", i, test.name,
				utxoEntry.IsBlockReward(), test.entry.IsBlockReward())
				utxoEntry.IsCoinbase(), test.entry.IsCoinbase())
			continue
		}
	}
@@ -11,159 +11,42 @@ import (
	"github.com/daglabs/btcd/util"
)

// calcEasiestDifficulty calculates the easiest possible difficulty that a block
// can have given starting difficulty bits and a duration. It is mainly used to
// verify that claimed proof of work by a block is sane as compared to a
// known good checkpoint.
func (dag *BlockDAG) calcEasiestDifficulty(bits uint32, duration time.Duration) uint32 {
	// Convert types used in the calculations below.
	durationVal := int64(duration / time.Second)
	adjustmentFactor := big.NewInt(dag.dagParams.RetargetAdjustmentFactor)

	// The test network rules allow minimum difficulty blocks after more
	// than twice the desired amount of time needed to generate a block has
	// elapsed.
	if dag.dagParams.ReduceMinDifficulty {
		reductionTime := int64(dag.dagParams.MinDiffReductionTime /
			time.Second)
		if durationVal > reductionTime {
			return dag.dagParams.PowLimitBits
		}
	}

	// Since easier difficulty equates to higher numbers, the easiest
	// difficulty for a given duration is the largest value possible given
	// the number of retargets for the duration and starting difficulty
	// multiplied by the max adjustment factor.
	newTarget := util.CompactToBig(bits)
	for durationVal > 0 && newTarget.Cmp(dag.dagParams.PowLimit) < 0 {
		newTarget.Mul(newTarget, adjustmentFactor)
		durationVal -= dag.maxRetargetTimespan
	}

	// Limit new value to the proof of work limit.
	if newTarget.Cmp(dag.dagParams.PowLimit) > 0 {
		newTarget.Set(dag.dagParams.PowLimit)
	}

	return util.BigToCompact(newTarget)
}

// findPrevTestNetDifficulty returns the difficulty of the previous block which
// did not have the special testnet minimum difficulty rule applied.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) findPrevTestNetDifficulty(startNode *blockNode) uint32 {
	// Search backwards through the chain for the last block without
	// the special rule applied.
	iterNode := startNode
	for iterNode != nil && iterNode.height%dag.blocksPerRetarget != 0 &&
		iterNode.bits == dag.dagParams.PowLimitBits {

		iterNode = iterNode.selectedParent
	}

	// Return the found difficulty or the minimum difficulty if no
	// appropriate block was found.
	lastBits := dag.dagParams.PowLimitBits
	if iterNode != nil {
		lastBits = iterNode.bits
	}
	return lastBits
}

// calcNextRequiredDifficulty calculates the required difficulty for the block
// after the passed previous block node based on the difficulty retarget rules.
// This function differs from the exported CalcNextRequiredDifficulty in that
// the exported version uses the current best chain as the previous block node
// while this function accepts any block node.
func (dag *BlockDAG) calcNextRequiredDifficulty(bluestParent *blockNode, newBlockTime time.Time) (uint32, error) {
// requiredDifficulty calculates the required difficulty for a
// block given its bluest parent.
func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime time.Time) uint32 {
	// Genesis block.
	if bluestParent == nil {
		return dag.dagParams.PowLimitBits, nil
	if bluestParent == nil || bluestParent.blueScore < dag.difficultyAdjustmentWindowSize+1 {
		return dag.powMaxBits
	}

	// Return the previous block's difficulty requirements if this block
	// is not at a difficulty retarget interval.
	if (bluestParent.height+1)%dag.blocksPerRetarget != 0 {
		// For networks that support it, allow special reduction of the
		// required difficulty once too much time has elapsed without
		// mining a block.
		if dag.dagParams.ReduceMinDifficulty {
			// Return minimum difficulty when more than the desired
			// amount of time has elapsed without mining a block.
			reductionTime := int64(dag.dagParams.MinDiffReductionTime /
				time.Second)
			allowMinTime := bluestParent.timestamp + reductionTime
			if newBlockTime.Unix() > allowMinTime {
				return dag.dagParams.PowLimitBits, nil
			}
	// Fetch a window of dag.difficultyAdjustmentWindowSize + 1 blocks so we can
	// have dag.difficultyAdjustmentWindowSize block intervals
	timestampsWindow := blueBlockWindow(bluestParent, dag.difficultyAdjustmentWindowSize+1)
	windowMinTimestamp, windowMaxTimeStamp := timestampsWindow.minMaxTimestamps()

			// The block was mined within the desired timeframe, so
			// return the difficulty for the last block which did
			// not have the special minimum difficulty rule applied.
			return dag.findPrevTestNetDifficulty(bluestParent), nil
		}

		// For the main network (or any unrecognized networks), simply
		// return the previous block's difficulty requirements.
		return bluestParent.bits, nil
	}

	// Get the block node at the previous retarget (targetTimespan days
	// worth of blocks).
	firstNode := bluestParent.RelativeAncestor(dag.blocksPerRetarget - 1)
	if firstNode == nil {
		return 0, AssertError("unable to obtain previous retarget block")
	}

	// Limit the amount of adjustment that can occur to the previous
	// difficulty.
	actualTimespan := bluestParent.timestamp - firstNode.timestamp
	adjustedTimespan := actualTimespan
	if actualTimespan < dag.minRetargetTimespan {
		adjustedTimespan = dag.minRetargetTimespan
	} else if actualTimespan > dag.maxRetargetTimespan {
		adjustedTimespan = dag.maxRetargetTimespan
	}
	// Remove the last block from the window so as to calculate the average
	// target of dag.difficultyAdjustmentWindowSize blocks
	targetsWindow := timestampsWindow[:dag.difficultyAdjustmentWindowSize]

	// Calculate new target difficulty as:
	//  currentDifficulty * (adjustedTimespan / targetTimespan)
	//  averageWindowTarget * ((windowMaxTimestamp - windowMinTimestamp) / (targetTimePerBlock * windowSize))
	// The result uses integer division which means it will be slightly
	// rounded down. Bitcoind also uses integer division to calculate this
	// result.
	oldTarget := util.CompactToBig(bluestParent.bits)
	newTarget := new(big.Int).Mul(oldTarget, big.NewInt(adjustedTimespan))
	targetTimeSpan := int64(dag.dagParams.TargetTimespan / time.Second)
	newTarget.Div(newTarget, big.NewInt(targetTimeSpan))

	// Limit new value to the proof of work limit.
	if newTarget.Cmp(dag.dagParams.PowLimit) > 0 {
		newTarget.Set(dag.dagParams.PowLimit)
	// rounded down.
	newTarget := targetsWindow.averageTarget()
	newTarget.
		Mul(newTarget, big.NewInt(windowMaxTimeStamp-windowMinTimestamp)).
		Div(newTarget, big.NewInt(dag.targetTimePerBlock)).
		Div(newTarget, big.NewInt(int64(dag.difficultyAdjustmentWindowSize)))
	if newTarget.Cmp(dag.dagParams.PowMax) > 0 {
		return dag.powMaxBits
	}

	// Log new target difficulty and return it. The new target logging is
	// intentionally converting the bits back to a number instead of using
	// newTarget since conversion to the compact representation loses
	// precision.
	newTargetBits := util.BigToCompact(newTarget)
	log.Debugf("Difficulty retarget at block height %d", bluestParent.height+1)
	log.Debugf("Old target %08x (%064x)", bluestParent.bits, oldTarget)
	log.Debugf("New target %08x (%064x)", newTargetBits, util.CompactToBig(newTargetBits))
	log.Debugf("Actual timespan %s, adjusted timespan %s, target timespan %s",
		time.Duration(actualTimespan)*time.Second,
		time.Duration(adjustedTimespan)*time.Second,
		dag.dagParams.TargetTimespan)

	return newTargetBits, nil
	return newTargetBits
}
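Put differently, the new target is the window's average target scaled by the window's actual time span over its expected span (targetTimePerBlock * windowSize): blocks arriving faster than one per targetTimePerBlock shrink the target and raise the difficulty. A minimal numeric sketch of the same big.Int arithmetic, with illustrative stand-in values rather than real network parameters:

avgTarget := big.NewInt(1000000) // stand-in for targetsWindow.averageTarget()
windowSpan := int64(2016)        // windowMaxTimeStamp - windowMinTimestamp, in seconds
targetTimePerBlock := int64(1)   // expected seconds per block
windowSize := int64(2016)        // difficultyAdjustmentWindowSize

newTarget := new(big.Int).Mul(avgTarget, big.NewInt(windowSpan))
newTarget.Div(newTarget, big.NewInt(targetTimePerBlock))
newTarget.Div(newTarget, big.NewInt(windowSize))
// Here windowSpan == targetTimePerBlock*windowSize, so newTarget equals
// avgTarget and the difficulty is unchanged; halving windowSpan would
// halve the target, i.e. double the difficulty.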
// CalcNextRequiredDifficulty calculates the required difficulty for the block
// after the end of the current best chain based on the difficulty retarget
// rules.
// NextRequiredDifficulty calculates the required difficulty for a block that will
// be built on top of the current tips.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, error) {
	difficulty, err := dag.calcNextRequiredDifficulty(dag.selectedTip(), timestamp)
	return difficulty, err
func (dag *BlockDAG) NextRequiredDifficulty(timestamp time.Time) uint32 {
	difficulty := dag.requiredDifficulty(dag.virtual.parents.bluest(), timestamp)
	return difficulty
}
@@ -5,8 +5,12 @@
package blockdag

import (
	"github.com/daglabs/btcd/dagconfig"
	"github.com/daglabs/btcd/util/daghash"
	"github.com/daglabs/btcd/wire"
	"math/big"
	"testing"
	"time"

	"github.com/daglabs/btcd/util"
)
@@ -75,3 +79,120 @@ func TestCalcWork(t *testing.T) {
		}
	}
}
func TestDifficulty(t *testing.T) {
	params := dagconfig.SimNetParams
	params.K = 1
	dag := newTestDAG(&params)
	nonce := uint64(0)
	zeroTime := time.Unix(0, 0)
	addNode := func(parents blockSet, blockTime time.Time) *blockNode {
		bluestParent := parents.bluest()
		if blockTime == zeroTime {
			blockTime = time.Unix(bluestParent.timestamp+1, 0)
		}
		header := &wire.BlockHeader{
			ParentHashes: parents.hashes(),
			Bits: dag.requiredDifficulty(bluestParent, blockTime),
			Nonce: nonce,
			Timestamp: blockTime,
			HashMerkleRoot: &daghash.ZeroHash,
			AcceptedIDMerkleRoot: &daghash.ZeroHash,
			UTXOCommitment: &daghash.ZeroHash,
		}
		node := newBlockNode(header, parents, dag.dagParams.K)
		node.updateParentsChildren()
		nonce++
		return node
	}
	tip := dag.genesis
	for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
		tip = addNode(setFromSlice(tip), zeroTime)
		if tip.bits != dag.genesis.bits {
			t.Fatalf("As long as the bluest parent's blue score is less than the difficulty adjustment window size, the difficulty should be the same as genesis'")
		}
	}
	for i := uint64(0); i < dag.difficultyAdjustmentWindowSize+1000; i++ {
		tip = addNode(setFromSlice(tip), zeroTime)
		if tip.bits != dag.genesis.bits {
			t.Fatalf("As long as the block rate remains the same, the difficulty shouldn't change")
		}
	}
	nodeInThePast := addNode(setFromSlice(tip), tip.PastMedianTime(dag))
	if nodeInThePast.bits != tip.bits {
		t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block's bluest parent")
	}
	tip = nodeInThePast

	tip = addNode(setFromSlice(tip), zeroTime)
	if tip.bits != nodeInThePast.bits {
		t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block's bluest parent")
	}
	tip = addNode(setFromSlice(tip), zeroTime)
	if compareBits(tip.bits, nodeInThePast.bits) >= 0 {
		t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the block rate, so the difficulty should increase as well")
	}
	expectedBits := uint32(0x207ff395)
	if tip.bits != expectedBits {
		t.Errorf("tip.bits was expected to be %x but got %x", expectedBits, tip.bits)
	}

	// Increase block rate to increase difficulty
	for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
		tip = addNode(setFromSlice(tip), tip.PastMedianTime(dag))
		if compareBits(tip.bits, tip.parents.bluest().bits) > 0 {
			t.Fatalf("Because we're increasing the block rate, the difficulty can't decrease")
		}
	}

	// Add blocks until difficulty stabilizes
	lastBits := tip.bits
	sameBitsCount := uint64(0)
	for sameBitsCount < dag.difficultyAdjustmentWindowSize+1 {
		tip = addNode(setFromSlice(tip), zeroTime)
		if tip.bits == lastBits {
			sameBitsCount++
		} else {
			lastBits = tip.bits
			sameBitsCount = 0
		}
	}
	slowNode := addNode(setFromSlice(tip), time.Unix(tip.timestamp+2, 0))
	if slowNode.bits != tip.bits {
		t.Fatalf("The difficulty should only change when slowNode is in the past of a block's bluest parent")
	}

	tip = slowNode

	tip = addNode(setFromSlice(tip), zeroTime)
	if tip.bits != slowNode.bits {
		t.Fatalf("The difficulty should only change when slowNode is in the past of a block's bluest parent")
	}
	tip = addNode(setFromSlice(tip), zeroTime)
	if compareBits(tip.bits, slowNode.bits) <= 0 {
		t.Fatalf("tip.bits should be larger than slowNode.bits because slowNode decreased the block rate, so the difficulty should decrease as well")
	}

	splitNode := addNode(setFromSlice(tip), zeroTime)
	tip = splitNode
	for i := 0; i < 100; i++ {
		tip = addNode(setFromSlice(tip), zeroTime)
	}
	blueTip := tip

	redChainTip := splitNode
	for i := 0; i < 10; i++ {
		redChainTip = addNode(setFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
	}
	tipWithRedPast := addNode(setFromSlice(redChainTip, blueTip), zeroTime)
	tipWithoutRedPast := addNode(setFromSlice(blueTip), zeroTime)
	if tipWithoutRedPast.bits != tipWithRedPast.bits {
		t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks shouldn't affect the difficulty")
	}
}

func compareBits(a uint32, b uint32) int {
	aTarget := util.CompactToBig(a)
	bTarget := util.CompactToBig(b)
	return aTarget.Cmp(bTarget)
}
@@ -37,9 +37,9 @@ const (
	// exists.
	ErrDuplicateBlock ErrorCode = iota

	// ErrBlockTooBig indicates the serialized block size exceeds the
	// maximum allowed size.
	ErrBlockTooBig
	// ErrBlockMassTooHigh indicates the mass of a block exceeds the maximum
	// allowed limits.
	ErrBlockMassTooHigh

	// ErrBlockVersionTooOld indicates the block version is too old and is
	// no longer accepted since the majority of the network has upgraded
@@ -84,17 +84,17 @@ const (
	// the expected value.
	ErrBadMerkleRoot

	// ErrBadUTXOCommitment indicates the calculated UTXO commitment does not match
	// the expected value.
	ErrBadUTXOCommitment

	// ErrBadCheckpoint indicates a block that is expected to be at a
	// checkpoint height does not match the expected one.
	ErrBadCheckpoint

	// ErrForkTooOld indicates a block is attempting to fork the block chain
	// before the most recent checkpoint.
	ErrForkTooOld

	// ErrCheckpointTimeTooOld indicates a block has a timestamp before the
	// most recent checkpoint.
	ErrCheckpointTimeTooOld
	// ErrFinalityPointTimeTooOld indicates a block has a timestamp before the
	// last finality point.
	ErrFinalityPointTimeTooOld

	// ErrNoTransactions indicates the block does not have at least one
	// transaction. A valid block must have at least the coinbase
@@ -105,9 +105,9 @@ const (
	// valid transaction must have at least one input.
	ErrNoTxInputs

	// ErrTxTooBig indicates a transaction exceeds the maximum allowed size
	// when serialized.
	ErrTxTooBig
	// ErrTxMassTooHigh indicates the mass of a transaction exceeds the maximum
	// allowed limits.
	ErrTxMassTooHigh

	// ErrBadTxOutValue indicates an output value for a transaction is
	// invalid in some way such as being out of range.
@@ -141,7 +141,7 @@ const (
	ErrOverwriteTx

	// ErrImmatureSpend indicates a transaction is attempting to spend a
	// block reward that has not yet reached the required maturity.
	// coinbase that has not yet reached the required maturity.
	ErrImmatureSpend

	// ErrSpendTooHigh indicates a transaction is attempting to spend more
@@ -164,34 +164,12 @@ const (
	// coinbase transaction.
	ErrMultipleCoinbases

	// ErrBadCoinbaseScriptLen indicates the length of the signature script
	// for a coinbase transaction is not within the valid range.
	ErrBadCoinbaseScriptLen
	// ErrBadCoinbasePayloadLen indicates the length of the payload
	// for a coinbase transaction is too high.
	ErrBadCoinbasePayloadLen

	// ErrBadCoinbaseValue indicates the amount of a coinbase value does
	// not match the expected value of the subsidy plus the sum of all fees.
	ErrBadCoinbaseValue

	// ErrMissingCoinbaseHeight indicates the coinbase transaction for a
	// block does not start with the serialized block height as
	// required for version 2 and higher blocks.
	ErrMissingCoinbaseHeight

	// ErrBadCoinbaseHeight indicates the serialized block height in the
	// coinbase transaction for version 2 and higher blocks does not match
	// the expected value.
	ErrBadCoinbaseHeight

	// ErrSecondTxNotFeeTransaction indicates the second transaction in
	// a block is not a fee transaction.
	ErrSecondTxNotFeeTransaction

	// ErrBadFeeTransaction indicates that the block's fee transaction is not built as expected
	ErrBadFeeTransaction

	// ErrMultipleFeeTransactions indicates a block contains more than one
	// fee transaction.
	ErrMultipleFeeTransactions
	// ErrBadCoinbaseTransaction indicates that the block's coinbase transaction is not built as expected
	ErrBadCoinbaseTransaction

	// ErrScriptMalformed indicates a transaction script is malformed in
	// some way. For example, it might be longer than the maximum allowed
@@ -240,12 +218,16 @@ const (
	// ErrSubnetwork indicates that a block doesn't adhere to the subnetwork
	// registry rules
	ErrSubnetworkRegistry

	// ErrInvalidParentsRelation indicates that one of the parents of a block
	// is also an ancestor of another parent
	ErrInvalidParentsRelation
)

// Map of ErrorCode values back to their constant names for pretty printing.
var errorCodeStrings = map[ErrorCode]string{
	ErrDuplicateBlock: "ErrDuplicateBlock",
	ErrBlockTooBig: "ErrBlockTooBig",
	ErrBlockMassTooHigh: "ErrBlockMassTooHigh",
	ErrBlockVersionTooOld: "ErrBlockVersionTooOld",
	ErrInvalidTime: "ErrInvalidTime",
	ErrTimeTooOld: "ErrTimeTooOld",
@@ -257,11 +239,10 @@ var errorCodeStrings = map[ErrorCode]string{
	ErrHighHash: "ErrHighHash",
	ErrBadMerkleRoot: "ErrBadMerkleRoot",
	ErrBadCheckpoint: "ErrBadCheckpoint",
	ErrForkTooOld: "ErrForkTooOld",
	ErrCheckpointTimeTooOld: "ErrCheckpointTimeTooOld",
	ErrFinalityPointTimeTooOld: "ErrFinalityPointTimeTooOld",
	ErrNoTransactions: "ErrNoTransactions",
	ErrNoTxInputs: "ErrNoTxInputs",
	ErrTxTooBig: "ErrTxTooBig",
	ErrTxMassTooHigh: "ErrTxMassTooHigh",
	ErrBadTxOutValue: "ErrBadTxOutValue",
	ErrDuplicateTxInputs: "ErrDuplicateTxInputs",
	ErrBadTxInput: "ErrBadTxInput",
@@ -275,13 +256,8 @@ var errorCodeStrings = map[ErrorCode]string{
	ErrTooManySigOps: "ErrTooManySigOps",
	ErrFirstTxNotCoinbase: "ErrFirstTxNotCoinbase",
	ErrMultipleCoinbases: "ErrMultipleCoinbases",
	ErrBadCoinbaseScriptLen: "ErrBadCoinbaseScriptLen",
	ErrBadCoinbaseValue: "ErrBadCoinbaseValue",
	ErrMissingCoinbaseHeight: "ErrMissingCoinbaseHeight",
	ErrBadCoinbaseHeight: "ErrBadCoinbaseHeight",
	ErrSecondTxNotFeeTransaction: "ErrSecondTxNotFeeTransaction",
	ErrBadFeeTransaction: "ErrBadFeeTransaction",
	ErrMultipleFeeTransactions: "ErrMultipleFeeTransactions",
	ErrBadCoinbasePayloadLen: "ErrBadCoinbasePayloadLen",
	ErrBadCoinbaseTransaction: "ErrBadCoinbaseTransaction",
	ErrScriptMalformed: "ErrScriptMalformed",
	ErrScriptValidation: "ErrScriptValidation",
	ErrParentBlockUnknown: "ErrParentBlockUnknown",
@@ -293,6 +269,7 @@ var errorCodeStrings = map[ErrorCode]string{
	ErrInvalidGas: "ErrInvalidGas",
	ErrInvalidPayload: "ErrInvalidPayload",
	ErrInvalidPayloadHash: "ErrInvalidPayloadHash",
	ErrInvalidParentsRelation: "ErrInvalidParentsRelation",
}

// String returns the ErrorCode as a human-readable name.

@@ -16,7 +16,7 @@ func TestErrorCodeStringer(t *testing.T) {
		want string
	}{
		{ErrDuplicateBlock, "ErrDuplicateBlock"},
		{ErrBlockTooBig, "ErrBlockTooBig"},
		{ErrBlockMassTooHigh, "ErrBlockMassTooHigh"},
		{ErrBlockVersionTooOld, "ErrBlockVersionTooOld"},
		{ErrInvalidTime, "ErrInvalidTime"},
		{ErrTimeTooOld, "ErrTimeTooOld"},
@@ -28,11 +28,10 @@ func TestErrorCodeStringer(t *testing.T) {
		{ErrHighHash, "ErrHighHash"},
		{ErrBadMerkleRoot, "ErrBadMerkleRoot"},
		{ErrBadCheckpoint, "ErrBadCheckpoint"},
		{ErrForkTooOld, "ErrForkTooOld"},
		{ErrCheckpointTimeTooOld, "ErrCheckpointTimeTooOld"},
		{ErrFinalityPointTimeTooOld, "ErrFinalityPointTimeTooOld"},
		{ErrNoTransactions, "ErrNoTransactions"},
		{ErrNoTxInputs, "ErrNoTxInputs"},
		{ErrTxTooBig, "ErrTxTooBig"},
		{ErrTxMassTooHigh, "ErrTxMassTooHigh"},
		{ErrBadTxOutValue, "ErrBadTxOutValue"},
		{ErrDuplicateTxInputs, "ErrDuplicateTxInputs"},
		{ErrBadTxInput, "ErrBadTxInput"},
@@ -47,13 +46,8 @@ func TestErrorCodeStringer(t *testing.T) {
		{ErrTooManySigOps, "ErrTooManySigOps"},
		{ErrFirstTxNotCoinbase, "ErrFirstTxNotCoinbase"},
		{ErrMultipleCoinbases, "ErrMultipleCoinbases"},
		{ErrBadCoinbaseScriptLen, "ErrBadCoinbaseScriptLen"},
		{ErrBadCoinbaseValue, "ErrBadCoinbaseValue"},
		{ErrMissingCoinbaseHeight, "ErrMissingCoinbaseHeight"},
		{ErrBadCoinbaseHeight, "ErrBadCoinbaseHeight"},
		{ErrSecondTxNotFeeTransaction, "ErrSecondTxNotFeeTransaction"},
		{ErrBadFeeTransaction, "ErrBadFeeTransaction"},
		{ErrMultipleFeeTransactions, "ErrMultipleFeeTransactions"},
		{ErrBadCoinbasePayloadLen, "ErrBadCoinbasePayloadLen"},
		{ErrBadCoinbaseTransaction, "ErrBadCoinbaseTransaction"},
		{ErrScriptMalformed, "ErrScriptMalformed"},
		{ErrScriptValidation, "ErrScriptValidation"},
		{ErrParentBlockUnknown, "ErrParentBlockUnknown"},
@@ -65,6 +59,7 @@ func TestErrorCodeStringer(t *testing.T) {
		{ErrInvalidGas, "ErrInvalidGas"},
		{ErrInvalidPayload, "ErrInvalidPayload"},
		{ErrInvalidPayloadHash, "ErrInvalidPayloadHash"},
		{ErrInvalidParentsRelation, "ErrInvalidParentsRelation"},
		{0xffff, "Unknown ErrorCode (65535)"},
	}
@@ -1,71 +0,0 @@
|
||||
// Copyright (c) 2014-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blockdag_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/daglabs/btcd/blockdag"
|
||||
"github.com/daglabs/btcd/dagconfig"
|
||||
"github.com/daglabs/btcd/database"
|
||||
_ "github.com/daglabs/btcd/database/ffldb"
|
||||
"github.com/daglabs/btcd/util"
|
||||
)
|
||||
|
||||
// This example demonstrates how to create a new chain instance and use
|
||||
// ProcessBlock to attempt to add a block to the chain. As the package
|
||||
// overview documentation describes, this includes all of the Bitcoin consensus
|
||||
// rules. This example intentionally attempts to insert a duplicate genesis
|
||||
// block to illustrate how an invalid block is handled.
|
||||
func ExampleBlockDAG_ProcessBlock() {
|
||||
// Create a new database to store the accepted blocks into. Typically
|
||||
// this would be opening an existing database and would not be deleting
|
||||
// and creating a new database like this, but it is done here so this is
|
||||
// a complete working example and does not leave temporary files lying
// around.
dbPath := filepath.Join(os.TempDir(), "exampleprocessblock")
_ = os.RemoveAll(dbPath)
db, err := database.Create("ffldb", dbPath, dagconfig.MainNetParams.Net)
if err != nil {
fmt.Printf("Failed to create database: %v\n", err)
return
}
defer os.RemoveAll(dbPath)
defer db.Close()

// Create a new BlockDAG instance using the underlying database for
// the main bitcoin network. This example does not demonstrate some
// of the other available configuration options such as specifying a
// notification callback and signature cache. Also, the caller would
// ordinarily keep a reference to the median time source and add time
// values obtained from other peers on the network so the local time is
// adjusted to be in agreement with other peers.
chain, err := blockdag.New(&blockdag.Config{
DB: db,
DAGParams: &dagconfig.MainNetParams,
TimeSource: blockdag.NewMedianTime(),
})
if err != nil {
fmt.Printf("Failed to create chain instance: %v\n", err)
return
}

// Process a block. For this example, we are going to intentionally
// cause an error by trying to process the genesis block which already
// exists.
genesisBlock := util.NewBlock(dagconfig.MainNetParams.GenesisBlock)
isOrphan, err := chain.ProcessBlock(genesisBlock,
blockdag.BFNone)
if err != nil {
fmt.Printf("Failed to process block: %v\n", err)
return
}
fmt.Printf("Block accepted. Is it an orphan?: %v", isOrphan)

// Output:
// Failed to process block: already have block 6477863f190fac902e556da4671c7537da4fe367022b1f00fa5270e0d073cc08
}
@@ -2,30 +2,32 @@ package blockdag_test

import (
"fmt"
"github.com/pkg/errors"
"math"
"testing"

"github.com/daglabs/btcd/util/subnetworkid"

"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/testtools"

"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/mining"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
)

// TestFinality checks that the finality mechanism works as expected.
// This is how the flow goes:
// 1) We build a chain of blockdag.FinalityInterval blocks and call its tip altChainTip.
// 2) We build another chain (let's call it mainChain) of 2 * blockdag.FinalityInterval
// 1) We build a chain of params.FinalityInterval blocks and call its tip altChainTip.
// 2) We build another chain (let's call it mainChain) of 2 * params.FinalityInterval
// blocks, which points to genesis, and then we check that the block in that
// chain with height of blockdag.FinalityInterval is marked as finality point (This is
// chain with height of params.FinalityInterval is marked as finality point (This is
// very predictable, because the blue score of each new block in a chain is the
// parents plus one).
// 3) We make a new child to block with height (2 * blockdag.FinalityInterval - 1)
// 3) We make a new child to block with height (2 * params.FinalityInterval - 1)
// in mainChain, and we check that connecting it to the DAG
// doesn't affect the last finality point.
// 4) We make a block that points to genesis, and check that it
@@ -37,6 +39,7 @@ import (
func TestFinality(t *testing.T) {
params := dagconfig.SimNetParams
params.K = 1
params.FinalityInterval = 100
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
DAGParams: &params,
})
@@ -45,18 +48,22 @@ func TestFinality(t *testing.T) {
}
defer teardownFunc()
buildNodeToDag := func(parentHashes []*daghash.Hash) (*util.Block, error) {
msgBlock, err := mining.PrepareBlockForTest(dag, &params, parentHashes, nil, false, 1)
msgBlock, err := mining.PrepareBlockForTest(dag, &params, parentHashes, nil, false)
if err != nil {
return nil, err
}
block := util.NewBlock(msgBlock)

isOrphan, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
isOrphan, delay, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
if err != nil {
return nil, err
}
if delay != 0 {
return nil, errors.Errorf("ProcessBlock: block " +
"is too far in the future")
}
if isOrphan {
return nil, fmt.Errorf("ProcessBlock: unexpected returned orphan block")
return nil, errors.Errorf("ProcessBlock: unexpected returned orphan block")
}

return block, nil
@@ -65,8 +72,8 @@ func TestFinality(t *testing.T) {
genesis := util.NewBlock(params.GenesisBlock)
currentNode := genesis

// First we build a chain of blockdag.FinalityInterval blocks for future use
for i := 0; i < blockdag.FinalityInterval; i++ {
// First we build a chain of params.FinalityInterval blocks for future use
for i := 0; i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -75,10 +82,10 @@ func TestFinality(t *testing.T) {

altChainTip := currentNode

// Now we build a new chain of 2 * blockdag.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * blockdag.FinalityInterval to be the last finality point
// Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * params.FinalityInterval to be the last finality point
currentNode = genesis
for i := 0; i < blockdag.FinalityInterval; i++ {
for i := 0; i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -87,7 +94,7 @@ func TestFinality(t *testing.T) {

expectedFinalityPoint := currentNode

for i := 0; i < blockdag.FinalityInterval; i++ {
for i := 0; i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -111,7 +118,22 @@ func TestFinality(t *testing.T) {

// Here we check that a block with lower blue score than the last finality
// point will get rejected
_, err = buildNodeToDag([]*daghash.Hash{genesis.Hash()})
fakeCoinbaseTx, err := dag.NextBlockCoinbaseTransaction(nil, nil)
if err != nil {
t.Errorf("NextBlockCoinbaseTransaction: %s", err)
}
merkleRoot := blockdag.BuildHashMerkleTreeStore([]*util.Tx{fakeCoinbaseTx}).Root()
beforeFinalityBlock := wire.NewMsgBlock(&wire.BlockHeader{
Version: 0x10000000,
ParentHashes: []*daghash.Hash{genesis.Hash()},
HashMerkleRoot: merkleRoot,
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
Timestamp: dag.SelectedTipHeader().Timestamp,
Bits: genesis.MsgBlock().Header.Bits,
})
beforeFinalityBlock.AddTransaction(fakeCoinbaseTx.MsgTx())
_, _, err = dag.ProcessBlock(util.NewBlock(beforeFinalityBlock), blockdag.BFNoPoWCheck)
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
@@ -121,7 +143,7 @@ func TestFinality(t *testing.T) {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
}
} else {
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", rErr)
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", err)
}

// Here we check that a block that doesn't have the last finality point in
@@ -140,11 +162,22 @@ func TestFinality(t *testing.T) {
}
}

// TestFinalityInterval tests that the finality interval is
// smaller than wire.MaxInvPerMsg, so when a peer receives
// a getblocks message it should always be able to send
// all the necessary invs.
func TestFinalityInterval(t *testing.T) {
params := dagconfig.SimNetParams
if params.FinalityInterval > wire.MaxInvPerMsg {
t.Errorf("dagconfig.SimNetParams.FinalityInterval should be lower or equal to wire.MaxInvPerMsg")
}
}

// TestSubnetworkRegistry tests the full subnetwork registry flow
func TestSubnetworkRegistry(t *testing.T) {
params := dagconfig.SimNetParams
params.K = 1
params.BlockRewardMaturity = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
DAGParams: &params,
})
@@ -169,7 +202,7 @@ func TestSubnetworkRegistry(t *testing.T) {

func TestChainedTransactions(t *testing.T) {
params := dagconfig.SimNetParams
params.BlockRewardMaturity = 1
params.BlockCoinbaseMaturity = 0
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", blockdag.Config{
DAGParams: &params,
@@ -179,48 +212,61 @@ func TestChainedTransactions(t *testing.T) {
}
defer teardownFunc()

block1, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{params.GenesisHash}, nil, false, 1)
block1, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{params.GenesisHash}, nil, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err := dag.ProcessBlock(util.NewBlock(block1), blockdag.BFNoPoWCheck)
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(block1), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: block1 " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: block1 got unexpectedly orphaned")
}
cbTx := block1.Transactions[0]

signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("Failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutPoint: wire.OutPoint{TxID: cbTx.TxID(), Index: 0},
SignatureScript: nil,
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
PkScript: blockdag.OpTrueScript,
Value: uint64(1),
ScriptPubKey: blockdag.OpTrueScript,
Value: uint64(1),
}
tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})

chainedTxIn := &wire.TxIn{
PreviousOutPoint: wire.OutPoint{TxID: tx.TxID(), Index: 0},
SignatureScript: nil,
PreviousOutpoint: wire.Outpoint{TxID: *tx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}

scriptPubKey, err := txscript.PayToScriptHashScript(blockdag.OpTrueScript)
if err != nil {
t.Fatalf("Failed to build public key script: %s", err)
}
chainedTxOut := &wire.TxOut{
PkScript: blockdag.OpTrueScript,
Value: uint64(1),
ScriptPubKey: scriptPubKey,
Value: uint64(1),
}
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})

block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true, 1)
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}

// Checks that dag.ProcessBlock fails because we don't allow a transaction to spend another transaction from the same block
isOrphan, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
if err == nil {
t.Errorf("ProcessBlock expected an error")
} else if rErr, ok := err.(blockdag.RuleError); ok {
@@ -230,41 +276,121 @@ func TestChainedTransactions(t *testing.T) {
} else {
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: block2 " +
"is too far in the future")
}
if isOrphan {
t.Errorf("ProcessBlock: block2 got unexpectedly orphaned")
}

nonChainedTxIn := &wire.TxIn{
PreviousOutPoint: wire.OutPoint{TxID: cbTx.TxID(), Index: 0},
SignatureScript: nil,
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
nonChainedTxOut := &wire.TxOut{
PkScript: blockdag.OpTrueScript,
Value: uint64(1),
ScriptPubKey: scriptPubKey,
Value: uint64(1),
}
nonChainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{nonChainedTxIn}, []*wire.TxOut{nonChainedTxOut})

block3, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false, 1)
block3, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}

// Checks that dag.ProcessBlock doesn't fail because all of its transactions are dependent on transactions from previous blocks
isOrphan, err = dag.ProcessBlock(util.NewBlock(block3), blockdag.BFNoPoWCheck)
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(block3), blockdag.BFNoPoWCheck)
if err != nil {
t.Errorf("ProcessBlock: %v", err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: block3 " +
"is too far in the future")
}
if isOrphan {
t.Errorf("ProcessBlock: block3 got unexpectedly orphaned")
}
}

// TestOrderInDiffFromAcceptanceData makes sure that the order of transactions in
// dag.diffFromAcceptanceData is such that if txA is spent by txB then txA is processed
// before txB.
func TestOrderInDiffFromAcceptanceData(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimNetParams
params.K = math.MaxUint32
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", blockdag.Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
dag.TestSetCoinbaseMaturity(0)

createBlock := func(previousBlock *util.Block) *util.Block {
// Prepare a transaction that spends the previous block's coinbase transaction
var txs []*wire.MsgTx
if !previousBlock.IsGenesis() {
previousCoinbaseTx := previousBlock.MsgBlock().Transactions[0]
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{TxID: *previousCoinbaseTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: blockdag.OpTrueScript,
Value: uint64(1),
}
txs = append(txs, wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}))
}

// Create the block
msgBlock, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{previousBlock.Hash()}, txs, false)
if err != nil {
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to prepare block: %s", err)
}

// Add the block to the DAG
newBlock := util.NewBlock(msgBlock)
isOrphan, delay, err := dag.ProcessBlock(newBlock, blockdag.BFNoPoWCheck)
if err != nil {
t.Errorf("TestOrderInDiffFromAcceptanceData: %s", err)
}
if delay != 0 {
t.Fatalf("TestOrderInDiffFromAcceptanceData: block is too far in the future")
}
if isOrphan {
t.Fatalf("TestOrderInDiffFromAcceptanceData: block got unexpectedly orphaned")
}
return newBlock
}

// Create two block chains starting from the genesis block. Every time a block is added
// one of the chains is selected as the selected parent chain while all the blocks in
// the other chain (and their transactions) get accepted by the new virtual. If the
// transactions in the non-selected parent chain get processed in the wrong order then
// diffFromAcceptanceData panics.
blockAmountPerChain := 100
chainATip := util.NewBlock(params.GenesisBlock)
chainBTip := chainATip
for i := 0; i < blockAmountPerChain; i++ {
chainATip = createBlock(chainATip)
chainBTip = createBlock(chainBTip)
}
}

// TestGasLimit tests the gas limit rules
func TestGasLimit(t *testing.T) {
params := dagconfig.SimNetParams
params.K = 1
params.BlockRewardMaturity = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
DAGParams: &params,
})
@@ -273,56 +399,74 @@ func TestGasLimit(t *testing.T) {
}
defer teardownFunc()

// First we prepare a subnetwrok and a block with coinbase outputs to fund our tests
// First we prepare a subnetwork and a block with coinbase outputs to fund our tests
gasLimit := uint64(12345)
subnetworkID, err := testtools.RegisterSubnetworkForTest(dag, &params, gasLimit)
if err != nil {
t.Fatalf("could not register network: %s", err)
}

fundsBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), nil, false, 2)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err := dag.ProcessBlock(util.NewBlock(fundsBlock), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if isOrphan {
t.Fatalf("ProcessBlock: funds block got unexpectedly orphan")
cbTxs := []*wire.MsgTx{}
for i := 0; i < 4; i++ {
fundsBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), nil, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(fundsBlock), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: the funds block " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: fundsBlock got unexpectedly orphan")
}

cbTxs = append(cbTxs, fundsBlock.Transactions[util.CoinbaseTransactionIndex])
}

cbTxValue := fundsBlock.Transactions[0].TxOut[0].Value
cbTxID := fundsBlock.Transactions[0].TxID()
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("Failed to build signature script: %s", err)
}

scriptPubKey, err := txscript.PayToScriptHashScript(blockdag.OpTrueScript)
if err != nil {
t.Fatalf("Failed to build public key script: %s", err)
}

tx1In := &wire.TxIn{
PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 0),
PreviousOutpoint: *wire.NewOutpoint(cbTxs[0].TxID(), 0),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
tx1Out := &wire.TxOut{
Value: cbTxValue,
PkScript: blockdag.OpTrueScript,
Value: cbTxs[0].TxOut[0].Value,
ScriptPubKey: scriptPubKey,
}
tx1 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx1In}, []*wire.TxOut{tx1Out}, subnetworkID, 10000, []byte{})

tx2In := &wire.TxIn{
PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 1),
PreviousOutpoint: *wire.NewOutpoint(cbTxs[1].TxID(), 0),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
tx2Out := &wire.TxOut{
Value: cbTxValue,
PkScript: blockdag.OpTrueScript,
Value: cbTxs[1].TxOut[0].Value,
ScriptPubKey: scriptPubKey,
}
tx2 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx2In}, []*wire.TxOut{tx2Out}, subnetworkID, 10000, []byte{})

// Here we check that we can't process a block that has transactions that exceed the gas limit
// (tx1 and tx2 each declare 10000 gas, so together they exceed the 12345 limit registered above)
overLimitBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true, 1)
overLimitBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err = dag.ProcessBlock(util.NewBlock(overLimitBlock), blockdag.BFNoPoWCheck)
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(overLimitBlock), blockdag.BFNoPoWCheck)
if err == nil {
t.Fatalf("ProcessBlock expected to have an error")
t.Fatalf("ProcessBlock expected to have an error in block that exceeds gas limit")
}
rErr, ok := err.(blockdag.RuleError)
if !ok {
@@ -330,27 +474,32 @@ func TestGasLimit(t *testing.T) {
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
}
if delay != 0 {
t.Fatalf("ProcessBlock: overLimitBlock " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
}

overflowGasTxIn := &wire.TxIn{
PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 1),
PreviousOutpoint: *wire.NewOutpoint(cbTxs[2].TxID(), 0),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
overflowGasTxOut := &wire.TxOut{
Value: cbTxValue,
PkScript: blockdag.OpTrueScript,
Value: cbTxs[2].TxOut[0].Value,
ScriptPubKey: scriptPubKey,
}
overflowGasTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{overflowGasTxIn}, []*wire.TxOut{overflowGasTxOut},
subnetworkID, math.MaxUint64, []byte{})

// Here we check that we can't process a block whose transactions' gas overflows uint64
overflowGasBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true, 1)
overflowGasBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err = dag.ProcessBlock(util.NewBlock(overflowGasBlock), blockdag.BFNoPoWCheck)
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(overflowGasBlock), blockdag.BFNoPoWCheck)
if err == nil {
t.Fatalf("ProcessBlock expected to have an error")
}
@@ -366,37 +515,43 @@ func TestGasLimit(t *testing.T) {

nonExistentSubnetwork := &subnetworkid.SubnetworkID{123}
nonExistentSubnetworkTxIn := &wire.TxIn{
PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 0),
PreviousOutpoint: *wire.NewOutpoint(cbTxs[3].TxID(), 0),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
nonExistentSubnetworkTxOut := &wire.TxOut{
Value: cbTxValue,
PkScript: blockdag.OpTrueScript,
Value: cbTxs[3].TxOut[0].Value,
ScriptPubKey: scriptPubKey,
}
nonExistentSubnetworkTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{nonExistentSubnetworkTxIn},
[]*wire.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{})

nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true, 1)
nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}

// Here we check that we can't process a block with a transaction from a non-existent subnetwork
isOrphan, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
expectedErrStr := fmt.Sprintf("subnetwork '%s' not found", nonExistentSubnetwork)
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
expectedErrStr := fmt.Sprintf("Error getting gas limit for subnetworkID '%s': subnetwork '%s' not found",
nonExistentSubnetwork, nonExistentSubnetwork)
if err.Error() != expectedErrStr {
t.Fatalf("ProcessBlock expected error %v but got %v", expectedErrStr, err)
t.Fatalf("ProcessBlock expected error \"%v\" but got \"%v\"", expectedErrStr, err)
}

// Here we check that we can process a block with a transaction that doesn't exceed the gas limit
validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true, 1)
validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err = dag.ProcessBlock(util.NewBlock(validBlock), blockdag.BFNoPoWCheck)
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(validBlock), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: overLimitBlock " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
}

219
blockdag/fees.go
@@ -1,219 +0,0 @@
package blockdag

import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"

"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/txsort"
"github.com/daglabs/btcd/wire"
)

// compactFeeData is a specialized data type to store a compact list of fees
// inside a block.
// Every transaction gets a single uint64 value, stored as a plain binary list.
// The transactions are ordered the same way they are ordered inside the block, making it easy
// to traverse every transaction in a block and extract its fee.
//
// compactFeeFactory is used to create such a list.
// compactFeeIterator is used to iterate over such a list.

type compactFeeData []byte

func (cfd compactFeeData) Len() int {
return len(cfd) / 8
}

type compactFeeFactory struct {
buffer *bytes.Buffer
writer *bufio.Writer
}

func newCompactFeeFactory() *compactFeeFactory {
buffer := bytes.NewBuffer([]byte{})
return &compactFeeFactory{
buffer: buffer,
writer: bufio.NewWriter(buffer),
}
}

func (cfw *compactFeeFactory) add(txFee uint64) error {
return binary.Write(cfw.writer, binary.LittleEndian, txFee)
}

func (cfw *compactFeeFactory) data() (compactFeeData, error) {
err := cfw.writer.Flush()

return compactFeeData(cfw.buffer.Bytes()), err
}

type compactFeeIterator struct {
reader io.Reader
}

func (cfd compactFeeData) iterator() *compactFeeIterator {
return &compactFeeIterator{
reader: bufio.NewReader(bytes.NewBuffer(cfd)),
}
}

func (cfr *compactFeeIterator) next() (uint64, error) {
var txFee uint64

err := binary.Read(cfr.reader, binary.LittleEndian, &txFee)

return txFee, err
}
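As a hedged aside (this snippet is illustrative, not part of the diff): the encoding is simply eight little-endian bytes per transaction fee, in block order, so a round trip through the two helpers above would look like this:

	factory := newCompactFeeFactory()
	_ = factory.add(100) // fee of the block's first transaction
	_ = factory.add(250) // fee of the second
	feeData, _ := factory.data() // 16 bytes, so feeData.Len() == 2
	iterator := feeData.iterator()
	firstFee, _ := iterator.next()  // reads back 100
	secondFee, _ := iterator.next() // reads back 250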

// The following functions relate to storing and retrieving fee data from the database
var feeBucket = []byte("fees")

// getBluesFeeData returns the compactFeeData for all of the node's blues,
// used to calculate the fees this blockNode needs to pay
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
bluesFeeData := make(map[daghash.Hash]compactFeeData)

dag.db.View(func(dbTx database.Tx) error {
for _, blueBlock := range node.blues {
feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
if err != nil {
return fmt.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
}

bluesFeeData[*blueBlock.hash] = feeData
}

return nil
})

return bluesFeeData, nil
}

func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
if err != nil {
return fmt.Errorf("Error creating or retrieving fee bucket: %s", err)
}

return feeBucket.Put(blockHash.CloneBytes(), feeData)
}

func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
feeBucket := dbTx.Metadata().Bucket(feeBucket)
if feeBucket == nil {
return nil, errors.New("Fee bucket does not exist")
}

feeData := feeBucket.Get(blockHash.CloneBytes())
if feeData == nil {
return nil, fmt.Errorf("No fee data found for block %s", blockHash)
}

return feeData, nil
}

// The following functions deal with building and validating the fee transaction

func (node *blockNode) validateFeeTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
if node.isGenesis() {
return nil
}
expectedFeeTransaction, err := node.buildFeeTransaction(dag, txsAcceptanceData)
if err != nil {
return err
}

if !expectedFeeTransaction.TxHash().IsEqual(block.FeeTransaction().Hash()) {
return ruleError(ErrBadFeeTransaction, "Fee transaction is not built as expected")
}

return nil
}

// buildFeeTransaction returns the expected fee transaction for the current block
func (node *blockNode) buildFeeTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData) (*wire.MsgTx, error) {
bluesFeeData, err := node.getBluesFeeData(dag)
if err != nil {
return nil, err
}

txIns := []*wire.TxIn{}
txOuts := []*wire.TxOut{}

for _, blue := range node.blues {
txIn, txOut, err := feeInputAndOutputForBlueBlock(blue, txsAcceptanceData, bluesFeeData)
if err != nil {
return nil, err
}
txIns = append(txIns, txIn)
if txOut != nil {
txOuts = append(txOuts, txOut)
}
}
feeTx := wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
return txsort.Sort(feeTx), nil
}

// feeInputAndOutputForBlueBlock calculates the input and output that should go into the fee transaction of blueBlock.
// If blueBlock gets no fee, it returns only txIn and nil for txOut.
func feeInputAndOutputForBlueBlock(blueBlock *blockNode, txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
*wire.TxIn, *wire.TxOut, error) {

blockTxsAcceptanceData, ok := txsAcceptanceData[*blueBlock.hash]
if !ok {
return nil, nil, fmt.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
}
blockFeeData, ok := feeData[*blueBlock.hash]
if !ok {
return nil, nil, fmt.Errorf("No feeData for block %s", blueBlock.hash)
}

if len(blockTxsAcceptanceData) != blockFeeData.Len() {
return nil, nil, fmt.Errorf(
"length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
len(blockTxsAcceptanceData), blockFeeData.Len(), blueBlock.hash)
}

txIn := &wire.TxIn{
SignatureScript: []byte{},
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(*blueBlock.hash),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
}

totalFees := uint64(0)
feeIterator := blockFeeData.iterator()

for _, txAcceptanceData := range blockTxsAcceptanceData {
fee, err := feeIterator.next()
if err != nil {
return nil, nil, fmt.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
}
if txAcceptanceData.IsAccepted {
totalFees += fee
}
}

if totalFees == 0 {
return txIn, nil, nil
}

// the scriptPubKey for the fee is the same as the coinbase's first scriptPubKey
pkScript := blockTxsAcceptanceData[0].Tx.MsgTx().TxOut[0].PkScript

txOut := &wire.TxOut{
Value: totalFees,
PkScript: pkScript,
}

return txIn, txOut, nil
}
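For orientation, the helpers above compose roughly as follows when persisting a block's fees; storeFeesSketch is a hypothetical illustration (assuming per-transaction fees are already known in block order), not code from this diff:

	func storeFeesSketch(dbTx database.Tx, blockHash *daghash.Hash, txFees []uint64) error {
		factory := newCompactFeeFactory()
		for _, txFee := range txFees { // one uint64 per transaction, in block order
			if err := factory.add(txFee); err != nil {
				return err
			}
		}
		feeData, err := factory.data()
		if err != nil {
			return err
		}
		return dbStoreFeeData(dbTx, blockHash, feeData)
	}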
@@ -7,7 +7,7 @@ package blockdag_test

import (
"bytes"
"fmt"
"github.com/pkg/errors"
"os"
"path/filepath"
"testing"
@@ -15,11 +15,11 @@ import (
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/blockdag/fullblocktests"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
_ "github.com/daglabs/btcd/database/ffldb"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)

@@ -62,7 +62,7 @@ func isSupportedDbType(dbType string) bool {
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func(), error) {
if !isSupportedDbType(testDbType) {
return nil, nil, fmt.Errorf("unsupported db type %v", testDbType)
return nil, nil, errors.Errorf("unsupported db type %v", testDbType)
}

// Handle memory database specially since it doesn't need the disk
@@ -72,7 +72,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
if testDbType == "memdb" {
ndb, err := database.Create(testDbType)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
return nil, nil, errors.Errorf("error creating db: %v", err)
}
db = ndb

@@ -85,7 +85,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
// Create the root directory for test databases.
if !fileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := fmt.Errorf("unable to create test db "+
err := errors.Errorf("unable to create test db "+
"root: %v", err)
return nil, nil, err
}
@@ -96,7 +96,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
_ = os.RemoveAll(dbPath)
ndb, err := database.Create(testDbType, dbPath, blockDataNet)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
return nil, nil, errors.Errorf("error creating db: %v", err)
}
db = ndb

@@ -110,7 +110,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
}

// Copy the chain params to ensure any modifications the tests do to
// the chain parameters do not affect the global instance.
// the DAG parameters do not affect the global instance.
paramsCopy := *params

// Create the main chain instance.
@@ -123,7 +123,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
})
if err != nil {
teardown()
err := fmt.Errorf("failed to create chain instance: %v", err)
err := errors.Errorf("failed to create chain instance: %v", err)
return nil, nil, err
}
return chain, teardown, nil
@@ -156,11 +156,11 @@ func TestFullBlocks(t *testing.T) {
testAcceptedBlock := func(item fullblocktests.AcceptedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetHeight(blockHeight)
block.SetChainHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)

isOrphan, err := dag.ProcessBlock(block,
isOrphan, delay, err := dag.ProcessBlock(block,
blockdag.BFNone)
if err != nil {
t.Fatalf("block %q (hash %s, height %d) should "+
@@ -168,6 +168,13 @@ func TestFullBlocks(t *testing.T) {
block.Hash(), blockHeight, err)
}

if delay != item.Delay {
t.Fatalf("block %q (hash %s, height %d) unexpected "+
"delay -- got %v, want %v", item.Name,
block.Hash(), blockHeight, delay,
item.Delay)
}

if isOrphan != item.IsOrphan {
t.Fatalf("block %q (hash %s, height %d) unexpected "+
"orphan flag -- got %v, want %v", item.Name,
@@ -182,11 +189,11 @@ func TestFullBlocks(t *testing.T) {
testRejectedBlock := func(item fullblocktests.RejectedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetHeight(blockHeight)
block.SetChainHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)

_, err := dag.ProcessBlock(block, blockdag.BFNone)
_, _, err := dag.ProcessBlock(block, blockdag.BFNone)
if err == nil {
t.Fatalf("block %q (hash %s, height %d) should not "+
"have been accepted", item.Name, block.Hash(),
@@ -239,11 +246,11 @@ func TestFullBlocks(t *testing.T) {
testOrphanOrRejectedBlock := func(item fullblocktests.OrphanOrRejectedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetHeight(blockHeight)
block.SetChainHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)

isOrphan, err := dag.ProcessBlock(block, blockdag.BFNone)
isOrphan, delay, err := dag.ProcessBlock(block, blockdag.BFNone)
if err != nil {
// Ensure the error code is of the expected type.
if _, ok := err.(blockdag.RuleError); !ok {
@@ -255,6 +262,12 @@ func TestFullBlocks(t *testing.T) {
}
}

if delay != 0 {
t.Fatalf("block %q (hash %s, height %d) "+
"is too far in the future",
item.Name, block.Hash(), blockHeight)
}

if !isOrphan {
t.Fatalf("block %q (hash %s, height %d) was accepted, "+
"but is not considered an orphan", item.Name,
@@ -267,18 +280,18 @@ func TestFullBlocks(t *testing.T) {
testExpectedTip := func(item fullblocktests.ExpectedTip) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetHeight(blockHeight)
block.SetChainHeight(blockHeight)
t.Logf("Testing tip for block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)

// Ensure hash and height match.
if dag.HighestTipHash() != item.Block.BlockHash() ||
dag.Height() != blockHeight { //TODO: (Ori) the use of dag.Height() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
if dag.SelectedTipHash() != item.Block.BlockHash() ||
dag.ChainHeight() != blockHeight { //TODO: (Ori) the use of dag.ChainHeight() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation

t.Fatalf("block %q (hash %s, height %d) should be "+
"the current tip -- got (hash %s, height %d)",
item.Name, block.Hash(), blockHeight, dag.HighestTipHash(),
dag.Height()) //TODO: (Ori) the use of dag.Height() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
item.Name, block.Hash(), blockHeight, dag.SelectedTipHash(),
dag.ChainHeight()) //TODO: (Ori) the use of dag.ChainHeight() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
}
}


@@ -12,8 +12,8 @@ package fullblocktests
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"github.com/pkg/errors"
"math"
"runtime"
"time"
@@ -21,9 +21,9 @@ import (
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/btcec"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/random"
"github.com/daglabs/btcd/wire"
)
@@ -67,8 +67,9 @@ type TestInstance interface {
type AcceptedBlock struct {
Name string
Block *wire.MsgBlock
Height int32
Height uint64
IsOrphan bool
Delay time.Duration
}

// Ensure AcceptedBlock implements the TestInstance interface.
@@ -85,7 +86,7 @@ func (b AcceptedBlock) FullBlockTestInstance() {}
type RejectedBlock struct {
Name string
Block *wire.MsgBlock
Height int32
Height uint64
RejectCode blockdag.ErrorCode
}

@@ -107,7 +108,7 @@ func (b RejectedBlock) FullBlockTestInstance() {}
type OrphanOrRejectedBlock struct {
Name string
Block *wire.MsgBlock
Height int32
Height uint64
}

// Ensure ExpectedTip implements the TestInstance interface.
@@ -124,7 +125,7 @@ func (b OrphanOrRejectedBlock) FullBlockTestInstance() {}
type ExpectedTip struct {
Name string
Block *wire.MsgBlock
Height int32
Height uint64
}

// Ensure ExpectedTip implements the TestInstance interface.
@@ -141,7 +142,7 @@ func (b ExpectedTip) FullBlockTestInstance() {}
type RejectedNonCanonicalBlock struct {
Name string
RawBlock []byte
Height int32
Height uint64
}

// FullBlockTestInstance only exists to allow RejectedNonCanonicalBlock to be treated as
@@ -153,7 +154,7 @@ func (b RejectedNonCanonicalBlock) FullBlockTestInstance() {}
// spendableOut represents a transaction output that is spendable along with
// additional metadata such as the block it's in and how much it pays.
type spendableOut struct {
prevOut wire.OutPoint
prevOut wire.Outpoint
amount util.Amount
}

@@ -161,8 +162,8 @@ type spendableOut struct {
// and transaction output index within the transaction.
func makeSpendableOutForTx(tx *wire.MsgTx, txOutIndex uint32) spendableOut {
return spendableOut{
prevOut: wire.OutPoint{
TxID: tx.TxID(),
prevOut: wire.Outpoint{
TxID: *tx.TxID(),
Index: txOutIndex,
},
amount: util.Amount(tx.TxOut[txOutIndex].Value),
@@ -182,10 +183,10 @@ type testGenerator struct {
params *dagconfig.Params
tip *wire.MsgBlock
tipName string
tipHeight int32
tipHeight uint64
blocks map[daghash.Hash]*wire.MsgBlock
blocksByName map[string]*wire.MsgBlock
blockHeights map[string]int32
blockHeights map[string]uint64

// Used for tracking spendable coinbase outputs.
spendableOuts []spendableOut
@@ -193,6 +194,8 @@ type testGenerator struct {

// Common key for any tests which require signed transactions.
privKey *btcec.PrivateKey

powMaxBits uint32
}

// makeTestGenerator returns a test generator instance initialized with the
@@ -205,11 +208,12 @@ func makeTestGenerator(params *dagconfig.Params) (testGenerator, error) {
params: params,
blocks: map[daghash.Hash]*wire.MsgBlock{*genesisHash: genesis},
blocksByName: map[string]*wire.MsgBlock{"genesis": genesis},
blockHeights: map[string]int32{"genesis": 0},
blockHeights: map[string]uint64{"genesis": 0},
tip: genesis,
tipName: "genesis",
tipHeight: 0,
privKey: privKey,
powMaxBits: util.BigToCompact(params.PowMax),
}, nil
}

@@ -243,8 +247,8 @@ func pushDataScript(items ...[]byte) []byte {
// standardCoinbaseScript returns a standard script suitable for use as the
// signature script of the coinbase transaction of a new block. In particular,
// it starts with the block height that is required by version 2 blocks.
func standardCoinbaseScript(blockHeight int32, extraNonce uint64) ([]byte, error) {
return txscript.NewScriptBuilder().AddInt64(int64(blockHeight)).
func standardCoinbaseScript(extraNonce uint64) ([]byte, error) {
return txscript.NewScriptBuilder().
AddInt64(int64(extraNonce)).Script()
}

@@ -273,11 +277,10 @@ func uniqueOpReturnScript() []byte {
}

// createCoinbaseTx returns a coinbase transaction paying an appropriate
// subsidy based on the passed block height. The coinbase signature script
// conforms to the requirements of version 2 blocks.
func (g *testGenerator) createCoinbaseTx(blockHeight int32) *wire.MsgTx {
// subsidy based on the passed block height.
func (g *testGenerator) createCoinbaseTx(blueScore uint64) *wire.MsgTx {
extraNonce := uint64(0)
coinbaseScript, err := standardCoinbaseScript(blockHeight, extraNonce)
coinbaseScript, err := standardCoinbaseScript(extraNonce)
if err != nil {
panic(err)
}
@@ -285,14 +288,14 @@ func (g *testGenerator) createCoinbaseTx(blockHeight int32) *wire.MsgTx {
txIn := &wire.TxIn{
// Coinbase transactions have no inputs, so previous outpoint is
// zero hash and max index.
PreviousOutPoint: *wire.NewOutPoint(&daghash.TxID{},
PreviousOutpoint: *wire.NewOutpoint(&daghash.TxID{},
wire.MaxPrevOutIndex),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: coinbaseScript,
}
txOut := &wire.TxOut{
Value: blockdag.CalcBlockSubsidy(blockHeight, g.params),
PkScript: opTrueScript,
Value: blockdag.CalcBlockSubsidy(blueScore, g.params),
ScriptPubKey: opTrueScript,
}
return wire.NewNativeMsgTx(1, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
}
@@ -407,9 +410,9 @@ func additionalSpendFee(fee util.Amount) func(*wire.MsgBlock) {

// replaceSpendScript returns a function that itself takes a block and modifies
// it by replacing the public key script of the spending transaction.
func replaceSpendScript(pkScript []byte) func(*wire.MsgBlock) {
func replaceSpendScript(scriptPubKey []byte) func(*wire.MsgBlock) {
return func(b *wire.MsgBlock) {
b.Transactions[1].TxOut[0].PkScript = pkScript
b.Transactions[1].TxOut[0].ScriptPubKey = scriptPubKey
}
}

@@ -436,7 +439,7 @@ func additionalTx(tx *wire.MsgTx) func(*wire.MsgBlock) {
// tests.
func createSpendTx(spend *spendableOut, fee util.Amount) *wire.MsgTx {
txIn := &wire.TxIn{
PreviousOutPoint: spend.prevOut,
PreviousOutpoint: spend.prevOut,
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: nil,
}
@@ -512,7 +515,7 @@ func (g *testGenerator) nextBlock(blockName string, spend *spendableOut, mungers
Version: 1,
ParentHashes: []*daghash.Hash{g.tip.BlockHash()}, // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
HashMerkleRoot: calcHashMerkleRoot(txns),
Bits: g.params.PowLimitBits,
Bits: g.powMaxBits,
Timestamp: ts,
Nonce: 0, // To be solved.
},
@@ -606,7 +609,8 @@ func (g *testGenerator) saveSpendableCoinbaseOuts() {
// reaching the block that has already had the coinbase outputs
// collected.
var collectBlocks []*wire.MsgBlock
for b := g.tip; b != nil; b = g.blocks[*b.Header.SelectedParentHash()] {
// TODO: (Evgeny) This is wrong. Modified only to satisfy compilation.
for b := g.tip; b != nil; b = g.blocks[*b.Header.ParentHashes[0]] {
if b.BlockHash() == g.prevCollectedHash {
break
}
@@ -680,7 +684,7 @@ func countBlockSigOps(block *wire.MsgBlock) int {
totalSigOps += numSigOps
}
for _, txOut := range tx.TxOut {
numSigOps := txscript.GetSigOpCount(txOut.PkScript)
numSigOps := txscript.GetSigOpCount(txOut.ScriptPubKey)
totalSigOps += numSigOps
}
}
@@ -773,7 +777,7 @@ func (g *testGenerator) assertTipBlockTxOutOpReturn(txIndex, txOutIndex uint32)
}

txOut := tx.TxOut[txOutIndex]
if txOut.PkScript[0] != txscript.OpReturn {
if txOut.ScriptPubKey[0] != txscript.OpReturn {
panic(fmt.Sprintf("transaction index %d output %d in block %q "+
"(height %d) is not an OP_RETURN", txIndex, txOutIndex,
g.tipName, g.tipHeight))
@@ -836,7 +840,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// block to be the current tip of the block chain.
acceptBlock := func(blockName string, block *wire.MsgBlock, isOrphan bool) TestInstance {
blockHeight := g.blockHeights[blockName]
return AcceptedBlock{blockName, block, blockHeight, isOrphan}
return AcceptedBlock{blockName, block, blockHeight, isOrphan, 0}
}
rejectBlock := func(blockName string, block *wire.MsgBlock, code blockdag.ErrorCode) TestInstance {
blockHeight := g.blockHeights[blockName]
@@ -910,9 +914,9 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// genesis -> bm0 -> bm1 -> ... -> bm99
// ---------------------------------------------------------------------

coinbaseMaturity := g.params.BlockRewardMaturity
coinbaseMaturity := g.params.BlockCoinbaseMaturity
var testInstances []TestInstance
for i := uint16(0); i < coinbaseMaturity; i++ {
for i := uint64(0); i < coinbaseMaturity; i++ {
blockName := fmt.Sprintf("bm%d", i)
g.nextBlock(blockName, nil)
g.saveTipCoinbaseOut()
@@ -923,7 +927,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {

// Collect spendable outputs. This simplifies the code below.
var outs []*spendableOut
for i := uint16(0); i < coinbaseMaturity; i++ {
for i := uint64(0); i < coinbaseMaturity; i++ {
op := g.oldestCoinbaseOut()
outs = append(outs, &op)
}
@@ -1001,45 +1005,6 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// Too much proof-of-work coinbase tests.
// ---------------------------------------------------------------------

// Create a block that generates too much coinbase.
//
// ... -> b1(0) -> b2(1) -> b5(2) -> b6(3)
// \-> b9(4)
// \-> b3(1) -> b4(2)
g.setTip("b6")
g.nextBlock("b9", outs[4], additionalCoinbase(1))
rejected(blockdag.ErrBadCoinbaseValue)

// Create a fork that ends with block that generates too much coinbase.
//
// ... -> b1(0) -> b2(1) -> b5(2) -> b6(3)
// \-> b10(3) -> b11(4)
// \-> b3(1) -> b4(2)
g.setTip("b5")
g.nextBlock("b10", outs[3])
acceptedToSideChainWithExpectedTip("b6")

g.nextBlock("b11", outs[4], additionalCoinbase(1))
rejected(blockdag.ErrBadCoinbaseValue)

// Create a fork that ends with block that generates too much coinbase
// as before, but with a valid fork first.
//
// ... -> b1(0) -> b2(1) -> b5(2) -> b6(3)
// | \-> b12(3) -> b13(4) -> b14(5)
// | (b12 added last)
// \-> b3(1) -> b4(2)
g.setTip("b5")
b12 := g.nextBlock("b12", outs[3])
b13 := g.nextBlock("b13", outs[4])
b14 := g.nextBlock("b14", outs[5], additionalCoinbase(1))
tests = append(tests, []TestInstance{
acceptBlock("b13", b13, true),
acceptBlock("b14", b14, true),
rejectBlock("b12", b12, blockdag.ErrBadCoinbaseValue),
expectTipBlock("b13", b13),
})

// ---------------------------------------------------------------------
// Checksig signature operation count tests.
// ---------------------------------------------------------------------
@@ -1141,7 +1106,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
replaceSpendScript(sizePadScript)(b)
})
g.assertTipBlockSize(maxBlockSize + 1)
rejected(blockdag.ErrBlockTooBig)
rejected(blockdag.ErrBlockMassTooHigh)

// Parent was rejected, so this block must either be an orphan or
// outright rejected due to an invalid parent.
@@ -1161,7 +1126,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
g.setTip("b15")
tooSmallCbScript := repeatOpcode(0x00, minCoinbaseScriptLen-1)
g.nextBlock("b26", outs[6], replaceCoinbaseSigScript(tooSmallCbScript))
rejected(blockdag.ErrBadCoinbaseScriptLen)
rejected(blockdag.ErrBadCoinbasePayloadLen)

// Parent was rejected, so this block must either be an orphan or
// outright rejected due to an invalid parent.
@@ -1177,7 +1142,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
g.setTip("b15")
tooLargeCbScript := repeatOpcode(0x00, maxCoinbaseScriptLen+1)
g.nextBlock("b28", outs[6], replaceCoinbaseSigScript(tooLargeCbScript))
rejected(blockdag.ErrBadCoinbaseScriptLen)
rejected(blockdag.ErrBadCoinbasePayloadLen)

// Parent was rejected, so this block must either be an orphan or
// outright rejected due to an invalid parent.
@@ -1349,7 +1314,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
fill := maxBlockSigOps - (txnsNeeded * redeemScriptSigOps) + 1
finalTx := b.Transactions[len(b.Transactions)-1]
tx := createSpendTxForTx(finalTx, lowFee)
tx.TxOut[0].PkScript = repeatOpcode(txscript.OpCheckSig, fill)
tx.TxOut[0].ScriptPubKey = repeatOpcode(txscript.OpCheckSig, fill)
b.AddTransaction(tx)
})
rejected(blockdag.ErrTooManySigOps)
@@ -1383,7 +1348,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
}
finalTx := b.Transactions[len(b.Transactions)-1]
tx := createSpendTxForTx(finalTx, lowFee)
tx.TxOut[0].PkScript = repeatOpcode(txscript.OpCheckSig, fill)
tx.TxOut[0].ScriptPubKey = repeatOpcode(txscript.OpCheckSig, fill)
b.AddTransaction(tx)
})
accepted()
@@ -1444,7 +1409,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
b46.Header.Nonce++
blockHash := b46.BlockHash()
hashNum := daghash.HashToBig(blockHash)
if hashNum.Cmp(g.params.PowLimit) >= 0 {
if hashNum.Cmp(g.params.PowMax) >= 0 {
break
}
}
@@ -1531,8 +1496,8 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
g.nextBlock("b52", outs[14], func(b *wire.MsgBlock) {
txID := newTxIDFromStr("00000000000000000000000000000000" +
"00000000000000000123456789abcdef")
b.Transactions[1].TxIn[0].PreviousOutPoint.TxID = *txID
b.Transactions[1].TxIn[0].PreviousOutPoint.Index = 0
b.Transactions[1].TxIn[0].PreviousOutpoint.TxID = *txID
b.Transactions[1].TxIn[0].PreviousOutpoint.Index = 0
})
rejected(blockdag.ErrMissingTxOut)

@@ -1553,9 +1518,11 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// ... -> b33(9) -> b35(10) -> b39(11) -> b42(12) -> b43(13) -> b53(14)
// \-> b54(15)
g.nextBlock("b54", outs[15], func(b *wire.MsgBlock) {
medianBlock := g.blocks[*b.Header.SelectedParentHash()]
// TODO: (Evgeny) This is wrong. Modified only to satisfy compilation.
medianBlock := g.blocks[*b.Header.ParentHashes[0]]
for i := 0; i < medianTimeBlocks/2; i++ {
medianBlock = g.blocks[*medianBlock.Header.SelectedParentHash()]
// TODO: (Evgeny) This is wrong. Modified only to satisfy compilation.
medianBlock = g.blocks[*medianBlock.Header.ParentHashes[0]]
}
b.Header.Timestamp = medianBlock.Header.Timestamp
})
@@ -1567,9 +1534,11 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// ... -> b33(9) -> b35(10) -> b39(11) -> b42(12) -> b43(13) -> b53(14) -> b55(15)
g.setTip("b53")
g.nextBlock("b55", outs[15], func(b *wire.MsgBlock) {
medianBlock := g.blocks[*b.Header.SelectedParentHash()]
// TODO: (Evgeny) This is wrong. Modified only to satisfy compilation.
medianBlock := g.blocks[*b.Header.ParentHashes[0]]
for i := 0; i < medianTimeBlocks/2; i++ {
medianBlock = g.blocks[*medianBlock.Header.SelectedParentHash()]
// TODO: (Evgeny) This is wrong. Modified only to satisfy compilation.
medianBlock = g.blocks[*medianBlock.Header.ParentHashes[0]]
}
medianBlockTime := medianBlock.Header.Timestamp
b.Header.Timestamp = medianBlockTime.Add(time.Second)
@@ -1684,7 +1653,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// \-> b58(17)
g.setTip("b57")
g.nextBlock("b58", outs[17], func(b *wire.MsgBlock) {
b.Transactions[1].TxIn[0].PreviousOutPoint.Index = 42
b.Transactions[1].TxIn[0].PreviousOutpoint.Index = 42
})
rejected(blockdag.ErrMissingTxOut)

@@ -1717,7 +1686,8 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
g.nextBlock("b61", outs[18], func(b *wire.MsgBlock) {
// Duplicate the coinbase of the parent block to force the
// condition.
parent := g.blocks[*b.Header.SelectedParentHash()]
// TODO: (Evgeny) This is wrong. Modified only to satisfy compilation.
parent := g.blocks[*b.Header.ParentHashes[0]]
b.Transactions[0] = parent.Transactions[0]
})
rejected(blockdag.ErrOverwriteTx)
@@ -1842,7 +1812,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// \-> b68(20)
g.setTip("b65")
g.nextBlock("b68", outs[20], additionalCoinbase(10), additionalSpendFee(9))
rejected(blockdag.ErrBadCoinbaseValue)
rejected(blockdag.ErrBadCoinbaseTransaction)

// Create block that pays 10 extra to the coinbase and a tx that pays
// the extra 10 fee.
@@ -2059,7 +2029,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// Collect all of the spendable coinbase outputs from the previous
// collection point up to the current tip.
g.saveSpendableCoinbaseOuts()
spendableOutOffset := g.tipHeight - int32(coinbaseMaturity)
spendableOutOffset := g.tipHeight - coinbaseMaturity

// Extend the main chain by a large number of max size blocks.
//
@@ -2068,7 +2038,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
reorgSpend := *outs[spendableOutOffset]
reorgStartBlockName := g.tipName
chain1TipName := g.tipName
for i := int32(0); i < numLargeReorgBlocks; i++ {
for i := uint64(0); i < numLargeReorgBlocks; i++ {
chain1TipName = fmt.Sprintf("br%d", i)
g.nextBlock(chain1TipName, &reorgSpend, func(b *wire.MsgBlock) {
bytesToMaxSize := maxBlockSize - b.SerializeSize() - 3
@@ -2083,7 +2053,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// Use the next available spendable output. First use up any
// remaining spendable outputs that were already popped into the
// outs slice, then just pop them from the stack.
if spendableOutOffset+1+i < int32(len(outs)) {
if spendableOutOffset+1+i < uint64(len(outs)) {
reorgSpend = *outs[spendableOutOffset+1+i]
} else {
reorgSpend = g.oldestCoinbaseOut()

@@ -14,7 +14,7 @@ import (
"github.com/daglabs/btcd/util/subnetworkid"

"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)

@@ -77,7 +77,7 @@ var (
Transactions: []*wire.MsgTx{{
Version: 1,
TxIn: []*wire.TxIn{{
PreviousOutPoint: wire.OutPoint{
PreviousOutpoint: wire.Outpoint{
TxID: daghash.TxID{},
Index: 0xffffffff,
},
@@ -90,7 +90,7 @@ var (
}},
TxOut: []*wire.TxOut{{
Value: 0,
PkScript: fromHex("4104678afdb0fe5548271967f1" +
ScriptPubKey: fromHex("4104678afdb0fe5548271967f1" +
"a67130b7105cd6a828e03909a67962e0ea1f" +
"61deb649f6bc3f4cef38c4f35504e51ec138" +
"c4f35504e51ec112de5c384df7ba0b8d578a" +
@@ -111,22 +111,19 @@ var (
// allow them to change out from under the tests potentially invalidating them.
var regressionNetParams = &dagconfig.Params{
Name: "regtest",
Net: wire.TestNet,
Net: wire.RegTest,
DefaultPort: "18444",

// Chain parameters
GenesisBlock: &regTestGenesisBlock,
GenesisHash: newHashFromStr("5bec7567af40504e0994db3b573c186fffcc4edefe096ff2e58d00523bd7e8a6"),
PowLimit: regressionPowLimit,
PowLimitBits: 0x207fffff,
BlockRewardMaturity: 100,
SubsidyReductionInterval: 150,
TargetTimespan: time.Hour * 24 * 14, // 14 days
TargetTimePerBlock: time.Second * 10, // 10 seconds
RetargetAdjustmentFactor: 4, // 25% less, 400% more
ReduceMinDifficulty: true,
MinDiffReductionTime: time.Minute * 20, // TargetTimePerBlock * 2
GenerateSupported: true,
// DAG parameters
GenesisBlock: &regTestGenesisBlock,
GenesisHash: newHashFromStr("5bec7567af40504e0994db3b573c186fffcc4edefe096ff2e58d00523bd7e8a6"),
PowMax: regressionPowLimit,
BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 150,
TargetTimePerBlock: time.Second * 10, // 10 seconds
DifficultyAdjustmentWindowSize: 2640,
TimestampDeviationTolerance: 132,
GenerateSupported: true,

// Checkpoints ordered from oldest to newest.
Checkpoints: nil,

234
blockdag/indexers/acceptanceindex.go
Normal file
@@ -0,0 +1,234 @@
package indexers

import (
"bytes"
"encoding/gob"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/pkg/errors"
)

const (
// acceptanceIndexName is the human-readable name for the index.
acceptanceIndexName = "acceptance index"
)

var (
// acceptanceIndexKey is the key of the acceptance index and the db bucket used
// to house it.
acceptanceIndexKey = []byte("acceptanceidx")
)

// AcceptanceIndex implements a txAcceptanceData by block hash index. That is to say,
// it stores a mapping between a block's hash and the set of transactions that the
// block accepts among its blue blocks.
type AcceptanceIndex struct {
db database.DB
dag *blockdag.BlockDAG
}

// Ensure the AcceptanceIndex type implements the Indexer interface.
var _ Indexer = (*AcceptanceIndex)(nil)

// NewAcceptanceIndex returns a new instance of an indexer that is used to create a
// mapping between block hashes and their txAcceptanceData.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockdag package. This allows the index to be
// seamlessly maintained along with the DAG.
func NewAcceptanceIndex() *AcceptanceIndex {
return &AcceptanceIndex{}
}

// DropAcceptanceIndex drops the acceptance index from the provided database if it
// exists.
func DropAcceptanceIndex(db database.DB, interrupt <-chan struct{}) error {
return dropIndex(db, acceptanceIndexKey, acceptanceIndexName, interrupt)
}

// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Key() []byte {
return acceptanceIndexKey
}

// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Name() string {
return acceptanceIndexName
}

// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the
// acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Create(dbTx database.Tx) error {
_, err := dbTx.Metadata().CreateBucket(acceptanceIndexKey)
return err
}

// Init initializes the hash-based acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
idx.db = db
idx.dag = dag
return nil
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) ConnectBlock(dbTx database.Tx, _ *util.Block, blockID uint64, _ *blockdag.BlockDAG,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
return dbPutTxsAcceptanceData(dbTx, blockID, txsAcceptanceData)
}

// TxsAcceptanceData returns the acceptance data of all the transactions that
// were accepted by the block with hash blockHash.
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
var txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData
err := idx.db.View(func(dbTx database.Tx) error {
var err error
txsAcceptanceData, err = dbFetchTxsAcceptanceDataByHash(dbTx, blockHash)
return err
})
if err != nil {
return nil, err
}
return txsAcceptanceData, nil
}
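For orientation, here is a minimal usage sketch, not part of the commit itself, showing how the acceptance index might be wired up and queried. It is assembled from APIs that appear elsewhere in this diff (NewManager, blockdag.Config, blockdag.DAGSetup, ProcessBlock) and assumes a ready database db, net params, and a blockHash to look up:

// Hedged wiring sketch; mirrors the test setup later in this diff.
acceptanceIndex := NewAcceptanceIndex()
indexManager := NewManager([]Indexer{acceptanceIndex})
config := blockdag.Config{
IndexManager: indexManager,
DAGParams: params,
DB: db,
}
dag, teardown, err := blockdag.DAGSetup("", config)
if err != nil {
return err
}
if teardown != nil {
defer teardown()
}
// Process blocks as usual; the manager keeps the index in sync.
_, _, err = dag.ProcessBlock(block, blockdag.BFNone)
// Then ask which transactions a given block accepted among its blues:
txsAcceptanceData, err := acceptanceIndex.TxsAcceptanceData(blockHash)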

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
for blockID := currentBlockID + 1; blockID <= lastKnownBlockID; blockID++ {
hash, err := blockdag.DBFetchBlockHashByID(dbTx, currentBlockID)
if err != nil {
return err
}
txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(hash)
if err != nil {
return err
}
err = idx.ConnectBlock(dbTx, nil, blockID, nil, txAcceptanceData, nil)
if err != nil {
return err
}
}
return nil
}

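One detail in Recover above is worth flagging: the loop advances blockID from currentBlockID+1 through lastKnownBlockID, yet each iteration fetches the hash with the loop-invariant currentBlockID, so every pass resolves the same block. If the intent is to replay each missing block, the fetch presumably wants the loop variable. A reader's sketch of the apparent intent, not part of the commit:

// Hypothetical correction: key the fetch by the loop variable
// rather than the invariant currentBlockID.
hash, err := blockdag.DBFetchBlockHashByID(dbTx, blockID)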
func dbPutTxsAcceptanceData(dbTx database.Tx, blockID uint64,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(txsAcceptanceData)
if err != nil {
return err
}

bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedTxsAcceptanceData)
}

func dbFetchTxsAcceptanceDataByHash(dbTx database.Tx,
hash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {

blockID, err := blockdag.DBFetchBlockIDByHash(dbTx, hash)
if err != nil {
return nil, err
}

return dbFetchTxsAcceptanceDataByID(dbTx, blockID)
}

func dbFetchTxsAcceptanceDataByID(dbTx database.Tx,
blockID uint64) (blockdag.MultiBlockTxsAcceptanceData, error) {
serializedBlockID := blockdag.SerializeBlockID(blockID)
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
serializedTxsAcceptanceData := bucket.Get(serializedBlockID)
if serializedTxsAcceptanceData == nil {
return nil, errors.Errorf("no entry in the acceptance index for block id %d", blockID)
}

return deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
}

type serializableTxAcceptanceData struct {
MsgTx wire.MsgTx
IsAccepted bool
}

type serializableBlockTxsAcceptanceData struct {
BlockHash daghash.Hash
TxAcceptanceData []serializableTxAcceptanceData
}

type serializableMultiBlockTxsAcceptanceData []serializableBlockTxsAcceptanceData

func serializeMultiBlockTxsAcceptanceData(
multiBlockTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) ([]byte, error) {
// Convert MultiBlockTxsAcceptanceData to a serializable format
serializableData := make(serializableMultiBlockTxsAcceptanceData, len(multiBlockTxsAcceptanceData))
for i, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
serializableBlockData := serializableBlockTxsAcceptanceData{
BlockHash: blockTxsAcceptanceData.BlockHash,
TxAcceptanceData: make([]serializableTxAcceptanceData, len(blockTxsAcceptanceData.TxAcceptanceData)),
}
for i, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
serializableBlockData.TxAcceptanceData[i] = serializableTxAcceptanceData{
MsgTx: *txAcceptanceData.Tx.MsgTx(),
IsAccepted: txAcceptanceData.IsAccepted,
}
}
serializableData[i] = serializableBlockData
}

// Serialize
var buffer bytes.Buffer
encoder := gob.NewEncoder(&buffer)
err := encoder.Encode(serializableData)
if err != nil {
return nil, err
}
return buffer.Bytes(), nil
}

func deserializeMultiBlockTxsAcceptanceData(
serializedTxsAcceptanceData []byte) (blockdag.MultiBlockTxsAcceptanceData, error) {
// Deserialize
buffer := bytes.NewBuffer(serializedTxsAcceptanceData)
decoder := gob.NewDecoder(buffer)
var serializedData serializableMultiBlockTxsAcceptanceData
err := decoder.Decode(&serializedData)
if err != nil {
return nil, err
}

// Convert serializable format to MultiBlockTxsAcceptanceData
multiBlockTxsAcceptanceData := make(blockdag.MultiBlockTxsAcceptanceData, len(serializedData))
for i, serializableBlockData := range serializedData {
blockTxsAcceptanceData := blockdag.BlockTxsAcceptanceData{
BlockHash: serializableBlockData.BlockHash,
TxAcceptanceData: make([]blockdag.TxAcceptanceData, len(serializableBlockData.TxAcceptanceData)),
}
for i, txData := range serializableBlockData.TxAcceptanceData {
msgTx := txData.MsgTx
blockTxsAcceptanceData.TxAcceptanceData[i] = blockdag.TxAcceptanceData{
Tx: util.NewTx(&msgTx),
IsAccepted: txData.IsAccepted,
}
}
multiBlockTxsAcceptanceData[i] = blockTxsAcceptanceData
}

return multiBlockTxsAcceptanceData, nil
}
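A small but deliberate detail above: deserialization copies each decoded transaction into a fresh local (msgTx := txData.MsgTx) before taking its address. Under the range semantics of the Go versions contemporary with this code (before Go 1.22), taking &txData.MsgTx directly would alias the reused loop variable, leaving every TxAcceptanceData pointing at the last decoded transaction. A minimal illustration of the pitfall, not part of the commit:

// Buggy under pre-Go-1.22 range semantics: every pointer aliases v.
for _, v := range decoded {
out = append(out, &v.MsgTx)
}
// Correct, as the code above does: copy first, then take the address.
for _, v := range decoded {
msgTx := v.MsgTx
out = append(out, &msgTx)
}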
340
blockdag/indexers/acceptanceindex_test.go
Normal file
@@ -0,0 +1,340 @@
package indexers

import (
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/pkg/errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"syscall"
"testing"
)

func TestAcceptanceIndexSerializationAndDeserialization(t *testing.T) {
// Create test data
hash, _ := daghash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")
txIn1 := &wire.TxIn{SignatureScript: []byte{1}, PreviousOutpoint: wire.Outpoint{Index: 1}, Sequence: 0}
txIn2 := &wire.TxIn{SignatureScript: []byte{2}, PreviousOutpoint: wire.Outpoint{Index: 2}, Sequence: 0}
txOut1 := &wire.TxOut{ScriptPubKey: []byte{1}, Value: 10}
txOut2 := &wire.TxOut{ScriptPubKey: []byte{2}, Value: 20}
blockTxsAcceptanceData := blockdag.BlockTxsAcceptanceData{
BlockHash: *hash,
TxAcceptanceData: []blockdag.TxAcceptanceData{
{
Tx: util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn1}, []*wire.TxOut{txOut1})),
IsAccepted: true,
},
{
Tx: util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn2}, []*wire.TxOut{txOut2})),
IsAccepted: false,
},
},
}
multiBlockTxsAcceptanceData := blockdag.MultiBlockTxsAcceptanceData{blockTxsAcceptanceData}

// Serialize
serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(multiBlockTxsAcceptanceData)
if err != nil {
t.Fatalf("TestAcceptanceIndexSerializationAndDeserialization: serialization failed: %s", err)
}

// Deserialize
deserializedTxsAcceptanceData, err := deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
if err != nil {
t.Fatalf("TestAcceptanceIndexSerializationAndDeserialization: deserialization failed: %s", err)
}

// Check that they're the same
if !reflect.DeepEqual(multiBlockTxsAcceptanceData, deserializedTxsAcceptanceData) {
t.Fatalf("TestAcceptanceIndexSerializationAndDeserialization: original data and deseralize data aren't equal")
|
||||
}
}

// TestAcceptanceIndexRecover tests the recoverability of the
// acceptance index.
// It does it by following these steps:
// * It creates a DAG with enabled acceptance index (let's call it dag1) and
// makes it process some blocks.
// * It creates a copy of dag1 (let's call it dag2), and disables the acceptance
// index in it.
// * It processes two more blocks in both dag1 and dag2.
// * A copy of dag2 is created (let's call it dag3) with enabled
// acceptance index.
// * It checks that the two missing blocks are added to dag3 acceptance index by
// comparing dag1's last block acceptance data and dag3's last block acceptance
// data.
func TestAcceptanceIndexRecover(t *testing.T) {
params := &dagconfig.SimNetParams
params.BlockCoinbaseMaturity = 0

testFiles := []string{
"blk_0_to_4.dat",
"blk_3B.dat",
}

var blocks []*util.Block
for _, file := range testFiles {
blockTmp, err := blockdag.LoadBlocks(filepath.Join("../testdata/", file))
if err != nil {
t.Fatalf("Error loading file: %v\n", err)
}
blocks = append(blocks, blockTmp...)
}

db1AcceptanceIndex := NewAcceptanceIndex()
db1IndexManager := NewManager([]Indexer{db1AcceptanceIndex})
db1Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover1")
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
}
defer os.RemoveAll(db1Path)

db1, err := database.Create("ffldb", db1Path, params.Net)
if err != nil {
t.Fatalf("error creating db: %s", err)
}

db1Config := blockdag.Config{
IndexManager: db1IndexManager,
DAGParams: params,
DB: db1,
}

db1DAG, teardown, err := blockdag.DAGSetup("", db1Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
if teardown != nil {
defer teardown()
}

for i := 1; i < len(blocks)-2; i++ {
isOrphan, delay, err := db1DAG.ProcessBlock(blocks[i], blockdag.BFNone)
if err != nil {
t.Fatalf("ProcessBlock fail on block %v: %v\n", i, err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: block %d "+
"is too far in the future", i)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned block %v "+
"is an orphan\n", i)
}
}

err = db1.FlushCache()
if err != nil {
t.Fatalf("Error flushing database to disk: %s", err)
}

db2Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover2")
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
}
defer os.RemoveAll(db2Path)

err = copyDirectory(db1Path, db2Path)
if err != nil {
t.Fatalf("copyDirectory: %s", err)
}

for i := len(blocks) - 2; i < len(blocks); i++ {
isOrphan, delay, err := db1DAG.ProcessBlock(blocks[i], blockdag.BFNone)
if err != nil {
t.Fatalf("ProcessBlock fail on block %v: %v\n", i, err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: block %d "+
"is too far in the future", i)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned block %v "+
"is an orphan\n", i)
}
}

db1LastBlockAcceptanceData, err := db1AcceptanceIndex.TxsAcceptanceData(blocks[len(blocks)-1].Hash())
if err != nil {
t.Fatalf("Error fetching acceptance data: %s", err)
}

db2, err := database.Open("ffldb", db2Path, params.Net)
if err != nil {
t.Fatalf("Error opening database: %s", err)
}

db2Config := blockdag.Config{
DAGParams: params,
DB: db2,
}

db2DAG, teardown, err := blockdag.DAGSetup("", db2Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
if teardown != nil {
defer teardown()
}

for i := len(blocks) - 2; i < len(blocks); i++ {
isOrphan, delay, err := db2DAG.ProcessBlock(blocks[i], blockdag.BFNone)
if err != nil {
t.Fatalf("ProcessBlock fail on block %v: %v\n", i, err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: block %d "+
"is too far in the future", i)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned block %v "+
"is an orphan\n", i)
}
}

err = db2.FlushCache()
if err != nil {
t.Fatalf("Error flushing database to disk: %s", err)
}
db3Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover3")
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
}
defer os.RemoveAll(db3Path)
err = copyDirectory(db2Path, db3Path)
if err != nil {
t.Fatalf("copyDirectory: %s", err)
}

db3, err := database.Open("ffldb", db3Path, params.Net)
if err != nil {
t.Fatalf("Error opening database: %s", err)
}

db3AcceptanceIndex := NewAcceptanceIndex()
db3IndexManager := NewManager([]Indexer{db3AcceptanceIndex})
db3Config := blockdag.Config{
IndexManager: db3IndexManager,
DAGParams: params,
DB: db3,
}

_, teardown, err = blockdag.DAGSetup("", db3Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
if teardown != nil {
defer teardown()
}

db3LastBlockAcceptanceData, err := db3AcceptanceIndex.TxsAcceptanceData(blocks[len(blocks)-1].Hash())
if err != nil {
t.Fatalf("Error fetching acceptance data: %s", err)
}
if !reflect.DeepEqual(db1LastBlockAcceptanceData, db3LastBlockAcceptanceData) {
t.Fatalf("recovery failed")
}
}

// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func copyDirectory(scrDir, dest string) error {
entries, err := ioutil.ReadDir(scrDir)
if err != nil {
return err
}
for _, entry := range entries {
sourcePath := filepath.Join(scrDir, entry.Name())
destPath := filepath.Join(dest, entry.Name())

fileInfo, err := os.Stat(sourcePath)
if err != nil {
return err
}

stat, ok := fileInfo.Sys().(*syscall.Stat_t)
if !ok {
return errors.Errorf("failed to get raw syscall.Stat_t data for '%s'", sourcePath)
}

switch fileInfo.Mode() & os.ModeType {
case os.ModeDir:
if err := createIfNotExists(destPath, 0755); err != nil {
return err
}
if err := copyDirectory(sourcePath, destPath); err != nil {
return err
}
case os.ModeSymlink:
if err := copySymLink(sourcePath, destPath); err != nil {
return err
}
default:
if err := copyFile(sourcePath, destPath); err != nil {
return err
}
}

if err := os.Lchown(destPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}

isSymlink := entry.Mode()&os.ModeSymlink != 0
if !isSymlink {
if err := os.Chmod(destPath, entry.Mode()); err != nil {
return err
}
}
}
return nil
}

// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func copyFile(srcFile, dstFile string) error {
out, err := os.Create(dstFile)
defer out.Close()
if err != nil {
return err
}

in, err := os.Open(srcFile)
defer in.Close()
if err != nil {
return err
}

_, err = io.Copy(out, in)
if err != nil {
return err
}

return nil
}
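A nit in copyFile above: both Close calls are deferred before the corresponding error checks. This happens to be safe in Go, since (*os.File).Close on a nil receiver returns os.ErrInvalid rather than panicking, but the conventional ordering checks the error first:

out, err := os.Create(dstFile)
if err != nil {
return err
}
defer out.Close()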

// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func createIfNotExists(dir string, perm os.FileMode) error {
if blockdag.FileExists(dir) {
return nil
}

if err := os.MkdirAll(dir, perm); err != nil {
return errors.Errorf("failed to create directory: '%s', error: '%s'", dir, err.Error())
}

return nil
}

// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func copySymLink(source, dest string) error {
link, err := os.Readlink(source)
if err != nil {
return err
}
return os.Symlink(link, dest)
}
@@ -5,16 +5,16 @@
package indexers

import (
"errors"
"fmt"
"github.com/pkg/errors"
"sync"

"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)

@@ -51,9 +51,9 @@ const (
// hash.
addrKeyTypeScriptHash = 1

// Size of a transaction entry. It consists of 4 bytes block id + 4
// Size of a transaction entry. It consists of 8 bytes block id + 4
// bytes offset + 4 bytes length.
txEntrySize = 4 + 4 + 4
txEntrySize = 8 + 4 + 4
)

var (
@@ -117,11 +117,11 @@ var (
// [<block id><start offset><tx length>,...]
//
// Field Type Size
// block id uint32 4 bytes
// block id uint64 8 bytes
// start offset uint32 4 bytes
// tx length uint32 4 bytes
// -----
// Total: 12 bytes per indexed tx
// Total: 16 bytes per indexed tx
// -----------------------------------------------------------------------------

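To make the widened entry concrete, here is a self-contained sketch of packing and unpacking one 16-byte entry. It assumes a little-endian byteOrder, which is what btcd-derived code conventionally uses; it only illustrates the layout and is not a replacement for serializeAddrIndexEntry and deserializeAddrIndexEntry below:

package main

import (
"encoding/binary"
"fmt"
)

func main() {
blockID := uint64(7)
txStart, txLen := uint32(100), uint32(250)

// Pack: an 8-byte block id followed by two 4-byte fields.
entry := make([]byte, 16)
binary.LittleEndian.PutUint64(entry[0:8], blockID)
binary.LittleEndian.PutUint32(entry[8:12], txStart)
binary.LittleEndian.PutUint32(entry[12:16], txLen)

// Unpack the same three fields.
fmt.Println(
binary.LittleEndian.Uint64(entry[0:8]),
binary.LittleEndian.Uint32(entry[8:12]),
binary.LittleEndian.Uint32(entry[12:16]))
}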
// fetchBlockHashFunc defines a callback function to use in order to convert a
@@ -130,12 +130,12 @@ type fetchBlockHashFunc func(serializedID []byte) (*daghash.Hash, error)

// serializeAddrIndexEntry serializes the provided block id and transaction
// location according to the format described in detail above.
func serializeAddrIndexEntry(blockID uint32, txLoc wire.TxLoc) []byte {
func serializeAddrIndexEntry(blockID uint64, txLoc wire.TxLoc) []byte {
// Serialize the entry.
serialized := make([]byte, 12)
byteOrder.PutUint32(serialized, blockID)
byteOrder.PutUint32(serialized[4:], uint32(txLoc.TxStart))
byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxLen))
serialized := make([]byte, 16)
byteOrder.PutUint64(serialized, blockID)
byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxStart))
byteOrder.PutUint32(serialized[12:], uint32(txLoc.TxLen))
return serialized
}

@@ -149,13 +149,13 @@ func deserializeAddrIndexEntry(serialized []byte, region *database.BlockRegion,
return errDeserialize("unexpected end of data")
}

hash, err := fetchBlockHash(serialized[0:4])
hash, err := fetchBlockHash(serialized[0:8])
if err != nil {
return err
}
region.Hash = hash
region.Offset = byteOrder.Uint32(serialized[4:8])
region.Len = byteOrder.Uint32(serialized[8:12])
region.Offset = byteOrder.Uint32(serialized[8:12])
region.Len = byteOrder.Uint32(serialized[12:16])
return nil
}

@@ -170,7 +170,7 @@ func keyForLevel(addrKey [addrKeySize]byte, level uint8) [levelKeySize]byte {

// dbPutAddrIndexEntry updates the address index to include the provided entry
// according to the level-based scheme described in detail above.
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint32, txLoc wire.TxLoc) error {
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint64, txLoc wire.TxLoc) error {
// Start with level 0 and its initial max number of entries.
curLevel := uint8(0)
maxLevelBytes := level0MaxEntries * txEntrySize
@@ -528,12 +528,6 @@ func addrToKey(addr util.Address) ([addrKeySize]byte, error) {
result[0] = addrKeyTypeScriptHash
copy(result[1:], addr.Hash160()[:])
return result, nil

case *util.AddressPubKey:
var result [addrKeySize]byte
result[0] = addrKeyTypePubKeyHash
copy(result[1:], addr.AddressPubKeyHash().Hash160()[:])
return result, nil
}

return [addrKeySize]byte{}, errUnsupportedAddressType
@@ -591,7 +585,7 @@ func (idx *AddrIndex) NeedsInputs() bool {
// initialize for this index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Init(db database.DB) error {
func (idx *AddrIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
idx.db = db
return nil
}
@@ -626,37 +620,35 @@ func (idx *AddrIndex) Create(dbTx database.Tx) error {
// stored in the order they appear in the block.
type writeIndexData map[[addrKeySize]byte][]int

// indexPkScript extracts all standard addresses from the passed public key
// indexScriptPubKey extracts all standard addresses from the passed public key
// script and maps each of them to the associated transaction using the passed
// map.
func (idx *AddrIndex) indexPkScript(data writeIndexData, pkScript []byte, txIdx int) {
func (idx *AddrIndex) indexScriptPubKey(data writeIndexData, scriptPubKey []byte, txIdx int) {
// Nothing to index if the script is non-standard or otherwise doesn't
// contain any addresses.
_, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript,
_, addr, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
if err != nil || len(addrs) == 0 {
if err != nil || addr == nil {
return
}

for _, addr := range addrs {
addrKey, err := addrToKey(addr)
if err != nil {
// Ignore unsupported address types.
continue
}

// Avoid inserting the transaction more than once. Since the
// transactions are indexed serially any duplicates will be
// indexed in a row, so checking the most recent entry for the
// address is enough to detect duplicates.
indexedTxns := data[addrKey]
numTxns := len(indexedTxns)
if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
continue
}
indexedTxns = append(indexedTxns, txIdx)
data[addrKey] = indexedTxns
addrKey, err := addrToKey(addr)
if err != nil {
// Ignore unsupported address types.
return
}

// Avoid inserting the transaction more than once. Since the
// transactions are indexed serially any duplicates will be
// indexed in a row, so checking the most recent entry for the
// address is enough to detect duplicates.
indexedTxns := data[addrKey]
numTxns := len(indexedTxns)
if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
return
}
indexedTxns = append(indexedTxns, txIdx)
data[addrKey] = indexedTxns
}

// indexBlock extracts all of the standard addresses from all of the transactions
@@ -667,23 +659,23 @@ func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *bl
// Coinbases do not reference any inputs. Since the block is
// required to have already gone through full validation, it has
// already been proven that the first transaction in the block is
// a coinbase, and the second one is a fee transaction.
if txIdx > 1 {
// a coinbase.
if txIdx > util.CoinbaseTransactionIndex {
for _, txIn := range tx.MsgTx().TxIn {
// The UTXO should always have the input since
// the index contract requires it, however, be
// safe and simply ignore any missing entries.
entry, ok := dag.GetUTXOEntry(txIn.PreviousOutPoint)
entry, ok := dag.GetUTXOEntry(txIn.PreviousOutpoint)
if !ok {
continue
}

idx.indexPkScript(data, entry.PkScript(), txIdx)
idx.indexScriptPubKey(data, entry.ScriptPubKey(), txIdx)
}
}

for _, txOut := range tx.MsgTx().TxOut {
idx.indexPkScript(data, txOut.PkScript, txIdx)
idx.indexScriptPubKey(data, txOut.ScriptPubKey, txIdx)
}
}
}
@@ -693,7 +685,9 @@ func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *bl
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData) error {
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
_ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {

// The offset and length of the transactions within the serialized
// block.
txLocs, err := block.TxLoc()
@@ -701,12 +695,6 @@ func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, dag *blo
return err
}

// Get the internal block ID associated with the block.
blockID, err := dbFetchBlockIDByHash(dbTx, block.Hash())
if err != nil {
return err
}

// Build all of the address to transaction mappings in a local map.
addrsToTxns := make(writeIndexData)
idx.indexBlock(addrsToTxns, block, dag)
@@ -772,7 +760,7 @@ func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr util.Address, n
// the database transaction.
fetchBlockHash := func(id []byte) (*daghash.Hash, error) {
// Deserialize and populate the result.
return dbFetchBlockHashBySerializedID(dbTx, id)
return blockdag.DBFetchBlockHashBySerializedID(dbTx, id)
}

var err error
@@ -791,37 +779,35 @@ func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr util.Address, n
// script to the transaction.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) indexUnconfirmedAddresses(pkScript []byte, tx *util.Tx) {
func (idx *AddrIndex) indexUnconfirmedAddresses(scriptPubKey []byte, tx *util.Tx) {
// The error is ignored here since the only reason it can fail is if the
// script fails to parse and it was already validated before being
// admitted to the mempool.
_, addresses, _, _ := txscript.ExtractPkScriptAddrs(pkScript,
_, addr, _ := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
for _, addr := range addresses {
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
continue
}

// Add a mapping from the address to the transaction.
idx.unconfirmedLock.Lock()
addrIndexEntry := idx.txnsByAddr[addrKey]
if addrIndexEntry == nil {
addrIndexEntry = make(map[daghash.TxID]*util.Tx)
idx.txnsByAddr[addrKey] = addrIndexEntry
}
addrIndexEntry[*tx.ID()] = tx

// Add a mapping from the transaction to the address.
addrsByTxEntry := idx.addrsByTx[*tx.ID()]
if addrsByTxEntry == nil {
addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
idx.addrsByTx[*tx.ID()] = addrsByTxEntry
}
addrsByTxEntry[addrKey] = struct{}{}
idx.unconfirmedLock.Unlock()
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
return
}

// Add a mapping from the address to the transaction.
idx.unconfirmedLock.Lock()
addrIndexEntry := idx.txnsByAddr[addrKey]
if addrIndexEntry == nil {
addrIndexEntry = make(map[daghash.TxID]*util.Tx)
idx.txnsByAddr[addrKey] = addrIndexEntry
}
addrIndexEntry[*tx.ID()] = tx

// Add a mapping from the transaction to the address.
addrsByTxEntry := idx.addrsByTx[*tx.ID()]
if addrsByTxEntry == nil {
addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
idx.addrsByTx[*tx.ID()] = addrsByTxEntry
}
addrsByTxEntry[addrKey] = struct{}{}
idx.unconfirmedLock.Unlock()
}

// AddUnconfirmedTx adds all addresses related to the transaction to the
@@ -840,19 +826,19 @@ func (idx *AddrIndex) AddUnconfirmedTx(tx *util.Tx, utxoSet blockdag.UTXOSet) {
// transaction has already been validated and thus all inputs are
// already known to exist.
for _, txIn := range tx.MsgTx().TxIn {
entry, ok := utxoSet.Get(txIn.PreviousOutPoint)
entry, ok := utxoSet.Get(txIn.PreviousOutpoint)
if !ok {
// Ignore missing entries. This should never happen
// in practice since the function comments specifically
// call out all inputs must be available.
continue
}
idx.indexUnconfirmedAddresses(entry.PkScript(), tx)
idx.indexUnconfirmedAddresses(entry.ScriptPubKey(), tx)
}

// Index addresses of all created outputs.
for _, txOut := range tx.MsgTx().TxOut {
idx.indexUnconfirmedAddresses(txOut.PkScript, tx)
idx.indexUnconfirmedAddresses(txOut.ScriptPubKey, tx)
}
}

@@ -907,6 +893,15 @@ func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
return nil
}

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("addrindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the addrindex with --dropaddrindex", lastKnownBlockID-currentBlockID)
}

// NewAddrIndex returns a new instance of an indexer that is used to create a
// mapping of all addresses in the blockchain to the respective transactions
// that involve them.

@@ -7,6 +7,7 @@ package indexers
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"testing"

"github.com/daglabs/btcd/wire"
@@ -127,17 +128,17 @@ func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal i
if (highestLevel != 0 && numEntries == 0) ||
numEntries > maxEntries {

return fmt.Errorf("level %d has %d entries",
return errors.Errorf("level %d has %d entries",
level, numEntries)
}
} else if numEntries != maxEntries && numEntries != maxEntries/2 {
return fmt.Errorf("level %d has %d entries", level,
return errors.Errorf("level %d has %d entries", level,
numEntries)
}
maxEntries *= 2
}
if totalEntries != expectedTotal {
return fmt.Errorf("expected %d entries - got %d", expectedTotal,
return errors.Errorf("expected %d entries - got %d", expectedTotal,
totalEntries)
}

@@ -151,7 +152,7 @@ func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal i
start := i * txEntrySize
num := byteOrder.Uint32(data[start:])
if num != expectedNum {
return fmt.Errorf("level %d offset %d does "+
return errors.Errorf("level %d offset %d does "+
"not contain the expected number of "+
"%d - got %d", level, i, num,
expectedNum)
@@ -222,7 +223,7 @@ nextTest:
for i := 0; i < test.numInsert; i++ {
txLoc := wire.TxLoc{TxStart: i * 2}
err := dbPutAddrIndexEntry(populatedBucket, test.key,
uint32(i), txLoc)
uint64(i), txLoc)
if err != nil {
t.Errorf("dbPutAddrIndexEntry #%d (%s) - "+
"unexpected error: %v", testNum,

@@ -1,76 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
"sync"
"time"

"github.com/btcsuite/btclog"
"github.com/daglabs/btcd/util"
)

// blockProgressLogger provides periodic logging for other services in order
// to show users progress of certain "actions" involving some or all current
// blocks. Ex: syncing to best chain, indexing all blocks, etc.
type blockProgressLogger struct {
receivedLogBlocks int64
receivedLogTx int64
lastBlockLogTime time.Time

subsystemLogger btclog.Logger
progressAction string
sync.Mutex
}

// newBlockProgressLogger returns a new block progress logger.
// The progress message is templated as follows:
// {progressAction} {numProcessed} {blocks|block} in the last {timePeriod}
// ({numTxs}, height {lastBlockHeight}, {lastBlockTimeStamp})
func newBlockProgressLogger(progressMessage string, logger btclog.Logger) *blockProgressLogger {
return &blockProgressLogger{
lastBlockLogTime: time.Now(),
progressAction: progressMessage,
subsystemLogger: logger,
}
}

// LogBlockHeight logs a new block height as an information message to show
// progress to the user. In order to prevent spam, it limits logging to one
// message every 10 seconds with duration and totals included.
func (b *blockProgressLogger) LogBlockHeight(block *util.Block) {
b.Lock()
defer b.Unlock()

b.receivedLogBlocks++
b.receivedLogTx += int64(len(block.MsgBlock().Transactions))

now := time.Now()
duration := now.Sub(b.lastBlockLogTime)
if duration < time.Second*10 {
return
}

// Truncate the duration to 10s of milliseconds.
durationMillis := int64(duration / time.Millisecond)
tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)

// Log information about new block height.
blockStr := "blocks"
if b.receivedLogBlocks == 1 {
blockStr = "block"
}
txStr := "transactions"
if b.receivedLogTx == 1 {
txStr = "transaction"
}
b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, height %d, %s)",
b.progressAction, b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx,
txStr, block.Height(), block.MsgBlock().Header.Timestamp)

b.receivedLogBlocks = 0
b.receivedLogTx = 0
b.lastBlockLogTime = now
}
@@ -5,13 +5,13 @@
package indexers

import (
"errors"
"github.com/pkg/errors"

"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/gcs"
"github.com/daglabs/btcd/util/gcs/builder"
"github.com/daglabs/btcd/wire"
@@ -84,7 +84,7 @@ var _ Indexer = (*CfIndex)(nil)

// Init initializes the hash-based cf index. This is part of the Indexer
// interface.
func (idx *CfIndex) Init(db database.DB) error {
func (idx *CfIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
idx.db = db
return nil
}
@@ -176,14 +176,19 @@ func storeFilter(dbTx database.Tx, block *util.Block, f *gcs.Filter,
if header.IsGenesis() {
prevHeader = &daghash.ZeroHash
} else {
ph := header.SelectedParentHash()
pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph)
// TODO(Evgeny): Current implementation of GCS filter inherited from chain
// (single parent) and must be ported to DAG (multiple parents)
var parentHash *daghash.Hash
if header.NumParentBlocks() != 0 {
parentHash = header.ParentHashes[0]
}
prevFilterHashBytes, err := dbFetchFilterIdxEntry(dbTx, hkey, parentHash)
if err != nil {
return err
}

// Construct the new block's filter header, and store it.
prevHeader, err = daghash.NewHash(pfh)
prevHeader, err = daghash.NewHash(prevFilterHashBytes)
if err != nil {
return err
}
@@ -199,8 +204,8 @@ func storeFilter(dbTx database.Tx, block *util.Block, f *gcs.Filter,
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a hash-to-cf mapping for
// every passed block. This is part of the Indexer interface.
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *util.Block,
_ *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData) error {
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *util.Block, _ uint64,
_ *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {

f, err := builder.BuildBasicFilter(block.MsgBlock())
if err != nil {
@@ -335,6 +340,15 @@ func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*daghash.Hash,
return idx.entriesByBlockHashes(cfHashKeys, filterType, blockHashes)
}

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *CfIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("cfindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the cfindex with --dropcfindex", lastKnownBlockID-currentBlockID)
}

// NewCfIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all blocks in the blockchain to their respective
// committed filters.

@@ -9,11 +9,10 @@ package indexers

import (
"encoding/binary"
"errors"

"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/pkg/errors"
)

var (
@@ -48,11 +47,20 @@ type Indexer interface {
// Init is invoked when the index manager is first initializing the
// index. This differs from the Create method in that it is called on
// every load, including the case the index was just created.
Init(db database.DB) error
Init(db database.DB, dag *blockdag.BlockDAG) error

// ConnectBlock is invoked when the index manager is notified that a new
// block has been connected to the DAG.
ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData) error
ConnectBlock(dbTx database.Tx,
block *util.Block,
blockID uint64,
dag *blockdag.BlockDAG,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error
}
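To see what the widened contract demands of an implementation, here is a hypothetical no-op indexer satisfying the interface as now defined (Key, Name, and Create are the remaining Indexer methods shown with the AcceptanceIndex above). The type and its behavior are illustrative only and not part of this commit:

// noopIndex does nothing; it exists to show the required method set.
type noopIndex struct{}

func (idx *noopIndex) Key() []byte { return []byte("noopidx") }

func (idx *noopIndex) Name() string { return "noop index" }

func (idx *noopIndex) Create(dbTx database.Tx) error {
// Create the bucket the index would live in.
_, err := dbTx.Metadata().CreateBucket(idx.Key())
return err
}

func (idx *noopIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
return nil
}

func (idx *noopIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64,
dag *blockdag.BlockDAG, acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// A real index would persist data keyed by blockID here.
return nil
}

func (idx *noopIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
// Nothing to backfill for an index that stores nothing.
return nil
}

var _ Indexer = (*noopIndex)(nil)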

// AssertError identifies an error that indicates an internal code consistency

@@ -5,16 +5,9 @@
package indexers

import (
"github.com/btcsuite/btclog"
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/util/panics"
)

// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger

// The default amount of logging is none.
func init() {
log, _ = logger.Get(logger.SubsystemTags.INDX)
}
var log, _ = logger.Get(logger.SubsystemTags.INDX)
var spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)

@@ -8,12 +8,15 @@ import (
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
)

var (
// indexTipsBucketName is the name of the db bucket used to house the
// current tip of each index.
indexTipsBucketName = []byte("idxtips")

indexCurrentBlockIDBucketName = []byte("idxcurrentblockid")
)

// Manager defines an index manager that manages multiple optional indexes and
@@ -146,6 +149,9 @@ func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-
if err != nil {
return err
}
if _, err := meta.CreateBucketIfNotExists(indexCurrentBlockIDBucketName); err != nil {
return err
}

return m.maybeCreateIndexes(dbTx)
})
@@ -155,12 +161,35 @@ func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-

// Initialize each of the enabled indexes.
for _, indexer := range m.enabledIndexes {
if err := indexer.Init(db); err != nil {
if err := indexer.Init(db, blockDAG); err != nil {
return err
}
}

return nil
return m.recoverIfNeeded()
}

// recoverIfNeeded checks whether the node ran for some time
// without one of the currently enabled indexes and, if that is
// the case, recovers the missing blocks into the index.
func (m *Manager) recoverIfNeeded() error {
return m.db.Update(func(dbTx database.Tx) error {
lastKnownBlockID := blockdag.DBFetchCurrentBlockID(dbTx)
for _, indexer := range m.enabledIndexes {
serializedCurrentIdxBlockID := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Get(indexer.Key())
currentIdxBlockID := uint64(0)
if serializedCurrentIdxBlockID != nil {
currentIdxBlockID = blockdag.DeserializeBlockID(serializedCurrentIdxBlockID)
}
if lastKnownBlockID > currentIdxBlockID {
err := indexer.Recover(dbTx, currentIdxBlockID, lastKnownBlockID)
if err != nil {
return err
}
}
}
return nil
})
}
|
||||

// ConnectBlock must be invoked when a block is extending the main chain. It
@@ -168,12 +197,32 @@ func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-
// checks, and invokes each indexer.
//
// This is part of the blockchain.IndexManager interface.
func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
    txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {

    // Call each of the currently active optional indexes with the block
    // being connected so they can update accordingly.
    for _, index := range m.enabledIndexes {
        // Notify the indexer with the connected block so it can index it.
        if err := index.ConnectBlock(dbTx, block, dag, txsAcceptanceData); err != nil {
        if err := index.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData); err != nil {
            return err
        }
    }

    // Add the new block ID index entry for the block being connected and
    // update the current internal block ID accordingly.
    err := m.updateIndexersWithCurrentBlockID(dbTx, block.Hash(), blockID)
    if err != nil {
        return err
    }
    return nil
}

func (m *Manager) updateIndexersWithCurrentBlockID(dbTx database.Tx, blockHash *daghash.Hash, blockID uint64) error {
    serializedBlockID := blockdag.SerializeBlockID(blockID)
    for _, index := range m.enabledIndexes {
        err := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Put(index.Key(), serializedBlockID)
        if err != nil {
            return err
        }
    }
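The encoding behind blockdag.SerializeBlockID / DeserializeBlockID is not shown in this diff; the format comments further down only guarantee that block IDs are uint64 values serialized to 8 bytes. A minimal sketch under that assumption follows — the little-endian choice is a guess, not something this changeset confirms.

package blockdagsketch // illustrative sketch only, not part of this changeset

import "encoding/binary"

// SerializeBlockID encodes a block ID as a fixed 8-byte value. The
// little-endian byte order here is an assumption; the diff only
// guarantees uint64 IDs serialized to 8 bytes.
func SerializeBlockID(blockID uint64) []byte {
    serializedBlockID := make([]byte, 8)
    binary.LittleEndian.PutUint64(serializedBlockID, blockID)
    return serializedBlockID
}

// DeserializeBlockID is the inverse of SerializeBlockID.
func DeserializeBlockID(serializedBlockID []byte) uint64 {
    return binary.LittleEndian.Uint64(serializedBlockID)
}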
@@ -316,13 +365,6 @@ func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan s
        })
    }

    // Call extra index specific deinitialization for the transaction index.
    if idxName == txIndexName {
        if err := dropBlockIDIndex(db); err != nil {
            return err
        }
    }

    // Remove the index tip, index bucket, and in-progress drop flag now
    // that all index entries have been removed.
    err = db.Update(func(dbTx database.Tx) error {
@@ -332,6 +374,10 @@ func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan s
            return err
        }

        if err := meta.Bucket(indexCurrentBlockIDBucketName).Delete(idxKey); err != nil {
            return err
        }

        return indexesBucket.Delete(indexDropKey(idxKey))
    })
    if err != nil {

@@ -6,12 +6,12 @@ package indexers

import (
    "fmt"

    "github.com/daglabs/btcd/blockdag"
    "github.com/daglabs/btcd/dagconfig/daghash"
    "github.com/daglabs/btcd/database"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/daghash"
    "github.com/daglabs/btcd/wire"
    "github.com/pkg/errors"
)

const (
@@ -19,40 +19,26 @@ const (
    txIndexName = "transaction index"

    includingBlocksIndexKeyEntrySize = 8 // 4 bytes for offset + 4 bytes for transaction length

    acceptingBlocksIndexKeyEntrySize = 4 // 4 bytes for accepting block ID
)

var (
    includingBlocksIndexKey = []byte("includingblocksidx")

    acceptingBlocksIndexKey = []byte("acceptingblocksidx")

    // idByHashIndexBucketName is the name of the db bucket used to house
    // the block id -> block hash index.
    idByHashIndexBucketName = []byte("idbyhashidx")

    // hashByIDIndexBucketName is the name of the db bucket used to house
    // the block hash -> block id index.
    hashByIDIndexBucketName = []byte("hashbyididx")
)

// txsAcceptedByVirtual is the in-memory index of txIDs that were accepted
// by the current virtual.
var txsAcceptedByVirtual map[daghash.TxID]bool

// -----------------------------------------------------------------------------
// The transaction index consists of an entry for every transaction in the DAG.
// In order to significantly optimize the space requirements, a separate
// index is used which provides an internal mapping between each block that has
// been indexed and a unique ID for use within the hash to location mappings.
// The ID is simply a sequentially incremented uint32. This is useful because it
// is only 4 bytes versus 32-byte hashes and thus saves a ton of space in the
// index.
//
// There are four buckets used in total. The first bucket maps the hash of
// There are two buckets used in total. The first bucket maps the hash of
// each transaction to its location in each block it's included in. The second bucket
// contains all of the blocks that from their viewpoint the transaction has been
// accepted (i.e. the transaction is found in their blue set without double spends),
// and their blue block (or themselves) that included the transaction. The third
// bucket maps the hash of each block to the unique ID and the fourth maps
// that ID back to the block hash.
// and their blue block (or themselves) that included the transaction.
//
// NOTE: Although it is technically possible for multiple transactions to have
// the same hash as long as the previous transaction with the same hash is fully
@@ -67,123 +53,43 @@ var (
// <block id> = <start offset><tx length>
//
// Field               Type     Size
// block id            uint32   4 bytes
// block id            uint64   8 bytes
// start offset        uint32   4 bytes
// tx length           uint32   4 bytes
// -----
// Total: 12 bytes
// Total: 16 bytes
//
// The accepting blocks index contains a sub bucket for each transaction hash (32 bytes each), whose serialized format is:
//
// <accepting block id> = <including block id>
//
// Field               Type     Size
// accepting block id  uint32   4 bytes
// including block id  uint32   4 bytes
// accepting block id  uint64   8 bytes
// including block id  uint64   8 bytes
// -----
// Total: 8 bytes
//
// The serialized format for keys and values in the block hash to ID bucket is:
// <hash> = <ID>
//
// Field   Type           Size
// hash    daghash.Hash   32 bytes
// ID      uint32         4 bytes
// -----
// Total: 36 bytes
//
// The serialized format for keys and values in the ID to block hash bucket is:
// <ID> = <hash>
//
// Field   Type           Size
// ID      uint32         4 bytes
// hash    daghash.Hash   32 bytes
// -----
// Total: 36 bytes
// Total: 16 bytes
//
// -----------------------------------------------------------------------------

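To make the 16-byte layout above concrete: each including-blocks entry is keyed by the 8-byte serialized block ID, and its 8-byte value packs a 4-byte start offset followed by a 4-byte transaction length. A hedged sketch of that value encoding follows; byteOrder is redeclared here so the example is self-contained, and its endianness is an assumption rather than something this diff specifies.

package txindexsketch // illustrative sketch only, not part of this changeset

import "encoding/binary"

// byteOrder stands in for the indexers package's byte order; the choice
// of little-endian is an assumption for this sketch.
var byteOrder = binary.LittleEndian

// encodeTxLocValue packs a transaction's location within its block into
// the 8-byte value stored under the 8-byte block-ID key (16 bytes total).
func encodeTxLocValue(txStart, txLen uint32) []byte {
    serialized := make([]byte, 8)
    byteOrder.PutUint32(serialized[0:4], txStart)
    byteOrder.PutUint32(serialized[4:8], txLen)
    return serialized
}

// decodeTxLocValue is the inverse of encodeTxLocValue.
func decodeTxLocValue(serialized []byte) (txStart, txLen uint32) {
    return byteOrder.Uint32(serialized[0:4]), byteOrder.Uint32(serialized[4:8])
}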
// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
// the index entries for the hash to id and id to hash mappings for the provided
// values.
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, id uint32) error {
    // Serialize the height for use in the index entries.
    var serializedID [4]byte
    byteOrder.PutUint32(serializedID[:], id)

    // Add the block hash to ID mapping to the index.
    meta := dbTx.Metadata()
    hashIndex := meta.Bucket(idByHashIndexBucketName)
    if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
        return err
    }

    // Add the block ID to hash mapping to the index.
    idIndex := meta.Bucket(hashByIDIndexBucketName)
    return idIndex.Put(serializedID[:], hash[:])
}

// dbFetchBlockIDByHash uses an existing database transaction to retrieve the
// block id for the provided hash from the index.
func dbFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint32, error) {
    hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
    serializedID := hashIndex.Get(hash[:])
    if serializedID == nil {
        return 0, fmt.Errorf("No entry in the block ID index for block with hash %s", hash)
    }

    return byteOrder.Uint32(serializedID), nil
}

// dbFetchBlockHashBySerializedID uses an existing database transaction to
// retrieve the hash for the provided serialized block id from the index.
func dbFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
    idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
    hashBytes := idIndex.Get(serializedID)
    if hashBytes == nil {
        return nil, fmt.Errorf("No entry in the block ID index for block with id %d", byteOrder.Uint32(serializedID))
    }

    var hash daghash.Hash
    copy(hash[:], hashBytes)
    return &hash, nil
}

// dbFetchBlockHashByID uses an existing database transaction to retrieve the
// hash for the provided block id from the index.
func dbFetchBlockHashByID(dbTx database.Tx, id uint32) (*daghash.Hash, error) {
    var serializedID [4]byte
    byteOrder.PutUint32(serializedID[:], id)
    return dbFetchBlockHashBySerializedID(dbTx, serializedID[:])
}

func putIncludingBlocksEntry(target []byte, txLoc wire.TxLoc) {
    byteOrder.PutUint32(target, uint32(txLoc.TxStart))
    byteOrder.PutUint32(target[4:], uint32(txLoc.TxLen))
}

func putAcceptingBlocksEntry(target []byte, includingBlockID uint32) {
    byteOrder.PutUint32(target, includingBlockID)
}

func dbPutIncludingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint32, serializedData []byte) error {
func dbPutIncludingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
    bucket, err := dbTx.Metadata().Bucket(includingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
    if err != nil {
        return err
    }
    blockIDBytes := make([]byte, 4)
    byteOrder.PutUint32(blockIDBytes, uint32(blockID))
    return bucket.Put(blockIDBytes, serializedData)
    return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}

func dbPutAcceptingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint32, serializedData []byte) error {
func dbPutAcceptingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
    bucket, err := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
    if err != nil {
        return err
    }
    blockIDBytes := make([]byte, 4)
    byteOrder.PutUint32(blockIDBytes, uint32(blockID))
    return bucket.Put(blockIDBytes, serializedData)
    return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}

// dbFetchFirstTxRegion uses an existing database transaction to fetch the block
@@ -199,7 +105,7 @@ func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.Block
    if txBucket == nil {
        return nil, database.Error{
            ErrorCode: database.ErrCorruption,
            Description: fmt.Sprintf("No block region"+
            Description: fmt.Sprintf("No block region "+
                "was found for %s", txID),
        }
    }
@@ -207,11 +113,11 @@ func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.Block
    if ok := cursor.First(); !ok {
        return nil, database.Error{
            ErrorCode: database.ErrCorruption,
            Description: fmt.Sprintf("No block region"+
            Description: fmt.Sprintf("No block region "+
                "was found for %s", txID),
        }
    }
    blockIDBytes := cursor.Key()
    serializedBlockID := cursor.Key()
    serializedData := cursor.Value()
    if len(serializedData) == 0 {
        return nil, nil
@@ -227,7 +133,7 @@ func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.Block
    }

    // Load the block hash associated with the block ID.
    hash, err := dbFetchBlockHashBySerializedID(dbTx, blockIDBytes)
    hash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
    if err != nil {
        return nil, database.Error{
            ErrorCode: database.ErrCorruption,
@@ -247,7 +153,7 @@ func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.Block

// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint32, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint64, multiBlockTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
    // The offset and length of the transactions within the serialized
    // block.
    txLocs, err := block.TxLoc()
@@ -273,22 +179,21 @@ func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint32, tx
        includingBlocksOffset += includingBlocksIndexKeyEntrySize
    }

    for includingBlockHash, blockTxsAcceptanceData := range txsAcceptanceData {
        var includingBlockID uint32
        if includingBlockHash.IsEqual(block.Hash()) {
    for _, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
        var includingBlockID uint64
        if blockTxsAcceptanceData.BlockHash.IsEqual(block.Hash()) {
            includingBlockID = blockID
        } else {
            includingBlockID, err = dbFetchBlockIDByHash(dbTx, &includingBlockHash)
            includingBlockID, err = blockdag.DBFetchBlockIDByHash(dbTx, &blockTxsAcceptanceData.BlockHash)
            if err != nil {
                return err
            }
        }

        includingBlockIDBytes := make([]byte, 4)
        byteOrder.PutUint32(includingBlockIDBytes, uint32(includingBlockID))
        serializedIncludingBlockID := blockdag.SerializeBlockID(includingBlockID)

        for _, txAcceptanceData := range blockTxsAcceptanceData {
            err = dbPutAcceptingBlocksEntry(dbTx, txAcceptanceData.Tx.ID(), blockID, includingBlockIDBytes)
        for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
            err = dbPutAcceptingBlocksEntry(dbTx, txAcceptanceData.Tx.ID(), blockID, serializedIncludingBlockID)
            if err != nil {
                return err
            }
@@ -298,11 +203,28 @@ func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint32, tx
    return nil
}

func updateTxsAcceptedByVirtual(virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
    // Initialize a new txsAcceptedByVirtual
    entries := 0
    for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
        entries += len(blockTxsAcceptanceData.TxAcceptanceData)
    }
    txsAcceptedByVirtual = make(map[daghash.TxID]bool, entries)

    // Copy virtualTxsAcceptanceData to txsAcceptedByVirtual
    for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
        for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
            txsAcceptedByVirtual[*txAcceptanceData.Tx.ID()] = true
        }
    }

    return nil
}

// TxIndex implements a transaction by hash index. That is to say, it supports
// querying all transactions by their hash.
type TxIndex struct {
    db         database.DB
    curBlockID uint32
    db database.DB
}

// Ensure the TxIndex type implements the Indexer interface.
@@ -313,63 +235,18 @@ var _ Indexer = (*TxIndex)(nil)
// disconnecting blocks.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Init(db database.DB) error {
func (idx *TxIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
    idx.db = db

    // Find the latest known block id field for the internal block id
    // index and initialize it. This is done because it's a lot more
    // efficient to do a single search at initialize time than it is to
    // write another value to the database on every update.
    err := idx.db.View(func(dbTx database.Tx) error {
        // Scan forward in large gaps to find a block id that doesn't
        // exist yet to serve as an upper bound for the binary search
        // below.
        var highestKnown, nextUnknown uint32
        testBlockID := uint32(1)
        increment := uint32(100000)
        for {
            _, err := dbFetchBlockHashByID(dbTx, testBlockID)
            if err != nil {
                nextUnknown = testBlockID
                break
            }

            highestKnown = testBlockID
            testBlockID += increment
        }
        log.Tracef("Forward scan (highest known %d, next unknown %d)",
            highestKnown, nextUnknown)

        // No used block IDs due to new database.
        if nextUnknown == 1 {
            return nil
        }

        // Use a binary search to find the final highest used block id.
        // This will take at most ceil(log_2(increment)) attempts.
        for {
            testBlockID = (highestKnown + nextUnknown) / 2
            _, err := dbFetchBlockHashByID(dbTx, testBlockID)
            if err != nil {
                nextUnknown = testBlockID
            } else {
                highestKnown = testBlockID
            }
            log.Tracef("Binary scan (highest known %d, next "+
                "unknown %d)", highestKnown, nextUnknown)
            if highestKnown+1 == nextUnknown {
                break
            }
        }

        idx.curBlockID = highestKnown
        return nil
    })
    // Initialize the txsAcceptedByVirtual index
    virtualTxsAcceptanceData, err := dag.TxsAcceptedByVirtual()
    if err != nil {
        return err
    }
    err = updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
    if err != nil {
        return err
    }

    log.Debugf("Current internal block ID: %d", idx.curBlockID)
    return nil
}

@@ -394,12 +271,6 @@ func (idx *TxIndex) Name() string {
// This is part of the Indexer interface.
func (idx *TxIndex) Create(dbTx database.Tx) error {
    meta := dbTx.Metadata()
    if _, err := meta.CreateBucket(idByHashIndexBucketName); err != nil {
        return err
    }
    if _, err := meta.CreateBucket(hashByIDIndexBucketName); err != nil {
        return err
    }
    if _, err := meta.CreateBucket(includingBlocksIndexKey); err != nil {
        return err
    }
@@ -413,24 +284,16 @@ func (idx *TxIndex) Create(dbTx database.Tx) error {
// for every transaction in the passed block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, _ *blockdag.BlockDAG, acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error {
    // Increment the internal block ID to use for the block being connected
    // and add all of the transactions in the block to the index.
    newBlockID := idx.curBlockID + 1
    if block.MsgBlock().Header.IsGenesis() {
        newBlockID = 0
    }
    if err := dbAddTxIndexEntries(dbTx, block, newBlockID, acceptedTxsData); err != nil {
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
    acceptedTxsData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
    if err := dbAddTxIndexEntries(dbTx, block, blockID, acceptedTxsData); err != nil {
        return err
    }

    // Add the new block ID index entry for the block being connected and
    // update the current internal block ID accordingly.
    err := dbPutBlockIDIndexEntry(dbTx, block.Hash(), newBlockID)
    err := updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
    if err != nil {
        return err
    }
    idx.curBlockID = newBlockID
    return nil
}

@@ -474,9 +337,8 @@ func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]*daghash.Hash, e
                "were found for %s", txHash),
        }
    }
    err := bucket.ForEach(func(blockIDBytes, _ []byte) error {
        blockID := byteOrder.Uint32(blockIDBytes)
        blockHash, err := dbFetchBlockHashByID(dbTx, blockID)
    err := bucket.ForEach(func(serializedBlockID, _ []byte) error {
        blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
        if err != nil {
            return err
        }
@@ -501,29 +363,34 @@ func (idx *TxIndex) BlockThatAcceptedTx(dag *blockdag.BlockDAG, txID *daghash.Tx
}

func dbFetchTxAcceptingBlock(dbTx database.Tx, txID *daghash.TxID, dag *blockdag.BlockDAG) (*daghash.Hash, error) {
    // If the transaction was accepted by the current virtual,
    // return the zeroHash immediately
    if _, ok := txsAcceptedByVirtual[*txID]; ok {
        return &daghash.ZeroHash, nil
    }

    bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txID[:])
    if bucket == nil {
        return nil, database.Error{
            ErrorCode: database.ErrCorruption,
            Description: fmt.Sprintf("No accepting blocks "+
                "were found for %s", txID),
            Description: fmt.Sprintf("No accepting blocks bucket "+
                "exists for %s", txID),
        }
    }
    cursor := bucket.Cursor()
    if !cursor.First() {
        return nil, database.Error{
            ErrorCode: database.ErrCorruption,
            Description: fmt.Sprintf("No accepting blocks "+
                "were found for %s", txID),
            Description: fmt.Sprintf("Accepting blocks bucket is "+
                "empty for %s", txID),
        }
    }
    for ; cursor.Key() != nil; cursor.Next() {
        blockID := byteOrder.Uint32(cursor.Key())
        blockHash, err := dbFetchBlockHashByID(dbTx, blockID)
        blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, cursor.Key())
        if err != nil {
            return nil, err
        }
        if dag.IsInSelectedPathChain(blockHash) {
        if dag.IsInSelectedParentChain(blockHash) {
            return blockHash, nil
        }
    }
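Taken together, the lookup above gives callers a simple contract: BlockThatAcceptedTx returns the accepting chain block's hash, or daghash.ZeroHash when only the virtual has accepted the transaction so far. A hedged usage sketch follows; the harness function and the blockdag/indexers import path are assumptions, while the method and its ZeroHash convention come from this diff.

package txindexusage // illustrative sketch only, not part of this changeset

import (
    "fmt"

    "github.com/daglabs/btcd/blockdag"
    "github.com/daglabs/btcd/blockdag/indexers"
    "github.com/daglabs/btcd/util/daghash"
)

// describeAcceptance reports where the transaction index says txID was
// accepted. A ZeroHash answer means "accepted by the current virtual",
// i.e. no selected-parent-chain block has accepted it yet.
func describeAcceptance(txIndex *indexers.TxIndex, dag *blockdag.BlockDAG, txID *daghash.TxID) (string, error) {
    acceptingBlock, err := txIndex.BlockThatAcceptedTx(dag, txID)
    if err != nil {
        return "", err
    }
    if acceptingBlock.IsEqual(&daghash.ZeroHash) {
        return "accepted by the virtual block only", nil
    }
    return fmt.Sprintf("accepted by chain block %s", acceptingBlock), nil
}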
@@ -541,19 +408,6 @@ func NewTxIndex() *TxIndex {
    return &TxIndex{}
}

// dropBlockIDIndex drops the internal block id index.
func dropBlockIDIndex(db database.DB) error {
    return db.Update(func(dbTx database.Tx) error {
        meta := dbTx.Metadata()
        err := meta.DeleteBucket(idByHashIndexBucketName)
        if err != nil {
            return err
        }

        return meta.DeleteBucket(hashByIDIndexBucketName)
    })
}

// DropTxIndex drops the transaction index from the provided database if it
// exists. Since the address index relies on it, the address index will also be
// dropped when it exists.
@@ -570,3 +424,12 @@ func DropTxIndex(db database.DB, interrupt <-chan struct{}) error {

    return dropIndex(db, acceptingBlocksIndexKey, txIndexName, interrupt)
}

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
    return errors.Errorf("txindex was turned off for %d blocks and can't be recovered."+
        " To resume working drop the txindex with --droptxindex", lastKnownBlockID-currentBlockID)
}

@@ -7,19 +7,25 @@ import (

    "github.com/daglabs/btcd/blockdag"
    "github.com/daglabs/btcd/dagconfig"
    "github.com/daglabs/btcd/dagconfig/daghash"
    "github.com/daglabs/btcd/mining"
    "github.com/daglabs/btcd/txscript"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/daghash"
    "github.com/daglabs/btcd/wire"
)

func createTransaction(value uint64, originTx *wire.MsgTx, outputIndex uint32) *wire.MsgTx {
func createTransaction(t *testing.T, value uint64, originTx *wire.MsgTx, outputIndex uint32) *wire.MsgTx {
    signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
    if err != nil {
        t.Fatalf("Error creating signature script: %s", err)
    }
    txIn := &wire.TxIn{
        PreviousOutPoint: wire.OutPoint{
            TxID: originTx.TxID(),
        PreviousOutpoint: wire.Outpoint{
            TxID: *originTx.TxID(),
            Index: outputIndex,
        },
        Sequence: wire.MaxTxInSequenceNum,
        Sequence:        wire.MaxTxInSequenceNum,
        SignatureScript: signatureScript,
    }
    txOut := wire.NewTxOut(value, blockdag.OpTrueScript)
    tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
@@ -34,7 +40,7 @@ func TestTxIndexConnectBlock(t *testing.T) {
    indexManager := NewManager([]Indexer{txIndex})

    params := dagconfig.SimNetParams
    params.BlockRewardMaturity = 1
    params.BlockCoinbaseMaturity = 0
    params.K = 1

    config := blockdag.Config{
@@ -51,16 +57,20 @@ func TestTxIndexConnectBlock(t *testing.T) {
    }

    prepareAndProcessBlock := func(parentHashes []*daghash.Hash, transactions []*wire.MsgTx, blockName string) *wire.MsgBlock {
        block, err := mining.PrepareBlockForTest(dag, &params, parentHashes, transactions, false, 1)
        block, err := mining.PrepareBlockForTest(dag, &params, parentHashes, transactions, false)
        if err != nil {
            t.Fatalf("TestTxIndexConnectBlock: block %v got unexpected error from PrepareBlockForTest: %v", blockName, err)
        }
        utilBlock := util.NewBlock(block)
        blocks[*block.BlockHash()] = utilBlock
        isOrphan, err := dag.ProcessBlock(utilBlock, blockdag.BFNoPoWCheck)
        isOrphan, delay, err := dag.ProcessBlock(utilBlock, blockdag.BFNoPoWCheck)
        if err != nil {
            t.Fatalf("TestTxIndexConnectBlock: dag.ProcessBlock got unexpected error for block %v: %v", blockName, err)
        }
        if delay != 0 {
            t.Fatalf("TestTxIndexConnectBlock: block %s "+
                "is too far in the future", blockName)
        }
        if isOrphan {
            t.Fatalf("TestTxIndexConnectBlock: block %v was unexpectedly orphan", blockName)
        }
@@ -68,37 +78,47 @@ func TestTxIndexConnectBlock(t *testing.T) {
    }

    block1 := prepareAndProcessBlock([]*daghash.Hash{params.GenesisHash}, nil, "1")
    block2Tx := createTransaction(block1.Transactions[0].TxOut[0].Value, block1.Transactions[0], 0)
    block2Tx := createTransaction(t, block1.Transactions[0].TxOut[0].Value, block1.Transactions[0], 0)
    block2 := prepareAndProcessBlock([]*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{block2Tx}, "2")
    block3Tx := createTransaction(block2.Transactions[0].TxOut[0].Value, block2.Transactions[0], 0)
    block3Tx := createTransaction(t, block2.Transactions[0].TxOut[0].Value, block2.Transactions[0], 0)
    block3 := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3")

    block3TxID := block3Tx.TxID()
    block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, &block3TxID)
    block2TxID := block2Tx.TxID()
    block2TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
    if err != nil {
        t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
    }
    block3Hash := block3.BlockHash()
    if !block3TxNewAcceptedBlock.IsEqual(block3Hash) {
    if !block2TxNewAcceptedBlock.IsEqual(block3Hash) {
        t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
            "been accepted in block %v but instead got accepted in block %v", block3Hash, block2TxNewAcceptedBlock)
    }

    block3TxID := block3Tx.TxID()
    block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block3TxID)
    if err != nil {
        t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
    }
    if !block3TxNewAcceptedBlock.IsEqual(&daghash.ZeroHash) {
        t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
            "been accepted in block %v but instead got accepted in block %v", block3Hash, block3TxNewAcceptedBlock)
            "been accepted by the virtual block but instead got accepted in block %v", block3TxNewAcceptedBlock)
    }

    block3A := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3A")
    block4 := prepareAndProcessBlock([]*daghash.Hash{block3.BlockHash()}, nil, "4")
    prepareAndProcessBlock([]*daghash.Hash{block3A.BlockHash(), block4.BlockHash()}, nil, "5")

    block3TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, &block3TxID)
    block2TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
    if err != nil {
        t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
    }
    block3AHash := block3A.BlockHash()
    if !block3TxAcceptedBlock.IsEqual(block3AHash) {
        t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
            "been accepted in block %v but instead got accepted in block %v", block3AHash, block3TxAcceptedBlock)
    if !block2TxAcceptedBlock.IsEqual(block3AHash) {
        t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
            "been accepted in block %v but instead got accepted in block %v", block3AHash, block2TxAcceptedBlock)
    }

    region, err := txIndex.TxFirstBlockRegion(&block3TxID)
    region, err := txIndex.TxFirstBlockRegion(block3TxID)
    if err != nil {
        t.Fatalf("TestTxIndexConnectBlock: no block region was found for block3Tx")
    }

@@ -5,16 +5,9 @@
package blockdag

import (
    "github.com/btcsuite/btclog"
    "github.com/daglabs/btcd/logger"
    "github.com/daglabs/btcd/util/panics"
)

// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger

// The default amount of logging is none.
func init() {
    log, _ = logger.Get(logger.SubsystemTags.CHAN)
}
var log, _ = logger.Get(logger.SubsystemTags.BDAG)
var spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)

@@ -7,8 +7,8 @@ package blockdag
import (
    "math"

    "github.com/daglabs/btcd/dagconfig/daghash"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/daghash"
)

// MerkleTree holds the hashes of a merkle tree

@@ -5,6 +5,7 @@
package blockdag

import (
    "github.com/daglabs/btcd/util/daghash"
    "testing"

    "github.com/daglabs/btcd/util"
@@ -24,8 +25,11 @@ func TestMerkle(t *testing.T) {

    idMerkleTree := BuildIDMerkleTreeStore(block.Transactions())
    calculatedIDMerkleRoot := idMerkleTree.Root()
    wantIDMerkleRoot := Block100000.Header.IDMerkleRoot
    if !wantIDMerkleRoot.IsEqual(calculatedIDMerkleRoot) {
    wantIDMerkleRoot, err := daghash.NewHashFromStr("3f69feb7edf5d0d67930afc990c8ec931e3428d7c7a65d7af6b81079319eb110")
    if err != nil {
        t.Errorf("BuildIDMerkleTreeStore: unexpected error: %s", err)
    }
    if !calculatedIDMerkleRoot.IsEqual(wantIDMerkleRoot) {
        t.Errorf("BuildIDMerkleTreeStore: ID merkle root mismatch - "+
            "got %v, want %v", calculatedIDMerkleRoot, wantIDMerkleRoot)
    }

@@ -6,6 +6,8 @@ package blockdag

import (
    "fmt"
    "github.com/daglabs/btcd/util"
    "github.com/daglabs/btcd/util/daghash"
)

// NotificationType represents the type of a notification message.
@@ -20,12 +22,17 @@ const (
    // NTBlockAdded indicates the associated block was added into
    // the blockDAG.
    NTBlockAdded NotificationType = iota

    // NTChainChanged indicates that the selected parent
    // chain has changed.
    NTChainChanged
)

// notificationTypeStrings is a map of notification types back to their constant
// names for pretty printing.
var notificationTypeStrings = map[NotificationType]string{
    NTBlockAdded: "NTBlockAdded",
    NTBlockAdded:   "NTBlockAdded",
    NTChainChanged: "NTChainChanged",
}

// String returns the NotificationType in human-readable form.
@@ -66,3 +73,17 @@ func (dag *BlockDAG) sendNotification(typ NotificationType, data interface{}) {
    }
    dag.notificationsLock.RUnlock()
}

// BlockAddedNotificationData defines data to be sent along with a BlockAdded
// notification
type BlockAddedNotificationData struct {
    Block         *util.Block
    WasUnorphaned bool
}

// ChainChangedNotificationData defines data to be sent along with a ChainChanged
// notification
type ChainChangedNotificationData struct {
    RemovedChainBlockHashes []*daghash.Hash
    AddedChainBlockHashes   []*daghash.Hash
}
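A hedged sketch of consuming these notification payloads, assuming the Subscribe/callback pattern the tests below use and a Notification value with Type/Data fields — that shape is an assumption, since only sendNotification(typ, data) and the two payload structs appear in this diff.

package dagnotify // illustrative sketch only, not part of this changeset

import (
    "fmt"

    "github.com/daglabs/btcd/blockdag"
)

// subscribeToDAGEvents wires a callback that type-switches on the
// notification payloads defined above. Pointer payloads are assumed;
// the diff defines the struct shapes but not how they are sent.
func subscribeToDAGEvents(dag *blockdag.BlockDAG) {
    dag.Subscribe(func(notification *blockdag.Notification) {
        switch data := notification.Data.(type) {
        case *blockdag.BlockAddedNotificationData:
            fmt.Printf("block %s added (was unorphaned: %t)\n",
                data.Block.Hash(), data.WasUnorphaned)
        case *blockdag.ChainChangedNotificationData:
            fmt.Printf("chain changed: %d removed, %d added\n",
                len(data.RemovedChainBlockHashes),
                len(data.AddedChainBlockHashes))
        }
    })
}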

@@ -5,6 +5,7 @@
package blockdag

import (
    "path/filepath"
    "testing"

    "github.com/daglabs/btcd/dagconfig"
@@ -12,7 +13,7 @@ import (

// TestNotifications ensures that notification callbacks are fired on events.
func TestNotifications(t *testing.T) {
    blocks, err := loadBlocks("blk_0_to_4.dat")
    blocks, err := LoadBlocks(filepath.Join("testdata/blk_0_to_4.dat"))
    if err != nil {
        t.Fatalf("Error loading file: %v\n", err)
    }
@@ -40,14 +41,18 @@ func TestNotifications(t *testing.T) {
        dag.Subscribe(callback)
    }

    isOrphan, err := dag.ProcessBlock(blocks[1], BFNone)
    isOrphan, delay, err := dag.ProcessBlock(blocks[1], BFNone)
    if err != nil {
        t.Fatalf("ProcessBlock fail on block 1: %v\n", err)
    }
    if delay != 0 {
        t.Fatalf("ProcessBlock: block 1 " +
            "is too far in the future")
    }
    if isOrphan {
        t.Fatalf("ProcessBlock incorrectly returned block " +
            "is an orphan\n")
    }
    if err != nil {
        t.Fatalf("ProcessBlock fail on block 1: %v\n", err)
    }

    if notificationCount != numSubscribers {
        t.Fatalf("Expected notification callback to be executed %d "+

@@ -1,7 +1,7 @@
package blockdag

import (
    "github.com/daglabs/btcd/dagconfig/daghash"
    "github.com/daglabs/btcd/util/daghash"
)

// phantom calculates and returns the block's blue set, selected parent and blue score.
@@ -77,7 +77,7 @@ func blueCandidates(chainStart *blockNode) blockSet {
func traverseCandidates(newBlock *blockNode, candidates blockSet, selectedParent *blockNode) []*blockNode {
    blues := []*blockNode{}
    selectedParentPast := newSet()
    queue := NewDownHeap()
    queue := newDownHeap()
    visited := newSet()

    for _, parent := range newBlock.parents {

@@ -7,7 +7,7 @@ import (
    "testing"
    "time"

    "github.com/daglabs/btcd/dagconfig/daghash"
    "github.com/daglabs/btcd/util/daghash"

    "github.com/daglabs/btcd/dagconfig"
)
@@ -20,11 +20,6 @@ type testBlockData struct {
    expectedBlues []string
}

type hashIDPair struct {
    hash *daghash.Hash
    id   string
}

// TestPhantom iterates over several DAG simulations and checks
// that the blue score, blue set and selected parent of each
// block are calculated as expected.
@@ -113,7 +108,7 @@ func TestPhantom(t *testing.T) {
    id:                     "K",
    expectedScore:          9,
    expectedSelectedParent: "H",
    expectedBlues:          []string{"I", "J", "G", "F", "H"},
    expectedBlues:          []string{"I", "G", "J", "F", "H"},
},
},
},

Some files were not shown because too many files have changed in this diff.